hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0eb17efd5225690289eb667bc09f65ce62f022e0 | 384 | py | Python | test/net/Caremore_Message/config/__init__.py | wolfbolin/Caremore | a38c24a9c0321ec442cff94110699d52466753dc | [
"Apache-2.0"
] | 1 | 2018-06-19T10:58:38.000Z | 2018-06-19T10:58:38.000Z | test/net/Caremore_Message/config/__init__.py | wolfbolin/Caremore | a38c24a9c0321ec442cff94110699d52466753dc | [
"Apache-2.0"
] | null | null | null | test/net/Caremore_Message/config/__init__.py | wolfbolin/Caremore | a38c24a9c0321ec442cff94110699d52466753dc | [
"Apache-2.0"
] | null | null | null | import os
def load_config():
    """Pick a configuration class based on the MODE environment variable.

    Returns ProductionConfig when MODE == 'PRODUCTION', otherwise
    DevelopmentConfig. If the environment-specific module is missing,
    falls back to the base Config from .default.
    """
    try:
        if os.environ.get('MODE') == 'PRODUCTION':
            from .production import ProductionConfig as selected
        else:
            from .development import DevelopmentConfig as selected
        return selected
    except ImportError:
        from .default import Config
        return Config
| 24 | 54 | 0.614583 |
cad9f70640cfcefa3333d6d7486d4d239c092a84 | 6,158 | py | Python | cuckoo/agent/agent.py | tykimdream/2021_PBL | c615db57cc433915b5cea396d696c5277b29a6f9 | [
"MIT"
] | 71 | 2016-11-13T03:26:45.000Z | 2022-02-22T08:13:04.000Z | agent/agent.pyw | xuna123/Bold-Falcon | bef7dfc3103143bd51ca82838565877097fecc49 | [
"BSD-3-Clause"
] | 1 | 2017-05-19T10:57:48.000Z | 2017-06-13T11:29:31.000Z | agent/agent.pyw | xuna123/Bold-Falcon | bef7dfc3103143bd51ca82838565877097fecc49 | [
"BSD-3-Clause"
] | 36 | 2016-12-13T11:37:56.000Z | 2021-11-11T12:20:10.000Z | # Copyright (C) 2010-2013 Claudio Guarnieri.
# Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import os
import sys
import time
import socket
import string
import random
import platform
import subprocess
import ConfigParser
from StringIO import StringIO
from zipfile import ZipFile
from SimpleXMLRPCServer import SimpleXMLRPCServer
# Address/port the XML-RPC server binds to; "0.0.0.0" listens on all
# interfaces inside the guest VM.
BIND_IP = "0.0.0.0"
BIND_PORT = 8000

# Analysis life-cycle states reported by Agent.get_status().
STATUS_INIT = 0x0001
STATUS_RUNNING = 0x0002
STATUS_COMPLETED = 0x0003
STATUS_FAILED = 0x0004
class Agent(object):
    """Cuckoo agent, it runs inside guest.

    Exposed over XML-RPC (see the __main__ block): the host uploads the
    analyzer package and the sample, writes the configuration, triggers
    execution and polls status/error through these methods.
    """

    def __init__(self):
        # Lower-cased OS name ("windows", "linux", "darwin", ...).
        self.system = platform.system().lower()
        self.analyzer_path = ""
        self.analyzer_pid = 0
        self.error_message = None
        self.current_status = STATUS_INIT
        self.analyzer_folder = ""
        self.results_folder = ""

    def _initialize(self):
        """Create a randomly named working folder for the analyzer.

        No-op when the folder already exists. On failure (unknown OS or
        mkdir error) records the reason in self.error_message and returns
        False.
        """
        if not self.analyzer_folder:
            random.seed(time.time())
            # Random lower-case folder name, 5 to 10 characters long.
            container = "".join(random.choice(string.ascii_lowercase) for x in range(random.randint(5, 10)))
            if self.system == "windows":
                system_drive = os.environ["SYSTEMDRIVE"] + os.sep
                self.analyzer_folder = os.path.join(system_drive, container)
            elif self.system == "linux" or self.system == "darwin":
                # Prefer $HOME, then $PWD, then /tmp as the parent folder.
                self.analyzer_folder = \
                    os.path.join(os.environ.get("HOME", os.environ.get("PWD", "/tmp")), container)
            else:
                self.error_message = "Unable to identify operating system"
                return False
            try:
                os.makedirs(self.analyzer_folder)
            except OSError as e:
                self.error_message = e
                return False
        return True

    def get_status(self):
        """Get current status.
        @return: status.
        """
        return self.current_status

    def get_error(self):
        """Get error message.
        @return: error message.
        """
        return str(self.error_message)

    def add_malware(self, data, name):
        """Get analysis data.
        @param data: analysis data.
        @param name: file name.
        @return: operation status.
        """
        # The XML-RPC layer delivers the payload wrapped in an object
        # exposing .data (presumably xmlrpclib.Binary) -- unwrap it here.
        data = data.data
        if self.system == "windows":
            root = os.environ["TEMP"]
        elif self.system == "linux" or self.system == "darwin":
            root = "/tmp"
        else:
            self.error_message = \
                "Unable to write malware to disk because the operating " \
                "system could not be identified."
            return False
        file_path = os.path.join(root, name)
        try:
            with open(file_path, "wb") as sample:
                sample.write(data)
        except IOError as e:
            self.error_message = \
                "Unable to write sample to disk: {0}".format(e)
            return False
        return True

    def add_config(self, options):
        """Creates analysis.conf file from current analysis options.
        @param options: current configuration options, dict format.
        @return: operation status.
        """
        if not isinstance(options, dict):
            return False
        config = ConfigParser.RawConfigParser()
        config.add_section("analysis")
        try:
            for key, value in options.items():
                # Options can be UTF encoded.
                if isinstance(value, basestring):
                    try:
                        value = value.encode("utf-8")
                    except UnicodeEncodeError:
                        pass
                config.set("analysis", key, value)
            # Written next to the extracted analyzer so analyzer.py can
            # find it in its own working directory.
            config_path = os.path.join(self.analyzer_folder, "analysis.conf")
            with open(config_path, "wb") as config_file:
                config.write(config_file)
        except Exception as e:
            self.error_message = e
            return False
        return True

    def add_analyzer(self, data):
        """Add analyzer.
        @param data: analyzer data (zip archive wrapped by the RPC layer).
        @return: operation status.
        """
        data = data.data
        if not self._initialize():
            return False
        try:
            # Buffer the archive in memory and unpack it into the
            # freshly created analyzer folder.
            zip_data = StringIO()
            zip_data.write(data)
            with ZipFile(zip_data, "r") as archive:
                archive.extractall(self.analyzer_folder)
        finally:
            zip_data.close()
        self.analyzer_path = os.path.join(self.analyzer_folder, "analyzer.py")
        return True

    def execute(self):
        """Execute analysis.
        @return: analyzer PID, or False when the analyzer is missing or
            cannot be launched.
        """
        if not self.analyzer_path or not os.path.exists(self.analyzer_path):
            return False
        try:
            # Run the analyzer with the same interpreter as the agent,
            # from inside the analyzer folder.
            proc = subprocess.Popen([sys.executable, self.analyzer_path],
                                    cwd=os.path.dirname(self.analyzer_path))
            self.analyzer_pid = proc.pid
        except OSError as e:
            self.error_message = e
            return False
        self.current_status = STATUS_RUNNING
        return self.analyzer_pid

    def complete(self, success=True, error="", results=""):
        """Complete analysis.
        @param success: success status.
        @param error: error status.
        @param results: path of the results folder reported by the analyzer.
        """
        if success:
            self.current_status = STATUS_COMPLETED
        else:
            self.current_status = STATUS_FAILED
        if error:
            self.error_message = error
        self.results_folder = results
        return True
if __name__ == "__main__":
    try:
        # Fall back to the local hostname's address when BIND_IP is empty
        # (with the "0.0.0.0" default this branch normally does not run).
        if not BIND_IP:
            BIND_IP = socket.gethostbyname(socket.gethostname())
        print("[+] Starting agent on %s:%s ..." % (BIND_IP, BIND_PORT))
        # Disable DNS lookup, by Scott D.
        def FakeGetFQDN(name=""):
            return name
        socket.getfqdn = FakeGetFQDN
        # allow_none lets methods accept/return None over XML-RPC.
        server = SimpleXMLRPCServer((BIND_IP, BIND_PORT), allow_none=True)
        server.register_instance(Agent())
        server.serve_forever()
    except KeyboardInterrupt:
        server.shutdown()
| 29.184834 | 108 | 0.572751 |
0502320d139893a2a396cc8a1026f78eafc90567 | 1,308 | py | Python | aoc/year_2021/day_01/solver.py | logan-connolly/AoC | 23f47e72abaf438cc97897616be4d6b057a01bf3 | [
"MIT"
] | 2 | 2020-12-06T10:59:52.000Z | 2021-09-29T22:14:03.000Z | aoc/year_2021/day_01/solver.py | logan-connolly/AoC | 23f47e72abaf438cc97897616be4d6b057a01bf3 | [
"MIT"
] | null | null | null | aoc/year_2021/day_01/solver.py | logan-connolly/AoC | 23f47e72abaf438cc97897616be4d6b057a01bf3 | [
"MIT"
] | 2 | 2021-09-29T22:14:18.000Z | 2022-01-18T02:20:26.000Z | """This is the Solution for Year 2021 Day 01"""
from typing import Optional
from aoc.abstracts.solver import Answers, IntLines
def compare(prev: Optional[int], curr: int) -> bool:
    """Return True when *curr* is strictly greater than a non-None *prev*.

    A missing predecessor (prev is None) never counts as an increase.
    """
    return prev is not None and curr > prev
def create_three_measurement_window(data: IntLines) -> IntLines:
    """Collapse *data* into sums over every consecutive three-element window.

    Inputs shorter than three elements yield an empty list.
    """
    return [a + b + c for a, b, c in zip(data, data[1:], data[2:])]
class Solver:
    """Day 01 solver: counts depth increases, raw and windowed."""

    def __init__(self, data: str) -> None:
        self.data = data

    def _preprocess(self) -> IntLines:
        """Parse the raw puzzle input into one integer per line."""
        return [int(entry) for entry in self.data.splitlines()]

    def _solve_part_one(self, lines: IntLines) -> int:
        """Count readings that exceed their immediate predecessor."""
        shifted = [None] + lines[:-1]
        return sum(compare(before, after) for before, after in zip(shifted, lines))

    def _solve_part_two(self, lines: IntLines) -> int:
        """Count increases between consecutive three-reading window sums."""
        windows = create_three_measurement_window(lines)
        shifted = [None] + windows[:-1]
        return sum(compare(before, after) for before, after in zip(shifted, windows))

    def solve(self) -> Answers:
        """Run both puzzle parts and bundle the results."""
        parsed = self._preprocess()
        return Answers(
            part_one=self._solve_part_one(parsed),
            part_two=self._solve_part_two(parsed),
        )
| 30.418605 | 82 | 0.658257 |
46477c0345636748bcaa40678200fb764113593b | 6,015 | py | Python | py/tests/swagger_client/models/name_transfer_tx.py | AndyHongSir/epoch | 3ac26ee08c12d5d923c88eb5db5479f3b9d9b294 | [
"ISC"
] | null | null | null | py/tests/swagger_client/models/name_transfer_tx.py | AndyHongSir/epoch | 3ac26ee08c12d5d923c88eb5db5479f3b9d9b294 | [
"ISC"
] | null | null | null | py/tests/swagger_client/models/name_transfer_tx.py | AndyHongSir/epoch | 3ac26ee08c12d5d923c88eb5db5479f3b9d9b294 | [
"ISC"
] | null | null | null | # coding: utf-8
"""
Aeternity Epoch
This is the [Aeternity](https://www.aeternity.com/) Epoch API. # noqa: E501
OpenAPI spec version: 0.7.0
Contact: apiteam@aeternity.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.encoded_hash import EncodedHash # noqa: F401,E501
class NameTransferTx(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    # NOTE: the bare string below is a statement, not an attached docstring;
    # it documents the two class-level mapping tables that follow.
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> declared swagger type (consumed by to_dict()).
    swagger_types = {
        'name_hash': 'str',
        'recipient_pubkey': 'str',
        'fee': 'int',
        'account': 'EncodedHash',
        'nonce': 'int'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'name_hash': 'name_hash',
        'recipient_pubkey': 'recipient_pubkey',
        'fee': 'fee',
        'account': 'account',
        'nonce': 'nonce'
    }

    def __init__(self, name_hash=None, recipient_pubkey=None, fee=None, account=None, nonce=None):  # noqa: E501
        """NameTransferTx - a model defined in Swagger"""  # noqa: E501

        self._name_hash = None
        self._recipient_pubkey = None
        self._fee = None
        self._account = None
        self._nonce = None
        self.discriminator = None

        # name_hash, recipient_pubkey and fee are required: their property
        # setters raise ValueError on None (including the None defaults).
        self.name_hash = name_hash
        self.recipient_pubkey = recipient_pubkey
        self.fee = fee
        if account is not None:
            self.account = account
        if nonce is not None:
            self.nonce = nonce

    @property
    def name_hash(self):
        """Gets the name_hash of this NameTransferTx.  # noqa: E501


        :return: The name_hash of this NameTransferTx.  # noqa: E501
        :rtype: str
        """
        return self._name_hash

    @name_hash.setter
    def name_hash(self, name_hash):
        """Sets the name_hash of this NameTransferTx.


        :param name_hash: The name_hash of this NameTransferTx.  # noqa: E501
        :type: str
        """
        if name_hash is None:
            raise ValueError("Invalid value for `name_hash`, must not be `None`")  # noqa: E501

        self._name_hash = name_hash

    @property
    def recipient_pubkey(self):
        """Gets the recipient_pubkey of this NameTransferTx.  # noqa: E501


        :return: The recipient_pubkey of this NameTransferTx.  # noqa: E501
        :rtype: str
        """
        return self._recipient_pubkey

    @recipient_pubkey.setter
    def recipient_pubkey(self, recipient_pubkey):
        """Sets the recipient_pubkey of this NameTransferTx.


        :param recipient_pubkey: The recipient_pubkey of this NameTransferTx.  # noqa: E501
        :type: str
        """
        if recipient_pubkey is None:
            raise ValueError("Invalid value for `recipient_pubkey`, must not be `None`")  # noqa: E501

        self._recipient_pubkey = recipient_pubkey

    @property
    def fee(self):
        """Gets the fee of this NameTransferTx.  # noqa: E501


        :return: The fee of this NameTransferTx.  # noqa: E501
        :rtype: int
        """
        return self._fee

    @fee.setter
    def fee(self, fee):
        """Sets the fee of this NameTransferTx.


        :param fee: The fee of this NameTransferTx.  # noqa: E501
        :type: int
        """
        if fee is None:
            raise ValueError("Invalid value for `fee`, must not be `None`")  # noqa: E501

        self._fee = fee

    @property
    def account(self):
        """Gets the account of this NameTransferTx.  # noqa: E501


        :return: The account of this NameTransferTx.  # noqa: E501
        :rtype: EncodedHash
        """
        return self._account

    @account.setter
    def account(self, account):
        """Sets the account of this NameTransferTx.


        :param account: The account of this NameTransferTx.  # noqa: E501
        :type: EncodedHash
        """

        self._account = account

    @property
    def nonce(self):
        """Gets the nonce of this NameTransferTx.  # noqa: E501


        :return: The nonce of this NameTransferTx.  # noqa: E501
        :rtype: int
        """
        return self._nonce

    @nonce.setter
    def nonce(self, nonce):
        """Sets the nonce of this NameTransferTx.


        :param nonce: The nonce of this NameTransferTx.  # noqa: E501
        :type: int
        """

        self._nonce = nonce

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts; any value
        # exposing to_dict() is treated as another generated model.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, NameTransferTx):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 27.094595 | 112 | 0.576725 |
2f0b3ef5e5dc39353b2ec49827cc42e67e6111c7 | 2,356 | py | Python | testbyxcj/mix/trashbin/buffer_manager.py | AlsikeE/Ez | 2f84ac1896a5b6d8f467c14d3618274bdcfd2cad | [
"Apache-2.0"
] | null | null | null | testbyxcj/mix/trashbin/buffer_manager.py | AlsikeE/Ez | 2f84ac1896a5b6d8f467c14d3618274bdcfd2cad | [
"Apache-2.0"
] | null | null | null | testbyxcj/mix/trashbin/buffer_manager.py | AlsikeE/Ez | 2f84ac1896a5b6d8f467c14d3618274bdcfd2cad | [
"Apache-2.0"
] | 1 | 2021-05-08T02:23:00.000Z | 2021-05-08T02:23:00.000Z | import redis
import socket
import logging
import threading
import multiprocessing
import time
import cPickle as pickle
import logger as logger
from ryu.lib.packet import *
import consts
#Process
class BufferManager(multiprocessing.Process):
    """Daemon process that buffers packets in Redis lists.

    Receives pickled commands over a pipe/connection (``conn``): BUF_PUSH
    appends a packet record to a per-flow Redis list, BUF_POP drains that
    list back over the same connection.
    """
    def __init__(self,name,conn):
    # class BufferManager(threading.Thread):
    # def __init__(self,name):
        super(BufferManager,self).__init__()
        # Duplex connection to the parent process (commands in, packets out).
        self.conn = conn
        self.daemon = True
        # threading.Thread.__init__(self)
        # Redis on localhost:6379 acts as the packet buffer store.
        self.pool = redis.ConnectionPool(host='localhost',port=6379)
        self.rds = redis.Redis(connection_pool=self.pool)
        self.name = name
        self.pkg_to_save = []
        # Project logger module (not stdlib logging) -- presumably sets up
        # file logging at ./buf.log; verify against logger.py.
        logger.init('./buf.log',logging.INFO)
        self.logger=logger.getLogger('bm',logging.INFO)
    def run(self):
        """Main loop: poll the connection and dispatch PUSH/POP commands.

        NOTE(review): the loop busy-waits (the sleep is commented out) and
        the broad ``except Exception: pass`` swallows every recv() error,
        including a closed pipe -- confirm this is intentional.
        """
        if(self.conn):
            print("i have a conn")
        while(True):
            # print("but in the list" + str(len(self.pkg_to_save)))
            cmd_to_me = None
            try:
                cmd_to_me = self.conn.recv()
            except Exception as err:
                # print("Nothing sent to me, i'm boring")
                # time.sleep(0.01)
                pass
            if(cmd_to_me):
                # print("i can read!!!!")
                msg_type,key,dpid,pkg,in_port = self.read_and_make_key(cmd_to_me)
                if(msg_type == consts.BUF_PUSH):
                    value = self.make_value(dpid,pkg,in_port)
                    self.save_to_buffer(key,value)
                elif(msg_type == consts.BUF_POP):
                    self.get_from_buffer(key)
    def save_to_buffer(self,key,value):
        # Append one pickled packet record to the tail of the flow's list.
        self.rds.rpush(key,value)
    def get_from_buffer(self,key):
        # Drain the flow's list head-first, forwarding each entry to the
        # parent over the connection.
        msg = self.rds.lpop(key)
        while(msg):
            self.conn.send(msg)
            msg = self.rds.lpop(key)
    def read_and_make_key(self,cmd_to_me):
        """Unpickle a command and derive the Redis key (src+dst+dst_port).

        NOTE(review): pickle.loads on data from the connection executes
        arbitrary code if the sender is ever untrusted.
        """
        cmd_json = pickle.loads(cmd_to_me)
        msg_type = cmd_json["msg_type"]
        key = cmd_json["src"]+cmd_json["dst"]+str(cmd_json["dst_port"])
        dpid = cmd_json["dpid"]
        pkg = cmd_json["pkg"]
        in_port = cmd_json["in_port"]
        return msg_type,key,dpid,pkg,in_port
    def make_value(self,dpid,pkg,in_port):
        # Bundle datapath id, raw packet and ingress port into one pickled blob.
        v = {
            "dpid":dpid,
            "pkg":pkg,
            "in_port":in_port
        }
        return pickle.dumps(v)
| 29.822785 | 81 | 0.575127 |
bce00330cce9d65aaf37fe2a90c7642692a99dcd | 10,850 | py | Python | conans/test/unittests/client/build/cpp_std_flags_test.py | noverby/conan | 5e560ce806be28416e80544e767b1bca3f48d11e | [
"MIT"
] | 2 | 2020-02-12T09:56:25.000Z | 2022-03-03T06:41:35.000Z | conans/test/unittests/client/build/cpp_std_flags_test.py | noverby/conan | 5e560ce806be28416e80544e767b1bca3f48d11e | [
"MIT"
] | 6 | 2016-03-08T22:06:45.000Z | 2020-06-02T15:22:19.000Z | conans/test/unittests/client/build/cpp_std_flags_test.py | noverby/conan | 5e560ce806be28416e80544e767b1bca3f48d11e | [
"MIT"
] | 2 | 2018-09-05T11:58:44.000Z | 2018-09-05T12:14:11.000Z | import unittest
from conans.client.build.cppstd_flags import cppstd_default, cppstd_flag
class CompilerFlagsTest(unittest.TestCase):
    """Pins the compiler flag / default cppstd tables for each compiler.

    Each flag test checks cppstd_flag(compiler, version, cppstd); None means
    the standard is not supported by that compiler version. Each defaults
    test checks cppstd_default(compiler, version).
    """

    def test_gcc_cppstd_flags(self):
        """gcc: c++0x/1y/1z spellings before final flags land per version."""
        self.assertEqual(cppstd_flag("gcc", "4.2", "98"), "-std=c++98")
        self.assertEqual(cppstd_flag("gcc", "4.2", "gnu98"), "-std=gnu++98")
        self.assertEqual(cppstd_flag("gcc", "4.2", "11"), None)
        self.assertEqual(cppstd_flag("gcc", "4.2", "14"), None)

        self.assertEqual(cppstd_flag("gcc", "4.3", "98"), "-std=c++98")
        self.assertEqual(cppstd_flag("gcc", "4.3", "gnu98"), "-std=gnu++98")
        self.assertEqual(cppstd_flag("gcc", "4.3", "11"), "-std=c++0x")
        self.assertEqual(cppstd_flag("gcc", "4.3", "14"), None)

        self.assertEqual(cppstd_flag("gcc", "4.6", "11"), '-std=c++0x')
        self.assertEqual(cppstd_flag("gcc", "4.6", "14"), None)

        self.assertEqual(cppstd_flag("gcc", "4.7", "11"), '-std=c++11')
        self.assertEqual(cppstd_flag("gcc", "4.7", "14"), None)

        self.assertEqual(cppstd_flag("gcc", "4.8", "11"), '-std=c++11')
        self.assertEqual(cppstd_flag("gcc", "4.8", "14"), '-std=c++1y')
        self.assertEqual(cppstd_flag("gcc", "4.8", "17"), None)

        self.assertEqual(cppstd_flag("gcc", "4.9", "11"), '-std=c++11')
        self.assertEqual(cppstd_flag("gcc", "4.9", "14"), '-std=c++14')
        self.assertEqual(cppstd_flag("gcc", "4.9", "17"), None)

        self.assertEqual(cppstd_flag("gcc", "5", "11"), '-std=c++11')
        self.assertEqual(cppstd_flag("gcc", "5", "14"), '-std=c++14')
        self.assertEqual(cppstd_flag("gcc", "5", "gnu14"), '-std=gnu++14')
        self.assertEqual(cppstd_flag("gcc", "5", "17"), None)

        self.assertEqual(cppstd_flag("gcc", "5.1", "11"), '-std=c++11')
        self.assertEqual(cppstd_flag("gcc", "5.1", "14"), '-std=c++14')
        self.assertEqual(cppstd_flag("gcc", "5.1", "17"), '-std=c++1z')

        self.assertEqual(cppstd_flag("gcc", "7", "11"), '-std=c++11')
        self.assertEqual(cppstd_flag("gcc", "7", "14"), '-std=c++14')
        self.assertEqual(cppstd_flag("gcc", "7", "17"), '-std=c++17')

        self.assertEqual(cppstd_flag("gcc", "8", "11"), '-std=c++11')
        self.assertEqual(cppstd_flag("gcc", "8", "14"), '-std=c++14')
        self.assertEqual(cppstd_flag("gcc", "8", "17"), '-std=c++17')
        self.assertEqual(cppstd_flag("gcc", "8", "20"), '-std=c++2a')

    def test_gcc_cppstd_defaults(self):
        """gcc defaults to gnu98 until gcc 6 switched to gnu14."""
        self.assertEqual(cppstd_default("gcc", "4"), "gnu98")
        self.assertEqual(cppstd_default("gcc", "5"), "gnu98")
        self.assertEqual(cppstd_default("gcc", "6"), "gnu14")
        self.assertEqual(cppstd_default("gcc", "6.1"), "gnu14")
        self.assertEqual(cppstd_default("gcc", "7.3"), "gnu14")
        self.assertEqual(cppstd_default("gcc", "8.1"), "gnu14")

    def test_clang_cppstd_flags(self):
        """clang: flag support per version, draft spellings included."""
        self.assertEqual(cppstd_flag("clang", "2.0", "98"), None)
        self.assertEqual(cppstd_flag("clang", "2.0", "gnu98"), None)
        self.assertEqual(cppstd_flag("clang", "2.0", "11"), None)
        self.assertEqual(cppstd_flag("clang", "2.0", "14"), None)

        self.assertEqual(cppstd_flag("clang", "2.1", "98"), "-std=c++98")
        self.assertEqual(cppstd_flag("clang", "2.1", "gnu98"), "-std=gnu++98")
        self.assertEqual(cppstd_flag("clang", "2.1", "11"), "-std=c++0x")
        self.assertEqual(cppstd_flag("clang", "2.1", "14"), None)

        self.assertEqual(cppstd_flag("clang", "3.0", "11"), '-std=c++0x')
        self.assertEqual(cppstd_flag("clang", "3.0", "14"), None)

        self.assertEqual(cppstd_flag("clang", "3.1", "11"), '-std=c++11')
        self.assertEqual(cppstd_flag("clang", "3.1", "14"), None)

        self.assertEqual(cppstd_flag("clang", "3.4", "11"), '-std=c++11')
        self.assertEqual(cppstd_flag("clang", "3.4", "14"), '-std=c++1y')
        self.assertEqual(cppstd_flag("clang", "3.4", "17"), None)

        self.assertEqual(cppstd_flag("clang", "3.5", "11"), '-std=c++11')
        self.assertEqual(cppstd_flag("clang", "3.5", "14"), '-std=c++14')
        self.assertEqual(cppstd_flag("clang", "3.5", "17"), '-std=c++1z')

        self.assertEqual(cppstd_flag("clang", "5", "11"), '-std=c++11')
        self.assertEqual(cppstd_flag("clang", "5", "14"), '-std=c++14')
        self.assertEqual(cppstd_flag("clang", "5", "gnu14"), '-std=gnu++14')
        self.assertEqual(cppstd_flag("clang", "5", "17"), '-std=c++17')

        self.assertEqual(cppstd_flag("clang", "5.1", "11"), '-std=c++11')
        self.assertEqual(cppstd_flag("clang", "5.1", "14"), '-std=c++14')
        self.assertEqual(cppstd_flag("clang", "5.1", "17"), '-std=c++17')

        self.assertEqual(cppstd_flag("clang", "6", "11"), '-std=c++11')
        self.assertEqual(cppstd_flag("clang", "6", "14"), '-std=c++14')
        self.assertEqual(cppstd_flag("clang", "6", "17"), '-std=c++17')
        self.assertEqual(cppstd_flag("clang", "6", "20"), '-std=c++2a')

        self.assertEqual(cppstd_flag("clang", "7", "11"), '-std=c++11')
        self.assertEqual(cppstd_flag("clang", "7", "14"), '-std=c++14')
        self.assertEqual(cppstd_flag("clang", "7", "17"), '-std=c++17')
        self.assertEqual(cppstd_flag("clang", "7", "20"), '-std=c++2a')

        self.assertEqual(cppstd_flag("clang", "8", "11"), '-std=c++11')
        self.assertEqual(cppstd_flag("clang", "8", "14"), '-std=c++14')
        self.assertEqual(cppstd_flag("clang", "8", "17"), '-std=c++17')
        self.assertEqual(cppstd_flag("clang", "8", "20"), '-std=c++2a')

    def test_clang_cppstd_defaults(self):
        """clang defaults to gnu98 until clang 6 switched to gnu14."""
        self.assertEqual(cppstd_default("clang", "2"), "gnu98")
        self.assertEqual(cppstd_default("clang", "2.1"), "gnu98")
        self.assertEqual(cppstd_default("clang", "3.0"), "gnu98")
        self.assertEqual(cppstd_default("clang", "3.1"), "gnu98")
        self.assertEqual(cppstd_default("clang", "3.4"), "gnu98")
        self.assertEqual(cppstd_default("clang", "3.5"), "gnu98")
        self.assertEqual(cppstd_default("clang", "5"), "gnu98")
        self.assertEqual(cppstd_default("clang", "5.1"), "gnu98")
        self.assertEqual(cppstd_default("clang", "6"), "gnu14")
        self.assertEqual(cppstd_default("clang", "7"), "gnu14")

    def test_apple_clang_cppstd_flags(self):
        """apple-clang: flag support per version (c++17 only from 9.1)."""
        self.assertEqual(cppstd_flag("apple-clang", "3.9", "98"), None)
        self.assertEqual(cppstd_flag("apple-clang", "3.9", "gnu98"), None)
        self.assertEqual(cppstd_flag("apple-clang", "3.9", "11"), None)
        self.assertEqual(cppstd_flag("apple-clang", "3.9", "14"), None)

        self.assertEqual(cppstd_flag("apple-clang", "4.0", "98"), "-std=c++98")
        self.assertEqual(cppstd_flag("apple-clang", "4.0", "gnu98"), "-std=gnu++98")
        self.assertEqual(cppstd_flag("apple-clang", "4.0", "11"), "-std=c++11")
        self.assertEqual(cppstd_flag("apple-clang", "4.0", "14"), None)

        self.assertEqual(cppstd_flag("apple-clang", "5.0", "98"), "-std=c++98")
        self.assertEqual(cppstd_flag("apple-clang", "5.0", "gnu98"), "-std=gnu++98")
        self.assertEqual(cppstd_flag("apple-clang", "5.0", "11"), "-std=c++11")
        self.assertEqual(cppstd_flag("apple-clang", "5.0", "14"), None)

        self.assertEqual(cppstd_flag("apple-clang", "5.1", "98"), "-std=c++98")
        self.assertEqual(cppstd_flag("apple-clang", "5.1", "gnu98"), "-std=gnu++98")
        self.assertEqual(cppstd_flag("apple-clang", "5.1", "11"), "-std=c++11")
        self.assertEqual(cppstd_flag("apple-clang", "5.1", "14"), "-std=c++1y")

        self.assertEqual(cppstd_flag("apple-clang", "6.1", "11"), '-std=c++11')
        self.assertEqual(cppstd_flag("apple-clang", "6.1", "14"), '-std=c++14')
        self.assertEqual(cppstd_flag("apple-clang", "6.1", "17"), "-std=c++1z")

        self.assertEqual(cppstd_flag("apple-clang", "7", "11"), '-std=c++11')
        self.assertEqual(cppstd_flag("apple-clang", "7", "14"), '-std=c++14')
        self.assertEqual(cppstd_flag("apple-clang", "7", "17"), "-std=c++1z")

        self.assertEqual(cppstd_flag("apple-clang", "8", "11"), '-std=c++11')
        self.assertEqual(cppstd_flag("apple-clang", "8", "14"), '-std=c++14')
        self.assertEqual(cppstd_flag("apple-clang", "8", "17"), "-std=c++1z")

        self.assertEqual(cppstd_flag("apple-clang", "9", "11"), '-std=c++11')
        self.assertEqual(cppstd_flag("apple-clang", "9", "14"), '-std=c++14')
        self.assertEqual(cppstd_flag("apple-clang", "9", "17"), "-std=c++1z")

        self.assertEqual(cppstd_flag("apple-clang", "9.1", "11"), '-std=c++11')
        self.assertEqual(cppstd_flag("apple-clang", "9.1", "14"), '-std=c++14')
        self.assertEqual(cppstd_flag("apple-clang", "9.1", "17"), "-std=c++17")

        self.assertEqual(cppstd_flag("apple-clang", "10.0", "17"), "-std=c++17")
        self.assertEqual(cppstd_flag("apple-clang", "11.0", "17"), "-std=c++17")

    def test_apple_clang_cppstd_defaults(self):
        """apple-clang default stays gnu98 for every tested version."""
        self.assertEqual(cppstd_default("apple-clang", "2"), "gnu98")
        self.assertEqual(cppstd_default("apple-clang", "3"), "gnu98")
        self.assertEqual(cppstd_default("apple-clang", "4"), "gnu98")
        self.assertEqual(cppstd_default("apple-clang", "5"), "gnu98")
        self.assertEqual(cppstd_default("apple-clang", "6"), "gnu98")
        self.assertEqual(cppstd_default("apple-clang", "7"), "gnu98")
        self.assertEqual(cppstd_default("apple-clang", "8"), "gnu98")
        self.assertEqual(cppstd_default("apple-clang", "9"), "gnu98")
        self.assertEqual(cppstd_default("apple-clang", "10"), "gnu98")
        self.assertEqual(cppstd_default("apple-clang", "11"), "gnu98")

    def test_visual_cppstd_flags(self):
        """Visual Studio: /std flags (c++latest covers not-yet-final stds)."""
        self.assertEqual(cppstd_flag("Visual Studio", "12", "11"), None)
        self.assertEqual(cppstd_flag("Visual Studio", "12", "14"), None)
        self.assertEqual(cppstd_flag("Visual Studio", "12", "17"), None)

        self.assertEqual(cppstd_flag("Visual Studio", "14", "11"), None)
        self.assertEqual(cppstd_flag("Visual Studio", "14", "14"), '/std:c++14')
        self.assertEqual(cppstd_flag("Visual Studio", "14", "17"), '/std:c++latest')

        self.assertEqual(cppstd_flag("Visual Studio", "17", "11"), None)
        self.assertEqual(cppstd_flag("Visual Studio", "17", "14"), '/std:c++14')
        self.assertEqual(cppstd_flag("Visual Studio", "17", "17"), '/std:c++17')
        self.assertEqual(cppstd_flag("Visual Studio", "17", "20"), '/std:c++latest')

    def test_visual_cppstd_defaults(self):
        """Visual Studio has no default before VS 14, then defaults to 14."""
        self.assertEqual(cppstd_default("Visual Studio", "11"), None)
        self.assertEqual(cppstd_default("Visual Studio", "12"), None)
        self.assertEqual(cppstd_default("Visual Studio", "13"), None)
        self.assertEqual(cppstd_default("Visual Studio", "14"), "14")
        self.assertEqual(cppstd_default("Visual Studio", "15"), "14")
| 55.357143 | 84 | 0.59235 |
b8fa4a5b8bd209aefecd624526d83a9f6876c270 | 2,176 | py | Python | visualize.py | AniketBajpai/DeepVideo | ef83f632eb920bf5606831137cc76a95b101a958 | [
"MIT"
] | 9 | 2017-09-07T09:55:29.000Z | 2020-01-14T11:25:49.000Z | visualize.py | AniketBajpai/DeepVideo | ef83f632eb920bf5606831137cc76a95b101a958 | [
"MIT"
] | 1 | 2017-10-18T10:24:03.000Z | 2017-10-18T10:24:03.000Z | visualize.py | AniketBajpai/DeepVideo | ef83f632eb920bf5606831137cc76a95b101a958 | [
"MIT"
] | null | null | null | from __future__ import print_function
import h5py
import numpy as np
import imageio
import glob
import os
import argparse
# Command-line options: where to look for HDF5 dumps, how many of the most
# recent files to render, and the crop geometry (height/width/channels) plus
# frames-per-animation.
parser = argparse.ArgumentParser()
parser.add_argument('--train_dir', type=str, default=None)
parser.add_argument('--output_prefix', type=str, default='output')
parser.add_argument('--n', type=int, default=5)
parser.add_argument('--num_frames', type=int, default=10)
parser.add_argument('--h', type=int, default=64)
parser.add_argument('--w', type=int, default=64)
parser.add_argument('--c', type=int, default=3)
args = parser.parse_args()
# train_dir is mandatory despite the None default.
if not args.train_dir:
    raise ValueError("Please specify train_dir")
# All rendered animations land under ./outputs.
if not os.path.exists('./outputs'):
    os.mkdir('./outputs')
def visualize(name):
    """Render the newest HDF5 dumps matching ``<name>_*.hy`` as animations.

    Picks the ``args.n`` most recently modified matching files under
    ``args.train_dir``, center-crops the first sample of each batch to
    ``args.h`` x ``args.w`` (``args.c`` channels, ``args.num_frames``
    frames), and writes each sequence to
    ``./outputs/<output_prefix>_<name>_<counter>.png``.
    """
    pattern = os.path.join(args.train_dir, '{}_*.hy'.format(name))
    newest_files = sorted(glob.glob(pattern), key=os.path.getmtime)[-args.n:]
    for counter, path in enumerate(newest_files):
        print (path)
        # Context manager guarantees the HDF5 handle is closed (the
        # previous version leaked one open handle per file).
        with h5py.File(path, 'r') as f:
            # list(...) keeps this working on Python 3, where .keys()
            # returns a non-indexable view; the file is expected to hold a
            # single dataset of shape (batch, frames, H, W, C).
            generated_frames = f[list(f.keys())[0]]
            _, _, h, w, c = generated_frames.shape
            # Floor division: plain "/" yields floats on Python 3, which
            # are invalid as slice indices.
            h_low = (h - args.h) // 2
            h_high = (h + args.h) // 2
            w_low = (w - args.w) // 2
            w_high = (w + args.w) // 2
            # Take only the first set of frames from the batch.
            II = []
            if args.c == 1:
                for j in range(args.num_frames):
                    I = np.reshape(generated_frames[0, j, h_low:h_high, w_low:w_high, 0], (args.h, args.w))
                    # Frames stored in (-1, 1) are rescaled to 0-255.
                    if (I < 1.0).all() and (I > -1.0).all():
                        print ('Image in [-1, 1]')
                        I = ((I + 1.0) / 2 * 255).astype(np.int32)
                    II.append(I)
            else:
                for j in range(args.num_frames):
                    I = np.reshape(generated_frames[0, j, h_low:h_high, w_low:w_high, 0:args.c], (args.h, args.w, args.c))
                    II.append(I)
        output_img_path = './outputs/{}_{}_{}.png'.format(args.output_prefix, name, str(counter))
        print ('Writing image:', output_img_path)
        print (len(II), II[0].shape)
        imageio.mimwrite(output_img_path, II)
# Render animations for both prediction targets dumped during training.
visualize('generated_current')
visualize('generated_future')
| 34.539683 | 140 | 0.598346 |
849307c3cb178e32a6b25b088fa08c4e81fec08f | 12,026 | py | Python | ravens/models/transport_goal.py | EricCousineau-TRI/ravens | d7f9db3214ed730c6d16e5c248684688555c6d23 | [
"Apache-2.0"
] | null | null | null | ravens/models/transport_goal.py | EricCousineau-TRI/ravens | d7f9db3214ed730c6d16e5c248684688555c6d23 | [
"Apache-2.0"
] | null | null | null | ravens/models/transport_goal.py | EricCousineau-TRI/ravens | d7f9db3214ed730c6d16e5c248684688555c6d23 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2020 The Ravens Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Goal-conditioned transport Module."""
import cv2
import matplotlib.pyplot as plt
import numpy as np
from ravens.models.resnet import ResNet43_8s
from ravens.utils import utils
import tensorflow as tf
from tensorflow_addons import image as tfa_image
class TransportGoal:
"""Goal-conditioned transport Module."""
  def __init__(self, input_shape, num_rotations, crop_size, preprocess):  # pylint: disable=g-doc-args
    """Inits transport module with separate goal FCN.

    Assumes the presence of a goal image, that cropping is done after the
    query, that per-pixel loss is not used, and SE(2) grasping.
    """
    self.num_rotations = num_rotations
    self.crop_size = crop_size  # crop size must be N*16 (e.g. 96)
    self.preprocess = preprocess
    self.lr = 1e-5
    # Half the crop on each side; images are zero-padded by this amount on
    # the two spatial axes (third padding row stays 0 for channels).
    self.pad_size = int(self.crop_size / 2)
    self.padding = np.zeros((3, 2), dtype=int)
    self.padding[:2, :] = self.pad_size
    # Grow the network's input spatial dims to account for the padding.
    input_shape = np.array(input_shape)
    input_shape[0:2] += self.pad_size * 2
    input_shape = tuple(input_shape)
    # Output dimension (i.e., number of channels) of 3.
    self.odim = output_dim = 3
    # 3 fully convolutional ResNets. Third one is for the goal.
    in0, out0 = ResNet43_8s(input_shape, output_dim, prefix='s0_')
    in1, out1 = ResNet43_8s(input_shape, output_dim, prefix='s1_')
    in2, out2 = ResNet43_8s(input_shape, output_dim, prefix='s2_')
    self.model = tf.keras.Model(
        inputs=[in0, in1, in2], outputs=[out0, out1, out2])
    self.optim = tf.keras.optimizers.Adam(learning_rate=self.lr)
    self.metric = tf.keras.metrics.Mean(name='transport_loss')
def forward(self, in_img, goal_img, p, apply_softmax=True): # pylint: disable=g-doc-args
"""Forward pass of goal-conditioned Transporters.
Runs input through all three networks, to get output of the same
shape, except the last channel is 3 (output_dim). Then, the output
for one stream has the convolutional kernels for another. Call
tf.nn.convolution, and the operation is be differentiable, so that
gradients apply to all the FCNs.
Cropping after passing the input image to the query network is
easier, because otherwise we need to do a forward pass, then call
tf.multiply, then do a second forward pass after that.
Returns:
ouput tensor
"""
assert in_img.shape == goal_img.shape, f'{in_img.shape}, {goal_img.shape}'
# input image --> TF tensor, shape (384,224,6) --> (1,384,224,6)
input_unproc = np.pad(in_img, self.padding, mode='constant')
input_data = self.preprocess(input_unproc.copy())
input_shape = (1,) + input_data.shape
input_data = input_data.reshape(input_shape)
in_tensor = tf.convert_to_tensor(input_data, dtype=tf.float32)
# goal image --> TF tensor, shape (384,224,6) --> (1,384,224,6)
goal_unproc = np.pad(goal_img, self.padding, mode='constant')
goal_data = self.preprocess(goal_unproc.copy())
goal_shape = (1,) + goal_data.shape
goal_data = goal_data.reshape(goal_shape)
goal_tensor = tf.convert_to_tensor(goal_data, dtype=tf.float32)
# Get SE2 rotation vectors for cropping.
pivot = np.array([p[1], p[0]]) + self.pad_size
rvecs = self.get_se2(self.num_rotations, pivot)
# Forward pass through three separate FCNs. All logits: (1,384,224,3).
in_logits, kernel_nocrop_logits, goal_logits = self.model(
[in_tensor, in_tensor, goal_tensor])
# Use features from goal logits and combine with input and kernel.
goal_x_in_logits = tf.multiply(goal_logits, in_logits)
goal_x_kernel_logits = tf.multiply(goal_logits, kernel_nocrop_logits)
# Crop the kernel_logits about the picking point and get rotations.
crop = tf.identity(goal_x_kernel_logits) # (1,384,224,3)
crop = tf.repeat(crop, repeats=self.num_rotations, axis=0) # (24,384,224,3)
crop = tfa_image.transform(crop, rvecs, interpolation='NEAREST')
kernel = crop[:, p[0]:(p[0] + self.crop_size),
p[1]:(p[1] + self.crop_size), :]
assert kernel.shape == (self.num_rotations, self.crop_size, self.crop_size,
self.odim)
# Cross-convolve `in_x_goal_logits`. Padding kernel: (24,64,64,3) -->
# (65,65,3,24).
kernel_paddings = tf.constant([[0, 0], [0, 1], [0, 1], [0, 0]])
kernel = tf.pad(kernel, kernel_paddings, mode='CONSTANT')
kernel = tf.transpose(kernel, [1, 2, 3, 0])
output = tf.nn.convolution(goal_x_in_logits, kernel, data_format='NHWC')
output = (1 / (self.crop_size**2)) * output
if apply_softmax:
output_shape = output.shape
output = tf.reshape(output, (1, np.prod(output.shape)))
output = tf.nn.softmax(output)
output = np.float32(output).reshape(output_shape[1:])
# Daniel: visualize crops and kernels, for Transporter-Goal figure.
# self.visualize_images(p, in_img, input_data, crop)
# self.visualize_transport(p, in_img, input_data, crop, kernel)
# self.visualize_logits(in_logits, name='input')
# self.visualize_logits(goal_logits, name='goal')
# self.visualize_logits(kernel_nocrop_logits, name='kernel')
# self.visualize_logits(goal_x_in_logits, name='goal_x_in')
# self.visualize_logits(goal_x_kernel_logits, name='goal_x_kernel')
return output
def train(self, in_img, goal_img, p, q, theta):
"""Transport Goal training.
Both `in_img` and `goal_img` have the color and depth. Much is
similar to the attention model: (a) forward pass, (b) get angle
discretizations, (c) make the label consider rotations in the last
axis, but only provide the label to one single (pixel,rotation).
Args:
in_img:
goal_img:
p:
q:
theta:
Returns:
Transport loss as a numpy float32.
"""
self.metric.reset_states()
with tf.GradientTape() as tape:
output = self.forward(in_img, goal_img, p, apply_softmax=False)
# Compute label
itheta = theta / (2 * np.pi / self.num_rotations)
itheta = np.int32(np.round(itheta)) % self.num_rotations
label_size = in_img.shape[:2] + (self.num_rotations,)
label = np.zeros(label_size)
label[q[0], q[1], itheta] = 1
label = label.reshape(1, np.prod(label.shape))
label = tf.convert_to_tensor(label, dtype=tf.float32)
# Compute loss after re-shaping the output.
output = tf.reshape(output, (1, np.prod(output.shape)))
loss = tf.nn.softmax_cross_entropy_with_logits(label, output)
loss = tf.reduce_mean(loss)
grad = tape.gradient(loss, self.model.trainable_variables)
self.optim.apply_gradients(zip(grad, self.model.trainable_variables))
self.metric(loss)
return np.float32(loss)
def get_se2(self, num_rotations, pivot):
"""Get SE2 rotations discretized into num_rotations angles counter-clockwise."""
rvecs = []
for i in range(num_rotations):
theta = i * 2 * np.pi / num_rotations
rmat = utils.get_image_transform(theta, (0, 0), pivot)
rvec = rmat.reshape(-1)[:-1]
rvecs.append(rvec)
return np.array(rvecs, dtype=np.float32)
def save(self, fname):
self.model.save(fname)
def load(self, fname):
self.model.load_weights(fname)
#-------------------------------------------------------------------------
# Visualization.
#-------------------------------------------------------------------------
def visualize_images(self, p, in_img, input_data, crop):
"""Visualize images."""
def get_itheta(theta):
itheta = theta / (2 * np.pi / self.num_rotations)
return np.int32(np.round(itheta)) % self.num_rotations
plt.subplot(1, 3, 1)
plt.title('Perturbed', fontsize=15)
plt.imshow(np.array(in_img[:, :, :3]).astype(np.uint8))
plt.subplot(1, 3, 2)
plt.title('Process/Pad', fontsize=15)
plt.imshow(input_data[0, :, :, :3])
plt.subplot(1, 3, 3)
# Let's stack two crops together.
theta1 = 0.0
theta2 = 90.0
itheta1 = get_itheta(theta1)
itheta2 = get_itheta(theta2)
crop1 = crop[itheta1, :, :, :3]
crop2 = crop[itheta2, :, :, :3]
barrier = np.ones_like(crop1)
barrier = barrier[:4, :, :] # white barrier of 4 pixels
stacked = np.concatenate((crop1, barrier, crop2), axis=0)
plt.imshow(stacked)
plt.title(f'{theta1}, {theta2}', fontsize=15)
plt.suptitle(f'pick: {p}', fontsize=15)
plt.tight_layout()
plt.show()
# plt.savefig('viz.png')
def visualize_transport(self, p, in_img, input_data, crop, kernel): # pylint: disable=g-doc-args
"""Like the attention map visualize the transport data from a trained model.
https://docs.opencv.org/master/d3/d50/group__imgproc__colormap.html
In my normal usage, the attention is already softmax-ed but just be
aware in case it's not. Also be aware of RGB vs BGR mode. We should
ensure we're in BGR mode before saving. Also with RAINBOW mode,
red=hottest (highest attention values), green=medium, blue=lowest.
See also:
https://matplotlib.org/3.3.0/api/_as_gen/matplotlib.pyplot.subplot.html
crop.shape: (24,64,64,6)
kernel.shape = (65,65,3,24)
"""
del p
del in_img
del input_data
def colorize(img):
# I don't think we have to convert to BGR here...
img = img - np.min(img)
img = 255 * img / np.max(img)
img = cv2.applyColorMap(np.uint8(img), cv2.COLORMAP_RAINBOW)
# img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
return img
kernel = (tf.transpose(kernel, [3, 0, 1, 2])).numpy()
# Top two rows: crops from processed RGBD. Bottom two: output from FCN.
nrows = 4
ncols = 12
assert self.num_rotations == nrows * (ncols / 2)
idx = 0
_, _ = plt.subplots(nrows, ncols, figsize=(12, 6))
for _ in range(nrows):
for _ in range(ncols):
plt.subplot(nrows, ncols, idx + 1)
plt.axis('off') # Ah, you need to put this here ...
if idx < self.num_rotations:
plt.imshow(crop[idx, :, :, :3])
else:
# Offset because idx goes from 0 to (rotations * 2) - 1.
idx_ = idx - self.num_rotations
processed = colorize(img=kernel[idx_, :, :, :])
plt.imshow(processed)
idx += 1
plt.tight_layout()
plt.show()
def visualize_logits(self, logits, name): # pylint: disable=g-doc-args
"""Given logits (BEFORE tf.nn.convolution), get a heatmap.
Here we apply a softmax to make it more human-readable. However, the
tf.nn.convolution with the learned kernels happens without a softmax
on the logits. [Update: wait, then why should we have a softmax,
then? I forgot why we did this ...]
"""
original_shape = logits.shape
logits = tf.reshape(logits, (1, np.prod(original_shape)))
# logits = tf.nn.softmax(logits) # Is this necessary?
vis_transport = np.float32(logits).reshape(original_shape)
vis_transport = vis_transport[0]
vis_transport = vis_transport - np.min(vis_transport)
vis_transport = 255 * vis_transport / np.max(vis_transport)
vis_transport = cv2.applyColorMap(
np.uint8(vis_transport), cv2.COLORMAP_RAINBOW)
# Only if we're saving with cv2.imwrite()
vis_transport = cv2.cvtColor(vis_transport, cv2.COLOR_RGB2BGR)
cv2.imwrite(f'tmp/logits_{name}.png', vis_transport)
plt.subplot(1, 1, 1)
plt.title(f'Logits: {name}', fontsize=15)
plt.imshow(vis_transport)
plt.tight_layout()
plt.show()
| 38.793548 | 102 | 0.664228 |
3301ec94e7c24923e40545098b164d835865b5c7 | 1,731 | py | Python | Combine/combine_miplist.py | risqueslab/PolyG-MIP | cfa2a6ab302bc6ba027b8661f9016ab8882dc46c | [
"BSL-1.0"
] | null | null | null | Combine/combine_miplist.py | risqueslab/PolyG-MIP | cfa2a6ab302bc6ba027b8661f9016ab8882dc46c | [
"BSL-1.0"
] | null | null | null | Combine/combine_miplist.py | risqueslab/PolyG-MIP | cfa2a6ab302bc6ba027b8661f9016ab8882dc46c | [
"BSL-1.0"
import sys
import os.path

# Combine several per-sample MIP list files (given on the command line)
# into a single tab-separated table, one row per MIP, with three columns
# (Reads / Unused Tags / Used Tags) per input file.

# mip -> list of (reads, unused_tags, used_tags), one tuple per input file.
combined_mip_data = {}

# Use a context manager so the output file is always flushed and closed
# (the original left it open).
with open("miplist.combined.txt", 'w') as output_file:
    # Output header line with filenames.  The sample name is taken from the
    # third dot-separated component of the basename.
    output_file.write("MIP\t")
    for filename in sys.argv[1:]:
        sample = os.path.basename(filename).split(".")[2]
        output_file.write(sample + " Reads\t")
        output_file.write(sample + " Unused Tags\t")
        output_file.write(sample + " Used Tags\t")
    output_file.write("\n")

    # Collect the three data columns from each input miplist file.
    # enumerate replaces the original manual counter over sys.argv.
    for file_index, filename in enumerate(sys.argv[1:]):
        with open(filename) as miplist_file:
            next(miplist_file)  # skip the per-file header line
            for line in miplist_file:
                mip, reads, unused_tags, used_tags = line.rstrip().split()
                if mip not in combined_mip_data:
                    # (0, 0, 0) placeholders for files that lack this MIP.
                    combined_mip_data[mip] = [(0, 0, 0)] * (len(sys.argv) - 1)
                combined_mip_data[mip][file_index] = (reads, unused_tags, used_tags)

    # Emit one row per MIP with every file's columns side by side.
    for mip in combined_mip_data:
        output_file.write(str(mip) + "\t")
        for (reads, unused_tags, used_tags) in combined_mip_data[mip]:
            output_file.write(str(reads) + "\t" + str(unused_tags) + "\t" + str(used_tags) + "\t")
        output_file.write("\n")
d054631a7d1d8adf5d8de9eab159b5a1b724b348 | 4,207 | py | Python | pirates/effects/SparksTrail.py | Willy5s/Pirates-Online-Rewritten | 7434cf98d9b7c837d57c181e5dabd02ddf98acb7 | [
"BSD-3-Clause"
] | 81 | 2018-04-08T18:14:24.000Z | 2022-01-11T07:22:15.000Z | pirates/effects/SparksTrail.py | Willy5s/Pirates-Online-Rewritten | 7434cf98d9b7c837d57c181e5dabd02ddf98acb7 | [
"BSD-3-Clause"
] | 4 | 2018-09-13T20:41:22.000Z | 2022-01-08T06:57:00.000Z | pirates/effects/SparksTrail.py | Willy5s/Pirates-Online-Rewritten | 7434cf98d9b7c837d57c181e5dabd02ddf98acb7 | [
"BSD-3-Clause"
] | 26 | 2018-05-26T12:49:27.000Z | 2021-09-11T09:11:59.000Z | from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.particles import ParticleEffect
from direct.particles import Particles
from direct.particles import ForceGroup
from PooledEffect import PooledEffect
from EffectController import EffectController
class SparksTrail(PooledEffect, EffectController):
    """Pooled star-sprite particle trail effect (Panda3D particle system)."""

    def __init__(self):
        PooledEffect.__init__(self)
        EffectController.__init__(self)
        # Billboard card used as the particle sprite.
        model = loader.loadModel('models/effects/particleCards')
        self.card = model.find('**/particleStars')
        self.cardScale = 64.0
        self.effectColor = Vec4(1, 1, 1, 1)
        self.effectScale = 1.0
        self.lifespan = 1.0
        # Shared render node for all SparksTrail particles; presumably
        # declared on the pooled-effect base class — confirm.
        if not SparksTrail.particleDummy:
            SparksTrail.particleDummy = render.attachNewNode(ModelNode('SparksTrailParticleDummy'))
            SparksTrail.particleDummy.setDepthWrite(0)
            SparksTrail.particleDummy.setLightOff()
            SparksTrail.particleDummy.setFogOff()
        self.f = ParticleEffect.ParticleEffect('SparksTrail')
        self.f.reparentTo(self)
        # Single particle system: spinning star sprites radiating from a point.
        self.p0 = Particles.Particles('particles-1')
        self.p0.setFactory('ZSpinParticleFactory')
        self.p0.setRenderer('SpriteParticleRenderer')
        self.p0.setEmitter('PointEmitter')
        self.f.addParticles(self.p0)
        self.p0.setPoolSize(64)
        self.p0.setBirthRate(0.02)
        self.p0.setLitterSize(1)
        self.p0.setLitterSpread(0)
        self.p0.setSystemLifespan(0.0)
        self.p0.setLocalVelocityFlag(0)
        self.p0.setSystemGrowsOlderFlag(0)
        # Factory: short-lived particles with a random initial spin.
        self.p0.factory.setLifespanBase(0.5)
        self.p0.factory.setLifespanSpread(0.1)
        self.p0.factory.setMassBase(1.0)
        self.p0.factory.setMassSpread(0.0)
        self.p0.factory.setTerminalVelocityBase(400.0)
        self.p0.factory.setTerminalVelocitySpread(0.0)
        self.p0.factory.setInitialAngle(0.0)
        self.p0.factory.setInitialAngleSpread(90.0)
        self.p0.factory.enableAngularVelocity(1)
        self.p0.factory.setAngularVelocity(0.0)
        self.p0.factory.setAngularVelocitySpread(25.0)
        # Renderer: additive-blended sprites that fade out over life.
        self.p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
        self.p0.renderer.setUserAlpha(1.0)
        self.p0.renderer.setFromNode(self.card)
        self.p0.renderer.setColor(Vec4(1.0, 1.0, 1.0, 1.0))
        self.p0.renderer.setXScaleFlag(1)
        self.p0.renderer.setYScaleFlag(1)
        self.p0.renderer.setAnimAngleFlag(1)
        self.p0.renderer.setNonanimatedTheta(0.0)
        self.p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
        self.p0.renderer.setAlphaDisable(0)
        self.p0.renderer.setColorBlendMode(ColorBlendAttrib.MAdd, ColorBlendAttrib.OIncomingAlpha, ColorBlendAttrib.OOne)
        # Emitter: radiate outward from the origin with a slight downward force.
        self.p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
        self.p0.emitter.setAmplitudeSpread(0.0)
        self.p0.emitter.setOffsetForce(Vec3(0.0, 0.0, -2.0))
        self.p0.emitter.setExplicitLaunchVector(Vec3(1.0, 0.0, 0.0))
        self.p0.emitter.setRadiateOrigin(Point3(0.0, 0.0, 0.0))
        self.setEffectScale(self.effectScale)

    def createTrack(self):
        # Start emitting, run for one second, then taper off and clean up.
        self.startEffect = Sequence(Func(self.p0.setBirthRate, 0.01), Func(self.p0.clearToInitial), Func(self.f.start, self, self.particleDummy))
        self.endEffect = Sequence(Func(self.p0.setBirthRate, 100.0), Wait(1.0), Func(self.cleanUpEffect))
        self.track = Sequence(self.startEffect, Wait(1.0), self.endEffect)

    def setEffectColor(self, color):
        # Tint the particle sprites.
        self.effectColor = color
        self.p0.renderer.setColor(self.effectColor)

    def setEffectScale(self, scale):
        # Scale sprite size and emitter amplitude together.
        self.effectScale = scale
        self.p0.renderer.setInitialXScale(0.1 * self.cardScale * scale)
        self.p0.renderer.setFinalXScale(0.2 * self.cardScale * scale)
        self.p0.renderer.setInitialYScale(0.1 * self.cardScale * scale)
        self.p0.renderer.setFinalYScale(0.2 * self.cardScale * scale)
        self.p0.emitter.setAmplitude(20.0 * scale)

    def cleanUpEffect(self):
        # Return this instance to the effect pool after cleanup.
        EffectController.cleanUpEffect(self)
        self.checkInEffect(self)

    def destroy(self):
        EffectController.destroy(self)
        PooledEffect.destroy(self)
e2d58c1fa87814e0bfe25b129916e1585f20e58c | 741 | py | Python | process_images.py | HayetBD/Text-to-image | 7ead7e03bb8ee42f457281bc250cd88161fb5dcd | [
"MIT"
] | null | null | null | process_images.py | HayetBD/Text-to-image | 7ead7e03bb8ee42f457281bc250cd88161fb5dcd | [
"MIT"
] | null | null | null | process_images.py | HayetBD/Text-to-image | 7ead7e03bb8ee42f457281bc250cd88161fb5dcd | [
"MIT"
import utils
import pathlib
import numpy as np
import matplotlib
from matplotlib import pyplot as plt

# Preprocess every sample image and save the resulting tensors as one
# .npy file for downstream text-to-image training.

# Bug fix: the original used '==' (a no-op comparison); assignment is
# what actually enables interactive mode.
matplotlib.rcParams['interactive'] = True

img_set = pathlib.Path("Data/images_sample/")

# Convert all paths into strings (glob already yields an iterable, so the
# original's extra list() wrapper was unnecessary).
all_image_paths = [str(img_path) for img_path in img_set.glob("*.jpg")]
# print(all_image_paths)

# Load and preprocess each image into a tensor.
tensors = [utils.load_and_preprocess_images(path) for path in all_image_paths]

# fig = plt.figure(1, figsize=(10, 10))
# plt.imshow(tensors[3])
# plt.savefig('figure_3_paysage.png')

# Save the image tensors (used alongside skip-thought text vectors).
print('Save images vector : loading ....')
np.save('Data/vectors_files/imgvectors_sample.npy', tensors)
print('Save images vector : DONE !')
dfe753ab59bca8199a5010d30665b240f131af95 | 7,969 | py | Python | ir/main.py | lswank/incremental-reading | 423abc02ce037db7458795f9a8db9df561c74763 | [
"0BSD"
] | 4 | 2018-09-18T09:05:30.000Z | 2018-10-19T02:58:17.000Z | ir/main.py | lswank/incremental-reading | 423abc02ce037db7458795f9a8db9df561c74763 | [
"0BSD"
] | 3 | 2018-09-27T06:31:35.000Z | 2018-10-22T18:55:08.000Z | ir/main.py | lswank/incremental-reading | 423abc02ce037db7458795f9a8db9df561c74763 | [
"0BSD"
] | null | null | null | # Copyright 2013 Tiago Barroso
# Copyright 2013 Frank Kmiec
# Copyright 2013-2016 Aleksej
# Copyright 2018 Timothée Chauvin
# Copyright 2017-2018 Luo Li-Yan <joseph.lorimer13@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright
# notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
from anki.hooks import addHook, wrap
from aqt import mw
from aqt.browser import Browser
from aqt.reviewer import Reviewer
import sip
from .about import showAbout
from .gui import SettingsDialog
from .importer import Importer
from .schedule import Scheduler
from .settings import SettingsManager
from .text import TextManager
from .util import addMenuItem, isIrCard, loadFile
from .view import ViewManager
class ReadingManager:
    """Central coordinator for the incremental-reading add-on.

    Wires the importer, scheduler, text manager and view manager into
    Anki via hooks, installs per-card keyboard shortcuts, and creates
    the IR note type on first use.
    """

    def __init__(self):
        self.importer = Importer()
        self.scheduler = Scheduler()
        self.textManager = TextManager()
        self.viewManager = ViewManager()

        # Defer settings-dependent setup until the profile is loaded.
        addHook('profileLoaded', self.onProfileLoaded)
        addHook('overviewStateShortcuts', self.setShortcuts)
        addHook('reviewStateShortcuts', self.setShortcuts)
        addHook('prepareQA', self.onPrepareQA)
        addHook('showAnswer', self.onShowAnswer)
        addHook('reviewCleanup', self.onReviewCleanup)

        # Question-side shortcuts currently installed (cleared per review).
        self.qshortcuts = []

    def onProfileLoaded(self):
        """Load settings and finish initialization once a profile is open."""
        self.settings = SettingsManager()
        mw.addonManager.setConfigAction(
            __name__, lambda: SettingsDialog(self.settings))
        # Share the settings object across all components.
        self.importer.settings = self.settings
        self.scheduler.settings = self.settings
        self.textManager.settings = self.settings
        self.viewManager.settings = self.settings
        self.viewManager.resetZoom('deckBrowser')
        self.addModel()
        self.loadMenuItems()
        # (key, action) pairs applied while an IR card's question is shown.
        self.shortcuts = [
            ('Down', self.viewManager.lineDown),
            ('PgDown', self.viewManager.pageDown),
            ('PgUp', self.viewManager.pageUp),
            ('Up', self.viewManager.lineUp),
            (self.settings['extractKey'], self.textManager.extract),
            (self.settings['highlightKey'], self.textManager.highlight),
            (self.settings['removeKey'], self.textManager.remove),
            (self.settings['undoKey'], self.textManager.undo),
            (self.settings['overlaySeq'], self.textManager.toggleOverlay),
            (self.settings['boldSeq'],
             lambda: self.textManager.format('bold')),
            (self.settings['italicSeq'],
             lambda: self.textManager.format('italic')),
            (self.settings['strikeSeq'],
             lambda: self.textManager.format('strike')),
            (self.settings['underlineSeq'],
             lambda: self.textManager.format('underline')),
        ]

    def loadMenuItems(self):
        """(Re)build the add-on's 'Read' menu."""
        if hasattr(mw, 'customMenus') and 'Read' in mw.customMenus:
            mw.customMenus['Read'].clear()
        addMenuItem('Read',
                    'Options...',
                    lambda: SettingsDialog(self.settings),
                    'Alt+1')
        addMenuItem('Read', 'Organizer...', self.scheduler.showDialog, 'Alt+2')
        addMenuItem('Read',
                    'Import Webpage',
                    self.importer.importWebpage,
                    'Alt+3')
        addMenuItem('Read', 'Import Feed', self.importer.importFeed, 'Alt+4')
        addMenuItem('Read',
                    'Import Pocket',
                    self.importer.importPocket,
                    'Alt+5')
        addMenuItem('Read', 'Zoom In', self.viewManager.zoomIn, 'Ctrl++')
        addMenuItem('Read', 'Zoom Out', self.viewManager.zoomOut, 'Ctrl+-')
        addMenuItem('Read', 'About...', showAbout)
        self.settings.loadMenuItems()

    def onPrepareQA(self, html, card, context):
        """Swap keyboard shortcuts depending on whether an IR card is shown.

        Returns `html` unchanged; only shortcut state is mutated.
        """
        if self.settings['prioEnabled']:
            answerShortcuts = ['1', '2', '3', '4']
        else:
            answerShortcuts = ['4']
        # Anki's currently-registered shortcut objects for those keys
        # (None where the key is not registered).
        activeAnswerShortcuts = [next(
            (s for s in mw.stateShortcuts if s.key().toString() == i), None)
            for i in answerShortcuts]
        if isIrCard(card):
            if context == 'reviewQuestion':
                self.qshortcuts = mw.applyShortcuts(self.shortcuts)
                mw.stateShortcuts += self.qshortcuts
            # Remove Anki's answer shortcuts so IR keys take precedence.
            for shortcut in activeAnswerShortcuts:
                if shortcut:
                    mw.stateShortcuts.remove(shortcut)
                    sip.delete(shortcut)
        else:
            # Restore any answer shortcuts that were removed for IR cards.
            # NOTE(review): the lambda closes over the loop variable
            # `shortcut` (late binding) — confirm every restored key does
            # not end up answering with the last value.
            for shortcut in answerShortcuts:
                if not activeAnswerShortcuts[answerShortcuts.index(shortcut)]:
                    mw.stateShortcuts += mw.applyShortcuts(
                        [(shortcut,
                          lambda: mw.reviewer._answerCard(int(shortcut)))])
        return html

    def onShowAnswer(self):
        """Tear down the question-side IR shortcuts when the answer shows."""
        for qs in self.qshortcuts:
            mw.stateShortcuts.remove(qs)
            sip.delete(qs)

    def onReviewCleanup(self):
        # Drop references to the (now deleted) shortcut objects.
        self.qshortcuts = []

    def setShortcuts(self, shortcuts):
        """Add the zoom shortcut to Anki's overview/review states."""
        shortcuts.append(('Ctrl+=', self.viewManager.zoomIn))

    def addModel(self):
        """Create the IR note type (model) if it does not exist yet."""
        if mw.col.models.byName(self.settings['modelName']):
            return
        model = mw.col.models.new(self.settings['modelName'])
        model['css'] = loadFile('web', 'model.css')
        titleField = mw.col.models.newField(self.settings['titleField'])
        textField = mw.col.models.newField(self.settings['textField'])
        sourceField = mw.col.models.newField(self.settings['sourceField'])
        # Keep the source field value across consecutive Add dialogs.
        sourceField['sticky'] = True
        mw.col.models.addField(model, titleField)
        if self.settings['prioEnabled']:
            prioField = mw.col.models.newField(self.settings['priorityField'])
            mw.col.models.addField(model, prioField)
        mw.col.models.addField(model, textField)
        mw.col.models.addField(model, sourceField)
        template = mw.col.models.newTemplate('IR Card')
        template['qfmt'] = '<div class="ir-text">{{%s}}</div>' % (
            self.settings['textField'])
        if self.settings['prioEnabled']:
            template['afmt'] = 'Hit space to move to the next article'
        else:
            template['afmt'] = 'When do you want to see this card again?'
        mw.col.models.addTemplate(model, template)
        mw.col.models.add(model)
def answerButtonList(self, _old):
    """Replace the reviewer's answer buttons for IR cards.

    Wrapped around Reviewer._answerButtonList via anki.hooks.wrap, so
    `self` is the Reviewer and `_old` is the original implementation.
    """
    if isIrCard(self.card):
        if mw.readingManager.settings['prioEnabled']:
            # Priority scheduling: a single "Next" button.
            return ((1, _('Next')),)
        else:
            return ((1, _('Soon')), (2, _('Later')), (3, _('Custom')))
    else:
        # Regular cards keep Anki's default buttons.
        return _old(self)
def answerCard(self, ease, _old):
    """After Anki answers the card, apply IR scheduling to IR cards.

    Wrapped around Reviewer._answerCard; the card is captured before
    `_old` runs because answering advances `self.card`.
    """
    card = self.card
    _old(self, ease)
    if isIrCard(card):
        mw.readingManager.scheduler.answer(card, ease)
def buttonTime(self, i, _old):
    """Hide Anki's per-button time estimate on IR cards.

    Wrapped around Reviewer._buttonTime: IR cards get a fixed spacer div
    instead of the usual time string; other cards fall through to the
    original implementation.
    """
    if not isIrCard(mw.reviewer.card):
        return _old(self, i)
    return '<div class=spacer></div>'
def onBrowserClosed(self):
    """Refresh the IR organizer list when the card browser closes.

    Best-effort: the organizer dialog may not exist yet (or the profile
    may be closing), in which case the refresh is silently skipped.
    """
    try:
        mw.readingManager.scheduler._updateListItems()
    except Exception:
        # Was a bare ``except:``, which would also swallow SystemExit and
        # KeyboardInterrupt; keep the deliberate best-effort behaviour but
        # let those propagate.
        return
# Monkey-patch Anki's Reviewer and Browser so IR cards get custom answer
# buttons, IR scheduling on answer, no per-button time estimates, and the
# organizer list refreshes when the browser closes.
Reviewer._answerButtonList = wrap(Reviewer._answerButtonList,
                                  answerButtonList,
                                  'around')
Reviewer._answerCard = wrap(Reviewer._answerCard, answerCard, 'around')
Reviewer._buttonTime = wrap(Reviewer._buttonTime, buttonTime, 'around')
Browser._closeWindow = wrap(Browser._closeWindow, onBrowserClosed)
| 37.238318 | 79 | 0.624043 |
80495ff87531b1fdb31d4d5bc5b912c188e36e31 | 176 | py | Python | colors.py | mariaakarim/Connect-Four | 424e1a7c911a28c8cd0f139020818347e2377376 | [
"MIT"
] | null | null | null | colors.py | mariaakarim/Connect-Four | 424e1a7c911a28c8cd0f139020818347e2377376 | [
"MIT"
] | null | null | null | colors.py | mariaakarim/Connect-Four | 424e1a7c911a28c8cd0f139020818347e2377376 | [
"MIT"
# Colors that are used for the connect four board.
# Each color is an RGB triple with components in [0, 255].
YELLOW = [255, 255, 0]
WHITE = [255, 255, 255]
RED = [255, 0, 0]
PURPLE = [255, 0, 255]  # magenta in strict RGB terms, named PURPLE here
BLACK = [0, 0, 0]
BLUE = [0, 0, 255]
| 22 | 50 | 0.590909 |
a81772f9a7c2901a3ec3862f466597bdf922c16e | 1,152 | py | Python | app/src/components/html_components.py | Maria0496/batch7_beges | a612341978f961dd712919f7d1730f3ba36aa6eb | [
"MIT"
] | null | null | null | app/src/components/html_components.py | Maria0496/batch7_beges | a612341978f961dd712919f7d1730f3ba36aa6eb | [
"MIT"
] | null | null | null | app/src/components/html_components.py | Maria0496/batch7_beges | a612341978f961dd712919f7d1730f3ba36aa6eb | [
"MIT"
] | null | null | null | import dash_table
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
def build_figure_container(title, id, footer):
    """Wrap a dcc.Graph with the given component id in a Bootstrap card.

    The card shows `title` in its header, the graph in its body, and
    `footer` in its footer, styled with the app's "pretty_container" look.
    """
    header = dbc.CardHeader(html.H4(title))
    body = dbc.CardBody([dbc.Col(dcc.Graph(id=id))])  # , style={"padding": "0px"})
    return dbc.Card(
        [header, body, dbc.CardFooter(footer)],
        className="m-2 pretty_container",
    )
def build_table_container(title, id, footer):
    """Wrap a dash DataTable (with component id `id`) in a Bootstrap card
    with `title` as header and `footer` as footer."""
    fig_containter = dbc.Card(
        [
            dbc.CardHeader(html.H4(title)),
            dbc.CardBody(
                [
                    dash_table.DataTable(
                        # Horizontal scroll + row-margin fix for Bootstrap.
                        id=id, style_table={"overflowX": "auto"}, css=[{"selector": ".row", "rule": "margin: 0"}]
                    )
                ]  # style={"padding": "0px"}
            ),
            dbc.CardFooter(footer),
        ],
        className="m-2 pretty_container",
    )
    return fig_containter
def build_card_indicateur(title, value):
    """Render a small KPI card: `title` as a paragraph above `value` as an H3."""
    content = dbc.CardBody([html.P(title), html.H3(value)])
    return dbc.Card(content, className="m-2 pretty_container")
| 29.538462 | 113 | 0.572917 |
d2e906e7bfd432ceba97c72e6561443c2839740d | 87 | py | Python | src/statsd/client/__init__.py | wakemaster39/pystatsd-go | d366afa42068ce843ada3227dc85c8ddb4e95e9d | [
"MIT"
] | null | null | null | src/statsd/client/__init__.py | wakemaster39/pystatsd-go | d366afa42068ce843ada3227dc85c8ddb4e95e9d | [
"MIT"
] | null | null | null | src/statsd/client/__init__.py | wakemaster39/pystatsd-go | d366afa42068ce843ada3227dc85c8ddb4e95e9d | [
"MIT"
] | null | null | null | from .stream import TCPStatsClient, UnixSocketStatsClient
from .udp import StatsClient
| 29 | 57 | 0.862069 |
bc33de7ea199b81e57a7a3ecfd70fba3fb46fe65 | 399 | py | Python | setup.py | ChristophReich1996/Swin-Transformer-V2 | d71c1b412cd0fe13dc2557ad090cf0f027e54d47 | [
"MIT"
] | 43 | 2022-01-11T07:29:07.000Z | 2022-03-31T02:49:17.000Z | setup.py | simonlevine/Swin-Transformer-V2 | d9e3a61fbcca67eb4145eae6741a78e4a693fe42 | [
"MIT"
] | 7 | 2022-01-20T12:21:48.000Z | 2022-03-21T23:24:06.000Z | setup.py | simonlevine/Swin-Transformer-V2 | d9e3a61fbcca67eb4145eae6741a78e4a693fe42 | [
"MIT"
] | 8 | 2022-01-13T20:53:58.000Z | 2022-03-13T10:13:36.000Z | from setuptools import setup
setup(
name="swin_transformer_v2",
version="0.1",
url="https://github.com/ChristophReich1996/Swin-Transformer-V2",
license="MIT License",
author="Christoph Reich",
author_email="ChristophReich@gmx.net",
description="PyTorch Swin Transformer V2",
packages=["swin_transformer_v2"],
install_requires=["torch>=1.7.0", "timm>=0.4.12"],
)
| 28.5 | 68 | 0.694236 |
8b994e1bfe06592495daa6ac26fd3efd2d6f7555 | 76 | py | Python | src/pyqreg/__init__.py | mozjay0619/pyqreg | 33ea8965d1af55d08f313a52c5e88750b94d1f57 | [
"BSD-3-Clause"
] | null | null | null | src/pyqreg/__init__.py | mozjay0619/pyqreg | 33ea8965d1af55d08f313a52c5e88750b94d1f57 | [
"BSD-3-Clause"
] | null | null | null | src/pyqreg/__init__.py | mozjay0619/pyqreg | 33ea8965d1af55d08f313a52c5e88750b94d1f57 | [
"BSD-3-Clause"
] | null | null | null | from .formula_api import quantreg
from .quantile_regression import QuantReg
| 25.333333 | 41 | 0.868421 |
0e09cf332259df9de906e7043f1204c8d61ab2f9 | 5,655 | py | Python | tests/local_mode/test_lexeme.py | codeboy5/SyferText | ddad6ad40e3ef8161eb3b3f8c7522d30d7bc7098 | [
"Apache-2.0"
] | null | null | null | tests/local_mode/test_lexeme.py | codeboy5/SyferText | ddad6ad40e3ef8161eb3b3f8c7522d30d7bc7098 | [
"Apache-2.0"
] | null | null | null | tests/local_mode/test_lexeme.py | codeboy5/SyferText | ddad6ad40e3ef8161eb3b3f8c7522d30d7bc7098 | [
"Apache-2.0"
] | null | null | null | import syft as sy
import torch
import syfertext
from syfertext.attrs import Attributes
import numpy as np
# Module-level fixtures shared by every test below: hook PySyft into
# torch, load the SyferText language model on the local worker, and
# grab its vocabulary.
hook = sy.TorchHook(torch)
me = hook.local_worker
lang = "en_core_web_lg"
nlp = syfertext.load(lang, owner=me)
# Get the vocab instance
vocab = nlp.vocab
def test_check_flag():
    """Test the check_flag method for tokens."""
    text1 = "Apple"
    text2 = "Hello"
    lexeme1 = vocab[text1]
    lexeme2 = vocab[text2]
    # check same attribute value is returned using check_flag method
    # and lexeme attribute
    assert lexeme1.is_digit == lexeme1.check_flag(Attributes.IS_DIGIT)
    assert lexeme2.is_bracket == lexeme2.check_flag(Attributes.IS_BRACKET)
def test_set_flag():
    """Test that set_flag can override an existing lexeme attribute."""
    text = "Apple"
    lexeme = vocab[text]
    # override an attribute value for a token
    lexeme.set_flag(flag_id=Attributes.IS_DIGIT, value=True)
    # the actual token is not a digit, but the flag can be forced to True
    assert lexeme.is_digit
def test_lex_text():
    """The text and orth_ attributes both hold the original string."""
    text = "Apple"
    # Get the Lexeme object of text from Vocab
    lexeme = vocab[text]
    # test the text attribute of lexeme
    assert lexeme.text == text
    # test the orth_ attribute of lexeme
    assert lexeme.orth_ == text
def test_lex_orth():
    """A lexeme's orth id matches the tokenized string's orth id."""
    text = "Apple"
    # Get the Lexeme object of text from Vocab
    lexeme = vocab[text]
    # get text token
    token = nlp(text)[0]
    # test orth of lexeme is same as
    # that of the original string
    assert lexeme.orth == token.orth
def test_lex_lower():
    """lower/lower_ hold the lowercase orth id and string respectively."""
    text = "APple"
    # Get the Lexeme object of text from Vocab
    lexeme = vocab[text]
    # get the token of lowercase text
    token = nlp(text.lower())[0]
    # test the lower attribute (lowercase string orth)
    assert lexeme.lower == token.orth
    # test if lower_ attribute (lowercase string)
    assert lexeme.lower_ == text.lower()
def test_lang_name():
    """A lexeme records the language model it came from."""
    text = "apple"
    # Get the Lexeme object of text from Vocab
    lexeme = vocab[text]
    # test the language model name of lexeme
    assert lexeme.lang_ == lang
def test_lex_bool_attrs():
    """Exercise every boolean lexeme attribute with a positive and a
    negative example string."""
    # define strings for checking
    # corresponding attributes
    text = "apple"
    stop = "did"
    alpha = "Apple"
    not_alpha = "5Apple"
    ascii = ","
    not_ascii = "£"
    punct = "'"
    right_punct = "’"
    left_punct = "‛"
    oov = "outofvocabulary"
    digit = "104"
    lower = "apple"
    upper = "APPLE"
    space = " "
    bracket = "("
    quote = "'"
    currency = "¥"
    title = "Openmined Syfertext"
    # Test is_oov (if out of vocabulary)
    assert vocab[oov].is_oov == True
    assert vocab[text].is_oov == False
    # test is_stop (if string is in SyferText stop words list defined)
    assert vocab[stop].is_stop == True
    assert vocab[text].is_stop == False
    # test is_alpha (if string contains alpha chars)
    assert vocab[alpha].is_alpha == True
    assert vocab[not_alpha].is_alpha == False
    # test is_ascii (if string is composed of ascii characters)
    assert vocab[ascii].is_ascii == True
    assert vocab[not_ascii].is_ascii == False
    # test is_digit (if string is a digit)
    assert vocab[digit].is_digit == True
    assert vocab[text].is_digit == False
    # test is_lower (if string is in lowercase)
    assert vocab[lower].is_lower == True
    assert vocab[upper].is_lower == False
    # test is_title (if string is in title case)
    assert vocab[title].is_title == True
    assert vocab[text].is_title == False
    # test is_punct (if string is a punctuation)
    assert vocab[punct].is_punct == True
    assert vocab[text].is_punct == False
    # test is_space (if string is composed of space character only )
    assert vocab[space].is_space == True
    assert vocab[text].is_space == False
    # test is_quote (if string is a quote char)
    assert vocab[quote].is_quote == True
    assert vocab[text].is_quote == False
    # test is_left_punct (if string is a left punctuation char)
    assert vocab[left_punct].is_left_punct == True
    assert vocab[text].is_left_punct == False
    # test is_right_punct (if string is a right punctuation char)
    assert vocab[right_punct].is_right_punct == True
    assert vocab[text].is_right_punct == False
    # test is_currency (if string is a currency char)
    assert vocab[currency].is_currency == True
    assert vocab[text].is_currency == False
    # test is_bracket (if string is a bracket char)
    assert vocab[bracket].is_bracket == True
    assert vocab[text].is_bracket == False
def test_lex_like_num():
    """like_num is True for number-like strings only."""
    num = "10.8"
    text = "apple"
    # test if string is like number
    assert vocab[num].like_num == True
    assert vocab[text].like_num == False
def test_lex_like_email():
    """like_email accepts a full address and rejects one without a TLD."""
    cases = {
        "noobmaster69@endgame.com": True,
        "noobmaster@endgame": False,
    }
    for candidate, expected in cases.items():
        assert vocab[candidate].like_email == expected
def test_lex_like_url():
    """like_url flags URL-shaped strings and rejects plain words."""
    url_like = [
        "http://ninjaflex_meta.com/",
        "google.com",
        "www.google.com",
        "https://amazondating.co/",
    ]
    not_url_like = ["apple", "a.b"]
    for candidate in url_like:
        assert vocab[candidate].like_url == True
    for candidate in not_url_like:
        assert vocab[candidate].like_url == False
def test_lex_word_shape():
    """shape_ maps letters/digits to x/X/d placeholders (long runs capped)."""
    expected_shapes = [
        ("Apple", "Xxxxx"),
        ("APPLE", "XXXX"),
        ("noobmaster69", "xxxxdd"),
        ("123456", "dddd"),
        (",", ","),
    ]
    for word, shape in expected_shapes:
        assert vocab[word].shape_ == shape
| 25.472973 | 80 | 0.654111 |
39d057f1b4e81b14d2541ec771b107e605bdec5a | 261 | py | Python | configs/pspnet_wsss/pspnet_r50-d8_40kx32_coco.py | XMed-Lab/URN | 5e56e5e6cc4e11a2885a0fc465d60246c747e621 | [
"Apache-2.0"
] | 28 | 2021-12-15T04:00:10.000Z | 2022-03-07T07:57:01.000Z | configs/pspnet_wsss/pspnet_r50-d8_40kx32_coco.py | XMed-Lab/URN | 5e56e5e6cc4e11a2885a0fc465d60246c747e621 | [
"Apache-2.0"
] | 7 | 2021-09-09T07:46:49.000Z | 2022-02-11T03:04:19.000Z | configs/pspnet_wsss/pspnet_r50-d8_40kx32_coco.py | Eli-YiLi/WSSS_MMSeg | 2cbb54e6bafffb02c65327effe267cfac0ac5a92 | [
"Apache-2.0"
] | 3 | 2021-12-14T03:11:36.000Z | 2022-03-28T19:20:29.000Z | _base_ = [
'../_base_/models/pspnet_r50_wsss.py',
'../_base_/datasets/ms_coco_wsss.py', '../_base_/default_runtime.py',
'../_base_/schedules/schedule_40k.py'
]
model = dict(
decode_head=dict(num_classes=91), auxiliary_head=dict(num_classes=91))
| 32.625 | 74 | 0.701149 |
f406b7ad8e761d5c5d1fa2d49478407652540a6d | 1,087 | py | Python | consoleme/default_plugins/plugins/internal_routes/internal_routes.py | shyovn/consoleme | 471592b718b22f83244609ab47d5bf3f9a715a4d | [
"Apache-2.0"
] | 2,835 | 2020-12-09T19:07:24.000Z | 2022-03-31T06:38:44.000Z | consoleme/default_plugins/plugins/internal_routes/internal_routes.py | shyovn/consoleme | 471592b718b22f83244609ab47d5bf3f9a715a4d | [
"Apache-2.0"
] | 179 | 2020-12-10T01:51:25.000Z | 2022-03-31T02:06:06.000Z | consoleme/default_plugins/plugins/internal_routes/internal_routes.py | shyovn/consoleme | 471592b718b22f83244609ab47d5bf3f9a715a4d | [
"Apache-2.0"
] | 219 | 2020-12-09T21:30:56.000Z | 2022-03-31T05:57:36.000Z | from consoleme.default_plugins.plugins.internal_routes.handlers.internal_demo_route import (
InternalDemoRouteHandler,
)
class InternalRoutes:
    """Registry of internal-only Tornado routes for this ConsoleMe plugin."""

    # Optional Tornado UI modules exposed by the plugin (none by default).
    ui_modules = {}

    def get_internal_routes(self, make_jwt_validator, jwt_validator=None):
        """Return the list of (url pattern, handler) tuples for internal routes.

        The jwt arguments are accepted for interface compatibility but are not
        used by the demo route.
        """
        # Tip: an internal package's template path can be resolved like this if
        # you need to serve bundled JavaScript/HTML for a handler:
        # path = pkg_resources.resource_filename("consoleme_internal", "templates")
        routes = [
            (r"/internal_demo_route/?", InternalDemoRouteHandler),
            # Example of serving static internal content using the path above:
            # (
            #     r"/static_internal/(.*)",
            #     NoCacheStaticFileHandler,
            #     dict(path=os.path.join(path, "static")),
            # ),
        ]
        return routes
def init():
    """Plugin entry point: build and return the internal routes registry."""
    routes_plugin = InternalRoutes()
    return routes_plugin
| 37.482759 | 120 | 0.655014 |
d98ddc81942678123202b070783571b92a160a46 | 1,049 | py | Python | app/backend/gwells/views/registry_view.py | stephenhillier/gwells | 235d35f1f40dd845f8fecd0d7c3371c4564567c6 | [
"Apache-2.0"
] | null | null | null | app/backend/gwells/views/registry_view.py | stephenhillier/gwells | 235d35f1f40dd845f8fecd0d7c3371c4564567c6 | [
"Apache-2.0"
] | null | null | null | app/backend/gwells/views/registry_view.py | stephenhillier/gwells | 235d35f1f40dd845f8fecd0d7c3371c4564567c6 | [
"Apache-2.0"
] | null | null | null | """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.views import generic
from gwells.models import Survey
class RegistryView(generic.TemplateView):
    """Render the registry page, listing surveys ordered by creation date."""

    template_name = 'gwells/registry.html'

    def get_context_data(self, **kwargs):
        """Build the template context: ordered surveys plus the page code."""
        ctx = super(RegistryView, self).get_context_data(**kwargs)
        ctx['surveys'] = Survey.objects.order_by('create_date')
        # 'r' identifies the registry page to the shared navigation template.
        ctx['page'] = 'r'
        return ctx
| 32.78125 | 76 | 0.694948 |
77cf7fbb364be027948e583c31d1ba0674e037c1 | 430 | py | Python | epi_judge_python/reverse_sublist.py | shobhitmishra/CodingProblems | 0fc8c5037eef95b3ec9826b3a6e48885fc86659e | [
"MIT"
] | null | null | null | epi_judge_python/reverse_sublist.py | shobhitmishra/CodingProblems | 0fc8c5037eef95b3ec9826b3a6e48885fc86659e | [
"MIT"
] | null | null | null | epi_judge_python/reverse_sublist.py | shobhitmishra/CodingProblems | 0fc8c5037eef95b3ec9826b3a6e48885fc86659e | [
"MIT"
] | null | null | null | from typing import Optional
from list_node import ListNode
from test_framework import generic_test
def reverse_sublist(L: "ListNode", start: int,
                    finish: int) -> "Optional[ListNode]":
    """Reverse the nodes of L from position start to finish (1-indexed, inclusive).

    Runs in O(n) time and O(1) extra space; mutates the list in place.

    Args:
        L: head of a singly linked list; nodes expose ``data`` and ``next``.
        start: 1-based index of the first node to reverse.
        finish: 1-based index of the last node to reverse (start <= finish).

    Returns:
        The (possibly new) head of the list, or None for an empty list.
    """
    # Nothing to do for an empty list or a sublist of length <= 1.
    if L is None or start >= finish:
        return L
    # Walk to the node just before the sublist; prehead stays None when start == 1.
    prehead, node = None, L
    for _ in range(start - 1):
        prehead, node = node, node.next
    sublist_head = node  # becomes the sublist tail after reversal
    # Standard in-place reversal of finish - start + 1 nodes.
    prev = None
    for _ in range(finish - start + 1):
        node.next, prev, node = prev, node, node.next
    # Reconnect: old sublist head -> remainder, predecessor -> new sublist head.
    sublist_head.next = node
    if prehead is None:
        return prev
    prehead.next = prev
    return L
if __name__ == '__main__':
exit(
generic_test.generic_test_main('reverse_sublist.py',
'reverse_sublist.tsv', reverse_sublist))
| 25.294118 | 79 | 0.639535 |
d882378ec9d5ce32cce34a0bbcc7827a0bd8eeed | 1,920 | py | Python | monai/handlers/validation_handler.py | xidchen/MONAI | 4eae383f1abbbb62a69e054c016a3b4e53af3ea7 | [
"Apache-2.0"
] | null | null | null | monai/handlers/validation_handler.py | xidchen/MONAI | 4eae383f1abbbb62a69e054c016a3b4e53af3ea7 | [
"Apache-2.0"
] | null | null | null | monai/handlers/validation_handler.py | xidchen/MONAI | 4eae383f1abbbb62a69e054c016a3b4e53af3ea7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ignite.engine import Events, Engine
from monai.engines import Evaluator
class ValidationHandler:
    """Ignite handler that triggers a validation run during training.

    Validation fires every N epochs or every N iterations, depending on
    ``epoch_level``.
    """

    def __init__(self, validator: Evaluator, interval: int, epoch_level: bool = True) -> None:  # type: ignore
        """
        Args:
            validator: Evaluator engine executed at every trigger point.
            interval: run validation every N epochs or every N iterations.
            epoch_level: if True the interval counts epochs, otherwise iterations.
        """
        if not isinstance(validator, Evaluator):  # type: ignore
            raise ValueError("validator must be Evaluator ignite engine.")
        self.validator = validator
        self.interval = interval
        self.epoch_level = epoch_level

    def attach(self, engine: Engine):
        """Register this handler on the trainer ``engine`` at the chosen cadence."""
        event = Events.EPOCH_COMPLETED if self.epoch_level else Events.ITERATION_COMPLETED
        engine.add_event_handler(event(every=self.interval), self)

    def __call__(self, engine: Engine):
        """Run the validator, passing the trainer's current epoch."""
        self.validator.run(engine.state.epoch)
| 41.73913 | 110 | 0.701563 |
fb1fac03099e7e5c17baa56ecf6c4228f4a74547 | 1,763 | py | Python | content/en/api/synthetics/code_snippets/post_test.py | bigjazzsound/documentation | e3488059c8333459a7139c08f2f0cae0b1cd0277 | [
"BSD-3-Clause"
] | 1 | 2020-04-09T01:40:33.000Z | 2020-04-09T01:40:33.000Z | content/en/api/synthetics/code_snippets/post_test.py | bigjazzsound/documentation | e3488059c8333459a7139c08f2f0cae0b1cd0277 | [
"BSD-3-Clause"
] | null | null | null | content/en/api/synthetics/code_snippets/post_test.py | bigjazzsound/documentation | e3488059c8333459a7139c08f2f0cae0b1cd0277 | [
"BSD-3-Clause"
] | null | null | null | from datadog import initialize, api
# Datadog API credentials (placeholders — substitute real keys before running).
options = {
    'api_key': '<DATADOG_API_KEY>',
    'app_key': '<DATADOG_APPLICATION_KEY>'
}
initialize(**options)
# To create an API test
name = "test"
api_test_type = "api"
# API test definition: assert on status code, content-type header and latency.
api_config = {
    "assertions": [
        {"operator": "is", "type": "statusCode", "target": 403},
        {"operator": "is", "property": "content-type", "type": "header", "target": "text/html"},
        {"operator": "lessThan", "type": "responseTime", "target": 2000}
    ],
    "request": {"method": "GET", "url": "https://datadoghq.com", "timeout": 30, "headers": {"header1": "value1", "header2": "value2"}}
}
message = "test"
# Run every 60 s; a single failing location marks the test as failed.
api_test_options = {"tick_every": 60, "min_failure_duration": 0,
                    "min_location_failed": 1, "follow_redirects": True}
locations = ["aws:us-east-2", "aws:eu-central-1", "aws:ca-central-1",
             "aws:eu-west-2", "aws:ap-northeast-1", "aws:us-west-2", "aws:ap-southeast-2"]
tags = ["foo:bar"]
api.Synthetics.create_test(name=name, type=api_test_type, config=api_config, options=api_test_options,
                           message=message, locations=locations, tags=tags)
# To create a browser test
# NOTE: the variables below intentionally reuse/overwrite the names above.
name = "test"
browser_test_type = "browser"
browser_config = {
    "request": {"method": "GET", "url": "https://example.com/"},
    "assertions": []
}
message = "test"
# Hourly browser test on a large-laptop viewport, with renotify and retries.
browser_test_options = {"device_ids": ["laptop_large"], "tick_every": 3600, "min_failure_duration": 0,
                        "min_location_failed": 1, "monitor_options": {"renotify_interval": 30}, "retry": {"count": 2, "interval": 30}}
locations = ["aws:ca-central-1", "aws:us-east-2"]
tags = []
api.Synthetics.create_test(name=name, type=browser_test_type, config=browser_config, options=browser_test_options,
                           message=message, locations=locations, tags=tags)
3ead483099c924630f6ace60ed1b4184992b4a32 | 639 | py | Python | uma/api/routes/roster.py | Rexians/uma | 9964afa37b26949916d0a2ba752f806220b26d6c | [
"MIT"
] | 3 | 2022-02-27T16:23:34.000Z | 2022-03-21T18:20:38.000Z | uma/api/routes/roster.py | Rexians/uma | 9964afa37b26949916d0a2ba752f806220b26d6c | [
"MIT"
] | null | null | null | uma/api/routes/roster.py | Rexians/uma | 9964afa37b26949916d0a2ba752f806220b26d6c | [
"MIT"
] | 3 | 2022-01-12T16:35:10.000Z | 2022-03-21T18:20:10.000Z | from fastapi import FastAPI, HTTPException, APIRouter
from ..models.mcoc_roster import Roster
# from pydantic import BaseModel
from dotenv import load_dotenv
import os
router = APIRouter()
@router.get("/roster/get/")
def get_roster(gamename: str):
    """
    Get Roster Details of a user by Gamename

    Returns the roster dictionary augmented with ``status``/``detail`` on
    success; raises a 404 HTTPException with the backend error otherwise.
    """
    # Fix: removed a leftover debug print(gamename) that leaked query values to stdout.
    roster = Roster()
    roster.get_roster(gamename)
    if roster.error == "":
        roster_dict = roster.roster_dict
        roster_dict["status"] = 200
        roster_dict["detail"] = "Successful"
        return roster_dict
    else:
        raise HTTPException(status_code=404, detail=roster.error)
ba2fcd12cb53aee3856a6417c20b728bb7ebd328 | 6,943 | py | Python | src/VAE.py | Cyril-Grl/MuGen | 3f4d5b104f3f3b4dffe884e9cbf2e30625ba0d75 | [
"MIT"
] | null | null | null | src/VAE.py | Cyril-Grl/MuGen | 3f4d5b104f3f3b4dffe884e9cbf2e30625ba0d75 | [
"MIT"
] | 1 | 2021-11-11T08:26:32.000Z | 2021-11-11T08:26:32.000Z | src/VAE.py | Cyril-Grl/MuGen | 3f4d5b104f3f3b4dffe884e9cbf2e30625ba0d75 | [
"MIT"
] | 1 | 2020-07-15T19:33:29.000Z | 2020-07-15T19:33:29.000Z | import math
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pretty_midi
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense, Lambda, Input
# from tensorflow.keras.losses import mse
from tensorflow.keras.models import Model
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
# reparameterization trick
# instead of sampling from Q(z|X), sample epsilon = N(0,I)
# z = z_mean + sqrt(var) * epsilon
def sampling(args):
    """Reparameterization trick: draw z ~ N(z_mean, exp(z_log_var)).

    Instead of sampling from Q(z|X) directly, sample eps ~ N(0, I) and
    compute z = z_mean + sqrt(var) * eps so gradients flow through the mean
    and log-variance.

    # Arguments
        args (tensor): mean and log of variance of Q(z|X)
    # Returns
        z (tensor): sampled latent vector
    """
    z_mean, z_log_var = args
    batch_size = K.shape(z_mean)[0]
    latent_dim = K.int_shape(z_mean)[1]
    # random_normal defaults to mean 0, std 1
    eps = K.random_normal(shape=(batch_size, latent_dim))
    return z_mean + K.exp(0.5 * z_log_var) * eps
def get_coord(models, data, batch_size=128):
    """Encode the data, scatter-plot the 2D latent means, and return them.

    Side effect: opens a matplotlib window via plt.show().
    """
    encoder, _decoder = models
    x_test, y_test = data
    # Latent means for each sample; color points by their class label.
    z_mean, _, _ = encoder.predict(x_test, batch_size=batch_size)
    fig, ax = plt.subplots()
    scatter = ax.scatter(z_mean[:, 0], z_mean[:, 1], c=y_test)
    legend = ax.legend(*scatter.legend_elements(), title="Classes")
    ax.add_artist(legend)
    plt.show()
    return z_mean
def get_features_file(file, class_label):
    """Extract a flattened boolean drum piano-roll feature vector from a midi file.

    Returns [features, class_label] where features is a flat bool array of
    length 4800 (12 pitches x 400 time steps).

    NOTE(review): ``a`` is only assigned inside the drum-instrument branch; a
    midi file with no drum track leaves ``a`` as None and the indexing below
    raises TypeError — confirm all dataset files contain a drum track.
    """
    midi_data = pretty_midi.PrettyMIDI(file)
    a = None
    for instrument in midi_data.instruments:
        if instrument.is_drum:
            # Mark as non-drum so get_piano_roll() produces notes for it.
            instrument.is_drum = False
            # Keep only the 12 pitches of the standard drum octave (36-47).
            a = instrument.get_piano_roll()[36:48]
    # Binarize velocities, pad/flatten to a fixed 12x400 = 4800 layout.
    a[a > 0] = 1
    a = np.pad(a, [(0, 0), (0, 400 - a.shape[1])], 'constant')
    a = a.astype(dtype=bool)
    a.resize(4800)
    # print(a[0])
    # np.savetxt(file[:-4] + ".mtr", a, fmt='%.1i')
    return [a, class_label]
    # midi_data = pretty_midi.PrettyMIDI(file)
    #
    # for instrument in midi_data.instruments:
    #     instrument.is_drum = False
    # if len(midi_data.instruments) > 0:
    #     data = midi_data.get_piano_roll(fs=8)
    #     data.resize(3968)
    #     return [data, class_label]
def get_features_all_data():
    """Extract features for every dataset midi file with a decile score.

    Reads the dataset CSV, keeps rows whose Score is a multiple of 10 in
    [0, 100], and returns a list of [feature_vector, normalized_score] pairs.
    """
    midi_dir = 'ressources/dataset_csv/midi_files/'
    csv_path = 'ressources/dataset_csv/dataset.csv'
    metadata = pd.read_csv(csv_path)
    accepted_scores = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
    features = []
    for _, row in metadata.iterrows():
        if row["Score"] in accepted_scores:
            # Normalize the 0-100 score to [0, 1] for use as class label.
            label = float(row["Score"]) / 100
            features.append(get_features_file(midi_dir + str(row["File"]), label))
    return features
def model(input_shape):
    """Build a dense VAE (encoder + decoder) for flat binary feature vectors.

    Args:
        input_shape: 1-tuple with the flattened input dimension.

    Returns:
        (vae, encoder, decoder) — the compiled end-to-end model plus its parts.

    NOTE(review): the model is compiled with a plain MSE reconstruction loss;
    no KL-divergence term is added, so this trains as a stochastic
    autoencoder rather than a full VAE — confirm this is intended.
    """
    print(input_shape)
    intermediate_dim = 512
    latent_dim = 2
    # VAE model = encoder + decoder
    # build encoder model
    inputs = Input(shape=input_shape, name='encoder_input')
    x = Dense(intermediate_dim, activation='relu')(inputs)
    z_mean = Dense(latent_dim, name='z_mean')(x)
    z_log_var = Dense(latent_dim, name='z_log_var')(x)
    # use reparameterization trick to push the sampling out as input
    # note that "output_shape" isn't necessary with the TensorFlow backend
    z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])
    # instantiate encoder model
    encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
    # encoder.summary()
    # plot_model(encoder, to_file='vae_mlp_encoder.png', show_shapes=True)
    # build decoder model
    latent_inputs = Input(shape=(latent_dim,), name='z_sampling')
    x = Dense(intermediate_dim, activation='relu')(latent_inputs)
    outputs = Dense(input_shape[0], activation='sigmoid')(x)
    # instantiate decoder model
    decoder = Model(latent_inputs, outputs, name='decoder')
    # decoder.summary()
    # plot_model(decoder, to_file='vae_mlp_decoder.png', show_shapes=True)
    # instantiate VAE model
    outputs = decoder(encoder(inputs)[2])
    vae = Model(inputs, outputs)  # , name='vae_mlp')
    vae.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])
    # vae.summary()
    # plot_model(vae, to_file='vae_mlp.png', show_shapes=True)
    return vae, encoder, decoder
def load_data(training, path_to_plot=""):
    """Load features and build the model for either training or inference.

    Args:
        training: if True, load the whole dataset and split train/test;
            if False, load only the single file at ``path_to_plot``.
        path_to_plot: midi file path used when ``training`` is False.

    Returns:
        (vae, encoder, decoder, x_train, y_train[, x_test, y_test]) — the
        test split is only returned in training mode.
    """
    if training:
        features = get_features_all_data()
    else:
        # Label 0 is a dummy — only the features are used at inference time.
        features = [get_features_file(path_to_plot, 0)]
    # Convert into a Panda dataframe
    featuresdf = pd.DataFrame(features, columns=['feature', 'class_label'])
    # print('Finished feature extraction from ', len(featuresdf), ' files')
    # Convert features & labels into numpy arrays
    X = np.array(featuresdf.feature.tolist())
    y = np.array(featuresdf.class_label.tolist())
    # print(X.shape, y.shape)
    if training:
        # split the dataset
        x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    else:
        x_train = X
        y_train = y
    # network parameters
    input_shape = (x_train.shape[1],)
    # Flatten each sample to a single feature vector.
    x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
    if training:
        x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
    vae, encoder, decoder = model(input_shape)
    if training:
        return vae, encoder, decoder, x_train, y_train, x_test, y_test
    else:
        return vae, encoder, decoder, x_train, y_train
def train():
    """Train the VAE on the full dataset, save weights, plot the latent space.

    Side effects: writes 'vae_midi.h5' and opens a matplotlib window via
    get_coord().
    """
    # Fix: removed a duplicated `batch_size = 128` assignment and unused
    # locals (x, y) left over from a commented-out distance computation.
    batch_size = 128
    epochs = 100
    vae, encoder, decoder, x_train, y_train, x_test, y_test = load_data(True)
    # Train the autoencoder: inputs double as reconstruction targets.
    vae.fit(x_train, x_train, epochs=epochs, batch_size=batch_size,
            validation_data=(x_test, x_test))
    vae.save_weights('vae_midi.h5')
    # Visualize the training data in latent space (plot only; value unused).
    get_coord((encoder, decoder), (x_train, y_train), batch_size=batch_size)
def give_distance(model, file):
    # NOTE(review): this function appears unfinished/dead — it ignores both
    # parameters, rebinds the local ``model`` and implicitly returns None.
    # Presumably superseded by get_distance() below; confirm before removing.
    model = tf.keras.models.load_model('vae_midi.h5')
def get_distance(file):
    """Return the Euclidean distance of a midi file's latent code from origin.

    Loads the trained weights from 'vae_midi.h5', encodes the single file and
    measures how far its 2D latent mean lies from (0, 0).

    NOTE(review): x and y are length-1 arrays here (single sample), so the
    sqrt works elementwise — the float conversion happens implicitly.
    """
    batch_size = 128
    vae, encoder, decoder, x_train, y_train = load_data(False, file)
    data = (x_train, y_train)
    vae.load_weights('vae_midi.h5')
    models = (encoder, decoder)
    # get_coord also plots the point as a side effect.
    coord = get_coord(models, data, batch_size=batch_size)
    x = coord[:, 0]
    y = coord[:, 1]
    # print(x, y)
    distance = math.sqrt(((0 - x) ** 2) + ((0 - y) ** 2))
    # print(f'distance : {distance}')
    return distance
#
# def main():
# print(f'distance : {get_distance("2519_60.mid")}')
#
#
# if __name__ == '__main__':
# main()
| 31.274775 | 101 | 0.651015 |
ff7ad1f27fb9cdf369cb54c0fd27284944acad8a | 289 | py | Python | src/blog/migrations/0005_delete_image.py | myth/overflow | 269f950b6584b327832deb9f9309c2eea527612b | [
"MIT"
] | 4 | 2018-08-21T05:33:40.000Z | 2019-05-06T09:03:06.000Z | src/blog/migrations/0005_delete_image.py | myth/overflow | 269f950b6584b327832deb9f9309c2eea527612b | [
"MIT"
] | 1 | 2020-08-09T10:33:58.000Z | 2020-08-09T10:33:58.000Z | src/blog/migrations/0005_delete_image.py | myth/overflow | 269f950b6584b327832deb9f9309c2eea527612b | [
"MIT"
] | 1 | 2019-05-06T13:33:06.000Z | 2019-05-06T13:33:06.000Z | # Generated by Django 2.2 on 2019-04-17 10:42
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0004_auto_20190416_1327'),
]
operations = [
migrations.DeleteModel(
name='Image',
),
]
| 17 | 45 | 0.598616 |
a791bfd48a0d7fb26cb6e4bf398e696dce24051a | 3,381 | py | Python | kamodo_ccmc/readers/kameleon_gateway.py | EnsembleGovServices/Kamodo-ccmc-readers | 75841f7ad832997159046d4b2523e0a244316e9d | [
"NASA-1.3"
] | 28 | 2019-08-30T16:18:53.000Z | 2022-01-13T16:33:51.000Z | kamodo_ccmc/readers/kameleon_gateway.py | EnsembleGovServices/Kamodo-ccmc-readers | 75841f7ad832997159046d4b2523e0a244316e9d | [
"NASA-1.3"
] | 22 | 2020-02-13T20:46:13.000Z | 2022-01-18T17:00:15.000Z | kamodo_ccmc/readers/kameleon_gateway.py | EnsembleGovServices/Kamodo-ccmc-readers | 75841f7ad832997159046d4b2523e0a244316e9d | [
"NASA-1.3"
] | 20 | 2019-08-16T21:22:40.000Z | 2022-03-07T15:39:18.000Z |
import os
store = dict()
def initialize(fname, *variables):
    """Opens a kameleon-compatible file and initializes an interpolator.

    Stores the kameleon handle and interpolator in the module-level ``store``
    dict for later interpolate() calls, and returns the file's metadata
    (global attributes plus per-variable attributes).
    """
    # ccmc module must be imported here since it is unavailable in python3
    from ccmc import _CCMC as ccmc
    kameleon = ccmc.Kameleon()
    kameleon.open(fname)
    # Variables must be loaded before the interpolator is created.
    load_variables(kameleon, *variables)
    interpolator = kameleon.createNewInterpolator()
    store['interpolator'] = interpolator
    store['kameleon'] = kameleon
    metadata = get_global_metadata(kameleon)
    metadata['variables'] = get_variable_metadata(kameleon, *variables)
    return metadata
def get_global_metadata(kameleon):
    """Return a dict of all global attributes of the file as strings."""
    attribute_names = (
        kameleon.getGlobalAttributeName(i)
        for i in range(kameleon.getNumberOfGlobalAttributes())
    )
    return {
        name: kameleon.getGlobalAttribute(name).toString()
        for name in attribute_names
    }
def get_variable_metadata(kameleon, *variables):
    """Return per-variable metadata: value range and unit strings."""
    return {
        varname: {
            'min': kameleon.getVariableAttribute(varname, 'actual_min').getAttributeFloat(),
            'max': kameleon.getVariableAttribute(varname, 'actual_max').getAttributeFloat(),
            'units': kameleon.getVisUnit(varname),
            'native_units': kameleon.getNativeUnit(varname),
        }
        for varname in variables
    }
def create_interpolator():
    """Load every variable of the stored kameleon file and rebuild the interpolator.

    Returns the new interpolator, which is also cached in ``store``.
    """
    nvar = store['kameleon'].getNumberOfVariables()
    for i in range(nvar):
        varname = store['kameleon'].getVariableName(i)
        store['kameleon'].loadVariable(varname)
    store['interpolator'] = store['kameleon'].createNewInterpolator()
    # Fix: the original `return fname` referenced an undefined name and would
    # always raise NameError; return the freshly created interpolator instead.
    return store['interpolator']
def load_variables(kameleon, *variables):
    """Load each named variable into the kameleon file handle.

    Raises:
        IOError: if any requested variable does not exist in the file.
    """
    for name in variables:
        if not kameleon.doesVariableExist(name):
            raise IOError('{} does not exist!'.format(name))
        kameleon.loadVariable(name)
def interpolate(varname, *point):
    """Interpolate ``varname`` at ``point`` using the stored interpolator."""
    interp = store['interpolator']
    return interp.interpolate(varname, *point)
# execnet remote-execution loop: runs when this module is executed through an
# execnet gateway channel (hence the '__channelexec__' module name).
if __name__ == '__channelexec__':
    for item in channel:
        try:
            # First try to evaluate the item as a Python expression
            # (e.g. "initialize(...)") and send back its result.
            # NOTE(review): bare except + eval on channel input — acceptable
            # only because the channel peer is the trusted local process.
            channel.send(eval(item))
        except:
            if type(item) == tuple:
                # (varname, x, y, z): interpolate a single point.
                channel.send(store['interpolator'].interpolate(
                    str(item[0]),
                    float(item[1]),
                    float(item[2]),
                    float(item[3])))
            elif type(item) == dict:
                if 'points' in item:
                    results = []
                    if 'variable' in item:
                        # One variable, many points -> flat list of values.
                        variable_name = str(item['variable'])
                        for point in item['points']:
                            results.append(store['interpolator'].interpolate(variable_name, *point))
                    elif 'variables' in item:
                        # Many variables, many points -> list of value lists.
                        for point in item['points']:
                            result = []
                            for variable in item['variables']:
                                variable_name = str(variable)
                                result.append(store['interpolator'].interpolate(variable_name, *point))
                            results.append(result)
                    channel.send(results)
                else:
                    # Unrecognized dict: echo it back unchanged.
                    channel.send(item)
            else:
                # Unrecognized item: echo it back unchanged.
                channel.send(item)
| 35.589474 | 107 | 0.590949 |
d8f005022750e41c461f083c56c41837b6936ae1 | 738 | py | Python | setup.py | TrustCodes/gs1-compression | 74c20141ab57025bda21092fbfaa922f8ca0a7ec | [
"Apache-2.0"
] | 3 | 2021-03-11T23:35:21.000Z | 2021-08-04T04:16:12.000Z | setup.py | TrustCodes/gs1-compression | 74c20141ab57025bda21092fbfaa922f8ca0a7ec | [
"Apache-2.0"
] | null | null | null | setup.py | TrustCodes/gs1-compression | 74c20141ab57025bda21092fbfaa922f8ca0a7ec | [
"Apache-2.0"
] | null | null | null | import setuptools
# Fix: read the README with an explicit UTF-8 encoding so packaging does not
# depend on the platform's default locale encoding.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="gs1-compression",
    version="0.1.3",
    author="Di Zhu",
    author_email="di.zhu@trust.codes",
    description=("A Python package to handle compression"
                 " and decompression of GS1 digital links"),
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Trust-Codes-Ltd/gs1-compression",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
)
| 30.75 | 61 | 0.658537 |
fd4f908f53808c2385a50757b2b47120befe1337 | 16,452 | py | Python | src/model/ExReNet.py | DLR-RM/SyntheticDataLocalization | 68eda127a0ae7d6a2c98e9b6fc8b49d0e761af9c | [
"MIT"
] | 3 | 2021-06-08T14:57:32.000Z | 2021-12-03T17:20:11.000Z | src/model/ExReNet.py | DLR-RM/SyntheticDataLocalization | 68eda127a0ae7d6a2c98e9b6fc8b49d0e761af9c | [
"MIT"
] | null | null | null | src/model/ExReNet.py | DLR-RM/SyntheticDataLocalization | 68eda127a0ae7d6a2c98e9b6fc8b49d0e761af9c | [
"MIT"
] | null | null | null | from tensorflow.keras.layers import Dense, Flatten,concatenate, Conv2D, Dropout, GlobalAveragePooling2D, Concatenate, UpSampling2D, BatchNormalization, Activation, LayerNormalization
from tensorflow.keras.models import Sequential
import tensorflow as tf
from tensorflow.keras import Model as KerasModel
import numpy as np
from tensorflow.python.keras.applications.resnet import stack1
from src.utils.RMatrix import RMatrix
from src.utils.TMatrix import TMatrix
from classification_models.tfkeras import Classifiers
class ExReNet(KerasModel):
    def __init__(self, config, data):
        """Build the relative-pose network.

        Args:
            config: configuration object providing get_list/get_bool/get_float.
            data: dataset wrapper; provides image_size and cam_pose_dim.
        """
        super(ExReNet, self).__init__()
        self.data = data
        self.config = config
        self.latent_dim = 50
        # Per-correlation-layer resolutions for source and destination images,
        # and the search-window border per layer.
        self.src_att_iters = self.config.get_list("src_att_iters")
        self.dest_att_iters = self.config.get_list("dest_att_iters")
        self.dest_att_border = self.config.get_list("dest_att_border")
        self._build()
        self.init_model()
def init_model(self):
# Create model parameters by doing one fake forward pass
reference_images = tf.ones((2, 128, 128, 3))
query_images = tf.ones((2, 128, 128, 3))
self(reference_images, query_images, True)
    def _build_feature_network(self, load_pretrained=True):
        """Build the ResNet50 feature extractor with multi-level outputs.

        Args:
            load_pretrained: if True, initialize with ImageNet weights.

        Returns:
            (feature_extractor, output_shape): a keras Model emitting the
            configured intermediate layer outputs, and its output shape(s).
        """
        # Build resnet50
        feature_extractor = tf.keras.applications.ResNet50(include_top=False, weights='imagenet' if load_pretrained else None, pooling=None, input_shape=(self.data.image_size, self.data.image_size, 3))
        # Collect the layers whose outputs will be used
        emb_set_output_layers = self.config.get_list("emb_set_output_layer")
        output_layers = []
        for layer in feature_extractor.layers:
            if layer.name in emb_set_output_layers:
                output_layers.append(layer.output)
        # Declare a new model based on the new outputs
        feature_extractor = tf.keras.models.Model(feature_extractor.input, output_layers)
        output_shape = feature_extractor.output_shape
        feature_extractor.summary()
        # Do the upscaling
        if self.config.get_bool("unet_style"):
            # Go over all upscale layers (applied later in encode(), U-Net style)
            self.upscale_layers = []
            last_channels = output_layers[-1].shape[-1]
            for filters, output_layer in zip([128, 64, 16, 4], output_layers[::-1][1:]):
                # Determine input shape: skip-connection channels are appended
                in_shape = list(output_layer.shape)
                in_shape[-1] += last_channels
                # Build one resnet stack for that level
                inp = tf.keras.layers.Input(shape=in_shape[1:])
                x = stack1(inp, filters, 1, stride1=1, name='conv1')
                self.upscale_layers.append(tf.keras.models.Model(inp, x))
                self.upscale_layers[-1].summary()
                last_channels = x.shape[-1]
        return feature_extractor, output_shape
    def _build(self):
        """Assemble the network: feature extractor plus pose-regression head."""
        # Build the feature extractor
        self.feature_extractor, feature_shape = self._build_feature_network()
        # Determine pose dimension (7+1)
        pose_dim = self.data.cam_pose_dim
        if self.config.get_bool("pred_scale_extra"):
            pose_dim += 1
        # Determine the number of channels the tensor has after the feature matching
        channels = 0
        for n in self.dest_att_iters:
            channels += 1 * n * n
        # Build regression part (resnet18)
        ResNet18, preprocess_input = Classifiers.get('resnet18')
        resnet18 = ResNet18((128, 128, channels), include_top=False, weights=None)
        x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(resnet18.output)
        # Fully connected layers
        x = tf.keras.layers.Dense(512, activation="relu")(x)
        # Dropout for MC dropout (training=True keeps it active at inference)
        if self.config.get_float("dropout") > 0:
            x = Dropout(self.config.get_float("dropout"))(x, training=True)
        x = Dense(512, activation="relu")(x)
        x = tf.keras.layers.Dense(pose_dim)(x)
        # Declare full regression part as one model
        self.decoder_pose_estimator = tf.keras.models.Model(resnet18.input, x)
    def corr_layer(self, feature_map_1, feature_map_2, last_layer_res, src_res, dest_res, upscale_facs, dest_att_border):
        """One hierarchical correlation layer between two feature-map pyramids.

        Computes per-tile dot-product correlations between the coarsest maps,
        then reorders the finer maps of feature_map_2 around the best matches
        so the next correlation layer searches only locally (in place).

        Args:
            feature_map_1 / feature_map_2: lists of feature maps, coarse first;
                element 0 is consumed here, later elements are re-tiled in place.
            last_layer_res: tiling resolution produced by the previous layer.
            src_res / dest_res: per-tile resolutions of source and destination.
            upscale_facs: per-level resolution factors for the finer maps.
            dest_att_border: search-window border around each match.

        Returns:
            The raw dot-product tensor of this layer.
        """
        batch_size = tf.shape(feature_map_1[0])[0]
        # Split up feature maps according to last corr. layer resolution
        # We gonna compute correlations for each tile individually
        feature_map_1[0] = tf.stack(tf.split(tf.stack(tf.split(feature_map_1[0], last_layer_res, 1), 1), last_layer_res, 3), 2)
        feature_map_2[0] = tf.stack(tf.split(tf.stack(tf.split(feature_map_2[0], last_layer_res, 1), 1), last_layer_res, 3), 2)
        # Collapse x,y dimensions into one dimension
        feature_map_1[0] = tf.reshape(feature_map_1[0], [batch_size, (last_layer_res) ** 2, src_res ** 2, tf.shape(feature_map_1[0])[-1]])
        feature_map_2[0] = tf.reshape(feature_map_2[0], [batch_size, (last_layer_res) ** 2, dest_res ** 2, tf.shape(feature_map_2[0])[-1]])
        # Compute dot products (scaled like scaled dot-product attention)
        dot = tf.matmul(feature_map_1[0], feature_map_2[0], transpose_b=True)
        dot /= tf.math.sqrt(tf.cast(tf.shape(feature_map_1[0])[-1], tf.float32))
        # Find max dot products
        argmax_dot = tf.argmax(tf.reshape(dot, [-1, dest_res ** 2]), -1)
        argmax_dot = tf.reshape(argmax_dot, tf.shape(dot)[:-1])
        # Compute feature coordinates of max dot products
        match_coord = tf.stack([argmax_dot // dest_res, argmax_dot % dest_res], -1)
        # Subtract border (and clamp so the search window stays inside the map)
        match_coord -= dest_att_border
        match_coord = tf.maximum(0, tf.minimum(dest_res - 1 - 2 * dest_att_border, tf.cast(match_coord, tf.int32)))
        # Now update the higher resolution feature maps according to the best matches
        tile_size = 1 + dest_att_border * 2
        upscale_fac = 1
        for p in range(1, len(feature_map_2)):
            # Updates tile size and upscale factor for this feature level
            tile_size *= upscale_facs[p]
            upscale_fac *= upscale_facs[p]
            # Split up feature maps according to last corr. layer resolution
            feature_map_2[p] = tf.stack(tf.split(tf.stack(tf.split(feature_map_2[p], last_layer_res, 1), 1), last_layer_res, 3), 2)
            # Collapse x,y dimensions into one dimension
            feature_map_2[p] = tf.reshape(feature_map_2[p], [batch_size, (last_layer_res) ** 2, upscale_fac * dest_res, upscale_fac * dest_res, tf.shape(feature_map_2[p])[-1]])
            # Upscale coordinates of matches to feature resolution of this level
            scaled_match_coord = tf.cast(upscale_fac * match_coord, tf.int32)
            # Build coordinates inside one tile
            base_indices = tf.cast(tf.transpose(tf.stack(tf.meshgrid(tf.range(0, tile_size), tf.range(0, tile_size)), -1), [1, 0, 2]), tf.int32)
            # Collapse x,y dimensions into one dimension
            base_indices = tf.reshape(base_indices, [-1, 2])
            # Repeat tile coordinates for each batch
            base_indices = tf.tile(base_indices[None, None, None], tf.concat((tf.shape(argmax_dot), [1, 1]), 0))
            # Add the matched coordinates to the offset coordinates per tile, so we get full coordinates per feature vector
            base_indices += scaled_match_coord[..., None, :]
            base_indices = tf.reshape(base_indices, tf.concat((tf.shape(base_indices)[:2], [-1, 2]), 0))
            # Now reorder feature map according to the matching coordinates
            res = tf.gather_nd(feature_map_2[p], base_indices, batch_dims=2)
            res = tf.reshape(res, [batch_size, last_layer_res, last_layer_res, src_res, src_res, tile_size, tile_size, tf.shape(feature_map_2[p])[-1]])
            # Now glue one feature map together from all these tiles
            res = tf.concat(tf.unstack(res, axis=3), 4)
            res = tf.concat(tf.unstack(res, axis=3), 4)
            res = tf.concat(tf.unstack(res, axis=1), 2)
            res = tf.concat(tf.unstack(res, axis=1), 2)
            # Use the result further as new feature map
            feature_map_2[p] = res
        return dot
    def match(self, preds1, preds2):
        """Run all hierarchical correlation layers between two feature pyramids.

        Args:
            preds1 / preds2: feature-map lists from encode(), coarse first; the
                last element is the pixel-coordinate map added by coord().

        Returns:
            (matching, matched_coordinates, all_dots): stacked correlation
            volumes at the target resolution, the re-tiled coordinate maps per
            layer, and the raw dot tensors per layer.
        """
        last_layer_res = 1
        # Final spatial resolution of the stacked correlation volume.
        target_size = np.prod(np.array(self.src_att_iters))
        matching = None
        all_dots = []
        matched_coordinates = []
        # Go over all correlation layers
        for i, (src_res, dest_res) in enumerate(zip(self.src_att_iters, self.dest_att_iters)):
            # Apply the corr. layer
            dot = self.corr_layer(preds1, preds2, last_layer_res, src_res, dest_res, self.src_att_iters[i:] + [128 // target_size], self.dest_att_border[i])
            # Remember dot products
            all_dots.append(dot)
            # Remember the matched last feature map which are the coordinates added via coord()
            matched_coordinates.append(preds2[-1])
            # Remove processed feature maps
            preds1 = preds1[1:]
            preds2 = preds2[1:]
            # Reshape dot products to the resolution of the feature maps
            dot = tf.reshape(dot, [tf.shape(dot)[0], last_layer_res, last_layer_res, src_res, src_res, dest_res ** 2 * 1])
            dot = tf.concat(tf.unstack(dot, axis=1), 2)
            dot = tf.concat(tf.unstack(dot, axis=1), 2)
            # Resize them to the target resolution
            dot = tf.image.resize(dot, [target_size, target_size], method='nearest')
            # Concatenate the outputs of all corr. layers
            if matching is None:
                matching = dot
            else:
                matching = tf.concat((matching, dot), -1)
            # Update last layer resolution
            last_layer_res *= src_res
        return matching, matched_coordinates, all_dots
def coord(self, image):
""" Creates tensor with same resolution as given image and with coordinates in cells """
ramp = (np.arange(image.shape[-2])).astype(np.int32)
x_ramp = np.tile(np.reshape(ramp, [1, 1, -1]), [1, image.shape[-2], 1])
y_ramp = np.tile(np.reshape(ramp, [1, -1, 1]), [1, 1, image.shape[-2]])
coord = tf.tile(tf.stack((x_ramp, y_ramp), -1), [tf.shape(image)[0], 1, 1, 1])
return tf.cast(coord, tf.float32)
    def encode(self, image, training):
        """
        Extract a multi-resolution feature pyramid for `image`.

        Runs the backbone feature extractor, optionally applies U-Net style
        upscaling layers with skip connections, selects a config-dependent
        subset of the resulting maps, and appends a per-pixel coordinate map
        (used later by the matching step for the auxiliary loss).

        Args:
            image: input image batch.
            training: forwarded to the backbone / upscale layers
                (controls e.g. batch-norm/dropout behavior).

        Returns:
            List of feature maps ordered low-resolution first, with the
            coordinate map from coord() appended as the last element.
        """
        # Extract features using the resnet50
        outputs = self.feature_extractor(image, training=training)
        if type(outputs) != list:
            outputs = [outputs]
        # Reverse list, so low dim features come first
        outputs = outputs[::-1]
        # Apply upscaling layers
        if self.config.get_bool("unet_style"):
            # Go over all upscaling layers
            new_outputs = [outputs[0]]
            x = outputs[0]
            for output, upscale_layer in zip(outputs[1:], self.upscale_layers):
                # Upsample by factor of 2
                x = tf.keras.layers.UpSampling2D(size=(2, 2))(x)
                # Skip connection
                x = tf.concat((x, output), axis=-1)
                # Apply resnet stack
                x = upscale_layer(x, training=training)
                new_outputs.append(x)
            # Define new output
            # Which levels are kept depends on how many attention iterations
            # and embedding output layers the config requests.
            if len(self.config.get_list("src_att_iters")) == len(self.config.get_list("emb_set_output_layer")):
                outputs = new_outputs[:]
            elif len(self.config.get_list("src_att_iters")) == 1:
                outputs = new_outputs[-1:]
            elif len(self.config.get_list("emb_set_output_layer")) < 5:
                outputs = new_outputs[:1] + new_outputs[2:]#3] + new_outputs[4:]
            else:
                outputs = new_outputs[:1] + new_outputs[2:3] + new_outputs[4:]
        else:
            # Without U-Net upscaling: drop the second level only.
            outputs = outputs[:1] + outputs[2:]
        # Add another output which has the same resolution as the input image, but contains the coordinates in each pixel
        # This is used in the matching step for computing the auxiliary loss
        coord = self.coord(image)
        outputs.append(coord)
        return outputs
    def call(self, reference_images, query_images, training, number_of_samples=1):
        """
        Forward pass: encode both images, match their features, and regress
        the relative camera pose from the matching tensor.

        Args:
            reference_images: batch of reference images.
            query_images: batch of query images.
            training: training-mode flag forwarded to the sub-networks.
            number_of_samples: if > 1 and dropout is enabled in the config,
                the matching tensor is tiled this many times so the decoder
                produces MC-dropout samples in one batched call.

        Returns:
            (cam_pose, matched_coordinates, all_dots, matching) — the pose
            regression output plus the intermediate matching artifacts.
        """
        # Extract features from first image
        first_features = self.encode(reference_images, training)
        # Extract features from second image
        second_features = self.encode(query_images, training)
        # Match the features
        matching, matched_coordinates, all_dots = self.match(first_features, second_features)
        # Scale up the feature matches
        matching = tf.image.resize(matching, [128, 128], 'nearest')
        # If MC dropout is activated, repeat these matches N times
        if number_of_samples > 1 and self.config.get_float("dropout") > 0:
            matching = tf.tile(matching[None], [number_of_samples, 1, 1, 1, 1])
            # Fold the sample axis into the batch axis for the decoder.
            matching = tf.reshape(matching, tf.concat(([-1], tf.shape(matching)[2:]), 0))
        # Regress the relative pose
        cam_pose = self.decoder_pose_estimator(matching, training=training)
        return cam_pose, matched_coordinates, all_dots, matching
    @tf.function
    def _predict_using_raw_data(self, reference_images, query_image, use_uncertainty, legacy_pose_transform=False):
        """
        Graph-compiled single-pair inference from raw (unnormalized) images.

        Preprocesses the inputs, runs the forward pass (with 100 MC-dropout
        samples when `use_uncertainty` is set), and post-processes the raw
        pose prediction into transformation matrices.

        Returns:
            ref_to_query_T: transformation matrices; when `use_uncertainty`
            is set, reshaped to [N_samples, batch, ...].
        """
        # Preprocess (normalize + resize) images
        reference_images = self.data.preprocess_model_input(reference_images)
        query_image = self.data.preprocess_model_input(query_image)
        # If uncertainty estimation is enabled, predict multiple samples (MC dropout)
        N = 100 if use_uncertainty else 1
        # Forward pass
        pred, _, _, _ = self.call(reference_images, query_image, False, number_of_samples=N)
        # If scale is predicted explicitly, scale the translational direction accordingly
        if self.config.get_bool("pred_scale_extra"):
            # First 3 values: translation direction (normalized, then scaled
            # by the explicitly predicted last value); remainder: rotation.
            pred = tf.concat((pred[:, :3] / tf.linalg.norm(pred[:, :3], axis=-1, keepdims=True) * pred[:, -1:], pred[:, 3:-1]), -1)
        # Post process prediction (convert to tmat)
        ref_to_query_T = self.data.postprocess_model_output(pred, legacy_pose_transform)
        # If inverse representation is predicted by model, inverse it
        if self.data.inverse_pose_representation:
            ref_to_query_T = TMatrix.inverse(ref_to_query_T, num_batch_dim=1)
        # Reshape, if multiple poses were predicted for unc. estimation
        if use_uncertainty:
            ref_to_query_T = tf.reshape(ref_to_query_T, tf.concat(([N, -1], tf.shape(ref_to_query_T)[1:]), 0))
        return ref_to_query_T
    def predict_using_raw_data(self, reference_images, query_image, use_uncertainty, legacy_pose_transform=False):
        """
        Predict the relative pose from each reference image to the query.

        For each reference image, runs `_predict_using_raw_data`. When
        `use_uncertainty` is set, MC-dropout samples are aggregated: the
        translation is averaged, the mean deviation from that average is used
        as the uncertainty score, and the rotation samples are combined via
        quaternion averaging.

        Returns:
            (ref_to_query, uncertainty) where ref_to_query is a list of
            dicts with keys "t", "R1", "R2" (R1 == R2 here), and uncertainty
            is a per-pair score or None when `use_uncertainty` is False.
        """
        # Regress relative pose for each reference query combination
        ref_to_query_T = []
        for i in range(len(reference_images)):
            ref_to_query_T.append(self._predict_using_raw_data(reference_images[i:i+1], query_image[None], use_uncertainty, legacy_pose_transform))
        # With uncertainty, axis 0 is the MC-sample axis, so pairs stack on axis 1.
        ref_to_query_T = tf.concat(ref_to_query_T, 1 if use_uncertainty else 0)
        # Optional: Estimate uncertainty based on mean deviation
        if use_uncertainty:
            ref_to_query_T = TMatrix.to_quaternion(ref_to_query_T, 2)
            # Compute mean camera pose
            mean_pose = tf.reduce_mean(ref_to_query_T[..., :3], 0)
            # Compute mean deviation from mean and use as unc.
            uncertainty = tf.reduce_mean(tf.linalg.norm(ref_to_query_T[..., :3] - mean_pose, axis=-1), 0)
            # Compute mean of relative rotation estimates
            mean_quats = []
            for i in range(ref_to_query_T.shape[1]):
                mean_quats.append(RMatrix.average_quaternions(ref_to_query_T[:, i, ..., 3:]))
            mean_quats = tf.cast(tf.stack(mean_quats, 0), tf.float32)
            # Rebuild relative transformation mat
            ref_to_query_T = TMatrix.from_quaternion(tf.concat((mean_pose, mean_quats), -1))
        # Form nice dicts for each relative pose estimation (here R1=R2)
        ref_to_query = []
        for i in range(len(ref_to_query_T)):
            ref_to_query.append({
                "t": np.stack([ref_to_query_T[i, 0, 3], ref_to_query_T[i, 1, 3], ref_to_query_T[i, 2, 3]], -1),
                "R1": np.array(ref_to_query_T[i, :3, :3]),
                "R2": np.array(ref_to_query_T[i, :3, :3])
            })
        if use_uncertainty:
            return ref_to_query, uncertainty
        else:
            return ref_to_query, None
| 48.674556 | 201 | 0.640348 |
e51cab5dff9565768dcd01cbb1267fc84714ff56 | 1,862 | py | Python | spark_auto_mapper_fhir/value_sets/act_relationship_join.py | imranq2/SparkAutoMapper.FHIR | dd23b218fb0097d1edc2f3e688e8d6d4d7278bd2 | [
"Apache-2.0"
] | 1 | 2020-10-31T23:25:07.000Z | 2020-10-31T23:25:07.000Z | spark_auto_mapper_fhir/value_sets/act_relationship_join.py | icanbwell/SparkAutoMapper.FHIR | 98f368e781b46523142c7cb513c670d659a93c9b | [
"Apache-2.0"
] | null | null | null | spark_auto_mapper_fhir/value_sets/act_relationship_join.py | icanbwell/SparkAutoMapper.FHIR | 98f368e781b46523142c7cb513c670d659a93c9b | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class ActRelationshipJoin(GenericTypeCode):
    """
    HL7 v3 ActRelationshipJoin code wrapper.

    Value set: http://terminology.hl7.org/ValueSet/v3-ActRelationshipJoin
    (from v3-codesystems.xml; the source value set ships no definitions).
    """

    def __init__(self, value: AutoMapperTextInputType):
        """Wrap a single code from the ActRelationshipJoin code system."""
        super().__init__(value=value)

    # URI of the backing code system:
    # http://terminology.hl7.org/CodeSystem/v3-ActRelationshipJoin
    codeset: FhirUri = "http://terminology.hl7.org/CodeSystem/v3-ActRelationshipJoin"
class ActRelationshipJoinValues:
    """
    Named constants for the v3 ActRelationshipJoin code system
    (http://terminology.hl7.org/CodeSystem/v3-ActRelationshipJoin,
    from v3-codesystems.xml).
    """

    # "D": Detach this branch from the other branches so it will not be
    # resynchronized with the other branches.
    Detached = ActRelationshipJoin("D")
    # "K": When all other concurrent branches are terminated, interrupt and
    # discontinue this branch.
    Kill = ActRelationshipJoin("K")
    # "W": Wait for this branch to terminate.
    Wait = ActRelationshipJoin("W")
    # "X": Wait for any one of the branches in the set of exclusive wait
    # branches to terminate, then discontinue all the other exclusive wait
    # branches.
    ExclusiveWait = ActRelationshipJoin("X")
d0231829dc535aea23440a7107d6b464e58eb7db | 8,475 | py | Python | modules/py/pkgs/CircuitPrinter/circuit_printer.py | ICHEC/QNLP | 2966c7f71e6979c7ddef62520c3749cf6473fabe | [
"Apache-2.0"
] | 29 | 2020-04-13T04:40:35.000Z | 2021-12-17T11:21:35.000Z | modules/py/pkgs/CircuitPrinter/circuit_printer.py | ICHEC/QNLP | 2966c7f71e6979c7ddef62520c3749cf6473fabe | [
"Apache-2.0"
] | 6 | 2020-03-12T17:40:00.000Z | 2021-01-20T12:15:08.000Z | modules/py/pkgs/CircuitPrinter/circuit_printer.py | ICHEC/QNLP | 2966c7f71e6979c7ddef62520c3749cf6473fabe | [
"Apache-2.0"
] | 9 | 2020-09-28T05:00:30.000Z | 2022-03-04T02:11:49.000Z | class CircuitPrinter:
"""
The CircuitPrinter class creates a quantum circuit .tex file for viewing the circuit output.
Assumes the availability of the quantikz LaTeX package when compiling. Preferably use lualatex
to ensure sufficient memory access.
"""
def __init__(self, num_qubits):
self.num_qubits = num_qubits
self.ccts = []
def ncu_cct_depth(self, num_ctrl):
"""
Calculate the required depth for the circuit using the implemented nCU decomposition
"""
if num_ctrl > 2:
return 2 + 3*self.ncu_cct_depth(num_ctrl-1)
else:
return 5
def ctrl_line_column(self, gate_name, ctrl, tgt, column_length):
"""
Creates a columnar slice of the circuit with the given gate name, control and target lines.
The number of qubits specifies the length of the column.
"""
column = [r"\qw & "]*column_length
column[tgt] = r"\gate{{{}}} & ".format(gate_name)
column[ctrl] = r"\ctrl{{{}}} & ".format(tgt - ctrl)
return column
def single_qubit_line_column(self, gate_name, index, column_length):
"""
Creates a columnar slice of the circuit with the given gate name, on index
The number of qubits specifies the length of the column.
"""
column = ["\qw & "]*column_length
column[index] = r"\gate{{{}}} & ".format(gate_name)
return column
def slice_line_column(self, slice_name, column_length):
"""
Creates a columnar slice of the circuit with a dashed line denoting the marked section
"""
column = ["\qw & "]*column_length
if "\\" in slice_name:
slice_name = "${}$".format(slice_name)
column[0] = r"\qw\slice{{{}}} & ".format(slice_name)
return column
def load_data_csv(self, csv_file):
"""
Loads the data from a CSV file. Assumes the following layout:
gate_name, control_qubit_number, target_qubit_number, gate_matrix_values
"""
import csv
cct = []
with open(csv_file, 'r') as csvfile:
filereader = csv.reader(csvfile, delimiter=',')
data = list(filereader)
cct_local = []
mergeable_row = 0
for idx,row in enumerate(data[1:]):
#Check for break in circuit runs, empty data, and single run datasets (ie currently empty output)
prev_was_ctrl = False
if (row != "\n") and (row != []) and (row != data[-1]):
if row[0][0:4] == "#!#{":
slice_label = row[0].rsplit("}")[0].rsplit("{")[-1]
slice_col = self.slice_line_column(slice_label, self.num_qubits)
cct_local.append(slice_col)
elif int(row[1]) <= 2**32:
prev_was_ctrl = True
cct_local.append(self.ctrl_line_column(row[0], int(row[1]), int(row[2]), self.num_qubits) )
else:
#Single qubit value; max value here indicates that the output value for the ctrl line was 2^64, hence non-existent
curr_col = self.single_qubit_line_column(row[0], int(row[2]), self.num_qubits)
#Attempt to merge non interfering gates to the same column to reduce visual circuit size
'''if (len(cct_local) > 0) and (prev_was_ctrl == False):
prev_col = cct_local[-1]
icurr = [idx for (idx,val) in enumerate(curr_col) if val != "\\qw & "]
iprev = [idx for (idx,val) in enumerate(prev_col) if val != "\\qw & "]
if icurr[0] != iprev[0] and len(icurr) == 1 and len(iprev) == 1:
curr_row[iprev[0]] = prev_row[iprev[0]]
del cct_local[-1]
'''
cct_local.append(curr_col)
prev_was_ctrl == False
else:
cct.append(cct_local)
cct_local = []
self.ccts = cct
return cct
def row_merge():
"""
WIP: Merges rows between independent qubit operations to reduce printed circuit size
e.g. [I, I, I, X], [I, Z, I, I] -> [I, Z, X, I]
"""
data = self.ccts[0]
for i in range(1,len(data)):
prev_mod_vals = [(idx,val) for (idx,val) in enumerate(data[i-1]) if val != "\\qw & "]
curr_mod_vals = [(idx,val) for (idx,val) in enumerate(data[i]) if val != "\\qw & "]
for j in prev_mod_vals:
prev_has_ctrl = []
if "\\ctrl" in j[1]:
# Terrible, but it will do for now
range_len_prev = int(j[1].rsplit("}")[0].rsplit("{")[1])
prev_has_ctrl = [(j[0], j[0] + range_len_prev)]
for k in curr_mod_vals:
curr_has_ctrl = []
if "\\ctrl" in k[1]:
# Still terrible, but it will do for now
range_len_curr = int(k[1].rsplit("}")[0].rsplit("{")[1])
curr_has_ctrl = [(k[0], k[0] + range_len_curr)]
# Continue this later... seems incredibly inefficient
def latex_cct(self, data_file, file_name="cct", max_depth=16):
"""
LaTeX file outputter for quantum circuit generation.
"""
cct_array = self.load_data_csv(data_file)
num_cct = len(cct_array)
depth = len(cct_array[0])
for cct_idx in range(num_cct):
with open(file_name + "_" + str(cct_idx) + ".tex", "w") as f:
f.write("\\documentclass{article} \n \\usepackage{amsmath} \\usepackage{adjustbox} \\usepackage{tikz} \\usetikzlibrary{quantikz} \\usepackage[margin=0.5cm]{geometry} \n \\begin{document} \centering\n")
# Split the circuit on a given depth boundary
# Due to issues with latex variables ending with numeric indices, appending letters to the temporary savebox commands allows us to generate multiple of these.
# As depth is an issue with circuits, we currently expect the output not to exceed n-choose-k of 52-C-4 = 270725 combinations
# This will suffice until later.
import string, itertools
s_list = [i for i in string.ascii_letters]
box_str = r"\Box"
label_iterator = itertools.combinations(s_list,4)
box_labels = []
for i in range(0, depth, max_depth):
local_label = box_str + "".join( next(label_iterator))
box_labels.append(local_label)
f.write(r"\newsavebox{{{}}}".format(local_label))
f.write(r"\savebox{{{}}}{{".format(local_label))
f.write("\\begin{quantikz}[row sep={0.5cm,between origins}, column sep={0.75cm,between origins}, slice label style={inner sep=1pt,anchor=south west,rotate=40}]")
#Transposes the data so that q rows of length n exist, rather than n cols of length q
if(i + max_depth < depth):
cct_list_qubit= list(map(list, zip(*cct_array[cct_idx][i:i+max_depth])))
else:
cct_list_qubit= list(map(list, zip(*cct_array[cct_idx][i:])))
for q in range(len(cct_list_qubit)):
out_str = "\\qw & ".join(cct_list_qubit[q]) + " \\qw "
if(q != len(cct_list_qubit)-1):
out_str += " \\\\ "
f.write(out_str)
f.write(" \\end{quantikz}\n}\n")
f.write("\n")
for idx,l in enumerate(box_labels):
f.write(r"\usebox{{{}}} \\".format(l))
f.write("\n \\vspace{2em}")
f.write(r"\end{document}")
if __name__== "__main__":
import os
args = os.sys.argv
if len(args) < 4:
print("Please specify the file to load and number of qubits in sim, and output filename to save: python cct.py <CSV> <>")
exit()
CCT = CircuitPrinter(num_qubits=int(args[3]))
CCT.latex_cct(args[1], args[2], max_depth=int(args[4]))
| 46.823204 | 217 | 0.531091 |
e26013727c163478b0263c7ebc6e9cb8dcb3052e | 8,805 | py | Python | user_accounts/tests/test_auth_integration.py | enterstudio/intake | 793a8935914fdc8356321ec46e54d9ae1eeeee04 | [
"MIT"
] | 1 | 2019-05-24T14:07:18.000Z | 2019-05-24T14:07:18.000Z | user_accounts/tests/test_auth_integration.py | enterstudio/intake | 793a8935914fdc8356321ec46e54d9ae1eeeee04 | [
"MIT"
] | null | null | null | user_accounts/tests/test_auth_integration.py | enterstudio/intake | 793a8935914fdc8356321ec46e54d9ae1eeeee04 | [
"MIT"
] | null | null | null | from django.core import mail
from django.core.urlresolvers import reverse
from django.utils import html as html_utils
from django.contrib.auth.models import User, Group
from user_accounts.forms import InviteForm
from user_accounts.models import (
Invitation,
get_user_display
)
from user_accounts.tests.base_testcases import AuthIntegrationTestCase
class TestUserAccounts(AuthIntegrationTestCase):
    """
    End-to-end tests of the invite/signup/login/password-reset flows.

    Relies on helpers from AuthIntegrationTestCase (e.g. be_superuser,
    be_anonymous, fill_form, get_link_from_email, assertLoggedInAs) and on
    the fixture data below for organizations, groups and profiles.
    """

    fixtures = ['counties', 'organizations', 'groups', 'mock_profiles']

    def test_invite_form_has_the_right_fields(self):
        """The rendered invite form lists every organization and group."""
        form = InviteForm()
        form_html = form.as_p()
        for org in self.orgs:
            self.assertIn(
                html_utils.escape(org.name),
                form_html)
        for group in Group.objects.all():
            self.assertIn(
                html_utils.escape(group.name),
                form_html)

    def test_invite_form_saves_correctly(self):
        """A valid invite form creates the matching Invitation record."""
        form = InviteForm(dict(
            email="someone@example.com",
            organization=self.orgs[0].id,
            groups=[self.groups[0].id],
            should_get_notifications=True
        ))
        self.assertTrue(form.is_valid())
        invite = form.save()
        qset = Invitation.objects.filter(
            email="someone@example.com",
            organization=self.orgs[0]
        )
        self.assertEqual(qset.first(), invite)

    def test_uninvited_signups_are_redirected_to_home(self):
        """Anonymous visitors cannot reach the signup page directly."""
        self.be_anonymous()
        # try to go to signup page
        response = self.client.get(
            reverse(self.signup_view)
        )
        # get redirected to splash page
        self.assertRedirects(response, reverse('intake-home'))

    def test_superuser_can_invite_people(self):
        """Submitting the invite form as superuser sends the invite email."""
        self.be_superuser()
        self.client.fill_form(
            reverse(self.send_invite_view),
            email=self.example_user['email'],
            organization=self.orgs[0].id,
            groups=[self.groups[0].id],
            should_get_notifications=True,
            follow=True,
        )
        last_email = mail.outbox[-1]
        self.assertEqual(self.example_user['email'], last_email.to[0])
        self.assertIn(
            "You've been invited to create an account on Clear My Record",
            last_email.body)

    def test_invited_person_can_signup(self):
        """Full invite flow: email link -> signup form -> authenticated user."""
        self.be_superuser()
        response = self.client.fill_form(
            reverse(self.send_invite_view),
            email=self.example_user['email'],
            organization=self.orgs[0].id,
            groups=[self.groups[0].id],
            should_get_notifications=True,
            follow=True,
        )
        # be anonymous
        self.be_anonymous()
        last_email = mail.outbox[-1]
        # click on link
        link = self.get_link_from_email(last_email)
        response = self.client.get(link)
        # should go to /accounts/signup/
        self.assertRedirects(response, reverse(self.signup_view))
        response = self.client.fill_form(response.url,
                                         email=self.example_user['email'],
                                         password1=self.example_user[
                                             'password']
                                         )
        self.assertRedirects(response, reverse("user_accounts-profile"))
        # make sure the user exists and that they are authenticated
        users = User.objects.filter(email=self.example_user['email'])
        self.assertEqual(len(users), 1)
        self.assertTrue(users[0].is_authenticated)
        self.assertIn(self.example_user['email'], get_user_display(users[0]))
        self.assertIn(self.groups[0], users[0].groups.all())

    def test_user_can_add_info_in_profile_view(self):
        """Saving the profile form updates the user's displayed name."""
        user = self.be_sfpubdef_user()
        # find link to profile
        response = self.client.get(reverse("user_accounts-profile"))
        self.assertContains(response,
                            html_utils.escape(user.profile.name))
        result = self.client.fill_form(
            reverse("user_accounts-profile"),
            name=self.example_user['name'],
            follow=True
        )
        self.assertContains(result, self.example_user['name'])
        users = User.objects.filter(profile__name=self.example_user['name'])
        self.assertEqual(len(users), 1)

    def test_failed_login_gets_reasonable_error_message(self):
        """A bad password yields a friendly error and a reset-password link."""
        self.be_anonymous()
        user = User.objects.first()
        expected_error_message = str(
            "Sorry, that email and password do not work together")
        response = self.client.fill_form(
            reverse(self.login_view),
            login=user.email,
            password='incorrect'
        )
        # should be storing the login email for the reset page
        session = response.wsgi_request.session
        self.assertEqual(
            session['failed_login_email'],
            user.email)
        form = response.context_data['form']
        self.assertIn(expected_error_message,
                      form.errors['__all__'])
        self.assertContains(
            response,
            reverse(self.reset_password_view)
        )

    def test_can_reset_password_from_login_page(self):
        """Full reset flow from the login page: email link -> new password."""
        self.be_anonymous()
        user = User.objects.first()
        # forget password
        wrong_password = self.client.fill_form(
            reverse(self.login_view),
            login=user.email,
            password='forgot'
        )
        # find a link to reset password
        self.assertContains(wrong_password,
                            reverse(self.reset_password_view))
        # hit "reset password"
        reset = self.client.get(
            reverse(self.reset_password_view))
        self.assertContains(reset, user.email)
        # enter email to request password reset
        self.client.fill_form(
            reverse(self.reset_password_view),
            email=user.email,
        )
        # get an email to reset password
        reset_email = mail.outbox[-1]
        self.assertEqual(
            'Password Reset for Clear My Record',
            reset_email.subject
        )
        # follow the link in the email
        reset_link = self.get_link_from_email(reset_email)
        reset_page = self.client.get(reset_link)
        # make sure it shows who it thinks we are
        self.assertContains(reset_page, user.email)
        # enter a new password
        csrf = self.client.get_csrf_token(reset_page)
        new_password = "FR35H H0T s3cr3tZ!1"
        reset_done = self.client.fill_form(
            reset_link, csrf_token=csrf,
            password=new_password)
        # we should be redirected to the profile
        self.assertRedirects(reset_done,
                             reverse("user_accounts-profile"))
        # make sure we are logged in
        self.assertLoggedInAs(user)
        # make sure we can login with the new password
        self.client.logout()
        self.client.login(
            email=user.email,
            password=new_password
        )
        self.assertLoggedInAs(user)

    def test_can_reset_password_while_logged_in(self):
        """A logged-in user can change their password via the profile page."""
        user = self.be_sfpubdef_user()
        # go to profile
        profile = self.client.get(
            reverse("user_accounts-profile"))
        # make sure there's a link to change password
        self.assertContains(profile,
                            reverse(self.change_password_view))
        change_password = self.client.get(
            reverse(self.change_password_view))
        # make sure the change password page
        # knows who we are
        self.assertContains(change_password,
                            user.email)
        # set a new password
        new_password = "FR35H H0T s3cr3tZ!1"
        reset_done = self.client.fill_form(
            reverse(self.change_password_view),
            password=new_password
        )
        self.assertRedirects(reset_done,
                             reverse("user_accounts-profile"))
        # make sure we are logged in
        self.assertLoggedInAs(user)
        # make sure we can login with the new password
        self.client.logout()
        self.client.login(
            email=user.email,
            password=new_password
        )
        self.assertLoggedInAs(user)

    def test_only_staff_users_cant_invite_people(self):
        """Non-staff roles are redirected away from the invite view;
        staff (CFA) users get the page. NOTE(review): the method name reads
        backwards — it verifies that only staff CAN invite."""
        self.be_apubdef_user()
        response = self.client.get(reverse(self.send_invite_view))
        self.assertEqual(response.status_code, 302)
        self.be_monitor_user()
        response = self.client.get(reverse(self.send_invite_view))
        self.assertEqual(response.status_code, 302)
        self.be_cfa_user()
        response = self.client.get(reverse(self.send_invite_view))
        self.assertEqual(response.status_code, 200)
| 37.7897 | 77 | 0.60795 |
a0aa3c0e618f9a4370a44e048b34eef2cf053c66 | 7,256 | py | Python | examples/pytorch/sign/sign.py | ketyi/dgl | a1b859c29b63a673c148d13231a49504740e0e01 | [
"Apache-2.0"
] | 9,516 | 2018-12-08T22:11:31.000Z | 2022-03-31T13:04:33.000Z | examples/pytorch/sign/sign.py | ketyi/dgl | a1b859c29b63a673c148d13231a49504740e0e01 | [
"Apache-2.0"
] | 2,494 | 2018-12-08T22:43:00.000Z | 2022-03-31T21:16:27.000Z | examples/pytorch/sign/sign.py | ketyi/dgl | a1b859c29b63a673c148d13231a49504740e0e01 | [
"Apache-2.0"
] | 2,529 | 2018-12-08T22:56:14.000Z | 2022-03-31T13:07:41.000Z | import argparse
import os
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
import dgl.function as fn
from dataset import load_dataset
class FeedForwardNet(nn.Module):
def __init__(self, in_feats, hidden, out_feats, n_layers, dropout):
super(FeedForwardNet, self).__init__()
self.layers = nn.ModuleList()
self.n_layers = n_layers
if n_layers == 1:
self.layers.append(nn.Linear(in_feats, out_feats))
else:
self.layers.append(nn.Linear(in_feats, hidden))
for i in range(n_layers - 2):
self.layers.append(nn.Linear(hidden, hidden))
self.layers.append(nn.Linear(hidden, out_feats))
if self.n_layers > 1:
self.prelu = nn.PReLU()
self.dropout = nn.Dropout(dropout)
self.reset_parameters()
def reset_parameters(self):
gain = nn.init.calculate_gain("relu")
for layer in self.layers:
nn.init.xavier_uniform_(layer.weight, gain=gain)
nn.init.zeros_(layer.bias)
def forward(self, x):
for layer_id, layer in enumerate(self.layers):
x = layer(x)
if layer_id < self.n_layers - 1:
x = self.dropout(self.prelu(x))
return x
class Model(nn.Module):
    """
    SIGN model: one feed-forward branch per hop (0..R), whose outputs are
    concatenated and projected to the class logits by a final MLP.
    """

    def __init__(self, in_feats, hidden, out_feats, R, n_layers, dropout):
        super(Model, self).__init__()
        self.dropout = nn.Dropout(dropout)
        self.prelu = nn.PReLU()
        # One inception branch per hop; every branch maps to `hidden` dims.
        self.inception_ffs = nn.ModuleList()
        for _ in range(R + 1):
            self.inception_ffs.append(
                FeedForwardNet(in_feats, hidden, hidden, n_layers, dropout))
        # Projection MLP over the concatenation of all branch outputs.
        self.project = FeedForwardNet((R + 1) * hidden, hidden, out_feats,
                                      n_layers, dropout)

    def forward(self, feats):
        """`feats` is a list of per-hop feature tensors, one per branch."""
        branch_outs = [ff(feat) for feat, ff in zip(feats, self.inception_ffs)]
        merged = torch.cat(branch_outs, dim=-1)
        return self.project(self.dropout(self.prelu(merged)))
def calc_weight(g):
    """
    Compute row_normalized(D^(-1/2)AD^(-1/2))

    Returns a per-edge weight tensor for the DGL graph `g`, computed by
    symmetric-normalizing with the in/out degrees and then row-normalizing
    via message passing. The graph's own data is untouched thanks to
    `local_scope`.
    """
    with g.local_scope():
        # compute D^(-0.5)*D(-1/2), assuming A is Identity
        g.ndata["in_deg"] = g.in_degrees().float().pow(-0.5)
        g.ndata["out_deg"] = g.out_degrees().float().pow(-0.5)
        # Edge weight = out_deg(src) * in_deg(dst).
        g.apply_edges(fn.u_mul_v("out_deg", "in_deg", "weight"))
        # row-normalize weight: sum incoming weights per node, then divide.
        g.update_all(fn.copy_e("weight", "msg"), fn.sum("msg", "norm"))
        g.apply_edges(fn.e_div_v("weight", "norm", "weight"))
        return g.edata["weight"]
def preprocess(g, features, args):
    """
    Pre-compute the average of n-th hop neighbors

    Propagates `features` through `g` for args.R hops using the normalized
    edge weights from calc_weight(), and returns a list of R+1 tensors:
    [hop-0 (raw) features, hop-1 aggregate, ..., hop-R aggregate].
    Note: `g.ndata` keys "feat_0".."feat_R" and `g.edata["weight"]` are
    written during the computation (the feat_* keys are popped afterwards).
    """
    with torch.no_grad():
        g.edata["weight"] = calc_weight(g)
        g.ndata["feat_0"] = features
        # Each hop aggregates the weighted features of the previous hop.
        for hop in range(1, args.R + 1):
            g.update_all(fn.u_mul_e(f"feat_{hop-1}", "weight", "msg"),
                         fn.sum("msg", f"feat_{hop}"))
        res = []
        for hop in range(args.R + 1):
            res.append(g.ndata.pop(f"feat_{hop}"))
        return res
def prepare_data(device, args):
    """
    Load the dataset, precompute per-hop features and slice out the
    training subset.

    Returns (feats, labels, train_feats, train_labels, in_feats, n_classes,
    train_nid, val_nid, test_nid), with the graph data and split indices
    moved onto `device`.
    """
    graph, n_classes, train_nid, val_nid, test_nid = load_dataset(args.dataset)
    graph = graph.to(device)
    node_feat = graph.ndata['feat']
    in_feats = node_feat.shape[1]
    feats = preprocess(graph, node_feat, args)
    labels = graph.ndata['label']
    # Move the split index tensors onto the compute device as well.
    train_nid = train_nid.to(device)
    val_nid = val_nid.to(device)
    test_nid = test_nid.to(device)
    # Pre-slice the training rows of every hop's feature tensor.
    train_feats = [hop_feat[train_nid] for hop_feat in feats]
    train_labels = labels[train_nid]
    return (feats, labels, train_feats, train_labels, in_feats,
            n_classes, train_nid, val_nid, test_nid)
def evaluate(epoch, args, model, feats, labels, train, val, test):
    """
    Compute train/val/test accuracy of `model` on the precomputed hop
    features.

    `args.eval_batch_size <= 0` runs a single full-batch forward pass;
    otherwise nodes are processed in slices of that size to bound memory.
    `epoch` is unused (kept for call-site compatibility).
    """
    with torch.no_grad():
        batch_size = args.eval_batch_size
        if batch_size <= 0:
            # Full-batch inference.
            logits = model(feats)
        else:
            # Mini-batched inference over contiguous node slices.
            num_nodes = labels.shape[0]
            chunks = []
            for start in range(0, num_nodes, batch_size):
                stop = min(start + batch_size, num_nodes)
                chunks.append(model([feat[start:stop] for feat in feats]))
            logits = torch.cat(chunks)
        predicted = torch.argmax(logits, dim=1)
        hits = (predicted == labels).float()
        train_acc = hits[train].sum() / len(train)
        val_acc = hits[val].sum() / len(val)
        test_acc = hits[test].sum() / len(test)
        return train_acc, val_acc, test_acc
def main(args):
    """
    Train the SIGN model and report the best validation/test accuracy.

    Trains full-batch for args.num_epochs epochs with Adam + cross-entropy,
    evaluating every args.eval_every epochs; model selection is by best
    validation accuracy.
    """
    # Pick the compute device from the CLI flag (negative GPU id => CPU).
    if args.gpu < 0:
        device = "cpu"
    else:
        device = "cuda:{}".format(args.gpu)
    data = prepare_data(device, args)
    feats, labels, train_feats, train_labels, in_size, num_classes, \
        train_nid, val_nid, test_nid = data
    model = Model(in_size, args.num_hidden, num_classes, args.R, args.ff_layer,
                  args.dropout)
    model = model.to(device)
    loss_fcn = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr,
                                 weight_decay=args.weight_decay)
    # Track the checkpoint with the best validation accuracy.
    best_epoch = 0
    best_val = 0
    best_test = 0
    for epoch in range(1, args.num_epochs + 1):
        start = time.time()
        model.train()
        # Full-batch training step on the pre-sliced training features.
        loss = loss_fcn(model(train_feats), train_labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if epoch % args.eval_every == 0:
            model.eval()
            acc = evaluate(epoch, args, model, feats, labels,
                           train_nid, val_nid, test_nid)
            end = time.time()
            log = "Epoch {}, Times(s): {:.4f}".format(epoch, end - start)
            log += ", Accuracy: Train {:.4f}, Val {:.4f}, Test {:.4f}" \
                .format(*acc)
            print(log)
            # acc = (train, val, test); select on validation accuracy.
            if acc[1] > best_val:
                best_val = acc[1]
                best_epoch = epoch
                best_test = acc[2]
    print("Best Epoch {}, Val {:.4f}, Test {:.4f}".format(
        best_epoch, best_val, best_test))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="SIGN")
parser.add_argument("--num-epochs", type=int, default=1000)
parser.add_argument("--num-hidden", type=int, default=256)
parser.add_argument("--R", type=int, default=3,
help="number of hops")
parser.add_argument("--lr", type=float, default=0.003)
parser.add_argument("--dataset", type=str, default="amazon")
parser.add_argument("--dropout", type=float, default=0.5)
parser.add_argument("--gpu", type=int, default=0)
parser.add_argument("--weight-decay", type=float, default=0)
parser.add_argument("--eval-every", type=int, default=50)
parser.add_argument("--eval-batch-size", type=int, default=250000,
help="evaluation batch size, -1 for full batch")
parser.add_argument("--ff-layer", type=int, default=2,
help="number of feed-forward layers")
args = parser.parse_args()
print(args)
main(args)
| 35.743842 | 79 | 0.583517 |
5333e535975537eb844c8754d8768b494d04ad11 | 12,300 | py | Python | ssd300_evaluate.py | konny0311/ssd_precision_comparison | f564995fddb440c5fc732bd448c0ceae78ba17f7 | [
"Apache-2.0"
] | null | null | null | ssd300_evaluate.py | konny0311/ssd_precision_comparison | f564995fddb440c5fc732bd448c0ceae78ba17f7 | [
"Apache-2.0"
] | 13 | 2020-01-28T22:49:28.000Z | 2022-02-10T00:20:24.000Z | ssd300_evaluate.py | konny0311/ssd_precision_comparison | f564995fddb440c5fc732bd448c0ceae78ba17f7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# # SSD Evaluation Tutorial
#
# This is a brief tutorial that explains how compute the average precisions for any trained SSD model using the `Evaluator` class. The `Evaluator` computes the average precisions according to the Pascal VOC pre-2010 or post-2010 detection evaluation algorithms. You can find details about these computation methods [here](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/htmldoc/devkit_doc.html#sec:ap).
#
# As an example we'll evaluate an SSD300 on the Pascal VOC 2007 `test` dataset, but note that the `Evaluator` works for any SSD model and any dataset that is compatible with the `DataGenerator`. If you would like to run the evaluation on a different model and/or dataset, the procedure is analogous to what is shown below, you just have to build the appropriate model and load the relevant dataset.
#
# Note that in case you would like to evaluate a model on MS COCO, I would recommend following the [MS COCO evaluation notebook](https://github.com/pierluigiferrari/ssd_keras/blob/master/ssd300_evaluation_COCO.ipynb) instead, because it can produce the results format required by the MS COCO evaluation server and uses the official MS COCO evaluation code, which computes the mAP slightly differently from the Pascal VOC method.
#
# Note: In case you want to evaluate any of the provided trained models, make sure that you build the respective model with the correct set of scaling factors to reproduce the official results. The models that were trained on MS COCO and fine-tuned on Pascal VOC require the MS COCO scaling factors, not the Pascal VOC scaling factors.
from keras import backend as K
from keras.models import load_model
from keras.optimizers import Adam
from scipy.misc import imread
import numpy as np
from models.keras_ssd300 import ssd_300
from keras_loss_function.keras_ssd_loss import SSDLoss
from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes
from keras_layers.keras_layer_DecodeDetections import DecodeDetections
from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast
from keras_layers.keras_layer_L2Normalization import L2Normalization
from data_generator.object_detection_2d_data_generator import DataGenerator
from eval_utils.average_precision_evaluator import Evaluator
# Set a few configuration parameters.
img_height = 300
img_width = 300
model_mode = 'training' #set 'training' if your model was created by your own training
classes = ['background',
'bus', 'car', 'cat',
'horse', 'motorbike']
"""
classes = ['background',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'diningtable', 'dog',
'horse', 'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor']
include_classes = [0,6,7,8,13,14]
"""
n_classes = 5
# ## 1. Load a trained SSD
#
# Either load a trained model or build a model and load trained weights into it. Since the HDF5 files I'm providing contain only the weights for the various SSD versions, not the complete models, you'll have to go with the latter option when using this implementation for the first time. You can then of course save the model and next time load the full model directly, without having to build it.
#
# You can find the download links to all the trained model weights in the README.
# ### 1.1. Build the model and load trained weights into it
# In[4]:
# 1: Build the Keras model
K.clear_session() # Clear previous models from memory.
# NOTE: every anchor-box/decoding hyper-parameter below must match the values
# used at training time, otherwise the decoded detections will be wrong.
model = ssd_300(image_size=(img_height, img_width, 3),
                n_classes=n_classes,
                mode=model_mode,
                l2_regularization=0.0005,
                scales=[0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05], # The scales for MS COCO [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05]
                aspect_ratios_per_layer=[[1.0, 2.0, 0.5],
                                         [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                                         [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                                         [1.0, 2.0, 0.5, 3.0, 1.0/3.0],
                                         [1.0, 2.0, 0.5],
                                         [1.0, 2.0, 0.5]],
                two_boxes_for_ar1=True,
                steps=[8, 16, 32, 64, 100, 300],
                offsets=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
                clip_boxes=False,
                variances=[0.1, 0.1, 0.2, 0.2],
                normalize_coords=True,
                subtract_mean=[123, 117, 104],  # per-channel mean subtraction (BGR order after swap below)
                swap_channels=[2, 1, 0],
                confidence_thresh=0.01,
                iou_threshold=0.45,
                top_k=200,
                nms_max_output_size=400)
# 2: Load the trained weights into the model.
# ### 1.2. Load a trained model
#
# The evaluator expects a model that was built in the same mode as the `model_mode` set above (here 'training'). If you load a model that was built in 'inference' mode instead, change the `model_mode` parameter accordingly.
# TODO: Set the path to the `.h5` file of the model to be loaded.
model_path = 'checkpoints_classlimitted/ssd300_pascal_07+12_1000_epoch-81_loss-5.6091_val_loss-6.1711.h5'
# We need to create an SSDLoss object in order to pass that to the model loader.
ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
# NOTE(review): this clear_session() discards the ssd_300 model built above,
# so that earlier build appears to be dead work -- confirm before removing it.
K.clear_session() # Clear previous models from memory.
# Custom layers must be registered so Keras can deserialize the saved model.
# (DecodeDetectionsFast is imported above but not registered here -- presumably
# this checkpoint does not contain that layer; verify if loading fails.)
model = load_model(model_path, custom_objects={'AnchorBoxes': AnchorBoxes,
                                               'L2Normalization': L2Normalization,
                                               'DecodeDetections': DecodeDetections,
                                               'compute_loss': ssd_loss.compute_loss})
# ## 2. Create a data generator for the evaluation dataset
#
# Instantiate a `DataGenerator` that will serve the evaluation dataset during the prediction phase.
# In[5]:
dataset = DataGenerator()
# TODO: Set the paths to the dataset here.
# NOTE: absolute paths are machine-specific; adjust for your environment.
Pascal_VOC_dataset_images_dir = '/home/ubuntu/data/VOCdevkit/VOC2007/JPEGImages/'
Pascal_VOC_dataset_annotations_dir = '/home/ubuntu/data/VOCdevkit/VOC2007/Annotations/'
Pascal_VOC_dataset_image_set_filename = '/home/ubuntu/data/VOCdevkit/VOC2007/ImageSets/Main/test.txt'
# The XML parser needs to know what object class names to look for and in which order to map them to integers.
# NOTE(review): include_classes='all' is used together with the reduced
# 6-name `classes` list -- confirm the annotations only contain these classes.
dataset.parse_xml(images_dirs=[Pascal_VOC_dataset_images_dir],
                  image_set_filenames=[Pascal_VOC_dataset_image_set_filename],
                  annotations_dirs=[Pascal_VOC_dataset_annotations_dir],
                  classes=classes,
                  include_classes='all',
                  exclude_truncated=False,
                  exclude_difficult=False,
                  ret=False)
# ## 3. Run the evaluation
#
# Now that we have instantiated a model and a data generator to serve the dataset, we can set up the evaluator and run the evaluation.
#
# The evaluator is quite flexible: It can compute the average precisions according to the Pascal VOC pre-2010 algorithm, which samples 11 equidistant points of the precision-recall curves, or according to the Pascal VOC post-2010 algorithm, which integrates numerically over the entire precision-recall curves instead of sampling a few individual points. You could also change the number of sampled recall points or the required IoU overlap for a prediction to be considered a true positive, among other things. Check out the `Evaluator`'s documentation for details on all the arguments.
#
# In its default settings, the evaluator's algorithm is identical to the official Pascal VOC pre-2010 Matlab detection evaluation algorithm, so you don't really need to tweak anything unless you want to.
#
# The evaluator roughly performs the following steps: It runs predictions over the entire given dataset, then it matches these predictions to the ground truth boxes, then it computes the precision-recall curves for each class, then it samples 11 equidistant points from these precision-recall curves to compute the average precision for each class, and finally it computes the mean average precision over all classes.
# In[6]:
# Run the full evaluation in Pascal VOC pre-2010 style:
# 'sample' AP mode with 11 equidistant recall points.
evaluator = Evaluator(model=model,
                      n_classes=n_classes,
                      data_generator=dataset,
                      model_mode=model_mode)
results = evaluator(img_height=img_height,
                    img_width=img_width,
                    batch_size=4, # keep the batch size small because some classes have only a few images
                    data_generator_mode='resize',
                    round_confidences=False,
                    matching_iou_threshold=0.5,
                    border_pixels='include',
                    sorting_algorithm='quicksort',
                    average_precision_mode='sample',
                    num_recall_points=11,
                    ignore_neutral_boxes=True,
                    return_precisions=True,
                    return_recalls=True,
                    return_average_precisions=True,
                    verbose=True)
# Unpack in the order documented by Evaluator.__call__.
mean_average_precision, average_precisions, precisions, recalls = results
# ## 4. Visualize the results
#
# Let's take a look:
# In[10]:
# Print a per-class AP table; index 0 (background) is skipped.
for i in range(1, len(average_precisions)):
    print("{:<14}{:<6}{}".format(classes[i], 'AP', round(average_precisions[i], 3)))
print()
print("{:<14}{:<6}{}".format('','mAP', round(mean_average_precision, 3)))
# In[11]:
# NOTE(review): `m` and `n` appear to be subplot-grid dimensions left over
# from a plotting cell that is not present here -- confirm before removing.
m = max((n_classes + 1) // 2, 2)
n = 2
# ## 5. Advanced use
#
# `Evaluator` objects maintain copies of all relevant intermediate results like predictions, precisions and recalls, etc., so in case you want to experiment with different parameters, e.g. different IoU overlaps, there is no need to compute the predictions all over again every time you make a change to a parameter. Instead, you can only update the computation from the point that is affected onwards.
#
# The evaluator's `__call__()` method is just a convenience wrapper that executes its other methods in the correct order. You could just call any of these other methods individually as shown below (but you have to make sure to call them in the correct order).
#
# Note that the example below uses the same evaluator object as above. Say you wanted to compute the Pascal VOC post-2010 'integrate' version of the average precisions instead of the pre-2010 version computed above. The evaluator object still has an internal copy of all the predictions, and since computing the predictions makes up the vast majority of the overall computation time and since the predictions aren't affected by changing the average precision computation mode, we skip computing the predictions again and instead only compute the steps that come after the prediction phase of the evaluation. We could even skip the matching part, since it isn't affected by changing the average precision mode either. In fact, we would only have to call `compute_average_precisions()` `compute_mean_average_precision()` again, but for the sake of illustration we'll re-do the other computations, too.
# In[12]:
# Re-run the post-prediction stages only: the evaluator caches its predictions,
# so we can recompute APs with the post-2010 'integrate' mode without
# re-running inference over the whole dataset.
evaluator.get_num_gt_per_class(ignore_neutral_boxes=True,
                               verbose=False,
                               ret=False)
evaluator.match_predictions(ignore_neutral_boxes=True,
                            matching_iou_threshold=0.5,
                            border_pixels='include',
                            sorting_algorithm='quicksort',
                            verbose=True,
                            ret=False)
precisions, recalls = evaluator.compute_precision_recall(verbose=True, ret=True)
# mode='integrate' numerically integrates the full precision-recall curve;
# presumably num_recall_points is only used in 'sample' mode -- verify.
average_precisions = evaluator.compute_average_precisions(mode='integrate',
                                                          num_recall_points=11,
                                                          verbose=True,
                                                          ret=True)
mean_average_precision = evaluator.compute_mean_average_precision(ret=True)
# In[17]:
# NOTE: this rebinds `results` (previously the evaluator output tuple)
# to a plain list of rounded per-class APs.
results = []
for i in range(1, len(average_precisions)):
    print("{:<14}{:<6}{}".format(classes[i], 'AP', round(average_precisions[i], 3)))
    results.append(round(average_precisions[i], 3))
print()
print("{:<14}{:<6}{}".format('','mAP', round(mean_average_precision, 3)))
print(classes[1:])
print(results)
| 51.898734 | 899 | 0.678537 |
d9c18585097b3a28fe8fabb1c7a2dac6eeab0f2f | 822 | py | Python | bert-use/bert-use/setup.py | zhc0757/bert-api | 189cbae4fbf080b443122a9c103c276759796174 | [
"Apache-2.0"
] | 1 | 2019-12-23T05:09:51.000Z | 2019-12-23T05:09:51.000Z | bert-use/bert-use/setup.py | zhc0757/bert-api | 189cbae4fbf080b443122a9c103c276759796174 | [
"Apache-2.0"
] | null | null | null | bert-use/bert-use/setup.py | zhc0757/bert-api | 189cbae4fbf080b443122a9c103c276759796174 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
#reference from
# http://xiaoh.me/2015/12/11/python-egg/
# https://blog.csdn.net/m0_38088359/article/details/83656872
def setupMain():
    """Describe and register the bert-api distribution with setuptools.

    Calling this executes whatever setup command was given on the
    command line (``sdist``, ``install``, ``bdist_wheel``, ...).
    """
    setup(
        name="bert-api",
        version="0.0.1",
        keywords=("pip", "bert", "api", "wrapper"),
        description="a wrapper of bert-tensorflow",
        long_description="a wrapper of bert-tensorflow of Google for python",
        # Fixed typo in the license string ("Apache Licene 2.0").
        license="Apache License 2.0",
        url="https://github.com/zhc0757/bert-api",
        author="zhc",
        author_email="ak4777@live.cn",
        packages=find_packages(),
        include_package_data=True,
        platforms="any",
        install_requires=['bert-tensorflow>=1.0.1'],
    )


if __name__ == '__main__':
    setupMain()
| 30.444444 | 80 | 0.590024 |
8cc66711b7cc3e6b109ebf6d959bf5c17e2e67a4 | 329 | py | Python | hashicorp_vault_client/hashicorp_vault_client/api/__init__.py | drewmullen/HAC | fb185804fd244366f8f8d01df22835b3d96e7512 | [
"Apache-2.0"
] | null | null | null | hashicorp_vault_client/hashicorp_vault_client/api/__init__.py | drewmullen/HAC | fb185804fd244366f8f8d01df22835b3d96e7512 | [
"Apache-2.0"
] | 2 | 2019-09-30T20:56:41.000Z | 2019-10-02T00:22:07.000Z | hashicorp_vault_client/hashicorp_vault_client/api/__init__.py | drewmullen/HAC | fb185804fd244366f8f8d01df22835b3d96e7512 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from hashicorp_vault_client.api.auth_api import AuthApi
from hashicorp_vault_client.api.identity_api import IdentityApi
from hashicorp_vault_client.api.secrets_api import SecretsApi
from hashicorp_vault_client.api.system_api import SystemApi
| 32.9 | 63 | 0.87234 |
8d83494c531865036c83f779acf1815a6a04a3f1 | 72,788 | py | Python | glance/tests/integration/legacy_functional/test_v1_api.py | kfwang/Glance-OVA-OVF | e983c3c79987e59d644917646edc6b0b7fd219d0 | [
"Apache-2.0"
] | null | null | null | glance/tests/integration/legacy_functional/test_v1_api.py | kfwang/Glance-OVA-OVF | e983c3c79987e59d644917646edc6b0b7fd219d0 | [
"Apache-2.0"
] | null | null | null | glance/tests/integration/legacy_functional/test_v1_api.py | kfwang/Glance-OVA-OVF | e983c3c79987e59d644917646edc6b0b7fd219d0 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import hashlib
import os
import tempfile
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_utils import units
import testtools
from glance.tests.integration.legacy_functional import base
from glance.tests.utils import minimal_headers
FIVE_KB = 5 * units.Ki  # 5120 bytes: the payload size used for test images below
FIVE_GB = 5 * units.Gi  # presumably an over-limit size for size-cap tests -- not used in this chunk
class TestApi(base.ApiTest):
    def test_get_head_simple_post(self):
        """
        Walk the basic v1 image lifecycle end to end:
        list (empty) -> POST an image with data -> HEAD/GET it back ->
        list again -> PUT custom properties (add, remove, re-add) ->
        DELETE the image.
        """
        # 0. GET /images
        # Verify no public images
        path = "/v1/images"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        self.assertEqual('{"images": []}', content)
        # 1. GET /images/detail
        # Verify no public images
        path = "/v1/images/detail"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        self.assertEqual('{"images": []}', content)
        # 2. POST /images with public image named Image1
        # attribute and no custom properties. Verify a 200 OK is returned
        image_data = "*" * FIVE_KB
        headers = minimal_headers('Image1')
        path = "/v1/images"
        response, content = self.http.request(path, 'POST', headers=headers,
                                              body=image_data)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        image_id = data['image']['id']
        self.assertEqual(hashlib.md5(image_data).hexdigest(),
                         data['image']['checksum'])
        self.assertEqual(FIVE_KB, data['image']['size'])
        self.assertEqual("Image1", data['image']['name'])
        self.assertTrue(data['image']['is_public'])
        # 3. HEAD image
        # Verify image found now
        path = "/v1/images/%s" % image_id
        response, content = self.http.request(path, 'HEAD')
        self.assertEqual(200, response.status)
        self.assertEqual("Image1", response['x-image-meta-name'])
        # 4. GET image
        # Verify all information on image we just added is correct
        path = "/v1/images/%s" % image_id
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        expected_image_headers = {
            'x-image-meta-id': image_id,
            'x-image-meta-name': 'Image1',
            'x-image-meta-is_public': 'True',
            'x-image-meta-status': 'active',
            'x-image-meta-disk_format': 'raw',
            'x-image-meta-container_format': 'ovf',
            'x-image-meta-size': str(FIVE_KB)}
        expected_std_headers = {
            'content-length': str(FIVE_KB),
            'content-type': 'application/octet-stream'}
        for expected_key, expected_value in expected_image_headers.items():
            self.assertEqual(expected_value, response[expected_key],
                             "For key '%s' expected header value '%s'. "
                             "Got '%s'" % (expected_key,
                                           expected_value,
                                           response[expected_key]))
        for expected_key, expected_value in expected_std_headers.items():
            self.assertEqual(expected_value, response[expected_key],
                             "For key '%s' expected header value '%s'. "
                             "Got '%s'" % (expected_key,
                                           expected_value,
                                           response[expected_key]))
        self.assertEqual("*" * FIVE_KB, content)
        self.assertEqual(hashlib.md5("*" * FIVE_KB).hexdigest(),
                         hashlib.md5(content).hexdigest())
        # 5. GET /images
        # Verify one public image is listed
        path = "/v1/images"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        # The checksum below is the md5 of 5120 '*' bytes (see step 2).
        expected_result = {"images": [
            {"container_format": "ovf",
             "disk_format": "raw",
             "id": image_id,
             "name": "Image1",
             "checksum": "c2e5db72bd7fd153f53ede5da5a06de3",
             "size": 5120}]}
        self.assertEqual(expected_result, jsonutils.loads(content))
        # 6. GET /images/detail
        # Verify image and all its metadata
        path = "/v1/images/detail"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        expected_image = {
            "status": "active",
            "name": "Image1",
            "deleted": False,
            "container_format": "ovf",
            "disk_format": "raw",
            "id": image_id,
            "is_public": True,
            "deleted_at": None,
            "properties": {},
            "size": 5120}
        image = jsonutils.loads(content)
        for expected_key, expected_value in expected_image.items():
            self.assertEqual(expected_value, image['images'][0][expected_key],
                             "For key '%s' expected header value '%s'. "
                             "Got '%s'" % (expected_key,
                                           expected_value,
                                           image['images'][0][expected_key]))
        # 7. PUT image with custom properties of "distro" and "arch"
        # Verify 200 returned
        headers = {'X-Image-Meta-Property-Distro': 'Ubuntu',
                   'X-Image-Meta-Property-Arch': 'x86_64'}
        path = "/v1/images/%s" % image_id
        response, content = self.http.request(path, 'PUT', headers=headers)
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual("x86_64", data['image']['properties']['arch'])
        self.assertEqual("Ubuntu", data['image']['properties']['distro'])
        # 8. GET /images/detail
        # Verify image and all its metadata
        path = "/v1/images/detail"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        expected_image = {
            "status": "active",
            "name": "Image1",
            "deleted": False,
            "container_format": "ovf",
            "disk_format": "raw",
            "id": image_id,
            "is_public": True,
            "deleted_at": None,
            "properties": {'distro': 'Ubuntu', 'arch': 'x86_64'},
            "size": 5120}
        image = jsonutils.loads(content)
        for expected_key, expected_value in expected_image.items():
            self.assertEqual(expected_value, image['images'][0][expected_key],
                             "For key '%s' expected header value '%s'. "
                             "Got '%s'" % (expected_key,
                                           expected_value,
                                           image['images'][0][expected_key]))
        # 9. PUT image and remove a previously existing property.
        headers = {'X-Image-Meta-Property-Arch': 'x86_64'}
        path = "/v1/images/%s" % image_id
        response, content = self.http.request(path, 'PUT', headers=headers)
        self.assertEqual(200, response.status)
        path = "/v1/images/detail"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)['images'][0]
        self.assertEqual(1, len(data['properties']))
        self.assertEqual("x86_64", data['properties']['arch'])
        # 10. PUT image and add a previously deleted property.
        headers = {'X-Image-Meta-Property-Distro': 'Ubuntu',
                   'X-Image-Meta-Property-Arch': 'x86_64'}
        path = "/v1/images/%s" % image_id
        response, content = self.http.request(path, 'PUT', headers=headers)
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        path = "/v1/images/detail"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)['images'][0]
        self.assertEqual(2, len(data['properties']))
        self.assertEqual("x86_64", data['properties']['arch'])
        self.assertEqual("Ubuntu", data['properties']['distro'])
        # updated_at must have moved past created_at after the PUTs above.
        self.assertNotEqual(data['created_at'], data['updated_at'])
        # DELETE image
        path = "/v1/images/%s" % image_id
        response, content = self.http.request(path, 'DELETE')
        self.assertEqual(200, response.status)
    def test_queued_process_flow(self):
        """
        We test the process flow where a user registers an image
        with Glance but does not immediately upload an image file.
        Later, the user uploads an image file using a PUT operation.
        We track the changing of image status throughout this process.

        0. GET /images - verify no public images
        1. POST /images with public image named Image1 with no location
           attribute and no image data - verify 201 returned
        2. GET /images - verify one public image
        3. HEAD image - verify image now in queued status
        4. PUT image with image data - verify 200 returned
        5. HEAD image - verify image now in active status
        6. GET /images - verify one public image
        """
        # 0. GET /images
        # Verify no public images
        path = "/v1/images"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        self.assertEqual('{"images": []}', content)
        # 1. POST /images with public image named Image1
        # with no location or image data
        headers = minimal_headers('Image1')
        path = "/v1/images"
        response, content = self.http.request(path, 'POST', headers=headers)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        # No data uploaded yet, so no checksum and zero size.
        self.assertIsNone(data['image']['checksum'])
        self.assertEqual(0, data['image']['size'])
        self.assertEqual('ovf', data['image']['container_format'])
        self.assertEqual('raw', data['image']['disk_format'])
        self.assertEqual("Image1", data['image']['name'])
        self.assertTrue(data['image']['is_public'])
        image_id = data['image']['id']
        # 2. GET /images
        # Verify 1 public image
        path = "/v1/images"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(image_id, data['images'][0]['id'])
        self.assertIsNone(data['images'][0]['checksum'])
        self.assertEqual(0, data['images'][0]['size'])
        self.assertEqual('ovf', data['images'][0]['container_format'])
        self.assertEqual('raw', data['images'][0]['disk_format'])
        self.assertEqual("Image1", data['images'][0]['name'])
        # 3. HEAD /images
        # Verify status is in queued
        path = "/v1/images/%s" % (image_id)
        response, content = self.http.request(path, 'HEAD')
        self.assertEqual(200, response.status)
        self.assertEqual("Image1", response['x-image-meta-name'])
        self.assertEqual("queued", response['x-image-meta-status'])
        self.assertEqual('0', response['x-image-meta-size'])
        self.assertEqual(image_id, response['x-image-meta-id'])
        # 4. PUT image with image data, verify 200 returned
        image_data = "*" * FIVE_KB
        headers = {'Content-Type': 'application/octet-stream'}
        path = "/v1/images/%s" % (image_id)
        response, content = self.http.request(path, 'PUT', headers=headers,
                                              body=image_data)
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(hashlib.md5(image_data).hexdigest(),
                         data['image']['checksum'])
        self.assertEqual(FIVE_KB, data['image']['size'])
        self.assertEqual("Image1", data['image']['name'])
        self.assertTrue(data['image']['is_public'])
        # 5. HEAD /images
        # Verify status is in active
        path = "/v1/images/%s" % (image_id)
        response, content = self.http.request(path, 'HEAD')
        self.assertEqual(200, response.status)
        self.assertEqual("Image1", response['x-image-meta-name'])
        self.assertEqual("active", response['x-image-meta-status'])
        # 6. GET /images
        # Verify 1 public image still...
        path = "/v1/images"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        data = jsonutils.loads(content)
        self.assertEqual(hashlib.md5(image_data).hexdigest(),
                         data['images'][0]['checksum'])
        self.assertEqual(image_id, data['images'][0]['id'])
        self.assertEqual(FIVE_KB, data['images'][0]['size'])
        self.assertEqual('ovf', data['images'][0]['container_format'])
        self.assertEqual('raw', data['images'][0]['disk_format'])
        self.assertEqual("Image1", data['images'][0]['name'])
        # DELETE image
        path = "/v1/images/%s" % (image_id)
        response, content = self.http.request(path, 'DELETE')
        self.assertEqual(200, response.status)
def test_v1_not_enabled(self):
self.config(enable_v1_api=False)
path = "/v1/images"
response, content = self.http.request(path, 'GET')
self.assertEqual(300, response.status)
def test_v1_enabled(self):
self.config(enable_v1_api=True)
path = "/v1/images"
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
def test_zero_initial_size(self):
"""
A test to ensure that an image with size explicitly set to zero
has status that immediately transitions to active.
"""
# 1. POST /images with public image named Image1
# attribute and a size of zero.
# Verify a 201 OK is returned
headers = {'Content-Type': 'application/octet-stream',
'X-Image-Meta-Size': '0',
'X-Image-Meta-Name': 'Image1',
'X-Image-Meta-disk_format': 'raw',
'X-image-Meta-container_format': 'ovf',
'X-Image-Meta-Is-Public': 'True'}
path = "/v1/images"
response, content = self.http.request(path, 'POST', headers=headers)
self.assertEqual(201, response.status)
# 2. HEAD image-location
# Verify image size is zero and the status is active
path = response.get('location')
response, content = self.http.request(path, 'HEAD')
self.assertEqual(200, response.status)
self.assertEqual('0', response['x-image-meta-size'])
self.assertEqual('active', response['x-image-meta-status'])
# 3. GET image-location
# Verify image content is empty
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
self.assertEqual(0, len(content))
def test_traceback_not_consumed(self):
"""
A test that errors coming from the POST API do not
get consumed and print the actual error message, and
not something like <traceback object at 0x1918d40>
:see https://bugs.launchpad.net/glance/+bug/755912
"""
# POST /images with binary data, but not setting
# Content-Type to application/octet-stream, verify a
# 400 returned and that the error is readable.
with tempfile.NamedTemporaryFile() as test_data_file:
test_data_file.write("XXX")
test_data_file.flush()
path = "/v1/images"
headers = minimal_headers('Image1')
headers['Content-Type'] = 'not octet-stream'
response, content = self.http.request(path, 'POST',
body=test_data_file.name,
headers=headers)
self.assertEqual(400, response.status)
expected = "Content-Type must be application/octet-stream"
self.assertIn(expected, content,
"Could not find '%s' in '%s'" % (expected, content))
def test_filtered_images(self):
"""
Set up four test images and ensure each query param filter works
"""
# 0. GET /images
# Verify no public images
path = "/v1/images"
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
self.assertEqual('{"images": []}', content)
image_ids = []
# 1. POST /images with three public images, and one private image
# with various attributes
headers = {'Content-Type': 'application/octet-stream',
'X-Image-Meta-Name': 'Image1',
'X-Image-Meta-Status': 'active',
'X-Image-Meta-Container-Format': 'ovf',
'X-Image-Meta-Disk-Format': 'vdi',
'X-Image-Meta-Size': '19',
'X-Image-Meta-Is-Public': 'True',
'X-Image-Meta-Protected': 'True',
'X-Image-Meta-Property-pants': 'are on'}
path = "/v1/images"
response, content = self.http.request(path, 'POST', headers=headers)
self.assertEqual(201, response.status)
data = jsonutils.loads(content)
self.assertEqual("are on", data['image']['properties']['pants'])
self.assertTrue(data['image']['is_public'])
image_ids.append(data['image']['id'])
headers = {'Content-Type': 'application/octet-stream',
'X-Image-Meta-Name': 'My Image!',
'X-Image-Meta-Status': 'active',
'X-Image-Meta-Container-Format': 'ovf',
'X-Image-Meta-Disk-Format': 'vhd',
'X-Image-Meta-Size': '20',
'X-Image-Meta-Is-Public': 'True',
'X-Image-Meta-Protected': 'False',
'X-Image-Meta-Property-pants': 'are on'}
path = "/v1/images"
response, content = self.http.request(path, 'POST', headers=headers)
self.assertEqual(201, response.status)
data = jsonutils.loads(content)
self.assertEqual("are on", data['image']['properties']['pants'])
self.assertTrue(data['image']['is_public'])
image_ids.append(data['image']['id'])
headers = {'Content-Type': 'application/octet-stream',
'X-Image-Meta-Name': 'My Image!',
'X-Image-Meta-Status': 'saving',
'X-Image-Meta-Container-Format': 'ami',
'X-Image-Meta-Disk-Format': 'ami',
'X-Image-Meta-Size': '21',
'X-Image-Meta-Is-Public': 'True',
'X-Image-Meta-Protected': 'False',
'X-Image-Meta-Property-pants': 'are off'}
path = "/v1/images"
response, content = self.http.request(path, 'POST', headers=headers)
self.assertEqual(201, response.status)
data = jsonutils.loads(content)
self.assertEqual("are off", data['image']['properties']['pants'])
self.assertTrue(data['image']['is_public'])
image_ids.append(data['image']['id'])
headers = {'Content-Type': 'application/octet-stream',
'X-Image-Meta-Name': 'My Private Image',
'X-Image-Meta-Status': 'active',
'X-Image-Meta-Container-Format': 'ami',
'X-Image-Meta-Disk-Format': 'ami',
'X-Image-Meta-Size': '22',
'X-Image-Meta-Is-Public': 'False',
'X-Image-Meta-Protected': 'False'}
path = "/v1/images"
response, content = self.http.request(path, 'POST', headers=headers)
self.assertEqual(201, response.status)
data = jsonutils.loads(content)
self.assertFalse(data['image']['is_public'])
image_ids.append(data['image']['id'])
# 2. GET /images
# Verify three public images
path = "/v1/images"
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(3, len(data['images']))
# 3. GET /images with name filter
# Verify correct images returned with name
params = "name=My%20Image!"
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(2, len(data['images']))
for image in data['images']:
self.assertEqual("My Image!", image['name'])
# 4. GET /images with status filter
# Verify correct images returned with status
params = "status=queued"
path = "/v1/images/detail?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(3, len(data['images']))
for image in data['images']:
self.assertEqual("queued", image['status'])
params = "status=active"
path = "/v1/images/detail?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(0, len(data['images']))
# 5. GET /images with container_format filter
# Verify correct images returned with container_format
params = "container_format=ovf"
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(2, len(data['images']))
for image in data['images']:
self.assertEqual("ovf", image['container_format'])
# 6. GET /images with disk_format filter
# Verify correct images returned with disk_format
params = "disk_format=vdi"
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(1, len(data['images']))
for image in data['images']:
self.assertEqual("vdi", image['disk_format'])
# 7. GET /images with size_max filter
# Verify correct images returned with size <= expected
params = "size_max=20"
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(2, len(data['images']))
for image in data['images']:
self.assertTrue(image['size'] <= 20)
# 8. GET /images with size_min filter
# Verify correct images returned with size >= expected
params = "size_min=20"
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(2, len(data['images']))
for image in data['images']:
self.assertTrue(image['size'] >= 20)
# 9. Get /images with is_public=None filter
# Verify correct images returned with property
# Bug lp:803656 Support is_public in filtering
params = "is_public=None"
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(4, len(data['images']))
# 10. Get /images with is_public=False filter
# Verify correct images returned with property
# Bug lp:803656 Support is_public in filtering
params = "is_public=False"
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(1, len(data['images']))
for image in data['images']:
self.assertEqual("My Private Image", image['name'])
# 11. Get /images with is_public=True filter
# Verify correct images returned with property
# Bug lp:803656 Support is_public in filtering
params = "is_public=True"
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(3, len(data['images']))
for image in data['images']:
self.assertNotEqual(image['name'], "My Private Image")
# 12. Get /images with protected=False filter
# Verify correct images returned with property
params = "protected=False"
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(2, len(data['images']))
for image in data['images']:
self.assertNotEqual(image['name'], "Image1")
# 13. Get /images with protected=True filter
# Verify correct images returned with property
params = "protected=True"
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(1, len(data['images']))
for image in data['images']:
self.assertEqual("Image1", image['name'])
# 14. GET /images with property filter
# Verify correct images returned with property
params = "property-pants=are%20on"
path = "/v1/images/detail?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(2, len(data['images']))
for image in data['images']:
self.assertEqual("are on", image['properties']['pants'])
# 15. GET /images with property filter and name filter
# Verify correct images returned with property and name
# Make sure you quote the url when using more than one param!
params = "name=My%20Image!&property-pants=are%20on"
path = "/v1/images/detail?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(1, len(data['images']))
for image in data['images']:
self.assertEqual("are on", image['properties']['pants'])
self.assertEqual("My Image!", image['name'])
# 16. GET /images with past changes-since filter
yesterday = timeutils.isotime(timeutils.utcnow() -
datetime.timedelta(1))
params = "changes-since=%s" % yesterday
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(3, len(data['images']))
# one timezone west of Greenwich equates to an hour ago
# taking care to pre-urlencode '+' as '%2B', otherwise the timezone
# '+' is wrongly decoded as a space
# TODO(eglynn): investigate '+' --> <SPACE> decoding, an artifact
# of WSGI/webob dispatch?
now = timeutils.utcnow()
hour_ago = now.strftime('%Y-%m-%dT%H:%M:%S%%2B01:00')
params = "changes-since=%s" % hour_ago
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(3, len(data['images']))
# 17. GET /images with future changes-since filter
tomorrow = timeutils.isotime(timeutils.utcnow() +
datetime.timedelta(1))
params = "changes-since=%s" % tomorrow
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(0, len(data['images']))
# one timezone east of Greenwich equates to an hour from now
now = timeutils.utcnow()
hour_hence = now.strftime('%Y-%m-%dT%H:%M:%S-01:00')
params = "changes-since=%s" % hour_hence
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(0, len(data['images']))
# 18. GET /images with size_min filter
# Verify correct images returned with size >= expected
params = "size_min=-1"
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(400, response.status)
self.assertIn("filter size_min got -1", content)
# 19. GET /images with size_min filter
# Verify correct images returned with size >= expected
params = "size_max=-1"
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(400, response.status)
self.assertIn("filter size_max got -1", content)
# 20. GET /images with size_min filter
# Verify correct images returned with size >= expected
params = "min_ram=-1"
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(400, response.status)
self.assertIn("Bad value passed to filter min_ram got -1", content)
# 21. GET /images with size_min filter
# Verify correct images returned with size >= expected
params = "protected=imalittleteapot"
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(400, response.status)
self.assertIn("protected got imalittleteapot", content)
# 22. GET /images with size_min filter
# Verify correct images returned with size >= expected
params = "is_public=imalittleteapot"
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(400, response.status)
self.assertIn("is_public got imalittleteapot", content)
def test_limited_images(self):
"""
Ensure marker and limit query params work
"""
# 0. GET /images
# Verify no public images
path = "/v1/images"
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
self.assertEqual('{"images": []}', content)
image_ids = []
# 1. POST /images with three public images with various attributes
headers = minimal_headers('Image1')
path = "/v1/images"
response, content = self.http.request(path, 'POST', headers=headers)
self.assertEqual(201, response.status)
image_ids.append(jsonutils.loads(content)['image']['id'])
headers = minimal_headers('Image2')
path = "/v1/images"
response, content = self.http.request(path, 'POST', headers=headers)
self.assertEqual(201, response.status)
image_ids.append(jsonutils.loads(content)['image']['id'])
headers = minimal_headers('Image3')
path = "/v1/images"
response, content = self.http.request(path, 'POST', headers=headers)
self.assertEqual(201, response.status)
image_ids.append(jsonutils.loads(content)['image']['id'])
# 2. GET /images with all images
path = "/v1/images"
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
images = jsonutils.loads(content)['images']
self.assertEqual(3, len(images))
# 3. GET /images with limit of 2
# Verify only two images were returned
params = "limit=2"
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)['images']
self.assertEqual(2, len(data))
self.assertEqual(images[0]['id'], data[0]['id'])
self.assertEqual(images[1]['id'], data[1]['id'])
# 4. GET /images with marker
# Verify only two images were returned
params = "marker=%s" % images[0]['id']
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)['images']
self.assertEqual(2, len(data))
self.assertEqual(images[1]['id'], data[0]['id'])
self.assertEqual(images[2]['id'], data[1]['id'])
# 5. GET /images with marker and limit
# Verify only one image was returned with the correct id
params = "limit=1&marker=%s" % images[1]['id']
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)['images']
self.assertEqual(1, len(data))
self.assertEqual(images[2]['id'], data[0]['id'])
# 6. GET /images/detail with marker and limit
# Verify only one image was returned with the correct id
params = "limit=1&marker=%s" % images[1]['id']
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)['images']
self.assertEqual(1, len(data))
self.assertEqual(images[2]['id'], data[0]['id'])
# DELETE images
for image_id in image_ids:
path = "/v1/images/%s" % (image_id)
response, content = self.http.request(path, 'DELETE')
self.assertEqual(200, response.status)
def test_ordered_images(self):
"""
Set up three test images and ensure each query param filter works
"""
# 0. GET /images
# Verify no public images
path = "/v1/images"
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
self.assertEqual('{"images": []}', content)
# 1. POST /images with three public images with various attributes
image_ids = []
headers = {'Content-Type': 'application/octet-stream',
'X-Image-Meta-Name': 'Image1',
'X-Image-Meta-Status': 'active',
'X-Image-Meta-Container-Format': 'ovf',
'X-Image-Meta-Disk-Format': 'vdi',
'X-Image-Meta-Size': '19',
'X-Image-Meta-Is-Public': 'True'}
path = "/v1/images"
response, content = self.http.request(path, 'POST', headers=headers)
self.assertEqual(201, response.status)
image_ids.append(jsonutils.loads(content)['image']['id'])
headers = {'Content-Type': 'application/octet-stream',
'X-Image-Meta-Name': 'ASDF',
'X-Image-Meta-Status': 'active',
'X-Image-Meta-Container-Format': 'bare',
'X-Image-Meta-Disk-Format': 'iso',
'X-Image-Meta-Size': '2',
'X-Image-Meta-Is-Public': 'True'}
path = "/v1/images"
response, content = self.http.request(path, 'POST', headers=headers)
self.assertEqual(201, response.status)
image_ids.append(jsonutils.loads(content)['image']['id'])
headers = {'Content-Type': 'application/octet-stream',
'X-Image-Meta-Name': 'XYZ',
'X-Image-Meta-Status': 'saving',
'X-Image-Meta-Container-Format': 'ami',
'X-Image-Meta-Disk-Format': 'ami',
'X-Image-Meta-Size': '5',
'X-Image-Meta-Is-Public': 'True'}
path = "/v1/images"
response, content = self.http.request(path, 'POST', headers=headers)
self.assertEqual(201, response.status)
image_ids.append(jsonutils.loads(content)['image']['id'])
# 2. GET /images with no query params
# Verify three public images sorted by created_at desc
path = "/v1/images"
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(3, len(data['images']))
self.assertEqual(image_ids[2], data['images'][0]['id'])
self.assertEqual(image_ids[1], data['images'][1]['id'])
self.assertEqual(image_ids[0], data['images'][2]['id'])
# 3. GET /images sorted by name asc
params = 'sort_key=name&sort_dir=asc'
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(3, len(data['images']))
self.assertEqual(image_ids[1], data['images'][0]['id'])
self.assertEqual(image_ids[0], data['images'][1]['id'])
self.assertEqual(image_ids[2], data['images'][2]['id'])
# 4. GET /images sorted by size desc
params = 'sort_key=size&sort_dir=desc'
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(3, len(data['images']))
self.assertEqual(image_ids[0], data['images'][0]['id'])
self.assertEqual(image_ids[2], data['images'][1]['id'])
self.assertEqual(image_ids[1], data['images'][2]['id'])
# 5. GET /images sorted by size desc with a marker
params = 'sort_key=size&sort_dir=desc&marker=%s' % image_ids[0]
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(2, len(data['images']))
self.assertEqual(image_ids[2], data['images'][0]['id'])
self.assertEqual(image_ids[1], data['images'][1]['id'])
# 6. GET /images sorted by name asc with a marker
params = 'sort_key=name&sort_dir=asc&marker=%s' % image_ids[2]
path = "/v1/images?%s" % (params)
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
data = jsonutils.loads(content)
self.assertEqual(0, len(data['images']))
# DELETE images
for image_id in image_ids:
path = "/v1/images/%s" % (image_id)
response, content = self.http.request(path, 'DELETE')
self.assertEqual(200, response.status)
def test_duplicate_image_upload(self):
"""
Upload initial image, then attempt to upload duplicate image
"""
# 0. GET /images
# Verify no public images
path = "/v1/images"
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
self.assertEqual('{"images": []}', content)
# 1. POST /images with public image named Image1
headers = {'Content-Type': 'application/octet-stream',
'X-Image-Meta-Name': 'Image1',
'X-Image-Meta-Status': 'active',
'X-Image-Meta-Container-Format': 'ovf',
'X-Image-Meta-Disk-Format': 'vdi',
'X-Image-Meta-Size': '19',
'X-Image-Meta-Is-Public': 'True'}
path = "/v1/images"
response, content = self.http.request(path, 'POST', headers=headers)
self.assertEqual(201, response.status)
image = jsonutils.loads(content)['image']
# 2. POST /images with public image named Image1, and ID: 1
headers = {'Content-Type': 'application/octet-stream',
'X-Image-Meta-Name': 'Image1 Update',
'X-Image-Meta-Status': 'active',
'X-Image-Meta-Container-Format': 'ovf',
'X-Image-Meta-Disk-Format': 'vdi',
'X-Image-Meta-Size': '19',
'X-Image-Meta-Id': image['id'],
'X-Image-Meta-Is-Public': 'True'}
path = "/v1/images"
response, content = self.http.request(path, 'POST', headers=headers)
self.assertEqual(409, response.status)
    def test_delete_not_existing(self):
        """
        We test the following:

        0. GET /images
            - Verify no public images exist
        1. DELETE /images/1
            - Verify 404
        """
        # 0. GET /images
        # Verify no public images
        path = "/v1/images"
        response, content = self.http.request(path, 'GET')
        self.assertEqual(200, response.status)
        self.assertEqual('{"images": []}', content)
        # 1. DELETE /images/1
        # Verify 404 returned
        # '1' is not a known image id, so the delete must fail cleanly
        path = "/v1/images/1"
        response, content = self.http.request(path, 'DELETE')
        self.assertEqual(404, response.status)
def _do_test_post_image_content_bad_format(self, format):
"""
We test that missing container/disk format fails with 400 "Bad Request"
:see https://bugs.launchpad.net/glance/+bug/933702
"""
# Verify no public images
path = "/v1/images"
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
images = jsonutils.loads(content)['images']
self.assertEqual(0, len(images))
path = "/v1/images"
# POST /images without given format being specified
headers = minimal_headers('Image1')
headers['X-Image-Meta-' + format] = 'bad_value'
with tempfile.NamedTemporaryFile() as test_data_file:
test_data_file.write("XXX")
test_data_file.flush()
response, content = self.http.request(path, 'POST',
headers=headers,
body=test_data_file.name)
self.assertEqual(400, response.status)
type = format.replace('_format', '')
expected = "Invalid %s format 'bad_value' for image" % type
self.assertIn(expected, content,
"Could not find '%s' in '%s'" % (expected, content))
# make sure the image was not created
# Verify no public images
path = "/v1/images"
response, content = self.http.request(path, 'GET')
self.assertEqual(200, response.status)
images = jsonutils.loads(content)['images']
self.assertEqual(0, len(images))
    def test_post_image_content_bad_container_format(self):
        # POSTing an image with an invalid container_format must 400.
        self._do_test_post_image_content_bad_format('container_format')
    def test_post_image_content_bad_disk_format(self):
        # POSTing an image with an invalid disk_format must 400.
        self._do_test_post_image_content_bad_format('disk_format')
def _do_test_put_image_content_missing_format(self, format):
"""
We test that missing container/disk format only fails with
400 "Bad Request" when the image content is PUT (i.e. not
on the original POST of a queued image).
:see https://bugs.launchpad.net/glance/+bug/937216
"""
# POST queued image
path = "/v1/images"
headers = {
'X-Image-Meta-Name': 'Image1',
'X-Image-Meta-Is-Public': 'True',
}
response, content = self.http.request(path, 'POST', headers=headers)
self.assertEqual(201, response.status)
data = jsonutils.loads(content)
image_id = data['image']['id']
self.addDetail('image_data', testtools.content.json_content(data))
# PUT image content images without given format being specified
path = "/v1/images/%s" % (image_id)
headers = minimal_headers('Image1')
del headers['X-Image-Meta-' + format]
with tempfile.NamedTemporaryFile() as test_data_file:
test_data_file.write("XXX")
test_data_file.flush()
response, content = self.http.request(path, 'PUT',
headers=headers,
body=test_data_file.name)
self.assertEqual(400, response.status)
type = format.replace('_format', '').capitalize()
expected = "%s format is not specified" % type
self.assertIn(expected, content,
"Could not find '%s' in '%s'" % (expected, content))
    def test_put_image_content_bad_container_format(self):
        # PUTting image content with no container_format must 400.
        self._do_test_put_image_content_missing_format('container_format')
    def test_put_image_content_bad_disk_format(self):
        # PUTting image content with no disk_format must 400.
        self._do_test_put_image_content_missing_format('disk_format')
def _do_test_mismatched_attribute(self, attribute, value):
"""
Test mismatched attribute.
"""
image_data = "*" * FIVE_KB
headers = minimal_headers('Image1')
headers[attribute] = value
path = "/v1/images"
response, content = self.http.request(path, 'POST', headers=headers,
body=image_data)
self.assertEqual(400, response.status)
images_dir = os.path.join(self.test_dir, 'images')
image_count = len([name for name in os.listdir(images_dir)
if os.path.isfile(os.path.join(images_dir, name))])
self.assertEqual(0, image_count)
    def test_mismatched_size(self):
        """
        Test mismatched size.

        Declares a size one byte larger than the actual payload.
        """
        self._do_test_mismatched_attribute('x-image-meta-size',
                                           str(FIVE_KB + 1))
    def test_mismatched_checksum(self):
        """
        Test mismatched checksum.

        Declares a checksum that cannot match the uploaded payload.
        """
        self._do_test_mismatched_attribute('x-image-meta-checksum',
                                           'foobar')
class TestApiWithFakeAuth(base.ApiTest):
    def __init__(self, *args, **kwargs):
        super(TestApiWithFakeAuth, self).__init__(*args, **kwargs)
        # Run both API and registry servers with the 'fakeauth' flavor,
        # so requests authenticate via 'user:tenant:role' X-Auth-Token
        # headers (as used throughout these tests).
        self.api_flavor = 'fakeauth'
        self.registry_flavor = 'fakeauth'
    def test_ownership(self):
        """
        Verify image-owner handling under fake auth: admins may set and
        change an image's owner; non-admin users may not.
        """
        # Add an image with admin privileges and ensure the owner
        # can be set to something other than what was used to authenticate
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:admin',
        }
        create_headers = {
            'X-Image-Meta-Name': 'MyImage',
            'X-Image-Meta-disk_format': 'raw',
            'X-Image-Meta-container_format': 'ovf',
            'X-Image-Meta-Is-Public': 'True',
            'X-Image-Meta-Owner': 'tenant2',
        }
        create_headers.update(auth_headers)
        path = "/v1/images"
        response, content = self.http.request(path, 'POST',
                                              headers=create_headers)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        image_id = data['image']['id']
        # The explicitly requested owner ('tenant2') was honored
        path = "/v1/images/%s" % (image_id)
        response, content = self.http.request(path, 'HEAD',
                                              headers=auth_headers)
        self.assertEqual(200, response.status)
        self.assertEqual('tenant2', response['x-image-meta-owner'])
        # Now add an image without admin privileges and ensure the owner
        # cannot be set to something other than what was used to authenticate
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:role1',
        }
        create_headers.update(auth_headers)
        path = "/v1/images"
        response, content = self.http.request(path, 'POST',
                                              headers=create_headers)
        self.assertEqual(201, response.status)
        data = jsonutils.loads(content)
        image_id = data['image']['id']
        # We have to be admin to see the owner
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:admin',
        }
        create_headers.update(auth_headers)
        path = "/v1/images/%s" % (image_id)
        response, content = self.http.request(path, 'HEAD',
                                              headers=auth_headers)
        self.assertEqual(200, response.status)
        # Requested 'tenant2' owner was ignored for the non-admin create;
        # the owner is the authenticated tenant instead
        self.assertEqual('tenant1', response['x-image-meta-owner'])
        # Make sure the non-privileged user can't update their owner either
        update_headers = {
            'X-Image-Meta-Name': 'MyImage2',
            'X-Image-Meta-Owner': 'tenant2',
            'X-Auth-Token': 'user1:tenant1:role1',
        }
        path = "/v1/images/%s" % (image_id)
        response, content = self.http.request(path, 'PUT',
                                              headers=update_headers)
        self.assertEqual(200, response.status)
        # We have to be admin to see the owner
        auth_headers = {
            'X-Auth-Token': 'user1:tenant1:admin',
        }
        path = "/v1/images/%s" % (image_id)
        response, content = self.http.request(path, 'HEAD',
                                              headers=auth_headers)
        self.assertEqual(200, response.status)
        # Owner is unchanged: the non-admin PUT did not apply the change
        self.assertEqual('tenant1', response['x-image-meta-owner'])
        # An admin user should be able to update the owner
        auth_headers = {
            'X-Auth-Token': 'user1:tenant3:admin',
        }
        update_headers = {
            'X-Image-Meta-Name': 'MyImage2',
            'X-Image-Meta-Owner': 'tenant2',
        }
        update_headers.update(auth_headers)
        path = "/v1/images/%s" % (image_id)
        response, content = self.http.request(path, 'PUT',
                                              headers=update_headers)
        self.assertEqual(200, response.status)
        path = "/v1/images/%s" % (image_id)
        response, content = self.http.request(path, 'HEAD',
                                              headers=auth_headers)
        self.assertEqual(200, response.status)
        self.assertEqual('tenant2', response['x-image-meta-owner'])
    def test_image_visibility_to_different_users(self):
        """
        Create one public and one private image per owner, then verify
        which images each requesting tenant/role can list, both with and
        without an explicit is_public filter.
        """
        owners = ['admin', 'tenant1', 'tenant2', 'none']
        visibilities = {'public': 'True', 'private': 'False'}
        image_ids = {}
        # Seed 8 images (4 owners x public/private) using an admin token
        # so the X-Image-Meta-Owner header is honored
        for owner in owners:
            for visibility, is_public in visibilities.items():
                name = '%s-%s' % (owner, visibility)
                headers = {
                    'Content-Type': 'application/octet-stream',
                    'X-Image-Meta-Name': name,
                    'X-Image-Meta-Status': 'active',
                    'X-Image-Meta-Is-Public': is_public,
                    'X-Image-Meta-Owner': owner,
                    'X-Auth-Token': 'createuser:createtenant:admin',
                }
                path = "/v1/images"
                response, content = self.http.request(path, 'POST',
                                                      headers=headers)
                self.assertEqual(201, response.status)
                data = jsonutils.loads(content)
                image_ids[name] = data['image']['id']
        def list_images(tenant, role='', is_public=None):
            # Helper: list image details as the given tenant/role,
            # optionally filtering on is_public
            auth_token = 'user:%s:%s' % (tenant, role)
            headers = {'X-Auth-Token': auth_token}
            path = "/v1/images/detail"
            if is_public is not None:
                path += '?is_public=%s' % is_public
            response, content = self.http.request(path, 'GET', headers=headers)
            self.assertEqual(200, response.status)
            return jsonutils.loads(content)['images']
        # 1. Known user sees public and their own images
        images = list_images('tenant1')
        self.assertEqual(5, len(images))
        for image in images:
            self.assertTrue(image['is_public'] or image['owner'] == 'tenant1')
        # 2. Unknown user sees only public images
        images = list_images('none')
        self.assertEqual(4, len(images))
        for image in images:
            self.assertTrue(image['is_public'])
        # 3. Unknown admin sees only public images
        images = list_images('none', role='admin')
        self.assertEqual(4, len(images))
        for image in images:
            self.assertTrue(image['is_public'])
        # 4. Unknown admin, is_public=none, shows all images
        images = list_images('none', role='admin', is_public='none')
        self.assertEqual(8, len(images))
        # 5. Unknown admin, is_public=true, shows only public images
        images = list_images('none', role='admin', is_public='true')
        self.assertEqual(4, len(images))
        for image in images:
            self.assertTrue(image['is_public'])
        # 6. Unknown admin, is_public=false, sees only private images
        images = list_images('none', role='admin', is_public='false')
        self.assertEqual(4, len(images))
        for image in images:
            self.assertFalse(image['is_public'])
        # 7. Known admin sees public and their own images
        images = list_images('admin', role='admin')
        self.assertEqual(5, len(images))
        for image in images:
            self.assertTrue(image['is_public'] or image['owner'] == 'admin')
        # 8. Known admin, is_public=none, shows all images
        images = list_images('admin', role='admin', is_public='none')
        self.assertEqual(8, len(images))
        # 9. Known admin, is_public=true, sees all public and their images
        images = list_images('admin', role='admin', is_public='true')
        self.assertEqual(5, len(images))
        for image in images:
            self.assertTrue(image['is_public'] or image['owner'] == 'admin')
        # 10. Known admin, is_public=false, sees all private images
        images = list_images('admin', role='admin', is_public='false')
        self.assertEqual(4, len(images))
        for image in images:
            self.assertFalse(image['is_public'])
def test_property_protections(self):
# Enable property protection
self.config(property_protection_file=self.property_file)
self.init()
CREATE_HEADERS = {
'X-Image-Meta-Name': 'MyImage',
'X-Image-Meta-disk_format': 'raw',
'X-Image-Meta-container_format': 'ovf',
'X-Image-Meta-Is-Public': 'True',
'X-Image-Meta-Owner': 'tenant2',
}
# Create an image for role member with extra properties
# Raises 403 since user is not allowed to create 'foo'
auth_headers = {
'X-Auth-Token': 'user1:tenant1:member',
}
custom_props = {
'x-image-meta-property-foo': 'bar'
}
auth_headers.update(custom_props)
auth_headers.update(CREATE_HEADERS)
path = "/v1/images"
response, content = self.http.request(path, 'POST',
headers=auth_headers)
self.assertEqual(403, response.status)
# Create an image for role member without 'foo'
auth_headers = {
'X-Auth-Token': 'user1:tenant1:member',
}
custom_props = {
'x-image-meta-property-x_owner_foo': 'o_s_bar',
}
auth_headers.update(custom_props)
auth_headers.update(CREATE_HEADERS)
path = "/v1/images"
response, content = self.http.request(path, 'POST',
headers=auth_headers)
self.assertEqual(201, response.status)
# Returned image entity should have 'x_owner_foo'
data = jsonutils.loads(content)
self.assertEqual('o_s_bar',
data['image']['properties']['x_owner_foo'])
# Create an image for role spl_role with extra properties
auth_headers = {
'X-Auth-Token': 'user1:tenant1:spl_role',
}
custom_props = {
'X-Image-Meta-Property-spl_create_prop': 'create_bar',
'X-Image-Meta-Property-spl_read_prop': 'read_bar',
'X-Image-Meta-Property-spl_update_prop': 'update_bar',
'X-Image-Meta-Property-spl_delete_prop': 'delete_bar'
}
auth_headers.update(custom_props)
auth_headers.update(CREATE_HEADERS)
path = "/v1/images"
response, content = self.http.request(path, 'POST',
headers=auth_headers)
self.assertEqual(201, response.status)
data = jsonutils.loads(content)
image_id = data['image']['id']
# Attempt to update two properties, one protected(spl_read_prop), the
# other not(spl_update_prop). Request should be forbidden.
auth_headers = {
'X-Auth-Token': 'user1:tenant1:spl_role',
}
custom_props = {
'X-Image-Meta-Property-spl_read_prop': 'r',
'X-Image-Meta-Property-spl_update_prop': 'u',
'X-Glance-Registry-Purge-Props': 'False'
}
auth_headers.update(auth_headers)
auth_headers.update(custom_props)
path = "/v1/images/%s" % image_id
response, content = self.http.request(path, 'PUT',
headers=auth_headers)
self.assertEqual(403, response.status)
# Attempt to create properties which are forbidden
auth_headers = {
'X-Auth-Token': 'user1:tenant1:spl_role',
}
custom_props = {
'X-Image-Meta-Property-spl_new_prop': 'new',
'X-Glance-Registry-Purge-Props': 'True'
}
auth_headers.update(auth_headers)
auth_headers.update(custom_props)
path = "/v1/images/%s" % image_id
response, content = self.http.request(path, 'PUT',
headers=auth_headers)
self.assertEqual(403, response.status)
# Attempt to update, create and delete properties
auth_headers = {
'X-Auth-Token': 'user1:tenant1:spl_role',
}
custom_props = {
'X-Image-Meta-Property-spl_create_prop': 'create_bar',
'X-Image-Meta-Property-spl_read_prop': 'read_bar',
'X-Image-Meta-Property-spl_update_prop': 'u',
'X-Glance-Registry-Purge-Props': 'True'
}
auth_headers.update(auth_headers)
auth_headers.update(custom_props)
path = "/v1/images/%s" % image_id
response, content = self.http.request(path, 'PUT',
headers=auth_headers)
self.assertEqual(200, response.status)
# Returned image entity should reflect the changes
image = jsonutils.loads(content)
# 'spl_update_prop' has update permission for spl_role
# hence the value has changed
self.assertEqual('u', image['image']['properties']['spl_update_prop'])
# 'spl_delete_prop' has delete permission for spl_role
# hence the property has been deleted
self.assertNotIn('spl_delete_prop', image['image']['properties'])
# 'spl_create_prop' has create permission for spl_role
# hence the property has been created
self.assertEqual('create_bar',
image['image']['properties']['spl_create_prop'])
# Image Deletion should work
auth_headers = {
'X-Auth-Token': 'user1:tenant1:spl_role',
}
path = "/v1/images/%s" % image_id
response, content = self.http.request(path, 'DELETE',
headers=auth_headers)
self.assertEqual(200, response.status)
# This image should be no longer be directly accessible
auth_headers = {
'X-Auth-Token': 'user1:tenant1:spl_role',
}
path = "/v1/images/%s" % image_id
response, content = self.http.request(path, 'HEAD',
headers=auth_headers)
self.assertEqual(404, response.status)
def test_property_protections_special_chars(self):
# Enable property protection
self.config(property_protection_file=self.property_file)
self.init()
CREATE_HEADERS = {
'X-Image-Meta-Name': 'MyImage',
'X-Image-Meta-disk_format': 'raw',
'X-Image-Meta-container_format': 'ovf',
'X-Image-Meta-Is-Public': 'True',
'X-Image-Meta-Owner': 'tenant2',
'X-Image-Meta-Size': '0',
}
# Create an image
auth_headers = {
'X-Auth-Token': 'user1:tenant1:member',
}
auth_headers.update(CREATE_HEADERS)
path = "/v1/images"
response, content = self.http.request(path, 'POST',
headers=auth_headers)
self.assertEqual(201, response.status)
data = jsonutils.loads(content)
image_id = data['image']['id']
# Verify both admin and unknown role can create properties marked with
# '@'
auth_headers = {
'X-Auth-Token': 'user1:tenant1:admin',
}
custom_props = {
'X-Image-Meta-Property-x_all_permitted_admin': '1'
}
auth_headers.update(custom_props)
path = "/v1/images/%s" % image_id
response, content = self.http.request(path, 'PUT',
headers=auth_headers)
self.assertEqual(200, response.status)
image = jsonutils.loads(content)
self.assertEqual('1',
image['image']['properties']['x_all_permitted_admin'])
auth_headers = {
'X-Auth-Token': 'user1:tenant1:joe_soap',
}
custom_props = {
'X-Image-Meta-Property-x_all_permitted_joe_soap': '1',
'X-Glance-Registry-Purge-Props': 'False'
}
auth_headers.update(custom_props)
path = "/v1/images/%s" % image_id
response, content = self.http.request(path, 'PUT',
headers=auth_headers)
self.assertEqual(200, response.status)
image = jsonutils.loads(content)
self.assertEqual(
'1', image['image']['properties']['x_all_permitted_joe_soap'])
# Verify both admin and unknown role can read properties marked with
# '@'
auth_headers = {
'X-Auth-Token': 'user1:tenant1:admin',
}
path = "/v1/images/%s" % image_id
response, content = self.http.request(path, 'HEAD',
headers=auth_headers)
self.assertEqual(200, response.status)
self.assertEqual('1', response.get(
'x-image-meta-property-x_all_permitted_admin'))
self.assertEqual('1', response.get(
'x-image-meta-property-x_all_permitted_joe_soap'))
auth_headers = {
'X-Auth-Token': 'user1:tenant1:joe_soap',
}
path = "/v1/images/%s" % image_id
response, content = self.http.request(path, 'HEAD',
headers=auth_headers)
self.assertEqual(200, response.status)
self.assertEqual('1', response.get(
'x-image-meta-property-x_all_permitted_admin'))
self.assertEqual('1', response.get(
'x-image-meta-property-x_all_permitted_joe_soap'))
# Verify both admin and unknown role can update properties marked with
# '@'
auth_headers = {
'X-Auth-Token': 'user1:tenant1:admin',
}
custom_props = {
'X-Image-Meta-Property-x_all_permitted_admin': '2',
'X-Glance-Registry-Purge-Props': 'False'
}
auth_headers.update(custom_props)
path = "/v1/images/%s" % image_id
response, content = self.http.request(path, 'PUT',
headers=auth_headers)
self.assertEqual(200, response.status)
image = jsonutils.loads(content)
self.assertEqual('2',
image['image']['properties']['x_all_permitted_admin'])
auth_headers = {
'X-Auth-Token': 'user1:tenant1:joe_soap',
}
custom_props = {
'X-Image-Meta-Property-x_all_permitted_joe_soap': '2',
'X-Glance-Registry-Purge-Props': 'False'
}
auth_headers.update(custom_props)
path = "/v1/images/%s" % image_id
response, content = self.http.request(path, 'PUT',
headers=auth_headers)
self.assertEqual(200, response.status)
image = jsonutils.loads(content)
self.assertEqual(
'2', image['image']['properties']['x_all_permitted_joe_soap'])
# Verify both admin and unknown role can delete properties marked with
# '@'
auth_headers = {
'X-Auth-Token': 'user1:tenant1:admin',
}
custom_props = {
'X-Image-Meta-Property-x_all_permitted_joe_soap': '2',
'X-Glance-Registry-Purge-Props': 'True'
}
auth_headers.update(custom_props)
path = "/v1/images/%s" % image_id
response, content = self.http.request(path, 'PUT',
headers=auth_headers)
self.assertEqual(200, response.status)
image = jsonutils.loads(content)
self.assertNotIn('x_all_permitted_admin', image['image']['properties'])
auth_headers = {
'X-Auth-Token': 'user1:tenant1:joe_soap',
}
custom_props = {
'X-Glance-Registry-Purge-Props': 'True'
}
auth_headers.update(custom_props)
path = "/v1/images/%s" % image_id
response, content = self.http.request(path, 'PUT',
headers=auth_headers)
self.assertEqual(200, response.status)
image = jsonutils.loads(content)
self.assertNotIn('x_all_permitted_joe_soap',
image['image']['properties'])
# Verify neither admin nor unknown role can create a property protected
# with '!'
auth_headers = {
'X-Auth-Token': 'user1:tenant1:admin',
}
custom_props = {
'X-Image-Meta-Property-x_none_permitted_admin': '1'
}
auth_headers.update(custom_props)
path = "/v1/images/%s" % image_id
response, content = self.http.request(path, 'PUT',
headers=auth_headers)
self.assertEqual(403, response.status)
auth_headers = {
'X-Auth-Token': 'user1:tenant1:joe_soap',
}
custom_props = {
'X-Image-Meta-Property-x_none_permitted_joe_soap': '1'
}
auth_headers.update(custom_props)
path = "/v1/images/%s" % image_id
response, content = self.http.request(path, 'PUT',
headers=auth_headers)
self.assertEqual(403, response.status)
# Verify neither admin nor unknown role can read properties marked with
# '!'
auth_headers = {
'X-Auth-Token': 'user1:tenant1:admin',
}
custom_props = {
'X-Image-Meta-Property-x_none_read': '1'
}
auth_headers.update(custom_props)
auth_headers.update(CREATE_HEADERS)
path = "/v1/images"
response, content = self.http.request(path, 'POST',
headers=auth_headers)
self.assertEqual(201, response.status)
data = jsonutils.loads(content)
image_id = data['image']['id']
auth_headers = {
'X-Auth-Token': 'user1:tenant1:admin',
}
path = "/v1/images/%s" % image_id
response, content = self.http.request(path, 'HEAD',
headers=auth_headers)
self.assertEqual(200, response.status)
self.assertRaises(KeyError,
response.get, 'X-Image-Meta-Property-x_none_read')
auth_headers = {
'X-Auth-Token': 'user1:tenant1:joe_soap',
}
path = "/v1/images/%s" % image_id
response, content = self.http.request(path, 'HEAD',
headers=auth_headers)
self.assertEqual(200, response.status)
self.assertRaises(KeyError,
response.get, 'X-Image-Meta-Property-x_none_read')
# Verify neither admin nor unknown role can update properties marked
# with '!'
auth_headers = {
'X-Auth-Token': 'user1:tenant1:admin',
}
custom_props = {
'X-Image-Meta-Property-x_none_update': '1'
}
auth_headers.update(custom_props)
auth_headers.update(CREATE_HEADERS)
path = "/v1/images"
response, content = self.http.request(path, 'POST',
headers=auth_headers)
self.assertEqual(201, response.status)
data = jsonutils.loads(content)
image_id = data['image']['id']
auth_headers = {
'X-Auth-Token': 'user1:tenant1:admin',
}
custom_props = {
'X-Image-Meta-Property-x_none_update': '2'
}
auth_headers.update(custom_props)
path = "/v1/images/%s" % image_id
response, content = self.http.request(path, 'PUT',
headers=auth_headers)
self.assertEqual(403, response.status)
auth_headers = {
'X-Auth-Token': 'user1:tenant1:joe_soap',
}
custom_props = {
'X-Image-Meta-Property-x_none_update': '2'
}
auth_headers.update(custom_props)
path = "/v1/images/%s" % image_id
response, content = self.http.request(path, 'PUT',
headers=auth_headers)
self.assertEqual(403, response.status)
# Verify neither admin nor unknown role can delete properties marked
# with '!'
auth_headers = {
'X-Auth-Token': 'user1:tenant1:admin',
}
custom_props = {
'X-Image-Meta-Property-x_none_delete': '1'
}
auth_headers.update(custom_props)
auth_headers.update(CREATE_HEADERS)
path = "/v1/images"
response, content = self.http.request(path, 'POST',
headers=auth_headers)
self.assertEqual(201, response.status)
data = jsonutils.loads(content)
image_id = data['image']['id']
auth_headers = {
'X-Auth-Token': 'user1:tenant1:admin',
}
custom_props = {
'X-Glance-Registry-Purge-Props': 'True'
}
auth_headers.update(custom_props)
path = "/v1/images/%s" % image_id
response, content = self.http.request(path, 'PUT',
headers=auth_headers)
self.assertEqual(403, response.status)
auth_headers = {
'X-Auth-Token': 'user1:tenant1:joe_soap',
}
custom_props = {
'X-Glance-Registry-Purge-Props': 'True'
}
auth_headers.update(custom_props)
path = "/v1/images/%s" % image_id
response, content = self.http.request(path, 'PUT',
headers=auth_headers)
self.assertEqual(403, response.status)
| 42.025404 | 79 | 0.570795 |
474bd394553eb2582d01ca3713dc2d830ad39fb0 | 2,216 | py | Python | labour/views/labour_admin_jobcategory_view.py | Siikakala/kompassi | 14cdcd966ab689d762cc885e28b6d15465c216f0 | [
"CC-BY-3.0"
] | null | null | null | labour/views/labour_admin_jobcategory_view.py | Siikakala/kompassi | 14cdcd966ab689d762cc885e28b6d15465c216f0 | [
"CC-BY-3.0"
] | null | null | null | labour/views/labour_admin_jobcategory_view.py | Siikakala/kompassi | 14cdcd966ab689d762cc885e28b6d15465c216f0 | [
"CC-BY-3.0"
] | null | null | null | # encoding: utf-8
from django.contrib import messages
from django.shortcuts import get_object_or_404, render, redirect
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.http import require_http_methods
from core.utils import initialize_form
from ..proxies.job_category.management import JobCategoryManagementProxy
from ..helpers import labour_admin_required
from ..forms import JobCategoryForm
@labour_admin_required
@require_http_methods(["GET", "HEAD", "POST"])
def labour_admin_jobcategory_view(request, vars, event, job_category_slug=None):
    """Create or edit a single job category of an event.

    Without a slug the view works on a fresh, unsaved job category; with a
    slug it edits the existing one (404 if not found). POST actions:
    'save-return' / 'save-edit' validate and save the form, 'remove' deletes
    the category when allowed. Any other POST is rejected with an error
    message. (The 'vars' parameter name is part of the view signature and is
    kept even though it shadows the builtin.)
    """
    meta = event.labour_event_meta

    if job_category_slug is None:
        # No slug: start from a fresh, unsaved job category.
        job_category = JobCategoryManagementProxy(event=event, app_label='labour')
    else:
        # Slug given: edit the existing job category or 404.
        job_category = get_object_or_404(JobCategoryManagementProxy, event=event, slug=job_category_slug)

    form = initialize_form(JobCategoryForm, request, instance=job_category, event=event)

    if request.method == 'POST':
        action = request.POST.get('action')
        if action in ('save-return', 'save-edit'):
            if not form.is_valid():
                messages.error(request, _("Please check the form."))
            else:
                job_category = form.save()
                # Group memberships are derived from job categories; refresh them.
                meta.create_groups_async()
                messages.success(request, _("The job category was saved."))
                if action == 'save-return':
                    return redirect('labour_admin_jobcategories_view', event.slug)
                elif action == 'save-edit':
                    return redirect('labour_admin_jobcategory_view', event.slug, job_category.slug)
                else:
                    raise NotImplementedError(action)
        elif action == 'remove' and job_category.can_remove:
            job_category.delete()
            messages.success(request, _("The job category was removed."))
            return redirect('labour_admin_jobcategories_view', event.slug)
        else:
            messages.error(request, _("Invalid request."))

    vars.update(
        form=form,
        job_category=job_category,
    )
    return render(request, 'labour_admin_jobcategory_view.jade', vars)
bf094db285555e2b411e571ee5b98e24406273c3 | 4,868 | py | Python | gymgeek/settings.py | bugulin/gymgeek-web | 1def491392add2526fb0e8a53098d49ad2fdf983 | [
"Apache-2.0"
] | null | null | null | gymgeek/settings.py | bugulin/gymgeek-web | 1def491392add2526fb0e8a53098d49ad2fdf983 | [
"Apache-2.0"
] | null | null | null | gymgeek/settings.py | bugulin/gymgeek-web | 1def491392add2526fb0e8a53098d49ad2fdf983 | [
"Apache-2.0"
] | null | null | null | """
Django settings for gymgeek project.
Generated by 'django-admin startproject' using Django 1.11.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# The literal value here is only a development fallback; production is
# expected to provide DJANGO_SECRET_KEY through the environment.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', '9e4@&tw46$l31)zrqe3wi+-slqm(ruvz&se0^%9#6(_w3ui!c0')
# SECURITY WARNING: don't run with debug turned on in production!
# Debug mode is opt-in: enabled only when the DEBUG env var is exactly '1'.
DEBUG = (os.getenv('DEBUG') == '1')
# Hosts this deployment may serve: local development plus the Heroku app.
ALLOWED_HOSTS = ['localhost', 'gymgeek-web.herokuapp.com']
# Application definition
INSTALLED_APPS = [
    # Project apps are listed before the Django contrib apps they override.
    'accounts.apps.AccountsConfig',
    'social_django',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    # whitenoise.runserver_nostatic disables runserver's own static handling
    # so WhiteNoise serves static files in development too.
    'whitenoise.runserver_nostatic',
    'django.contrib.staticfiles',
    'gdstorage',
    'core.apps.CoreConfig',
    'lessons.apps.LessonsConfig',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'social_django.middleware.SocialAuthExceptionMiddleware',
    # Project middleware that enforces login for most views.
    'core.decorators.LoginRequiredMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'gymgeek.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # python-social-auth template context.
                'social_django.context_processors.backends',
                'social_django.context_processors.login_redirect',
            ],
        },
    },
]
WSGI_APPLICATION = 'gymgeek.wsgi.application'
# Security hardening; verify with: python manage.py check --deploy
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
X_FRAME_OPTIONS = 'DENY'
# The HTTPS/HSTS settings below apply only when the DEBUG environment
# variable is entirely unset (i.e. a deployment without DEBUG configured).
# NOTE(review): DEBUG='0' also skips this block even though the DEBUG flag
# above would be False — confirm that is intended.
if os.getenv('DEBUG') is None:  # 'is None', not '== None' (PEP 8)
    SECURE_SSL_REDIRECT = True
    SECURE_REDIRECT_EXEMPT = [r'^static/']
    SESSION_COOKIE_SECURE = True
    CSRF_COOKIE_SECURE = True
    SECURE_HSTS_SECONDS = 3600
    SECURE_HSTS_INCLUDE_SUBDOMAINS = True
    SECURE_HSTS_PRELOAD = True
# Logs
# Clearing the console handler's filters removes Django's default
# require-debug filter — presumably so console logging also works with
# DEBUG off (e.g. on Heroku); confirm against the deployment's log setup.
from django.utils.log import DEFAULT_LOGGING
DEFAULT_LOGGING['handlers']['console']['filters'] = []
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
# Connection parameters are assembled by the sibling database module.
from . import database
DATABASES = {
    'default': database.config()
}
# Authentication
AUTH_USER_MODEL = 'accounts.Account'
AUTHENTICATION_BACKENDS = (
    # Google OAuth2 via python-social-auth, plus the standard model backend.
    'social_core.backends.google.GoogleOAuth2',
    'django.contrib.auth.backends.ModelBackend',
)
SOCIAL_AUTH_PIPELINE = (
    'social_core.pipeline.social_auth.social_details',
    'social_core.pipeline.social_auth.social_uid',
    'social_core.pipeline.social_auth.auth_allowed',
    'social_core.pipeline.social_auth.social_user',
    'social_core.pipeline.user.get_username',
    'social_core.pipeline.user.create_user',
    'social_core.pipeline.social_auth.associate_user',
    # Project-specific pipeline steps (accounts app).
    'accounts.pipeline.authorize',
    'accounts.pipeline.load_extra_data',
    'social_core.pipeline.social_auth.load_extra_data',
    'social_core.pipeline.user.user_details',
)
# OAuth2 client credentials come from the environment, never from the repo.
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = os.getenv('GOOGLE_CLIENT_ID')
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = os.getenv('GOOGLE_CLIENT_SECRET')
LOGOUT_REDIRECT_URL = 'home'
LOGIN_URL = 'home'
LOGIN_REDIRECT_URL = 'home'
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'cs-CZ'
TIME_ZONE = 'Europe/Prague'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# WhiteNoise serves gzipped, content-hashed static files collected here.
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# Media files
# http://django-googledrive-storage.readthedocs.io/en/latest/
#MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Path to the Google Drive service-account JSON key used by gdstorage.
GOOGLE_DRIVE_STORAGE_JSON_KEY_FILE = os.getenv('STORAGE_KEY_FILE')
| 27.817143 | 110 | 0.732539 |
f93345c47dedce43d94188bd2ebc8b5bad9326f2 | 1,537 | py | Python | Bing Search API/nkw/orton/comresults.py | brandeddavid/MSFT-Translation-API | 185d43783ff190ed1b2df47d8d58537013bd3fb7 | [
"MIT"
] | null | null | null | Bing Search API/nkw/orton/comresults.py | brandeddavid/MSFT-Translation-API | 185d43783ff190ed1b2df47d8d58537013bd3fb7 | [
"MIT"
] | null | null | null | Bing Search API/nkw/orton/comresults.py | brandeddavid/MSFT-Translation-API | 185d43783ff190ed1b2df47d8d58537013bd3fb7 | [
"MIT"
] | 1 | 2019-05-16T09:55:08.000Z | 2019-05-16T09:55:08.000Z | import requests, re, csv, os, sys
from urllib import request
from bs4 import BeautifulSoup
urlList = []
# Mailbox prefixes considered generic (not a personal contact) and filtered out.
generic = ['admin', 'frontdesk', 'hiring', 'enroll', 'careers', 'career', 'info', 'online', 'help', 'desk', 'career', 'job', 'inquire', 'contact', 'post', 'master', 'general', 'admission', 'admissions', 'advise', 'advice', 'service', 'budget', 'department', 'board', 'noreply', 'webmaster', 'nr']

# Column 2 of ortoncom.csv holds the URLs to scrape.
with open('ortoncom.csv', 'r') as k:
    reader = csv.reader(k, delimiter=',')
    for item in reader:
        urlList.append(item[1])

with open('resultscom.csv', 'a', newline='') as l:
    fieldnames = ['emails', 'http status']
    writer = csv.DictWriter(l, fieldnames=fieldnames)
    writer.writeheader()
    # enumerate() gives a correct progress counter even for duplicate URLs
    # (urlList.index(url) always reported the first occurrence).
    for position, url in enumerate(urlList, start=1):
        print('Url ' + str(position) + ' of ' + str(len(urlList)))
        try:
            r = requests.get(url)
        except requests.RequestException:
            # The original bare 'except' plus an unconditional 'finally'
            # writerow re-used the previous iteration's response (or crashed
            # with NameError on the first URL). On failure, skip this URL.
            continue
        emails = re.findall(r'[\w\-][\w\-\.]+@[\w\-][\w\-\.]+[a-zA-Z]{1,4}', r.text)
        # Filter in a single pass; the original removed items from the list
        # it was iterating over, silently skipping every other candidate.
        finalemails = [
            email for email in set(emails)
            if len(email) >= 8 and email[:email.index('@')] not in generic
        ]
        writer.writerow({'emails': finalemails, 'http status': r.status_code})
| 24.396825 | 296 | 0.532856 |
d581fc2ac6fc8ce14b4f2454935eebc0f5c6df6c | 8,741 | py | Python | balancer/balancer.py | jawilk/balancer-exchange-python | e842a44b89a06da78edee2053dc80418b207ff41 | [
"MIT"
] | 6 | 2021-02-09T04:16:13.000Z | 2022-01-03T00:40:39.000Z | balancer/balancer.py | jawilk/balancer-exchange-python | e842a44b89a06da78edee2053dc80418b207ff41 | [
"MIT"
] | null | null | null | balancer/balancer.py | jawilk/balancer-exchange-python | e842a44b89a06da78edee2053dc80418b207ff41 | [
"MIT"
] | 1 | 2020-08-30T04:25:00.000Z | 2020-08-30T04:25:00.000Z | import json
from web3 import Web3
from utils import load_abi
def initialize_tokens(tokens):
    """Convert raw subgraph token records into Token instances.

    Each record is a dict with 'address', 'balance', 'decimals', 'symbol'
    and 'denormWeight' keys; addresses are checksummed via Web3.
    """
    return [
        Token(
            contract_address=Web3.toChecksumAddress(record['address']),
            balance=record['balance'],
            decimals=record['decimals'],
            symbol=record['symbol'],
            denorm_weight=record['denormWeight'],
        )
        for record in tokens
    ]
class Token:
    '''A single token bound to a Balancer pool, as reported by the subgraph.

    Example record the fields come from:
    {'address': '0xa3bed4e1c75d00fa6f4e5e6922db7261b5e9acd2',
     'balance': '566201.286846114414239124', 'decimals': 18,
     'denormWeight': '8', 'symbol': 'MTA'}
    '''

    def __init__(self, contract_address, balance, decimals, symbol, denorm_weight):
        # Checksummed token contract address.
        self.contract_address = contract_address
        # Pool balance of this token (string, as returned by the subgraph).
        self.balance = balance
        # Number of decimal places the token uses.
        self.decimals = decimals
        # Ticker symbol, e.g. 'MTA'.
        self.symbol = symbol
        # Denormalized pool weight (string, as returned by the subgraph).
        self.denorm_weight = denorm_weight
class Pool:
    '''
    Read-only wrapper around a deployed Balancer BPool contract.

    https://docs.balancer.finance/smart-contracts/api

    Constructor keyword arguments mirror a subgraph pool record, e.g.:
    {'pools': [{'finalized': True, 'id': '0x003a70265a3662342010823bea15dc84c6f7ed54',
                'publicSwap': True, 'swapFee': '0.001', 'tokens': [...],
                'tokensList': [...], 'totalWeight': '10'}]}
    Values supplied up front seed the local cache so they are never
    re-fetched from the chain.
    '''
    ABI_PATH = 'abi/BPool.abi'

    def __init__(
        self,
        w3,
        contract_address,
        finalized=None,
        public_swap=None,
        swap_fee=None,
        total_weight=None,
        tokens_list=None,
        tokens=None,
    ):
        self.contract_address = contract_address
        self.contract_abi = load_abi(self.ABI_PATH)
        self.contract = w3.eth.contract(
            address=self.contract_address,
            abi=self.contract_abi,
        )
        # Pool properties, keyed by the on-chain function name. None means
        # "not fetched yet"; _set_value fills entries lazily on first access.
        self.properties = {
            'isFinalized': finalized,
            'isPublicSwap': public_swap,
            'getSwapFee': swap_fee,
            'getTotalDenormalizedWeight': total_weight,
            'tokens_list': tokens_list,
            'getFinalTokens': initialize_tokens(tokens) if tokens else None,
        }

    def _set_value(self, prop, *argv):
        '''Fetch static information only once, on demand.

        The result is cached under *prop*, so this helper must only be used
        for values that do not depend on call arguments (argument-dependent
        calls such as allowance/balanceOf query the contract directly).
        Fixed: only None counts as "missing" now, so cached falsy values
        (False, 0) are no longer re-fetched on every call.
        '''
        if self.properties.get(prop) is None:
            self.properties[prop] = self.contract.get_function_by_name(prop)(*argv).call()
        return self.properties[prop]

    # --- BConst contract constants ---------------------------------------
    def bone(self):
        return self._set_value('BONE')

    def bpow_precision(self):
        return self._set_value('BPOW_PRECISION')

    def exit_fee(self):
        return self._set_value('EXIT_FEE')

    def init_pool_supply(self):
        return self._set_value('INIT_POOL_SUPPLY')

    def max_bound_tokens(self):
        return self._set_value('MAX_BOUND_TOKENS')

    def max_bpow_base(self):
        return self._set_value('MAX_BPOW_BASE')

    def max_fee(self):
        return self._set_value('MAX_FEE')

    def max_in_ratio(self):
        return self._set_value('MAX_IN_RATIO')

    def max_out_ratio(self):
        return self._set_value('MAX_OUT_RATIO')

    def max_total_weight(self):
        return self._set_value('MAX_TOTAL_WEIGHT')

    def max_weight(self):
        return self._set_value('MAX_WEIGHT')

    def min_balance(self):
        # Fixed: the contract constant is MIN_BALANCE (was misspelled 'MIN_BAlANCE',
        # which get_function_by_name cannot resolve).
        return self._set_value('MIN_BALANCE')

    def min_bound_tokens(self):
        return self._set_value('MIN_BOUND_TOKENS')

    def min_bpow_base(self):
        return self._set_value('MIN_BPOW_BASE')

    def min_fee(self):
        return self._set_value('MIN_FEE')

    def min_weight(self):
        return self._set_value('MIN_WEIGHT')

    # --- BToken / pool state ----------------------------------------------
    def allowance(self, src_address, dst_address):
        # Argument-dependent, so it must not go through the _set_value cache
        # (a single cache key would return src/dst-agnostic stale values).
        # Fixed: the ERC-20/BToken function is 'allowance', not 'ALLOWANCE'.
        return self.contract.functions.allowance(src_address, dst_address).call()

    def balance_of(self, address):
        # Argument-dependent; see allowance(). Fixed: the ERC-20/BToken
        # function is 'balanceOf', not 'BALANCE_OF'.
        return self.contract.functions.balanceOf(address).call()

    def decimals(self):
        return self._set_value('decimals')

    def color(self):
        return self._set_value('getColor')

    def controller(self):
        return self._set_value('getController')

    def final_tokens(self):
        return self._set_value('getFinalTokens')

    def swap_fee(self):
        return self._set_value('getSwapFee')

    def total_denormalized_weight(self):
        return self._set_value('getTotalDenormalizedWeight')

    def is_finalized(self):
        return self._set_value('isFinalized')

    def is_public_swap(self):
        return self._set_value('isPublicSwap')

    def name(self):
        return self._set_value('name')

    def symbol(self):
        return self._set_value('symbol')

    def total_supply(self):
        return self._set_value('totalSupply')

    # --- BMath helpers (pure calculations executed on the contract) -------
    def calc_in_given_out(self, *argv):
        '''argv: tokenBalanceIn, tokenWeightIn, tokenBalanceOut,
        tokenWeightOut, tokenAmountOut, swapFee'''
        return self.contract.functions.calcInGivenOut(*argv).call()

    def calc_out_given_in(self, *argv):
        '''argv: tokenBalanceIn, tokenWeightIn, tokenBalanceOut,
        tokenWeightOut, tokenAmountIn, swapFee'''
        return self.contract.functions.calcOutGivenIn(*argv).call()

    def calc_pool_in_given_single_out(self, *argv):
        '''argv: tokenBalanceOut, tokenWeightOut, poolSupply, totalWeight,
        tokenAmountOut, swapFee'''
        return self.contract.functions.calcPoolInGivenSingleOut(*argv).call()

    def calc_pool_out_given_single_in(self, *argv):
        '''argv: tokenBalanceIn, tokenWeightIn, poolSupply, totalWeight,
        tokenAmountIn, swapFee'''
        return self.contract.functions.calcPoolOutGivenSingleIn(*argv).call()

    def calc_single_in_given_pool_out(self, *argv):
        '''argv: tokenBalanceIn, tokenWeightIn, poolSupply, totalWeight,
        tokenAmountOut, swapFee'''
        return self.contract.functions.calcSingleInGivenPoolOut(*argv).call()

    def calc_single_out_given_pool_in(self, *argv):
        '''argv: tokenBalanceOut, tokenWeightOut, poolSupply, totalWeight,
        poolAmountIn, swapFee'''
        # Fixed: previously called calcPoolOutGivenSingleIn (copy/paste error),
        # which computes the inverse quantity.
        return self.contract.functions.calcSingleOutGivenPoolIn(*argv).call()

    def cal_spot_price(self, *argv):
        '''argv: tokenBalanceIn, tokenWeightIn, tokenBalanceOut,
        tokenWeightOut, swapFee

        (Method name kept as-is — 'cal' instead of 'calc' — for backward
        compatibility with existing callers.)
        '''
        return self.contract.functions.calcSpotPrice(*argv).call()

    # --- per-token queries (always hit the chain) --------------------------
    def get_balance(self, address):
        return self.contract.functions.getBalance(address).call()

    def get_denormalized_weight(self, token_address):
        return self.contract.functions.getDenormalizedWeight(token_address).call()

    def get_normalized_weight(self, token_address):
        # NOTE(review): the on-chain value appears to be fixed-point scaled
        # (the original carried a stray '#/ 10**16') — confirm the scaling
        # at the call site before treating this as a fraction.
        return self.contract.functions.getNormalizedWeight(token_address).call()

    def get_num_tokens(self):
        return self.contract.functions.getNumTokens().call()

    def get_spot_price(self, token_in_address, token_out_address):
        return self.contract.functions.getSpotPrice(token_in_address, token_out_address).call()

    def get_spot_price_sans_fee(self, token_in_address, token_out_address):
        return self.contract.functions.getSpotPriceSansFee(token_in_address, token_out_address).call()

    def is_bound(self, token_address):
        return self.contract.functions.isBound(token_address).call()
| 32.615672 | 766 | 0.648438 |
ba9f53f7489f139751b709ae70f891094376ae67 | 1,686 | py | Python | xlsxwriter/test/comparison/test_chart_errorbars10.py | hugovk/XlsxWriter | e97cc66637d9895480ee32cfb5e561d652d3787b | [
"BSD-2-Clause"
] | null | null | null | xlsxwriter/test/comparison/test_chart_errorbars10.py | hugovk/XlsxWriter | e97cc66637d9895480ee32cfb5e561d652d3787b | [
"BSD-2-Clause"
] | null | null | null | xlsxwriter/test/comparison/test_chart_errorbars10.py | hugovk/XlsxWriter | e97cc66637d9895480ee32cfb5e561d652d3787b | [
"BSD-2-Clause"
] | null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Compare an XlsxWriter-generated workbook against one created by Excel.
    """

    def setUp(self):
        # Reference workbook the generated output is compared against.
        self.set_filename('chart_errorbars10.xlsx')

    def test_create_file(self):
        """Line chart with custom y-error bars (cell ranges plus literal data)."""

        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        chart = workbook.add_chart({'type': 'line'})
        chart.axis_ids = [69198976, 69200896]

        columns = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]
        for letter, values in zip('ABC', columns):
            worksheet.write_column(letter + '1', values)

        # Only the first series carries the custom error bars.
        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$5',
            'values': '=Sheet1!$B$1:$B$5',
            'y_error_bars': {
                'type': 'custom',
                'plus_values': '=Sheet1!$A$1',
                'minus_values': '=Sheet1!$B$1:$B$3',
                'plus_data': [1],
                'minus_data': [2, 4, 6],
            },
        })
        chart.add_series({
            'categories': '=Sheet1!$A$1:$A$5',
            'values': '=Sheet1!$C$1:$C$5',
        })

        worksheet.insert_chart('E9', chart)
        workbook.close()

        self.assertExcelEqual()
| 25.545455 | 79 | 0.511269 |
c176c0c0395349c4c245a1b808f4cfa08320eaa1 | 215 | py | Python | src/brewlog/fermentation/utils.py | zgoda/brewlog | 13a930b328f81d01a2be9aca07d3b14703b80faa | [
"BSD-3-Clause"
] | 3 | 2019-03-11T04:30:06.000Z | 2020-01-26T03:21:52.000Z | src/brewlog/fermentation/utils.py | zgoda/brewlog | 13a930b328f81d01a2be9aca07d3b14703b80faa | [
"BSD-3-Clause"
] | 23 | 2019-02-06T20:37:37.000Z | 2020-06-01T07:08:35.000Z | src/brewlog/fermentation/utils.py | zgoda/brewlog | 13a930b328f81d01a2be9aca07d3b14703b80faa | [
"BSD-3-Clause"
] | null | null | null | from ..ext import db
def update_steps_gravity(step):
    """Carry a fermentation step's final gravity over to the next step.

    When *step* has a final gravity (fg) recorded, the following step's
    original gravity (og) is set to it and that step is queued on the
    database session. Steps without an fg, or without a successor, are
    left untouched.
    """
    if step.fg is None:
        return
    following = step.next_step()
    if not following:
        return
    following.og = step.fg
    db.session.add(following)
| 21.5 | 37 | 0.604651 |
21f2cbc2cbd633b1a8c08ea4f063935efdf97eb2 | 4,705 | py | Python | luigi/tools/deps.py | huygn/luigi | bf7194091d9c934ca4e741033debe73ee693b29f | [
"Apache-2.0"
] | 2 | 2016-08-22T22:52:47.000Z | 2018-07-14T20:00:52.000Z | luigi/tools/deps.py | huygn/luigi | bf7194091d9c934ca4e741033debe73ee693b29f | [
"Apache-2.0"
] | 5 | 2015-01-22T06:54:59.000Z | 2021-01-13T23:09:09.000Z | luigi/tools/deps.py | huygn/luigi | bf7194091d9c934ca4e741033debe73ee693b29f | [
"Apache-2.0"
] | 1 | 2021-11-01T15:11:20.000Z | 2021-11-01T15:11:20.000Z | #!/usr/bin/env python
# Finds all tasks and task outputs on the dependency paths from the given downstream task T
# up to the given source/upstream task S (optional). If the upstream task is not given,
# all upstream tasks on all dependency paths of T will be returned.
# Terms:
# if the execution of Task T depends on the output of task S on a dependency graph,
# T is called a downstream/sink task, S is called an upstream/source task.
# This is a useful and practical way to find all upstream tasks of task T.
# For example suppose you have a daily computation that starts with a task named Daily.
# And suppose you have another task named Aggregate. Daily triggers a few tasks
# which eventually trigger Aggregate. Now, suppose you find a bug in Aggregate.
# You fixed the bug and now you want to rerun it, including all its upstream deps.
#
# To do that you run:
# bin/deps.py --module daily_module Aggregate --daily-param1 xxx --upstream-family Daily
#
# This will output all the tasks on the dependency path between Daily and Aggregate. In
# effect, this is how you find all upstream tasks for Aggregate. Now you can delete its
# output and run Aggregate again. Daily will eventually trigger Aggregate and all tasks on
# the way.
#
# The same code here might be used as a CLI tool as well as a python module.
# In python, invoke find_deps(task, upstream_name) to get a set of all task instances on the
# paths between task T and upstream task S. You can then use the task instances to delete their output or
# perform other computation based on that.
#
# Example:
#
# PYTHONPATH=$PYTHONPATH:/path/to/your/luigi/tasks bin/deps.py \
# --module my.tasks MyDownstreamTask
# --downstream_task_param1 123456
# [--upstream-family MyUpstreamTask]
#
from __future__ import print_function
import luigi.interface
from luigi.contrib.ssh import RemoteTarget
from luigi.postgres import PostgresTarget
from luigi.s3 import S3Target
from luigi.target import FileSystemTarget
from luigi.task import flatten
from luigi import parameter
import sys
from luigi.cmdline_parser import CmdlineParser
import collections
def get_task_requires(task):
    """Return the task's direct dependencies as a flat set."""
    dependencies = flatten(task.requires())
    return set(dependencies)
def dfs_paths(start_task, goal_task_family, path=None):
    """Depth-first search over the dependency graph of *start_task*.

    Yields every task on each path from *start_task* to a task whose family
    is *goal_task_family*; if the goal is None, every visited path is
    yielded. Tasks already on the current path are not revisited, so cycles
    are safe. The same task may be yielded more than once (once per path).
    """
    if path is None:
        path = [start_task]
    if start_task.task_family == goal_task_family or goal_task_family is None:
        for item in path:
            yield item
    # 'child' rather than 'next' so the builtin next() is not shadowed.
    for child in get_task_requires(start_task) - set(path):
        for candidate in dfs_paths(child, goal_task_family, path + [child]):
            yield candidate
class upstream(luigi.task.Config):
    '''
    Used to provide the parameter upstream-family

    Exposed on the command line as ``--upstream-family``. When left at the
    default of None, every upstream task is collected (no family filter).
    '''
    # Task family name of the upstream task S; None means "no filter".
    family = parameter.Parameter(default=None)
def find_deps(task, upstream_task_family):
    '''
    Finds all dependencies that start with the given task and have a path
    to upstream_task_family

    Returns all deps on all paths between task and upstream, as a set of
    task instances.
    '''
    # set(iterable) directly; the intermediate list comprehension was redundant.
    return set(dfs_paths(task, upstream_task_family))
def find_deps_cli():
    '''
    Finds all tasks on all paths from the task given on the command line.

    Parses sys.argv through luigi's CmdlineParser and delegates to
    find_deps() with the optional --upstream-family filter.
    '''
    with CmdlineParser.global_instance(sys.argv[1:]) as parser:
        return find_deps(parser.get_task_obj(), upstream().family)
def get_task_output_description(task_output):
    '''
    Returns a one-line human-readable description of a task's output target.

    Known target types get a "[KIND] location" string; anything else falls
    through to "to be determined". (The initial "n/a" assignment in the
    original was dead code — every branch overwrote it.)
    '''
    if isinstance(task_output, RemoteTarget):
        return "[SSH] {0}:{1}".format(task_output._fs.remote_context.host, task_output.path)
    elif isinstance(task_output, S3Target):
        return "[S3] {0}".format(task_output.path)
    elif isinstance(task_output, FileSystemTarget):
        # Checked after S3Target: more specific targets must win.
        return "[FileSystem] {0}".format(task_output.path)
    elif isinstance(task_output, PostgresTarget):
        return "[DB] {0}:{1}".format(task_output.host, task_output.table)
    return "to be determined"
def main():
    """CLI entry point: print every task on the dependency paths, with a
    description of each of its outputs."""
    deps = find_deps_cli()
    for task in deps:
        task_output = task.output()
        if isinstance(task_output, dict):
            # .values() works on both Python 2 and 3; dict.iteritems() was
            # removed in Python 3 and only the values were used anyway.
            output_descriptions = [get_task_output_description(output) for output in task_output.values()]
        elif isinstance(task_output, collections.Iterable):
            # NOTE(review): collections.Iterable moved to collections.abc and
            # was removed in Python 3.10; kept for the Python 2 compatibility
            # this file targets (see the __future__ import) — revisit on upgrade.
            output_descriptions = [get_task_output_description(output) for output in task_output]
        else:
            output_descriptions = [get_task_output_description(task_output)]
        print("   TASK: {0}".format(task))
        for desc in output_descriptions:
            print("                       : {0}".format(desc))

if __name__ == '__main__':
    main()
| 35.37594 | 116 | 0.72051 |
876cba1efe01c56d23c7db737edce6be0c8ba4a4 | 5,762 | py | Python | Assets/Python/Wonders.py | dguenms/Dawn-of-Civilization | 1c4f510af97a869637cddb4c0859759158cea5ce | [
"MIT"
] | 93 | 2015-11-20T04:13:36.000Z | 2022-03-24T00:03:08.000Z | Assets/Python/Wonders.py | dguenms/Dawn-of-Civilization | 1c4f510af97a869637cddb4c0859759158cea5ce | [
"MIT"
] | 206 | 2015-11-09T00:27:15.000Z | 2021-12-04T19:05:18.000Z | Assets/Python/Wonders.py | dguenms/Dawn-of-Civilization | 1c4f510af97a869637cddb4c0859759158cea5ce | [
"MIT"
] | 117 | 2015-11-08T02:43:46.000Z | 2022-02-12T06:29:00.000Z | from Core import *
from RFCUtils import *
from Events import handler
@handler("cityAcquired")
def escorialAcquiredCity(iOwner, iPlayer, city):
escorialEffect(iPlayer, city)
@handler("cityBuilt")
def escorialFoundedCity(city):
escorialEffect(city.getOwner(), city)
def escorialEffect(iPlayer, city):
if player(iPlayer).isHasBuildingEffect(iEscorial):
if city.isColony() and city.getGameTurnPlayerLost(iPlayer) == -1:
capital = player(iPlayer).getCapitalCity()
iGold = scale(10 + distance(capital, city))
message(iPlayer, 'TXT_KEY_BUILDING_ESCORIAL_EFFECT', iGold, city.getName(), location=city, button=infos.building(iEscorial).getButton())
player(iPlayer).changeGold(iGold)
@handler("combatResult")
def brandenburgGateEffect(winningUnit, losingUnit):
if player(losingUnit).isHasBuildingEffect(iBrandenburgGate):
if any(infos.promotion(iPromotion).isLeader() and losingUnit.isHasPromotion(iPromotion) for iPromotion in infos.promotions()):
player(losingUnit).restoreGeneralThreshold()
@handler("combatResult")
def motherlandCallsEffect(winningUnit, losingUnit):
iLoser = losingUnit.getOwner()
if player(iLoser).isHasBuildingEffect(iMotherlandCalls):
if losingUnit.getLevel() >= 3:
city = cities.owner(iLoser).where(lambda city: not city.isDrafted()).closest(losingUnit)
if city:
city.conscript(True)
player(iLoser).changeConscriptCount(-1)
message(iLoser, 'TXT_KEY_BUILDING_MOTHERLAND_CALLS_EFFECT', losingUnit.getName(), city.getName())
@handler("cityGrowth")
def orientalPearlTowerOnGrowth(city):
if city.isHasBuildingEffect(iOrientalPearlTower):
orientalPearlTowerEffect(city)
@handler("buildingBuilt")
def orientalPearlTowerWhenBuilt(city, iBuilding):
if iBuilding == iOrientalPearlTower:
orientalPearlTowerEffect(city)
def orientalPearlTowerEffect(city):
city.setBuildingCommerceChange(infos.building(iOrientalPearlTower).getBuildingClassType(), CommerceTypes.COMMERCE_RESEARCH, 2 * city.getPopulation())
@handler("cityCaptureGold")
def gurEAmirEffect(city, iPlayer, iGold):
if iGold > 0:
if player(iPlayer).isHasBuildingEffect(iGurEAmir):
wonderCity = cities.owner(iPlayer).building(iGurEAmir).one()
if wonderCity:
message(iPlayer, 'TXT_KEY_BUILDING_GUR_E_AMIR_EFFECT', iGold, city.getName(), wonderCity.getName())
wonderCity.changeCulture(iPlayer, iGold, True)
# Space Elevator effect: +1 commerce per satellite built
@handler("unitBuilt")
def spaceElevatorEffect(city, unit):
	"""Add +1 commerce to the Space Elevator's city whenever a satellite is built.

	The bonus goes to the city that houses the wonder, which is not
	necessarily the city that built the satellite (the 'city' parameter).
	"""
	if unit.getUnitType() == iSatellite:
		# Use a distinct name instead of rebinding the 'city' parameter.
		elevatorCity = getBuildingCity(iSpaceElevator)
		if elevatorCity:
			elevatorCity.changeBuildingYieldChange(infos.building(iSpaceElevator).getBuildingClassType(), YieldTypes.YIELD_COMMERCE, 1)
# Space Elevator effect: +5 commerce per space projectBuilt
@handler("projectBuilt")
def spaceElevatorProjectEffect(city, iProject):
	"""Add +5 commerce to the Space Elevator's city for every spaceship project.

	The bonus goes to the city that houses the wonder, which is not
	necessarily the city that built the project (the 'city' parameter).
	"""
	if infos.project(iProject).isSpaceship():
		# Use a distinct name instead of rebinding the 'city' parameter.
		elevatorCity = getBuildingCity(iSpaceElevator)
		if elevatorCity:
			elevatorCity.changeBuildingYieldChange(infos.building(iSpaceElevator).getBuildingClassType(), YieldTypes.YIELD_COMMERCE, 5)
@handler("buildingBuilt")
def porcelainTowerEffect(city, iBuilding):
if iBuilding == iPorcelainTower:
player(city).updateTradeRoutes()
@handler("cityGrowth")
def empireStateBuildingOnGrowth(city):
if city.isHasBuildingEffect(iEmpireStateBuilding):
empireStateBuildingEffect(city)
@handler("buildingBuilt")
def empireStateBuildingWhenBuilt(city, iBuilding):
if iBuilding == iEmpireStateBuilding:
empireStateBuildingEffect(city)
def empireStateBuildingEffect(city):
city.setBuildingCommerceChange(infos.building(iEmpireStateBuilding).getBuildingClassType(), CommerceTypes.COMMERCE_GOLD, city.getPopulation())
@handler("buildingBuilt")
def machuPicchuEffect(city, iBuilding):
if iBuilding == iMachuPicchu:
iNumPeaks = plots.city_radius(city).where(lambda plot: plot.isPeak()).count()
city.setBuildingCommerceChange(infos.building(iMachuPicchu).getBuildingClassType(), CommerceTypes.COMMERCE_GOLD, iNumPeaks * 2)
@handler("buildingBuilt")
def greatWallEffect(city, iBuilding):
	# Great Wall effect: flag every land tile the builder owns as inside the wall.
	if iBuilding != iGreatWall:
		return
	ownedLand = plots.all().owner(city.getOwner()).where(lambda plot: not plot.isWater())
	for landPlot in ownedLand:
		landPlot.setWithinGreatWall(True)
# Silver Tree Fountain effect: free Great Person whenever a Great General is born
@handler("greatPersonBorn")
def silverTreeFountainEffect(unit, iPlayer):
	# Only units that carry leader experience (Great Generals) trigger the effect.
	if infos.unit(unit).getLeaderExperience() <= 0:
		return
	if not player(iPlayer).isHasBuildingEffect(iSilverTreeFountain):
		return
	# Pick the owner's city with the most accumulated great people progress.
	bestCity = cities.owner(iPlayer).where(lambda c: c.getGreatPeopleProgress() > 0).maximum(lambda c: c.getGreatPeopleProgress())
	if not bestCity:
		return
	# Spawn the great person type that city has progressed furthest toward.
	iGreatPerson = find_max(range(iNumUnits), lambda iUnit: bestCity.getGreatPeopleUnitProgress(iUnit)).result
	if iGreatPerson >= 0:
		player(iPlayer).createGreatPeople(iGreatPerson, False, False, bestCity.getX(), bestCity.getY())
# Nobel Prize effect: additional great people points whenever a Great Person is born in a civ with pleasant relations
@handler("greatPersonBorn")
def nobelPrizeEffect(unit, iPlayer):
	# FIX: stripped dataset-export residue ("| 38.671141 | 150 | 0.787747 |") that was
	# fused onto the final message(...) line and made the module unparseable.
	city = getBuildingCity(iNobelPrize)
	if not city:
		return
	# Great Generals (leader experience) and Great Spies (espionage points) do not qualify.
	if infos.unit(unit).getLeaderExperience() != 0 or infos.unit(unit).getEspionagePoints() != 0:
		return
	# Applies to the wonder owner's own great people, or those of civs at Pleased or better.
	if unit.getOwner() == city.getOwner() or player(unit).AI_getAttitude(city.getOwner()) >= AttitudeTypes.ATTITUDE_PLEASED:
		iGreatPersonType = getDefaultGreatPerson(unit.getUnitType())
		# The bonus scales with the owner's total great people created, with a floor of 4 points.
		iGreatPeoplePoints = max(4, player(city).getGreatPeopleCreated())
		city.changeGreatPeopleProgress(iGreatPeoplePoints)
		city.changeGreatPeopleUnitProgress(iGreatPersonType, iGreatPeoplePoints)
		interface.setDirty(InterfaceDirtyBits.MiscButtons_DIRTY_BIT, True)
		message(city.getOwner(), 'TXT_KEY_BUILDING_NOBEL_PRIZE_EFFECT', adjective(unit), unit.getName(), city.getName(), iGreatPeoplePoints)
5a67114cc87f2c1de34fa7e4d03aa4cf2e6ac743 | 4,614 | py | Python | userbot/modules/snips.py | notudope/Lynx-Userbot | cbefc6e90b3042004314341b7be3b332f2515f3b | [
"PostgreSQL"
] | 1 | 2021-09-13T06:04:27.000Z | 2021-09-13T06:04:27.000Z | userbot/modules/snips.py | notudope/Lynx-Userbot | cbefc6e90b3042004314341b7be3b332f2515f3b | [
"PostgreSQL"
] | 30 | 2022-01-13T22:29:07.000Z | 2022-03-31T22:29:59.000Z | userbot/modules/snips.py | notudope/Lynx-Userbot | cbefc6e90b3042004314341b7be3b332f2515f3b | [
"PostgreSQL"
] | 1 | 2022-01-10T09:24:19.000Z | 2022-01-10T09:24:19.000Z | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
""" Userbot module containing commands for keeping global notes. """
from userbot.events import register
from userbot import CMD_HELP, BOTLOG_CHATID
@register(outgoing=True,
          pattern=r"\$\w*",
          ignore_unsafe=True,
          disable_errors=True)
async def on_snip(event):
    """Expands a ``$<name>`` trigger into the stored snip, if one exists."""
    try:
        from userbot.modules.sql_helper.snips_sql import get_snip
    except AttributeError:
        return
    snip = get_snip(event.text[1:])
    if not snip:
        return
    reply_id = event.message.reply_to_msg_id or None
    if snip.f_mesg_id:
        # Media snips are kept as messages in the bot log chat; fetch and re-send.
        stored = await event.client.get_messages(entity=BOTLOG_CHATID,
                                                 ids=int(snip.f_mesg_id))
        await event.client.send_message(event.chat_id,
                                        stored.message,
                                        reply_to=reply_id,
                                        file=stored.media)
        await event.delete()
    elif snip.reply:
        # Plain-text snip stored directly in the database.
        await event.client.send_message(event.chat_id,
                                        snip.reply,
                                        reply_to=reply_id)
        await event.delete()
@register(outgoing=True, pattern=r"^\.snip (\w*)")
async def on_snip_save(event):
    """For .snip command, saves snips for future use.

    Usage: ``.snip <name> <text>``, or reply to a message with ``.snip <name>``.
    Media snips are forwarded to the bot log chat and referenced by message id.
    """
    try:
        from userbot.modules.sql_helper.snips_sql import add_snip
    except AttributeError:  # BUG FIX: was misspelled `AtrributeError` (raised NameError)
        await event.edit("`Running on Non-SQL mode!`")
        return
    keyword = event.pattern_match.group(1)
    string = event.text.partition(keyword)[2]
    msg = await event.get_reply_message()
    msg_id = None
    if msg and msg.media and not string:
        # Media snip: stash the replied-to message in the log chat and keep its id.
        if BOTLOG_CHATID:
            await event.client.send_message(
                BOTLOG_CHATID, f"#SNIP\
                \nKEYWORD: {keyword}\
                \n\nThe following message is saved as the data for the snip, please do NOT delete it !!"
            )
            msg_o = await event.client.forward_messages(
                entity=BOTLOG_CHATID,
                messages=msg,
                from_peer=event.chat_id,
                silent=True)
            msg_id = msg_o.id
        else:
            await event.edit(
                "`Saving snips with media requires the BOTLOG_CHATID to be set.`"
            )
            return
    elif event.reply_to_msg_id and not string:
        # Text snip taken from the replied-to message.
        rep_msg = await event.get_reply_message()
        string = rep_msg.text
    success = "`Snip {} successfully. Use` **${}** `anywhere to get it`"
    # add_snip returns False when an existing snip with this keyword was updated.
    if add_snip(keyword, string, msg_id) is False:
        await event.edit(success.format('updated', keyword))
    else:
        await event.edit(success.format('saved', keyword))
@register(outgoing=True, pattern="^\\.snips$")
async def on_snip_list(event):
    """For .snips command, lists snips saved by you."""
    try:
        from userbot.modules.sql_helper.snips_sql import get_snips
    except AttributeError:
        await event.edit("`Running on Non-SQL mode!`")
        return
    # Collect one line per saved snip, then build the reply in a single pass.
    rows = []
    for entry in get_snips():
        rows.append(f"`${entry.snip}`\n")
    if rows:
        message = "Available snips:\n" + "".join(rows)
    else:
        message = "`No snips available right now.`"
    await event.edit(message)
@register(outgoing=True, pattern=r"^\.remsnip (\w*)")
async def on_snip_delete(event):
    """For .remsnip command, deletes a snip."""
    try:
        from userbot.modules.sql_helper.snips_sql import remove_snip
    except AttributeError:
        await event.edit("`Running on Non-SQL mode!`")
        return
    name = event.pattern_match.group(1)
    # remove_snip reports True only when a snip with that name actually existed.
    removed = remove_snip(name) is True
    if removed:
        await event.edit(f"`Successfully deleted snip:` **{name}**")
    else:
        await event.edit(f"`Couldn't find snip:` **{name}**")
CMD_HELP.update({
"snips":
"✘ Pʟᴜɢɪɴ : Snips\
\n\n$<snip_name>\
\nUsage: Gets the specified snip, anywhere.\
\n\n⚡𝘾𝙈𝘿⚡: `.snip` <name> <data> or reply to a message with .snip <name>\
\n↳ : Saves the message as a snip (global note) with the name. (Works with pics, docs, and stickers too!)\
\n\n⚡𝘾𝙈𝘿⚡: `.snips`\
\n↳ : Gets all saved snips.\
\n\n⚡𝘾𝙈𝘿⚡: `.remsnip` <snip_name>\
\n↳ : Deletes the specified snip.\
"
})
| 35.492308 | 106 | 0.606415 |
46a5d58ae4a8804cf3d8a776950517b331f84fd1 | 159,807 | py | Python | boto3_type_annotations_with_docs/boto3_type_annotations/machinelearning/client.py | cowboygneox/boto3_type_annotations | 450dce1de4e066b939de7eac2ec560ed1a7ddaa2 | [
"MIT"
] | 119 | 2018-12-01T18:20:57.000Z | 2022-02-02T10:31:29.000Z | boto3_type_annotations_with_docs/boto3_type_annotations/machinelearning/client.py | cowboygneox/boto3_type_annotations | 450dce1de4e066b939de7eac2ec560ed1a7ddaa2 | [
"MIT"
] | 15 | 2018-11-16T00:16:44.000Z | 2021-11-13T03:44:18.000Z | boto3_type_annotations_with_docs/boto3_type_annotations/machinelearning/client.py | cowboygneox/boto3_type_annotations | 450dce1de4e066b939de7eac2ec560ed1a7ddaa2 | [
"MIT"
] | 11 | 2019-05-06T05:26:51.000Z | 2021-09-28T15:27:59.000Z | from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import Union
from typing import List
class Client(BaseClient):
def add_tags(self, Tags: List, ResourceId: str, ResourceType: str) -> Dict:
"""
Adds one or more tags to an object, up to a limit of 10. Each tag consists of a key and an optional value. If you add a tag using a key that is already associated with the ML object, ``AddTags`` updates the tag's value.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/AddTags>`_
**Request Syntax**
::
response = client.add_tags(
Tags=[
{
'Key': 'string',
'Value': 'string'
},
],
ResourceId='string',
ResourceType='BatchPrediction'|'DataSource'|'Evaluation'|'MLModel'
)
**Response Syntax**
::
{
'ResourceId': 'string',
'ResourceType': 'BatchPrediction'|'DataSource'|'Evaluation'|'MLModel'
}
**Response Structure**
- *(dict) --*
Amazon ML returns the following elements.
- **ResourceId** *(string) --*
The ID of the ML object that was tagged.
- **ResourceType** *(string) --*
The type of the ML object that was tagged.
:type Tags: list
:param Tags: **[REQUIRED]**
The key-value pairs to use to create tags. If you specify a key without specifying a value, Amazon ML creates a tag with the specified key and a value of null.
- *(dict) --*
A custom key-value pair associated with an ML object, such as an ML model.
- **Key** *(string) --*
A unique identifier for the tag. Valid characters include Unicode letters, digits, white space, _, ., /, =, +, -, %, and @.
- **Value** *(string) --*
An optional string, typically used to describe or define the tag. Valid characters include Unicode letters, digits, white space, _, ., /, =, +, -, %, and @.
:type ResourceId: string
:param ResourceId: **[REQUIRED]**
The ID of the ML object to tag. For example, ``exampleModelId`` .
:type ResourceType: string
:param ResourceType: **[REQUIRED]**
The type of the ML object to tag.
:rtype: dict
:returns:
"""
pass
def can_paginate(self, operation_name: str = None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:return: ``True`` if the operation can be paginated,
``False`` otherwise.
"""
pass
def create_batch_prediction(self, BatchPredictionId: str, MLModelId: str, BatchPredictionDataSourceId: str, OutputUri: str, BatchPredictionName: str = None) -> Dict:
"""
Generates predictions for a group of observations. The observations to process exist in one or more data files referenced by a ``DataSource`` . This operation creates a new ``BatchPrediction`` , and uses an ``MLModel`` and the data files referenced by the ``DataSource`` as information sources.
``CreateBatchPrediction`` is an asynchronous operation. In response to ``CreateBatchPrediction`` , Amazon Machine Learning (Amazon ML) immediately returns and sets the ``BatchPrediction`` status to ``PENDING`` . After the ``BatchPrediction`` completes, Amazon ML sets the status to ``COMPLETED`` .
You can poll for status updates by using the GetBatchPrediction operation and checking the ``Status`` parameter of the result. After the ``COMPLETED`` status appears, the results are available in the location specified by the ``OutputUri`` parameter.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/CreateBatchPrediction>`_
**Request Syntax**
::
response = client.create_batch_prediction(
BatchPredictionId='string',
BatchPredictionName='string',
MLModelId='string',
BatchPredictionDataSourceId='string',
OutputUri='string'
)
**Response Syntax**
::
{
'BatchPredictionId': 'string'
}
**Response Structure**
- *(dict) --*
Represents the output of a ``CreateBatchPrediction`` operation, and is an acknowledgement that Amazon ML received the request.
The ``CreateBatchPrediction`` operation is asynchronous. You can poll for status updates by using the ``>GetBatchPrediction`` operation and checking the ``Status`` parameter of the result.
- **BatchPredictionId** *(string) --*
A user-supplied ID that uniquely identifies the ``BatchPrediction`` . This value is identical to the value of the ``BatchPredictionId`` in the request.
:type BatchPredictionId: string
:param BatchPredictionId: **[REQUIRED]**
A user-supplied ID that uniquely identifies the ``BatchPrediction`` .
:type BatchPredictionName: string
:param BatchPredictionName:
A user-supplied name or description of the ``BatchPrediction`` . ``BatchPredictionName`` can only use the UTF-8 character set.
:type MLModelId: string
:param MLModelId: **[REQUIRED]**
The ID of the ``MLModel`` that will generate predictions for the group of observations.
:type BatchPredictionDataSourceId: string
:param BatchPredictionDataSourceId: **[REQUIRED]**
The ID of the ``DataSource`` that points to the group of observations to predict.
:type OutputUri: string
:param OutputUri: **[REQUIRED]**
The location of an Amazon Simple Storage Service (Amazon S3) bucket or directory to store the batch prediction results. The following substrings are not allowed in the ``s3 key`` portion of the ``outputURI`` field: \':\', \'//\', \'/./\', \'/../\'.
Amazon ML needs permissions to store and retrieve the logs on your behalf. For information about how to set permissions, see the `Amazon Machine Learning Developer Guide <http://docs.aws.amazon.com/machine-learning/latest/dg>`__ .
:rtype: dict
:returns:
"""
pass
def create_data_source_from_rds(self, DataSourceId: str, RDSData: Dict, RoleARN: str, DataSourceName: str = None, ComputeStatistics: bool = None) -> Dict:
"""
Creates a ``DataSource`` object from an `Amazon Relational Database Service <http://aws.amazon.com/rds/>`__ (Amazon RDS). A ``DataSource`` references data that can be used to perform ``CreateMLModel`` , ``CreateEvaluation`` , or ``CreateBatchPrediction`` operations.
``CreateDataSourceFromRDS`` is an asynchronous operation. In response to ``CreateDataSourceFromRDS`` , Amazon Machine Learning (Amazon ML) immediately returns and sets the ``DataSource`` status to ``PENDING`` . After the ``DataSource`` is created and ready for use, Amazon ML sets the ``Status`` parameter to ``COMPLETED`` . ``DataSource`` in the ``COMPLETED`` or ``PENDING`` state can be used only to perform ``>CreateMLModel`` >, ``CreateEvaluation`` , or ``CreateBatchPrediction`` operations.
If Amazon ML cannot accept the input source, it sets the ``Status`` parameter to ``FAILED`` and includes an error message in the ``Message`` attribute of the ``GetDataSource`` operation response.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/CreateDataSourceFromRDS>`_
**Request Syntax**
::
response = client.create_data_source_from_rds(
DataSourceId='string',
DataSourceName='string',
RDSData={
'DatabaseInformation': {
'InstanceIdentifier': 'string',
'DatabaseName': 'string'
},
'SelectSqlQuery': 'string',
'DatabaseCredentials': {
'Username': 'string',
'Password': 'string'
},
'S3StagingLocation': 'string',
'DataRearrangement': 'string',
'DataSchema': 'string',
'DataSchemaUri': 'string',
'ResourceRole': 'string',
'ServiceRole': 'string',
'SubnetId': 'string',
'SecurityGroupIds': [
'string',
]
},
RoleARN='string',
ComputeStatistics=True|False
)
**Response Syntax**
::
{
'DataSourceId': 'string'
}
**Response Structure**
- *(dict) --*
Represents the output of a ``CreateDataSourceFromRDS`` operation, and is an acknowledgement that Amazon ML received the request.
The ``CreateDataSourceFromRDS`` > operation is asynchronous. You can poll for updates by using the ``GetBatchPrediction`` operation and checking the ``Status`` parameter. You can inspect the ``Message`` when ``Status`` shows up as ``FAILED`` . You can also check the progress of the copy operation by going to the ``DataPipeline`` console and looking up the pipeline using the ``pipelineId`` from the describe call.
- **DataSourceId** *(string) --*
A user-supplied ID that uniquely identifies the datasource. This value should be identical to the value of the ``DataSourceID`` in the request.
:type DataSourceId: string
:param DataSourceId: **[REQUIRED]**
A user-supplied ID that uniquely identifies the ``DataSource`` . Typically, an Amazon Resource Number (ARN) becomes the ID for a ``DataSource`` .
:type DataSourceName: string
:param DataSourceName:
A user-supplied name or description of the ``DataSource`` .
:type RDSData: dict
:param RDSData: **[REQUIRED]**
The data specification of an Amazon RDS ``DataSource`` :
* DatabaseInformation -
* ``DatabaseName`` - The name of the Amazon RDS database.
* ``InstanceIdentifier`` - A unique identifier for the Amazon RDS database instance.
* DatabaseCredentials - AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon RDS database.
* ResourceRole - A role (DataPipelineDefaultResourceRole) assumed by an EC2 instance to carry out the copy task from Amazon RDS to Amazon Simple Storage Service (Amazon S3). For more information, see `Role templates <http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html>`__ for data pipelines.
* ServiceRole - A role (DataPipelineDefaultRole) assumed by the AWS Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see `Role templates <http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html>`__ for data pipelines.
* SecurityInfo - The security information to use to access an RDS DB instance. You need to set up appropriate ingress rules for the security entity IDs provided to allow access to the Amazon RDS instance. Specify a [``SubnetId`` , ``SecurityGroupIds`` ] pair for a VPC-based RDS DB instance.
* SelectSqlQuery - A query that is used to retrieve the observation data for the ``Datasource`` .
* S3StagingLocation - The Amazon S3 location for staging Amazon RDS data. The data retrieved from Amazon RDS using ``SelectSqlQuery`` is stored in this location.
* DataSchemaUri - The Amazon S3 location of the ``DataSchema`` .
* DataSchema - A JSON string representing the schema. This is not required if ``DataSchemaUri`` is specified.
* DataRearrangement - A JSON string that represents the splitting and rearrangement requirements for the ``Datasource`` . Sample - ``\"{\\"splitting\\":{\\"percentBegin\\":10,\\"percentEnd\\":60}}\"``
- **DatabaseInformation** *(dict) --* **[REQUIRED]**
Describes the ``DatabaseName`` and ``InstanceIdentifier`` of an Amazon RDS database.
- **InstanceIdentifier** *(string) --* **[REQUIRED]**
The ID of an RDS DB instance.
- **DatabaseName** *(string) --* **[REQUIRED]**
The name of a database hosted on an RDS DB instance.
- **SelectSqlQuery** *(string) --* **[REQUIRED]**
The query that is used to retrieve the observation data for the ``DataSource`` .
- **DatabaseCredentials** *(dict) --* **[REQUIRED]**
The AWS Identity and Access Management (IAM) credentials that are used connect to the Amazon RDS database.
- **Username** *(string) --* **[REQUIRED]**
The username to be used by Amazon ML to connect to database on an Amazon RDS instance. The username should have sufficient permissions to execute an ``RDSSelectSqlQuery`` query.
- **Password** *(string) --* **[REQUIRED]**
The password to be used by Amazon ML to connect to a database on an RDS DB instance. The password should have sufficient permissions to execute the ``RDSSelectQuery`` query.
- **S3StagingLocation** *(string) --* **[REQUIRED]**
The Amazon S3 location for staging Amazon RDS data. The data retrieved from Amazon RDS using ``SelectSqlQuery`` is stored in this location.
- **DataRearrangement** *(string) --*
A JSON string that represents the splitting and rearrangement processing to be applied to a ``DataSource`` . If the ``DataRearrangement`` parameter is not provided, all of the input data is used to create the ``Datasource`` .
There are multiple parameters that control what data is used to create a datasource:
* **``percentBegin``** Use ``percentBegin`` to indicate the beginning of the range of the data used to create the Datasource. If you do not include ``percentBegin`` and ``percentEnd`` , Amazon ML includes all of the data when creating the datasource.
* **``percentEnd``** Use ``percentEnd`` to indicate the end of the range of the data used to create the Datasource. If you do not include ``percentBegin`` and ``percentEnd`` , Amazon ML includes all of the data when creating the datasource.
* **``complement``** The ``complement`` parameter instructs Amazon ML to use the data that is not included in the range of ``percentBegin`` to ``percentEnd`` to create a datasource. The ``complement`` parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for ``percentBegin`` and ``percentEnd`` , along with the ``complement`` parameter. For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data. Datasource for evaluation: ``{\"splitting\":{\"percentBegin\":0, \"percentEnd\":25}}`` Datasource for training: ``{\"splitting\":{\"percentBegin\":0, \"percentEnd\":25, \"complement\":\"true\"}}``
* **``strategy``** To change how Amazon ML splits the data for a datasource, use the ``strategy`` parameter. The default value for the ``strategy`` parameter is ``sequential`` , meaning that Amazon ML takes all of the data records between the ``percentBegin`` and ``percentEnd`` parameters for the datasource, in the order that the records appear in the input data. The following two ``DataRearrangement`` lines are examples of sequentially ordered training and evaluation datasources: Datasource for evaluation: ``{\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"sequential\"}}`` Datasource for training: ``{\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"sequential\", \"complement\":\"true\"}}`` To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the ``strategy`` parameter to ``random`` and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between ``percentBegin`` and ``percentEnd`` . Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in the cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records. 
The following two ``DataRearrangement`` lines are examples of non-sequentially ordered training and evaluation datasources: Datasource for evaluation: ``{\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"random\", \"randomSeed\"=\"s3://my_s3_path/bucket/file.csv\"}}`` Datasource for training: ``{\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"random\", \"randomSeed\"=\"s3://my_s3_path/bucket/file.csv\", \"complement\":\"true\"}}``
- **DataSchema** *(string) --*
A JSON string that represents the schema for an Amazon RDS ``DataSource`` . The ``DataSchema`` defines the structure of the observation data in the data file(s) referenced in the ``DataSource`` .
A ``DataSchema`` is not required if you specify a ``DataSchemaUri``
Define your ``DataSchema`` as a series of key-value pairs. ``attributes`` and ``excludedVariableNames`` have an array of key-value pairs for their value. Use the following format to define your ``DataSchema`` .
{ \"version\": \"1.0\",
\"recordAnnotationFieldName\": \"F1\",
\"recordWeightFieldName\": \"F2\",
\"targetFieldName\": \"F3\",
\"dataFormat\": \"CSV\",
\"dataFileContainsHeader\": true,
\"attributes\": [
{ \"fieldName\": \"F1\", \"fieldType\": \"TEXT\" }, { \"fieldName\": \"F2\", \"fieldType\": \"NUMERIC\" }, { \"fieldName\": \"F3\", \"fieldType\": \"CATEGORICAL\" }, { \"fieldName\": \"F4\", \"fieldType\": \"NUMERIC\" }, { \"fieldName\": \"F5\", \"fieldType\": \"CATEGORICAL\" }, { \"fieldName\": \"F6\", \"fieldType\": \"TEXT\" }, { \"fieldName\": \"F7\", \"fieldType\": \"WEIGHTED_INT_SEQUENCE\" }, { \"fieldName\": \"F8\", \"fieldType\": \"WEIGHTED_STRING_SEQUENCE\" } ],
\"excludedVariableNames\": [ \"F6\" ] }
- **DataSchemaUri** *(string) --*
The Amazon S3 location of the ``DataSchema`` .
- **ResourceRole** *(string) --* **[REQUIRED]**
The role (DataPipelineDefaultResourceRole) assumed by an Amazon Elastic Compute Cloud (Amazon EC2) instance to carry out the copy operation from Amazon RDS to an Amazon S3 task. For more information, see `Role templates <http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html>`__ for data pipelines.
- **ServiceRole** *(string) --* **[REQUIRED]**
The role (DataPipelineDefaultRole) assumed by AWS Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see `Role templates <http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html>`__ for data pipelines.
- **SubnetId** *(string) --* **[REQUIRED]**
The subnet ID to be used to access a VPC-based RDS DB instance. This attribute is used by Data Pipeline to carry out the copy task from Amazon RDS to Amazon S3.
- **SecurityGroupIds** *(list) --* **[REQUIRED]**
The security group IDs to be used to access a VPC-based RDS DB instance. Ensure that there are appropriate ingress rules set up to allow access to the RDS DB instance. This attribute is used by Data Pipeline to carry out the copy operation from Amazon RDS to an Amazon S3 task.
- *(string) --*
:type RoleARN: string
:param RoleARN: **[REQUIRED]**
The role that Amazon ML assumes on behalf of the user to create and activate a data pipeline in the user\'s account and copy data using the ``SelectSqlQuery`` query from Amazon RDS to Amazon S3.
:type ComputeStatistics: boolean
:param ComputeStatistics:
The compute statistics for a ``DataSource`` . The statistics are generated from the observation data referenced by a ``DataSource`` . Amazon ML uses the statistics internally during ``MLModel`` training. This parameter must be set to ``true`` if the DataSourceneeds to be used for ``MLModel`` training.
:rtype: dict
:returns:
"""
pass
def create_data_source_from_redshift(self, DataSourceId: str, DataSpec: Dict, RoleARN: str, DataSourceName: str = None, ComputeStatistics: bool = None) -> Dict:
"""
Creates a ``DataSource`` from a database hosted on an Amazon Redshift cluster. A ``DataSource`` references data that can be used to perform either ``CreateMLModel`` , ``CreateEvaluation`` , or ``CreateBatchPrediction`` operations.
``CreateDataSourceFromRedshift`` is an asynchronous operation. In response to ``CreateDataSourceFromRedshift`` , Amazon Machine Learning (Amazon ML) immediately returns and sets the ``DataSource`` status to ``PENDING`` . After the ``DataSource`` is created and ready for use, Amazon ML sets the ``Status`` parameter to ``COMPLETED`` . ``DataSource`` in ``COMPLETED`` or ``PENDING`` states can be used to perform only ``CreateMLModel`` , ``CreateEvaluation`` , or ``CreateBatchPrediction`` operations.
If Amazon ML can't accept the input source, it sets the ``Status`` parameter to ``FAILED`` and includes an error message in the ``Message`` attribute of the ``GetDataSource`` operation response.
The observations should be contained in the database hosted on an Amazon Redshift cluster and should be specified by a ``SelectSqlQuery`` query. Amazon ML executes an ``Unload`` command in Amazon Redshift to transfer the result set of the ``SelectSqlQuery`` query to ``S3StagingLocation`` .
After the ``DataSource`` has been created, it's ready for use in evaluations and batch predictions. If you plan to use the ``DataSource`` to train an ``MLModel`` , the ``DataSource`` also requires a recipe. A recipe describes how each input variable will be used in training an ``MLModel`` . Will the variable be included or excluded from training? Will the variable be manipulated; for example, will it be combined with another variable or will it be split apart into word combinations? The recipe provides answers to these questions.
You can't change an existing datasource, but you can copy and modify the settings from an existing Amazon Redshift datasource to create a new datasource. To do so, call ``GetDataSource`` for an existing datasource and copy the values to a ``CreateDataSource`` call. Change the settings that you want to change and make sure that all required fields have the appropriate values.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/CreateDataSourceFromRedshift>`_
**Request Syntax**
::
response = client.create_data_source_from_redshift(
DataSourceId='string',
DataSourceName='string',
DataSpec={
'DatabaseInformation': {
'DatabaseName': 'string',
'ClusterIdentifier': 'string'
},
'SelectSqlQuery': 'string',
'DatabaseCredentials': {
'Username': 'string',
'Password': 'string'
},
'S3StagingLocation': 'string',
'DataRearrangement': 'string',
'DataSchema': 'string',
'DataSchemaUri': 'string'
},
RoleARN='string',
ComputeStatistics=True|False
)
**Response Syntax**
::
{
'DataSourceId': 'string'
}
**Response Structure**
- *(dict) --*
Represents the output of a ``CreateDataSourceFromRedshift`` operation, and is an acknowledgement that Amazon ML received the request.
The ``CreateDataSourceFromRedshift`` operation is asynchronous. You can poll for updates by using the ``GetBatchPrediction`` operation and checking the ``Status`` parameter.
- **DataSourceId** *(string) --*
A user-supplied ID that uniquely identifies the datasource. This value should be identical to the value of the ``DataSourceID`` in the request.
:type DataSourceId: string
:param DataSourceId: **[REQUIRED]**
A user-supplied ID that uniquely identifies the ``DataSource`` .
:type DataSourceName: string
:param DataSourceName:
A user-supplied name or description of the ``DataSource`` .
:type DataSpec: dict
:param DataSpec: **[REQUIRED]**
The data specification of an Amazon Redshift ``DataSource`` :
* DatabaseInformation -
* ``DatabaseName`` - The name of the Amazon Redshift database.
* ``ClusterIdentifier`` - The unique ID for the Amazon Redshift cluster.
* DatabaseCredentials - The AWS Identity and Access Management (IAM) credentials that are used to connect to the Amazon Redshift database.
* SelectSqlQuery - The query that is used to retrieve the observation data for the ``Datasource`` .
* S3StagingLocation - The Amazon Simple Storage Service (Amazon S3) location for staging Amazon Redshift data. The data retrieved from Amazon Redshift using the ``SelectSqlQuery`` query is stored in this location.
* DataSchemaUri - The Amazon S3 location of the ``DataSchema`` .
* DataSchema - A JSON string representing the schema. This is not required if ``DataSchemaUri`` is specified.
* DataRearrangement - A JSON string that represents the splitting and rearrangement requirements for the ``DataSource`` . Sample - ``\"{\\"splitting\\":{\\"percentBegin\\":10,\\"percentEnd\\":60}}\"``
- **DatabaseInformation** *(dict) --* **[REQUIRED]**
Describes the ``DatabaseName`` and ``ClusterIdentifier`` for an Amazon Redshift ``DataSource`` .
- **DatabaseName** *(string) --* **[REQUIRED]**
The name of a database hosted on an Amazon Redshift cluster.
- **ClusterIdentifier** *(string) --* **[REQUIRED]**
The ID of an Amazon Redshift cluster.
- **SelectSqlQuery** *(string) --* **[REQUIRED]**
Describes the SQL Query to execute on an Amazon Redshift database for an Amazon Redshift ``DataSource`` .
- **DatabaseCredentials** *(dict) --* **[REQUIRED]**
Describes AWS Identity and Access Management (IAM) credentials that are used connect to the Amazon Redshift database.
- **Username** *(string) --* **[REQUIRED]**
A username to be used by Amazon Machine Learning (Amazon ML)to connect to a database on an Amazon Redshift cluster. The username should have sufficient permissions to execute the ``RedshiftSelectSqlQuery`` query. The username should be valid for an Amazon Redshift `USER <http://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html>`__ .
- **Password** *(string) --* **[REQUIRED]**
A password to be used by Amazon ML to connect to a database on an Amazon Redshift cluster. The password should have sufficient permissions to execute a ``RedshiftSelectSqlQuery`` query. The password should be valid for an Amazon Redshift `USER <http://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html>`__ .
- **S3StagingLocation** *(string) --* **[REQUIRED]**
Describes an Amazon S3 location to store the result set of the ``SelectSqlQuery`` query.
- **DataRearrangement** *(string) --*
A JSON string that represents the splitting and rearrangement processing to be applied to a ``DataSource`` . If the ``DataRearrangement`` parameter is not provided, all of the input data is used to create the ``Datasource`` .
There are multiple parameters that control what data is used to create a datasource:
* **``percentBegin``** Use ``percentBegin`` to indicate the beginning of the range of the data used to create the Datasource. If you do not include ``percentBegin`` and ``percentEnd`` , Amazon ML includes all of the data when creating the datasource.
* **``percentEnd``** Use ``percentEnd`` to indicate the end of the range of the data used to create the Datasource. If you do not include ``percentBegin`` and ``percentEnd`` , Amazon ML includes all of the data when creating the datasource.
* **``complement``** The ``complement`` parameter instructs Amazon ML to use the data that is not included in the range of ``percentBegin`` to ``percentEnd`` to create a datasource. The ``complement`` parameter is useful if you need to create complementary datasources for training and evaluation. To create a complementary datasource, use the same values for ``percentBegin`` and ``percentEnd`` , along with the ``complement`` parameter. For example, the following two datasources do not share any data, and can be used to train and evaluate a model. The first datasource has 25 percent of the data, and the second one has 75 percent of the data. Datasource for evaluation: ``{\"splitting\":{\"percentBegin\":0, \"percentEnd\":25}}`` Datasource for training: ``{\"splitting\":{\"percentBegin\":0, \"percentEnd\":25, \"complement\":\"true\"}}``
* **``strategy``** To change how Amazon ML splits the data for a datasource, use the ``strategy`` parameter. The default value for the ``strategy`` parameter is ``sequential`` , meaning that Amazon ML takes all of the data records between the ``percentBegin`` and ``percentEnd`` parameters for the datasource, in the order that the records appear in the input data. The following two ``DataRearrangement`` lines are examples of sequentially ordered training and evaluation datasources: Datasource for evaluation: ``{\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"sequential\"}}`` Datasource for training: ``{\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"sequential\", \"complement\":\"true\"}}`` To randomly split the input data into the proportions indicated by the percentBegin and percentEnd parameters, set the ``strategy`` parameter to ``random`` and provide a string that is used as the seed value for the random data splitting (for example, you can use the S3 path to your data as the random seed string). If you choose the random split strategy, Amazon ML assigns each row of data a pseudo-random number between 0 and 100, and then selects the rows that have an assigned number between ``percentBegin`` and ``percentEnd`` . Pseudo-random numbers are assigned using both the input seed string value and the byte offset as a seed, so changing the data results in a different split. Any existing ordering is preserved. The random splitting strategy ensures that variables in the training and evaluation data are distributed similarly. It is useful in the cases where the input data may have an implicit sort order, which would otherwise result in training and evaluation datasources containing non-similar data records. 
The following two ``DataRearrangement`` lines are examples of non-sequentially ordered training and evaluation datasources: Datasource for evaluation: ``{\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"random\", \"randomSeed\"=\"s3://my_s3_path/bucket/file.csv\"}}`` Datasource for training: ``{\"splitting\":{\"percentBegin\":70, \"percentEnd\":100, \"strategy\":\"random\", \"randomSeed\"=\"s3://my_s3_path/bucket/file.csv\", \"complement\":\"true\"}}``
- **DataSchema** *(string) --*
A JSON string that represents the schema for an Amazon Redshift ``DataSource`` . The ``DataSchema`` defines the structure of the observation data in the data file(s) referenced in the ``DataSource`` .
A ``DataSchema`` is not required if you specify a ``DataSchemaUri`` .
Define your ``DataSchema`` as a series of key-value pairs. ``attributes`` and ``excludedVariableNames`` have an array of key-value pairs for their value. Use the following format to define your ``DataSchema`` .
{ \"version\": \"1.0\",
\"recordAnnotationFieldName\": \"F1\",
\"recordWeightFieldName\": \"F2\",
\"targetFieldName\": \"F3\",
\"dataFormat\": \"CSV\",
\"dataFileContainsHeader\": true,
\"attributes\": [
{ \"fieldName\": \"F1\", \"fieldType\": \"TEXT\" }, { \"fieldName\": \"F2\", \"fieldType\": \"NUMERIC\" }, { \"fieldName\": \"F3\", \"fieldType\": \"CATEGORICAL\" }, { \"fieldName\": \"F4\", \"fieldType\": \"NUMERIC\" }, { \"fieldName\": \"F5\", \"fieldType\": \"CATEGORICAL\" }, { \"fieldName\": \"F6\", \"fieldType\": \"TEXT\" }, { \"fieldName\": \"F7\", \"fieldType\": \"WEIGHTED_INT_SEQUENCE\" }, { \"fieldName\": \"F8\", \"fieldType\": \"WEIGHTED_STRING_SEQUENCE\" } ],
\"excludedVariableNames\": [ \"F6\" ] }
- **DataSchemaUri** *(string) --*
Describes the schema location for an Amazon Redshift ``DataSource`` .
:type RoleARN: string
:param RoleARN: **[REQUIRED]**
A fully specified role Amazon Resource Name (ARN). Amazon ML assumes the role on behalf of the user to create the following:
* A security group to allow Amazon ML to execute the ``SelectSqlQuery`` query on an Amazon Redshift cluster
* An Amazon S3 bucket policy to grant Amazon ML read/write permissions on the ``S3StagingLocation``
:type ComputeStatistics: boolean
:param ComputeStatistics:
The compute statistics for a ``DataSource`` . The statistics are generated from the observation data referenced by a ``DataSource`` . Amazon ML uses the statistics internally during ``MLModel`` training. This parameter must be set to ``true`` if the ``DataSource`` needs to be used for ``MLModel`` training.
:rtype: dict
:returns:
"""
pass
    def create_data_source_from_s3(self, DataSourceId: str, DataSpec: Dict, DataSourceName: str = None, ComputeStatistics: bool = None) -> Dict:
        """
        Creates a ``DataSource`` object from data stored in Amazon S3. A ``DataSource``
        references data that can be used to perform ``CreateMLModel`` ,
        ``CreateEvaluation`` , or ``CreateBatchPrediction`` operations.

        ``CreateDataSourceFromS3`` is an asynchronous operation. In response, Amazon
        Machine Learning (Amazon ML) immediately returns and sets the ``DataSource``
        status to ``PENDING`` . After the ``DataSource`` has been created and is ready
        for use, Amazon ML sets the status to ``COMPLETED`` . A ``DataSource`` in the
        ``COMPLETED`` or ``PENDING`` state can be used only for ``CreateMLModel`` ,
        ``CreateEvaluation`` , or ``CreateBatchPrediction`` operations. If Amazon ML
        can't accept the input source, it sets the status to ``FAILED`` and includes an
        error message in the ``Message`` attribute of the ``GetDataSource`` response.

        The observation data must reside in one or more .csv files in an Amazon Simple
        Storage Service (Amazon S3) location, along with a schema that describes the
        data items by name and type. The same schema must be used for all of the data
        files referenced by the ``DataSource`` . If you plan to use the ``DataSource``
        to train an ``MLModel`` , it also needs a recipe that describes how each input
        variable will be used in training.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/CreateDataSourceFromS3>`_

        :type DataSourceId: string
        :param DataSourceId: **[REQUIRED]**
            A user-supplied identifier that uniquely identifies the ``DataSource`` .
        :type DataSourceName: string
        :param DataSourceName:
            A user-supplied name or description of the ``DataSource`` .
        :type DataSpec: dict
        :param DataSpec: **[REQUIRED]**
            The data specification of the ``DataSource`` :

            * ``DataLocationS3`` *(string) --* **[REQUIRED]** The Amazon S3 location of
              the observation data: a data file, or a directory or bucket containing
              data files.
            * ``DataRearrangement`` *(string) --* A JSON string that represents the
              splitting and rearrangement processing applied to the data. Keys under
              ``splitting`` : ``percentBegin`` / ``percentEnd`` select the range of data
              used (all data if omitted); ``complement`` (``\"true\"``) selects the data
              *outside* that range, useful for building complementary
              training/evaluation datasources; ``strategy`` is ``sequential`` (default)
              or ``random`` (with a ``randomSeed`` string such as an S3 path) to split
              pseudo-randomly while preserving existing ordering. Sample -
              ``\"{\\"splitting\\":{\\"percentBegin\\":10,\\"percentEnd\\":60}}\"`` . If
              this parameter is not provided, all of the input data is used.
            * ``DataSchema`` *(string) --* A JSON string that represents the schema: a
              series of key-value pairs (``version`` , ``recordAnnotationFieldName`` ,
              ``recordWeightFieldName`` , ``targetFieldName`` , ``dataFormat`` ,
              ``dataFileContainsHeader`` , ``attributes`` , ``excludedVariableNames`` ),
              where ``attributes`` and ``excludedVariableNames`` take arrays. You must
              provide either the ``DataSchema`` or the ``DataSchemaLocationS3`` .
            * ``DataSchemaLocationS3`` *(string) --* The Amazon S3 location of the
              schema. You must provide either the ``DataSchema`` or the
              ``DataSchemaLocationS3`` .
        :type ComputeStatistics: boolean
        :param ComputeStatistics:
            The compute statistics for a ``DataSource`` . The statistics are generated
            from the observation data referenced by the ``DataSource`` . Amazon ML uses
            the statistics internally during ``MLModel`` training. This parameter must
            be set to ``true`` if the ``DataSource`` needs to be used for ``MLModel``
            training.
        :rtype: dict
        :returns:
            A dict with key ``DataSourceId`` *(string)* , identical to the
            ``DataSourceId`` in the request, acknowledging that Amazon ML received the
            request. The operation is asynchronous; poll for status updates by using
            the ``GetDataSource`` operation and checking the ``Status`` parameter.
        """
        pass
    def create_evaluation(self, EvaluationId: str, MLModelId: str, EvaluationDataSourceId: str, EvaluationName: str = None) -> Dict:
        """
        Creates a new ``Evaluation`` of an ``MLModel`` . An ``MLModel`` is evaluated on
        a set of observations associated to a ``DataSource`` . Like a ``DataSource``
        for an ``MLModel`` , the ``DataSource`` for an ``Evaluation`` contains values
        for the ``Target Variable`` . The ``Evaluation`` compares the predicted result
        for each observation to the actual outcome and provides a summary so that you
        know how effective the ``MLModel`` functions on the test data. Evaluation
        generates a relevant performance metric, such as BinaryAUC, RegressionRMSE or
        MulticlassAvgFScore based on the corresponding ``MLModelType`` : ``BINARY`` ,
        ``REGRESSION`` or ``MULTICLASS`` .

        ``CreateEvaluation`` is an asynchronous operation. In response, Amazon Machine
        Learning (Amazon ML) immediately returns and sets the evaluation status to
        ``PENDING`` . After the ``Evaluation`` is created and ready for use, Amazon ML
        sets the status to ``COMPLETED`` . You can use the ``GetEvaluation`` operation
        to check progress of the evaluation during the creation operation.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/CreateEvaluation>`_

        :type EvaluationId: string
        :param EvaluationId: **[REQUIRED]**
            A user-supplied ID that uniquely identifies the ``Evaluation`` .
        :type EvaluationName: string
        :param EvaluationName:
            A user-supplied name or description of the ``Evaluation`` .
        :type MLModelId: string
        :param MLModelId: **[REQUIRED]**
            The ID of the ``MLModel`` to evaluate. The schema used in creating the
            ``MLModel`` must match the schema of the ``DataSource`` used in the
            ``Evaluation`` .
        :type EvaluationDataSourceId: string
        :param EvaluationDataSourceId: **[REQUIRED]**
            The ID of the ``DataSource`` for the evaluation. The schema of the
            ``DataSource`` must match the schema used to create the ``MLModel`` .
        :rtype: dict
        :returns:
            A dict with key ``EvaluationId`` *(string)* , identical to the
            ``EvaluationId`` in the request, acknowledging that Amazon ML received the
            request. The operation is asynchronous; poll for status updates by using
            the ``GetEvaluation`` operation and checking the ``Status`` parameter.
        """
        pass
    def create_ml_model(self, MLModelId: str, MLModelType: str, TrainingDataSourceId: str, MLModelName: str = None, Parameters: Dict = None, Recipe: str = None, RecipeUri: str = None) -> Dict:
        """
        Creates a new ``MLModel`` using the ``DataSource`` and the recipe as
        information sources.

        An ``MLModel`` is nearly immutable. Users can update only the ``MLModelName``
        and the ``ScoreThreshold`` in an ``MLModel`` without creating a new
        ``MLModel`` .

        ``CreateMLModel`` is an asynchronous operation. In response, Amazon Machine
        Learning (Amazon ML) immediately returns and sets the ``MLModel`` status to
        ``PENDING`` . After the ``MLModel`` has been created and is ready for use,
        Amazon ML sets the status to ``COMPLETED`` . You can use the ``GetMLModel``
        operation to check the progress of the ``MLModel`` during the creation
        operation.

        ``CreateMLModel`` requires a ``DataSource`` with computed statistics, which can
        be created by setting ``ComputeStatistics`` to ``true`` in
        ``CreateDataSourceFromRDS`` , ``CreateDataSourceFromS3`` , or
        ``CreateDataSourceFromRedshift`` operations.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/CreateMLModel>`_

        :type MLModelId: string
        :param MLModelId: **[REQUIRED]**
            A user-supplied ID that uniquely identifies the ``MLModel`` .
        :type MLModelName: string
        :param MLModelName:
            A user-supplied name or description of the ``MLModel`` .
        :type MLModelType: string
        :param MLModelType: **[REQUIRED]**
            The category of supervised learning that this ``MLModel`` will address.
            Choose ``REGRESSION`` to predict a numeric value, ``BINARY`` if the result
            has two possible values, or ``MULTICLASS`` if the result has a limited
            number of values. For more information, see the `Amazon Machine Learning
            Developer Guide <http://docs.aws.amazon.com/machine-learning/latest/dg>`__ .
        :type Parameters: dict
        :param Parameters:
            A list of the training parameters in the ``MLModel`` , implemented as a map
            of string key-value pairs. The current set of training parameters:

            * ``sgd.maxMLModelSizeInBytes`` - Maximum allowed size of the model, an
              integer from ``100000`` to ``2147483648`` ; default ``33554432`` .
            * ``sgd.maxPasses`` - Number of times the training process traverses the
              observations, an integer from ``1`` to ``10000`` ; default ``10`` .
            * ``sgd.shuffleType`` - Whether Amazon ML shuffles the training data;
              ``auto`` or ``none`` (default ``none`` ). Shuffling is strongly
              recommended.
            * ``sgd.l1RegularizationAmount`` - Coefficient regularization L1 norm
              (drives coefficients to zero, producing a sparse feature set); a double
              from ``0`` to ``MAX_DOUBLE`` , not used by default, and cannot be
              combined with ``L2`` . Use sparingly, starting with a small value such
              as ``1.0E-08`` .
            * ``sgd.l2RegularizationAmount`` - Coefficient regularization L2 norm
              (drives coefficients to small, nonzero values); a double from ``0`` to
              ``MAX_DOUBLE`` , not used by default, and cannot be combined with
              ``L1`` . Use sparingly, starting with a small value such as ``1.0E-08`` .
        :type TrainingDataSourceId: string
        :param TrainingDataSourceId: **[REQUIRED]**
            The ``DataSource`` that points to the training data.
        :type Recipe: string
        :param Recipe:
            The data recipe for creating the ``MLModel`` . You must specify either the
            recipe or its URI. If you don\'t specify a recipe or its URI, Amazon ML
            creates a default.
        :type RecipeUri: string
        :param RecipeUri:
            The Amazon Simple Storage Service (Amazon S3) location and file name that
            contains the ``MLModel`` recipe. You must specify either the recipe or its
            URI. If you don\'t specify a recipe or its URI, Amazon ML creates a default.
        :rtype: dict
        :returns:
            A dict with key ``MLModelId`` *(string)* , identical to the ``MLModelId``
            in the request, acknowledging that Amazon ML received the request. The
            operation is asynchronous; poll for status updates by using the
            ``GetMLModel`` operation and checking the ``Status`` parameter.
        """
        pass
    def create_realtime_endpoint(self, MLModelId: str) -> Dict:
        """
        Creates a real-time endpoint for the ``MLModel`` . The endpoint contains the
        URI of the ``MLModel`` ; that is, the location to send real-time prediction
        requests for the specified ``MLModel`` .

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/CreateRealtimeEndpoint>`_

        :type MLModelId: string
        :param MLModelId: **[REQUIRED]**
            The ID assigned to the ``MLModel`` during creation.
        :rtype: dict
        :returns:
            A dict containing:

            * ``MLModelId`` *(string)* - identical to the ``MLModelId`` in the request.
            * ``RealtimeEndpointInfo`` *(dict)* - the endpoint information of the
              ``MLModel`` :

              * ``PeakRequestsPerSecond`` *(integer)* - the maximum processing rate for
                the real-time endpoint, measured in incoming requests per second.
              * ``CreatedAt`` *(datetime)* - the time the request to create the
                real-time endpoint was received, expressed in epoch time.
              * ``EndpointUrl`` *(string)* - the URI that specifies where to send
                real-time prediction requests. The application must wait until the
                real-time endpoint is ready before using this URI.
              * ``EndpointStatus`` *(string)* - the current status of the endpoint:
                ``NONE`` (does not exist or was previously deleted), ``READY`` (ready
                for real-time predictions), or ``UPDATING`` (updating/creating the
                endpoint).
        """
        pass
    def delete_batch_prediction(self, BatchPredictionId: str) -> Dict:
        """
        Assigns the DELETED status to a ``BatchPrediction`` , rendering it unusable.

        After using the ``DeleteBatchPrediction`` operation, you can use the
        ``GetBatchPrediction`` operation to verify that the status of the
        ``BatchPrediction`` changed to DELETED by checking the value of the ``Status``
        parameter.

        **Caution:** The result of the ``DeleteBatchPrediction`` operation is
        irreversible.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/DeleteBatchPrediction>`_

        :type BatchPredictionId: string
        :param BatchPredictionId: **[REQUIRED]**
            A user-supplied ID that uniquely identifies the ``BatchPrediction`` .
        :rtype: dict
        :returns:
            A dict with key ``BatchPredictionId`` *(string)* , identical to the
            ``BatchPredictionId`` in the request.
        """
        pass
    def delete_data_source(self, DataSourceId: str) -> Dict:
        """
        Assigns the DELETED status to a ``DataSource`` , rendering it unusable.

        After using the ``DeleteDataSource`` operation, you can use the
        ``GetDataSource`` operation to verify that the status of the ``DataSource``
        changed to DELETED.

        **Caution:** The results of the ``DeleteDataSource`` operation are
        irreversible.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/DeleteDataSource>`_

        :type DataSourceId: string
        :param DataSourceId: **[REQUIRED]**
            A user-supplied ID that uniquely identifies the ``DataSource`` .
        :rtype: dict
        :returns:
            A dict with key ``DataSourceId`` *(string)* , identical to the
            ``DataSourceId`` in the request.
        """
        pass
    def delete_evaluation(self, EvaluationId: str) -> Dict:
        """
        Assigns the ``DELETED`` status to an ``Evaluation`` , rendering it unusable.

        After invoking the ``DeleteEvaluation`` operation, you can use the
        ``GetEvaluation`` operation to verify that the status of the ``Evaluation``
        changed to ``DELETED`` by checking the value of the ``Status`` parameter.

        **Caution:** The results of the ``DeleteEvaluation`` operation are
        irreversible.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/DeleteEvaluation>`_

        :type EvaluationId: string
        :param EvaluationId: **[REQUIRED]**
            A user-supplied ID that uniquely identifies the ``Evaluation`` to delete.
        :rtype: dict
        :returns:
            A dict with key ``EvaluationId`` *(string)* , identical to the
            ``EvaluationId`` in the request, indicating that Amazon Machine Learning
            (Amazon ML) received the request.
        """
        pass
    def delete_ml_model(self, MLModelId: str) -> Dict:
        """
        Assigns the ``DELETED`` status to an ``MLModel`` , rendering it unusable.

        After using the ``DeleteMLModel`` operation, you can use the ``GetMLModel``
        operation to verify that the status of the ``MLModel`` changed to DELETED by
        checking the value of the ``Status`` parameter.

        **Caution:** The result of the ``DeleteMLModel`` operation is irreversible.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/DeleteMLModel>`_

        :type MLModelId: string
        :param MLModelId: **[REQUIRED]**
            A user-supplied ID that uniquely identifies the ``MLModel`` .
        :rtype: dict
        :returns:
            A dict with key ``MLModelId`` *(string)* , identical to the ``MLModelId``
            in the request.
        """
        pass
def delete_realtime_endpoint(self, MLModelId: str) -> Dict:
"""
Deletes a real time endpoint of an ``MLModel`` .
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/DeleteRealtimeEndpoint>`_
**Request Syntax**
::
response = client.delete_realtime_endpoint(
MLModelId='string'
)
**Response Syntax**
::
{
'MLModelId': 'string',
'RealtimeEndpointInfo': {
'PeakRequestsPerSecond': 123,
'CreatedAt': datetime(2015, 1, 1),
'EndpointUrl': 'string',
'EndpointStatus': 'NONE'|'READY'|'UPDATING'|'FAILED'
}
}
**Response Structure**
- *(dict) --*
Represents the output of an ``DeleteRealtimeEndpoint`` operation.
The result contains the ``MLModelId`` and the endpoint information for the ``MLModel`` .
- **MLModelId** *(string) --*
A user-supplied ID that uniquely identifies the ``MLModel`` . This value should be identical to the value of the ``MLModelId`` in the request.
- **RealtimeEndpointInfo** *(dict) --*
The endpoint information of the ``MLModel``
- **PeakRequestsPerSecond** *(integer) --*
The maximum processing rate for the real-time endpoint for ``MLModel`` , measured in incoming requests per second.
- **CreatedAt** *(datetime) --*
The time that the request to create the real-time endpoint for the ``MLModel`` was received. The time is expressed in epoch time.
- **EndpointUrl** *(string) --*
The URI that specifies where to send real-time prediction requests for the ``MLModel`` .
.. note::
Note
The application must wait until the real-time endpoint is ready before using this URI.
- **EndpointStatus** *(string) --*
The current status of the real-time endpoint for the ``MLModel`` . This element can have one of the following values:
* ``NONE`` - Endpoint does not exist or was previously deleted.
* ``READY`` - Endpoint is ready to be used for real-time predictions.
* ``UPDATING`` - Updating/creating the endpoint.
:type MLModelId: string
:param MLModelId: **[REQUIRED]**
The ID assigned to the ``MLModel`` during creation.
:rtype: dict
:returns:
"""
pass
def delete_tags(self, TagKeys: List, ResourceId: str, ResourceType: str) -> Dict:
"""
Deletes the specified tags associated with an ML object. After this operation is complete, you can't recover deleted tags.
If you specify a tag that doesn't exist, Amazon ML ignores it.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/DeleteTags>`_
**Request Syntax**
::
response = client.delete_tags(
TagKeys=[
'string',
],
ResourceId='string',
ResourceType='BatchPrediction'|'DataSource'|'Evaluation'|'MLModel'
)
**Response Syntax**
::
{
'ResourceId': 'string',
'ResourceType': 'BatchPrediction'|'DataSource'|'Evaluation'|'MLModel'
}
**Response Structure**
- *(dict) --*
Amazon ML returns the following elements.
- **ResourceId** *(string) --*
The ID of the ML object from which tags were deleted.
- **ResourceType** *(string) --*
The type of the ML object from which tags were deleted.
:type TagKeys: list
:param TagKeys: **[REQUIRED]**
One or more tags to delete.
- *(string) --*
:type ResourceId: string
:param ResourceId: **[REQUIRED]**
The ID of the tagged ML object. For example, ``exampleModelId`` .
:type ResourceType: string
:param ResourceType: **[REQUIRED]**
The type of the tagged ML object.
:rtype: dict
:returns:
"""
pass
def describe_batch_predictions(self, FilterVariable: str = None, EQ: str = None, GT: str = None, LT: str = None, GE: str = None, LE: str = None, NE: str = None, Prefix: str = None, SortOrder: str = None, NextToken: str = None, Limit: int = None) -> Dict:
"""
Returns a list of ``BatchPrediction`` operations that match the search criteria in the request.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/DescribeBatchPredictions>`_
**Request Syntax**
::
response = client.describe_batch_predictions(
FilterVariable='CreatedAt'|'LastUpdatedAt'|'Status'|'Name'|'IAMUser'|'MLModelId'|'DataSourceId'|'DataURI',
EQ='string',
GT='string',
LT='string',
GE='string',
LE='string',
NE='string',
Prefix='string',
SortOrder='asc'|'dsc',
NextToken='string',
Limit=123
)
**Response Syntax**
::
{
'Results': [
{
'BatchPredictionId': 'string',
'MLModelId': 'string',
'BatchPredictionDataSourceId': 'string',
'InputDataLocationS3': 'string',
'CreatedByIamUser': 'string',
'CreatedAt': datetime(2015, 1, 1),
'LastUpdatedAt': datetime(2015, 1, 1),
'Name': 'string',
'Status': 'PENDING'|'INPROGRESS'|'FAILED'|'COMPLETED'|'DELETED',
'OutputUri': 'string',
'Message': 'string',
'ComputeTime': 123,
'FinishedAt': datetime(2015, 1, 1),
'StartedAt': datetime(2015, 1, 1),
'TotalRecordCount': 123,
'InvalidRecordCount': 123
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
Represents the output of a ``DescribeBatchPredictions`` operation. The content is essentially a list of ``BatchPrediction`` s.
- **Results** *(list) --*
A list of ``BatchPrediction`` objects that meet the search criteria.
- *(dict) --*
Represents the output of a ``GetBatchPrediction`` operation.
The content consists of the detailed metadata, the status, and the data file information of a ``Batch Prediction`` .
- **BatchPredictionId** *(string) --*
The ID assigned to the ``BatchPrediction`` at creation. This value should be identical to the value of the ``BatchPredictionID`` in the request.
- **MLModelId** *(string) --*
The ID of the ``MLModel`` that generated predictions for the ``BatchPrediction`` request.
- **BatchPredictionDataSourceId** *(string) --*
The ID of the ``DataSource`` that points to the group of observations to predict.
- **InputDataLocationS3** *(string) --*
The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).
- **CreatedByIamUser** *(string) --*
The AWS user account that invoked the ``BatchPrediction`` . The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.
- **CreatedAt** *(datetime) --*
The time that the ``BatchPrediction`` was created. The time is expressed in epoch time.
- **LastUpdatedAt** *(datetime) --*
The time of the most recent edit to the ``BatchPrediction`` . The time is expressed in epoch time.
- **Name** *(string) --*
A user-supplied name or description of the ``BatchPrediction`` .
- **Status** *(string) --*
The status of the ``BatchPrediction`` . This element can have one of the following values:
* ``PENDING`` - Amazon Machine Learning (Amazon ML) submitted a request to generate predictions for a batch of observations.
* ``INPROGRESS`` - The process is underway.
* ``FAILED`` - The request to perform a batch prediction did not run to completion. It is not usable.
* ``COMPLETED`` - The batch prediction process completed successfully.
* ``DELETED`` - The ``BatchPrediction`` is marked as deleted. It is not usable.
- **OutputUri** *(string) --*
The location of an Amazon S3 bucket or directory to receive the operation results. The following substrings are not allowed in the ``s3 key`` portion of the ``outputURI`` field: ':', '//', '/./', '/../'.
- **Message** *(string) --*
A description of the most recent details about processing the batch prediction request.
- **ComputeTime** *(integer) --*
Long integer type that is a 64-bit signed number.
- **FinishedAt** *(datetime) --*
A timestamp represented in epoch time.
- **StartedAt** *(datetime) --*
A timestamp represented in epoch time.
- **TotalRecordCount** *(integer) --*
Long integer type that is a 64-bit signed number.
- **InvalidRecordCount** *(integer) --*
Long integer type that is a 64-bit signed number.
- **NextToken** *(string) --*
The ID of the next page in the paginated results that indicates at least one more page follows.
:type FilterVariable: string
:param FilterVariable:
Use one of the following variables to filter a list of ``BatchPrediction`` :
* ``CreatedAt`` - Sets the search criteria to the ``BatchPrediction`` creation date.
* ``Status`` - Sets the search criteria to the ``BatchPrediction`` status.
* ``Name`` - Sets the search criteria to the contents of the ``BatchPrediction`` **** ``Name`` .
* ``IAMUser`` - Sets the search criteria to the user account that invoked the ``BatchPrediction`` creation.
* ``MLModelId`` - Sets the search criteria to the ``MLModel`` used in the ``BatchPrediction`` .
* ``DataSourceId`` - Sets the search criteria to the ``DataSource`` used in the ``BatchPrediction`` .
* ``DataURI`` - Sets the search criteria to the data file(s) used in the ``BatchPrediction`` . The URL can identify either a file or an Amazon Simple Storage Solution (Amazon S3) bucket or directory.
:type EQ: string
:param EQ:
The equal to operator. The ``BatchPrediction`` results will have ``FilterVariable`` values that exactly match the value specified with ``EQ`` .
:type GT: string
:param GT:
The greater than operator. The ``BatchPrediction`` results will have ``FilterVariable`` values that are greater than the value specified with ``GT`` .
:type LT: string
:param LT:
The less than operator. The ``BatchPrediction`` results will have ``FilterVariable`` values that are less than the value specified with ``LT`` .
:type GE: string
:param GE:
The greater than or equal to operator. The ``BatchPrediction`` results will have ``FilterVariable`` values that are greater than or equal to the value specified with ``GE`` .
:type LE: string
:param LE:
The less than or equal to operator. The ``BatchPrediction`` results will have ``FilterVariable`` values that are less than or equal to the value specified with ``LE`` .
:type NE: string
:param NE:
The not equal to operator. The ``BatchPrediction`` results will have ``FilterVariable`` values not equal to the value specified with ``NE`` .
:type Prefix: string
:param Prefix:
A string that is found at the beginning of a variable, such as ``Name`` or ``Id`` .
For example, a ``Batch Prediction`` operation could have the ``Name`` ``2014-09-09-HolidayGiftMailer`` . To search for this ``BatchPrediction`` , select ``Name`` for the ``FilterVariable`` and any of the following strings for the ``Prefix`` :
* 2014-09
* 2014-09-09
* 2014-09-09-Holiday
:type SortOrder: string
:param SortOrder:
A two-value parameter that determines the sequence of the resulting list of ``MLModel`` s.
* ``asc`` - Arranges the list in ascending order (A-Z, 0-9).
* ``dsc`` - Arranges the list in descending order (Z-A, 9-0).
Results are sorted by ``FilterVariable`` .
:type NextToken: string
:param NextToken:
An ID of the page in the paginated results.
:type Limit: integer
:param Limit:
The number of pages of information to include in the result. The range of acceptable values is ``1`` through ``100`` . The default value is ``100`` .
:rtype: dict
:returns:
"""
pass
def describe_data_sources(self, FilterVariable: str = None, EQ: str = None, GT: str = None, LT: str = None, GE: str = None, LE: str = None, NE: str = None, Prefix: str = None, SortOrder: str = None, NextToken: str = None, Limit: int = None) -> Dict:
"""
Returns a list of ``DataSource`` that match the search criteria in the request.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/DescribeDataSources>`_
**Request Syntax**
::
response = client.describe_data_sources(
FilterVariable='CreatedAt'|'LastUpdatedAt'|'Status'|'Name'|'DataLocationS3'|'IAMUser',
EQ='string',
GT='string',
LT='string',
GE='string',
LE='string',
NE='string',
Prefix='string',
SortOrder='asc'|'dsc',
NextToken='string',
Limit=123
)
**Response Syntax**
::
{
'Results': [
{
'DataSourceId': 'string',
'DataLocationS3': 'string',
'DataRearrangement': 'string',
'CreatedByIamUser': 'string',
'CreatedAt': datetime(2015, 1, 1),
'LastUpdatedAt': datetime(2015, 1, 1),
'DataSizeInBytes': 123,
'NumberOfFiles': 123,
'Name': 'string',
'Status': 'PENDING'|'INPROGRESS'|'FAILED'|'COMPLETED'|'DELETED',
'Message': 'string',
'RedshiftMetadata': {
'RedshiftDatabase': {
'DatabaseName': 'string',
'ClusterIdentifier': 'string'
},
'DatabaseUserName': 'string',
'SelectSqlQuery': 'string'
},
'RDSMetadata': {
'Database': {
'InstanceIdentifier': 'string',
'DatabaseName': 'string'
},
'DatabaseUserName': 'string',
'SelectSqlQuery': 'string',
'ResourceRole': 'string',
'ServiceRole': 'string',
'DataPipelineId': 'string'
},
'RoleARN': 'string',
'ComputeStatistics': True|False,
'ComputeTime': 123,
'FinishedAt': datetime(2015, 1, 1),
'StartedAt': datetime(2015, 1, 1)
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
Represents the query results from a DescribeDataSources operation. The content is essentially a list of ``DataSource`` .
- **Results** *(list) --*
A list of ``DataSource`` that meet the search criteria.
- *(dict) --*
Represents the output of the ``GetDataSource`` operation.
The content consists of the detailed metadata and data file information and the current status of the ``DataSource`` .
- **DataSourceId** *(string) --*
The ID that is assigned to the ``DataSource`` during creation.
- **DataLocationS3** *(string) --*
The location and name of the data in Amazon Simple Storage Service (Amazon S3) that is used by a ``DataSource`` .
- **DataRearrangement** *(string) --*
A JSON string that represents the splitting and rearrangement requirement used when this ``DataSource`` was created.
- **CreatedByIamUser** *(string) --*
The AWS user account from which the ``DataSource`` was created. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.
- **CreatedAt** *(datetime) --*
The time that the ``DataSource`` was created. The time is expressed in epoch time.
- **LastUpdatedAt** *(datetime) --*
The time of the most recent edit to the ``BatchPrediction`` . The time is expressed in epoch time.
- **DataSizeInBytes** *(integer) --*
The total number of observations contained in the data files that the ``DataSource`` references.
- **NumberOfFiles** *(integer) --*
The number of data files referenced by the ``DataSource`` .
- **Name** *(string) --*
A user-supplied name or description of the ``DataSource`` .
- **Status** *(string) --*
The current status of the ``DataSource`` . This element can have one of the following values:
* PENDING - Amazon Machine Learning (Amazon ML) submitted a request to create a ``DataSource`` .
* INPROGRESS - The creation process is underway.
* FAILED - The request to create a ``DataSource`` did not run to completion. It is not usable.
* COMPLETED - The creation process completed successfully.
* DELETED - The ``DataSource`` is marked as deleted. It is not usable.
- **Message** *(string) --*
A description of the most recent details about creating the ``DataSource`` .
- **RedshiftMetadata** *(dict) --*
Describes the ``DataSource`` details specific to Amazon Redshift.
- **RedshiftDatabase** *(dict) --*
Describes the database details required to connect to an Amazon Redshift database.
- **DatabaseName** *(string) --*
The name of a database hosted on an Amazon Redshift cluster.
- **ClusterIdentifier** *(string) --*
The ID of an Amazon Redshift cluster.
- **DatabaseUserName** *(string) --*
A username to be used by Amazon Machine Learning (Amazon ML)to connect to a database on an Amazon Redshift cluster. The username should have sufficient permissions to execute the ``RedshiftSelectSqlQuery`` query. The username should be valid for an Amazon Redshift `USER <http://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html>`__ .
- **SelectSqlQuery** *(string) --*
The SQL query that is specified during CreateDataSourceFromRedshift . Returns only if ``Verbose`` is true in GetDataSourceInput.
- **RDSMetadata** *(dict) --*
The datasource details that are specific to Amazon RDS.
- **Database** *(dict) --*
The database details required to connect to an Amazon RDS.
- **InstanceIdentifier** *(string) --*
The ID of an RDS DB instance.
- **DatabaseName** *(string) --*
The name of a database hosted on an RDS DB instance.
- **DatabaseUserName** *(string) --*
The username to be used by Amazon ML to connect to database on an Amazon RDS instance. The username should have sufficient permissions to execute an ``RDSSelectSqlQuery`` query.
- **SelectSqlQuery** *(string) --*
The SQL query that is supplied during CreateDataSourceFromRDS . Returns only if ``Verbose`` is true in ``GetDataSourceInput`` .
- **ResourceRole** *(string) --*
The role (DataPipelineDefaultResourceRole) assumed by an Amazon EC2 instance to carry out the copy task from Amazon RDS to Amazon S3. For more information, see `Role templates <http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html>`__ for data pipelines.
- **ServiceRole** *(string) --*
The role (DataPipelineDefaultRole) assumed by the Data Pipeline service to monitor the progress of the copy task from Amazon RDS to Amazon S3. For more information, see `Role templates <http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html>`__ for data pipelines.
- **DataPipelineId** *(string) --*
The ID of the Data Pipeline instance that is used to carry to copy data from Amazon RDS to Amazon S3. You can use the ID to find details about the instance in the Data Pipeline console.
- **RoleARN** *(string) --*
The Amazon Resource Name (ARN) of an `AWS IAM Role <http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html#roles-about-termsandconcepts>`__ , such as the following: arn:aws:iam::account:role/rolename.
- **ComputeStatistics** *(boolean) --*
The parameter is ``true`` if statistics need to be generated from the observation data.
- **ComputeTime** *(integer) --*
Long integer type that is a 64-bit signed number.
- **FinishedAt** *(datetime) --*
A timestamp represented in epoch time.
- **StartedAt** *(datetime) --*
A timestamp represented in epoch time.
- **NextToken** *(string) --*
An ID of the next page in the paginated results that indicates at least one more page follows.
:type FilterVariable: string
:param FilterVariable:
Use one of the following variables to filter a list of ``DataSource`` :
* ``CreatedAt`` - Sets the search criteria to ``DataSource`` creation dates.
* ``Status`` - Sets the search criteria to ``DataSource`` statuses.
* ``Name`` - Sets the search criteria to the contents of ``DataSource`` **** ``Name`` .
* ``DataUri`` - Sets the search criteria to the URI of data files used to create the ``DataSource`` . The URI can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.
* ``IAMUser`` - Sets the search criteria to the user account that invoked the ``DataSource`` creation.
:type EQ: string
:param EQ:
The equal to operator. The ``DataSource`` results will have ``FilterVariable`` values that exactly match the value specified with ``EQ`` .
:type GT: string
:param GT:
The greater than operator. The ``DataSource`` results will have ``FilterVariable`` values that are greater than the value specified with ``GT`` .
:type LT: string
:param LT:
The less than operator. The ``DataSource`` results will have ``FilterVariable`` values that are less than the value specified with ``LT`` .
:type GE: string
:param GE:
The greater than or equal to operator. The ``DataSource`` results will have ``FilterVariable`` values that are greater than or equal to the value specified with ``GE`` .
:type LE: string
:param LE:
The less than or equal to operator. The ``DataSource`` results will have ``FilterVariable`` values that are less than or equal to the value specified with ``LE`` .
:type NE: string
:param NE:
The not equal to operator. The ``DataSource`` results will have ``FilterVariable`` values not equal to the value specified with ``NE`` .
:type Prefix: string
:param Prefix:
A string that is found at the beginning of a variable, such as ``Name`` or ``Id`` .
For example, a ``DataSource`` could have the ``Name`` ``2014-09-09-HolidayGiftMailer`` . To search for this ``DataSource`` , select ``Name`` for the ``FilterVariable`` and any of the following strings for the ``Prefix`` :
* 2014-09
* 2014-09-09
* 2014-09-09-Holiday
:type SortOrder: string
:param SortOrder:
A two-value parameter that determines the sequence of the resulting list of ``DataSource`` .
* ``asc`` - Arranges the list in ascending order (A-Z, 0-9).
* ``dsc`` - Arranges the list in descending order (Z-A, 9-0).
Results are sorted by ``FilterVariable`` .
:type NextToken: string
:param NextToken:
The ID of the page in the paginated results.
:type Limit: integer
:param Limit:
The maximum number of ``DataSource`` to include in the result.
:rtype: dict
:returns:
"""
pass
def describe_evaluations(self, FilterVariable: str = None, EQ: str = None, GT: str = None, LT: str = None, GE: str = None, LE: str = None, NE: str = None, Prefix: str = None, SortOrder: str = None, NextToken: str = None, Limit: int = None) -> Dict:
"""
Returns a list of ``DescribeEvaluations`` that match the search criteria in the request.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/DescribeEvaluations>`_
**Request Syntax**
::
response = client.describe_evaluations(
FilterVariable='CreatedAt'|'LastUpdatedAt'|'Status'|'Name'|'IAMUser'|'MLModelId'|'DataSourceId'|'DataURI',
EQ='string',
GT='string',
LT='string',
GE='string',
LE='string',
NE='string',
Prefix='string',
SortOrder='asc'|'dsc',
NextToken='string',
Limit=123
)
**Response Syntax**
::
{
'Results': [
{
'EvaluationId': 'string',
'MLModelId': 'string',
'EvaluationDataSourceId': 'string',
'InputDataLocationS3': 'string',
'CreatedByIamUser': 'string',
'CreatedAt': datetime(2015, 1, 1),
'LastUpdatedAt': datetime(2015, 1, 1),
'Name': 'string',
'Status': 'PENDING'|'INPROGRESS'|'FAILED'|'COMPLETED'|'DELETED',
'PerformanceMetrics': {
'Properties': {
'string': 'string'
}
},
'Message': 'string',
'ComputeTime': 123,
'FinishedAt': datetime(2015, 1, 1),
'StartedAt': datetime(2015, 1, 1)
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
Represents the query results from a ``DescribeEvaluations`` operation. The content is essentially a list of ``Evaluation`` .
- **Results** *(list) --*
A list of ``Evaluation`` that meet the search criteria.
- *(dict) --*
Represents the output of ``GetEvaluation`` operation.
The content consists of the detailed metadata and data file information and the current status of the ``Evaluation`` .
- **EvaluationId** *(string) --*
The ID that is assigned to the ``Evaluation`` at creation.
- **MLModelId** *(string) --*
The ID of the ``MLModel`` that is the focus of the evaluation.
- **EvaluationDataSourceId** *(string) --*
The ID of the ``DataSource`` that is used to evaluate the ``MLModel`` .
- **InputDataLocationS3** *(string) --*
The location and name of the data in Amazon Simple Storage Server (Amazon S3) that is used in the evaluation.
- **CreatedByIamUser** *(string) --*
The AWS user account that invoked the evaluation. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.
- **CreatedAt** *(datetime) --*
The time that the ``Evaluation`` was created. The time is expressed in epoch time.
- **LastUpdatedAt** *(datetime) --*
The time of the most recent edit to the ``Evaluation`` . The time is expressed in epoch time.
- **Name** *(string) --*
A user-supplied name or description of the ``Evaluation`` .
- **Status** *(string) --*
The status of the evaluation. This element can have one of the following values:
* ``PENDING`` - Amazon Machine Learning (Amazon ML) submitted a request to evaluate an ``MLModel`` .
* ``INPROGRESS`` - The evaluation is underway.
* ``FAILED`` - The request to evaluate an ``MLModel`` did not run to completion. It is not usable.
* ``COMPLETED`` - The evaluation process completed successfully.
* ``DELETED`` - The ``Evaluation`` is marked as deleted. It is not usable.
- **PerformanceMetrics** *(dict) --*
Measurements of how well the ``MLModel`` performed, using observations referenced by the ``DataSource`` . One of the following metrics is returned, based on the type of the ``MLModel`` :
* BinaryAUC: A binary ``MLModel`` uses the Area Under the Curve (AUC) technique to measure performance.
* RegressionRMSE: A regression ``MLModel`` uses the Root Mean Square Error (RMSE) technique to measure performance. RMSE measures the difference between predicted and actual values for a single variable.
* MulticlassAvgFScore: A multiclass ``MLModel`` uses the F1 score technique to measure performance.
For more information about performance metrics, please see the `Amazon Machine Learning Developer Guide <http://docs.aws.amazon.com/machine-learning/latest/dg>`__ .
- **Properties** *(dict) --*
- *(string) --*
- *(string) --*
- **Message** *(string) --*
A description of the most recent details about evaluating the ``MLModel`` .
- **ComputeTime** *(integer) --*
Long integer type that is a 64-bit signed number.
- **FinishedAt** *(datetime) --*
A timestamp represented in epoch time.
- **StartedAt** *(datetime) --*
A timestamp represented in epoch time.
- **NextToken** *(string) --*
The ID of the next page in the paginated results that indicates at least one more page follows.
:type FilterVariable: string
:param FilterVariable:
Use one of the following variable to filter a list of ``Evaluation`` objects:
* ``CreatedAt`` - Sets the search criteria to the ``Evaluation`` creation date.
* ``Status`` - Sets the search criteria to the ``Evaluation`` status.
* ``Name`` - Sets the search criteria to the contents of ``Evaluation`` **** ``Name`` .
* ``IAMUser`` - Sets the search criteria to the user account that invoked an ``Evaluation`` .
* ``MLModelId`` - Sets the search criteria to the ``MLModel`` that was evaluated.
* ``DataSourceId`` - Sets the search criteria to the ``DataSource`` used in ``Evaluation`` .
* ``DataUri`` - Sets the search criteria to the data file(s) used in ``Evaluation`` . The URL can identify either a file or an Amazon Simple Storage Solution (Amazon S3) bucket or directory.
:type EQ: string
:param EQ:
The equal to operator. The ``Evaluation`` results will have ``FilterVariable`` values that exactly match the value specified with ``EQ`` .
:type GT: string
:param GT:
The greater than operator. The ``Evaluation`` results will have ``FilterVariable`` values that are greater than the value specified with ``GT`` .
:type LT: string
:param LT:
The less than operator. The ``Evaluation`` results will have ``FilterVariable`` values that are less than the value specified with ``LT`` .
:type GE: string
:param GE:
The greater than or equal to operator. The ``Evaluation`` results will have ``FilterVariable`` values that are greater than or equal to the value specified with ``GE`` .
:type LE: string
:param LE:
The less than or equal to operator. The ``Evaluation`` results will have ``FilterVariable`` values that are less than or equal to the value specified with ``LE`` .
:type NE: string
:param NE:
The not equal to operator. The ``Evaluation`` results will have ``FilterVariable`` values not equal to the value specified with ``NE`` .
:type Prefix: string
:param Prefix:
A string that is found at the beginning of a variable, such as ``Name`` or ``Id`` .
For example, an ``Evaluation`` could have the ``Name`` ``2014-09-09-HolidayGiftMailer`` . To search for this ``Evaluation`` , select ``Name`` for the ``FilterVariable`` and any of the following strings for the ``Prefix`` :
* 2014-09
* 2014-09-09
* 2014-09-09-Holiday
:type SortOrder: string
:param SortOrder:
A two-value parameter that determines the sequence of the resulting list of ``Evaluation`` .
* ``asc`` - Arranges the list in ascending order (A-Z, 0-9).
* ``dsc`` - Arranges the list in descending order (Z-A, 9-0).
Results are sorted by ``FilterVariable`` .
:type NextToken: string
:param NextToken:
The ID of the page in the paginated results.
:type Limit: integer
:param Limit:
The maximum number of ``Evaluation`` to include in the result.
:rtype: dict
:returns:
"""
pass
def describe_ml_models(self, FilterVariable: str = None, EQ: str = None, GT: str = None, LT: str = None, GE: str = None, LE: str = None, NE: str = None, Prefix: str = None, SortOrder: str = None, NextToken: str = None, Limit: int = None) -> Dict:
"""
Returns a list of ``MLModel`` that match the search criteria in the request.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/DescribeMLModels>`_
**Request Syntax**
::
response = client.describe_ml_models(
FilterVariable='CreatedAt'|'LastUpdatedAt'|'Status'|'Name'|'IAMUser'|'TrainingDataSourceId'|'RealtimeEndpointStatus'|'MLModelType'|'Algorithm'|'TrainingDataURI',
EQ='string',
GT='string',
LT='string',
GE='string',
LE='string',
NE='string',
Prefix='string',
SortOrder='asc'|'dsc',
NextToken='string',
Limit=123
)
**Response Syntax**
::
{
'Results': [
{
'MLModelId': 'string',
'TrainingDataSourceId': 'string',
'CreatedByIamUser': 'string',
'CreatedAt': datetime(2015, 1, 1),
'LastUpdatedAt': datetime(2015, 1, 1),
'Name': 'string',
'Status': 'PENDING'|'INPROGRESS'|'FAILED'|'COMPLETED'|'DELETED',
'SizeInBytes': 123,
'EndpointInfo': {
'PeakRequestsPerSecond': 123,
'CreatedAt': datetime(2015, 1, 1),
'EndpointUrl': 'string',
'EndpointStatus': 'NONE'|'READY'|'UPDATING'|'FAILED'
},
'TrainingParameters': {
'string': 'string'
},
'InputDataLocationS3': 'string',
'Algorithm': 'sgd',
'MLModelType': 'REGRESSION'|'BINARY'|'MULTICLASS',
'ScoreThreshold': ...,
'ScoreThresholdLastUpdatedAt': datetime(2015, 1, 1),
'Message': 'string',
'ComputeTime': 123,
'FinishedAt': datetime(2015, 1, 1),
'StartedAt': datetime(2015, 1, 1)
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
Represents the output of a ``DescribeMLModels`` operation. The content is essentially a list of ``MLModel`` .
- **Results** *(list) --*
A list of ``MLModel`` that meet the search criteria.
- *(dict) --*
Represents the output of a ``GetMLModel`` operation.
The content consists of the detailed metadata and the current status of the ``MLModel`` .
- **MLModelId** *(string) --*
The ID assigned to the ``MLModel`` at creation.
- **TrainingDataSourceId** *(string) --*
The ID of the training ``DataSource`` . The ``CreateMLModel`` operation uses the ``TrainingDataSourceId`` .
- **CreatedByIamUser** *(string) --*
The AWS user account from which the ``MLModel`` was created. The account type can be either an AWS root account or an AWS Identity and Access Management (IAM) user account.
- **CreatedAt** *(datetime) --*
The time that the ``MLModel`` was created. The time is expressed in epoch time.
- **LastUpdatedAt** *(datetime) --*
The time of the most recent edit to the ``MLModel`` . The time is expressed in epoch time.
- **Name** *(string) --*
A user-supplied name or description of the ``MLModel`` .
- **Status** *(string) --*
The current status of an ``MLModel`` . This element can have one of the following values:
* ``PENDING`` - Amazon Machine Learning (Amazon ML) submitted a request to create an ``MLModel`` .
* ``INPROGRESS`` - The creation process is underway.
* ``FAILED`` - The request to create an ``MLModel`` didn't run to completion. The model isn't usable.
* ``COMPLETED`` - The creation process completed successfully.
* ``DELETED`` - The ``MLModel`` is marked as deleted. It isn't usable.
- **SizeInBytes** *(integer) --*
Long integer type that is a 64-bit signed number.
- **EndpointInfo** *(dict) --*
The current endpoint of the ``MLModel`` .
- **PeakRequestsPerSecond** *(integer) --*
The maximum processing rate for the real-time endpoint for ``MLModel`` , measured in incoming requests per second.
- **CreatedAt** *(datetime) --*
The time that the request to create the real-time endpoint for the ``MLModel`` was received. The time is expressed in epoch time.
- **EndpointUrl** *(string) --*
The URI that specifies where to send real-time prediction requests for the ``MLModel`` .
.. note::
Note
The application must wait until the real-time endpoint is ready before using this URI.
- **EndpointStatus** *(string) --*
The current status of the real-time endpoint for the ``MLModel`` . This element can have one of the following values:
* ``NONE`` - Endpoint does not exist or was previously deleted.
* ``READY`` - Endpoint is ready to be used for real-time predictions.
* ``UPDATING`` - Updating/creating the endpoint.
- **TrainingParameters** *(dict) --*
A list of the training parameters in the ``MLModel`` . The list is implemented as a map of key-value pairs.
The following is the current set of training parameters:
* ``sgd.maxMLModelSizeInBytes`` - The maximum allowed size of the model. Depending on the input data, the size of the model might affect its performance. The value is an integer that ranges from ``100000`` to ``2147483648`` . The default value is ``33554432`` .
* ``sgd.maxPasses`` - The number of times that the training process traverses the observations to build the ``MLModel`` . The value is an integer that ranges from ``1`` to ``10000`` . The default value is ``10`` .
* ``sgd.shuffleType`` - Whether Amazon ML shuffles the training data. Shuffling the data improves a model's ability to find the optimal solution for a variety of data types. The valid values are ``auto`` and ``none`` . The default value is ``none`` .
* ``sgd.l1RegularizationAmount`` - The coefficient regularization L1 norm, which controls overfitting the data by penalizing large coefficients. This parameter tends to drive coefficients to zero, resulting in sparse feature set. If you use this parameter, start by specifying a small value, such as ``1.0E-08`` . The value is a double that ranges from ``0`` to ``MAX_DOUBLE`` . The default is to not use L1 normalization. This parameter can't be used when ``L2`` is specified. Use this parameter sparingly.
* ``sgd.l2RegularizationAmount`` - The coefficient regularization L2 norm, which controls overfitting the data by penalizing large coefficients. This tends to drive coefficients to small, nonzero values. If you use this parameter, start by specifying a small value, such as ``1.0E-08`` . The value is a double that ranges from ``0`` to ``MAX_DOUBLE`` . The default is to not use L2 normalization. This parameter can't be used when ``L1`` is specified. Use this parameter sparingly.
- *(string) --*
String type.
- *(string) --*
String type.
- **InputDataLocationS3** *(string) --*
The location of the data file or directory in Amazon Simple Storage Service (Amazon S3).
- **Algorithm** *(string) --*
The algorithm used to train the ``MLModel`` . The following algorithm is supported:
* ``SGD`` -- Stochastic gradient descent. The goal of ``SGD`` is to minimize the gradient of the loss function.
- **MLModelType** *(string) --*
Identifies the ``MLModel`` category. The following are the available types:
* ``REGRESSION`` - Produces a numeric result. For example, "What price should a house be listed at?"
* ``BINARY`` - Produces one of two possible results. For example, "Is this a child-friendly web site?".
* ``MULTICLASS`` - Produces one of several possible results. For example, "Is this a HIGH-, LOW-, or MEDIUM-risk trade?".
- **ScoreThreshold** *(float) --*
- **ScoreThresholdLastUpdatedAt** *(datetime) --*
The time of the most recent edit to the ``ScoreThreshold`` . The time is expressed in epoch time.
- **Message** *(string) --*
A description of the most recent details about accessing the ``MLModel`` .
- **ComputeTime** *(integer) --*
Long integer type that is a 64-bit signed number.
- **FinishedAt** *(datetime) --*
A timestamp represented in epoch time.
- **StartedAt** *(datetime) --*
A timestamp represented in epoch time.
- **NextToken** *(string) --*
The ID of the next page in the paginated results that indicates at least one more page follows.
:type FilterVariable: string
:param FilterVariable:
Use one of the following variables to filter a list of ``MLModel`` :
* ``CreatedAt`` - Sets the search criteria to ``MLModel`` creation date.
* ``Status`` - Sets the search criteria to ``MLModel`` status.
* ``Name`` - Sets the search criteria to the contents of ``MLModel`` **** ``Name`` .
* ``IAMUser`` - Sets the search criteria to the user account that invoked the ``MLModel`` creation.
* ``TrainingDataSourceId`` - Sets the search criteria to the ``DataSource`` used to train one or more ``MLModel`` .
* ``RealtimeEndpointStatus`` - Sets the search criteria to the ``MLModel`` real-time endpoint status.
* ``MLModelType`` - Sets the search criteria to ``MLModel`` type: binary, regression, or multi-class.
* ``Algorithm`` - Sets the search criteria to the algorithm that the ``MLModel`` uses.
* ``TrainingDataURI`` - Sets the search criteria to the data file(s) used in training a ``MLModel`` . The URL can identify either a file or an Amazon Simple Storage Service (Amazon S3) bucket or directory.
:type EQ: string
:param EQ:
The equal to operator. The ``MLModel`` results will have ``FilterVariable`` values that exactly match the value specified with ``EQ`` .
:type GT: string
:param GT:
The greater than operator. The ``MLModel`` results will have ``FilterVariable`` values that are greater than the value specified with ``GT`` .
:type LT: string
:param LT:
The less than operator. The ``MLModel`` results will have ``FilterVariable`` values that are less than the value specified with ``LT`` .
:type GE: string
:param GE:
The greater than or equal to operator. The ``MLModel`` results will have ``FilterVariable`` values that are greater than or equal to the value specified with ``GE`` .
:type LE: string
:param LE:
The less than or equal to operator. The ``MLModel`` results will have ``FilterVariable`` values that are less than or equal to the value specified with ``LE`` .
:type NE: string
:param NE:
The not equal to operator. The ``MLModel`` results will have ``FilterVariable`` values not equal to the value specified with ``NE`` .
:type Prefix: string
:param Prefix:
A string that is found at the beginning of a variable, such as ``Name`` or ``Id`` .
For example, an ``MLModel`` could have the ``Name`` ``2014-09-09-HolidayGiftMailer`` . To search for this ``MLModel`` , select ``Name`` for the ``FilterVariable`` and any of the following strings for the ``Prefix`` :
* 2014-09
* 2014-09-09
* 2014-09-09-Holiday
:type SortOrder: string
:param SortOrder:
A two-value parameter that determines the sequence of the resulting list of ``MLModel`` .
* ``asc`` - Arranges the list in ascending order (A-Z, 0-9).
* ``dsc`` - Arranges the list in descending order (Z-A, 9-0).
Results are sorted by ``FilterVariable`` .
:type NextToken: string
:param NextToken:
The ID of the page in the paginated results.
:type Limit: integer
:param Limit:
The number of pages of information to include in the result. The range of acceptable values is ``1`` through ``100`` . The default value is ``100`` .
:rtype: dict
:returns:
"""
pass
def describe_tags(self, ResourceId: str, ResourceType: str) -> Dict:
    """
    Describe the tags attached to an Amazon ML object.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/DescribeTags>`_

    Example request::

        response = client.describe_tags(
            ResourceId='string',
            ResourceType='BatchPrediction'|'DataSource'|'Evaluation'|'MLModel'
        )

    The response dict echoes the resource identity and lists its tags:

    - ``ResourceId`` *(string)* -- the ID of the tagged ML object.
    - ``ResourceType`` *(string)* -- the type of the tagged ML object.
    - ``Tags`` *(list)* -- one dict per tag, each with a ``Key`` (unique
      identifier) and an optional ``Value`` (descriptive string). Valid
      characters for both include Unicode letters, digits, white space,
      _, ., /, =, +, -, %, and @.

    :type ResourceId: string
    :param ResourceId: **[REQUIRED]**
        The ID of the ML object. For example, ``exampleModelId``.
    :type ResourceType: string
    :param ResourceType: **[REQUIRED]**
        The type of the ML object.
    :rtype: dict
    :returns: The resource identity together with its associated tags.
    """
    pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
    """
    Build a presigned URL for a given client method and its arguments.

    :type ClientMethod: string
    :param ClientMethod: Name of the client method to presign for.
    :type Params: dict
    :param Params: The parameters that would normally be passed to
        ``ClientMethod``.
    :type ExpiresIn: int
    :param ExpiresIn: Validity period of the presigned URL in seconds;
        defaults to one hour (3600 seconds).
    :type HttpMethod: string
    :param HttpMethod: HTTP method to use on the generated URL. When
        omitted, the method defined in the operation's model is used.
    :returns: The presigned URL.
    """
    pass
def get_batch_prediction(self, BatchPredictionId: str) -> Dict:
    """
    Return a ``BatchPrediction`` with detailed metadata, status, and data
    file information for a ``Batch Prediction`` request.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/GetBatchPrediction>`_

    Example request::

        response = client.get_batch_prediction(
            BatchPredictionId='string'
        )

    The response dict describes the batch prediction. Notable fields:

    - ``BatchPredictionId`` *(string)* -- ID assigned at creation; matches
      the ``BatchPredictionID`` supplied in the request.
    - ``MLModelId`` *(string)* -- ID of the ``MLModel`` that generated the
      predictions.
    - ``BatchPredictionDataSourceId`` *(string)* -- ID of the ``DataSource``
      used to create the ``BatchPrediction``.
    - ``InputDataLocationS3`` *(string)* -- Amazon S3 location of the input
      data file or directory.
    - ``CreatedByIamUser`` *(string)* -- AWS account (root or IAM user)
      that invoked the ``BatchPrediction``.
    - ``CreatedAt`` / ``LastUpdatedAt`` *(datetime)* -- creation and most
      recent edit times, expressed in epoch time.
    - ``Name`` *(string)* -- user-supplied name or description.
    - ``Status`` *(string)* -- one of ``PENDING`` (request submitted),
      ``INPROGRESS``, ``FAILED`` (did not run to completion; not usable),
      ``COMPLETED``, or ``DELETED`` (marked deleted; not usable).
    - ``OutputUri`` *(string)* -- Amazon S3 bucket or directory receiving
      the operation results.
    - ``LogUri`` *(string)* -- link to the log file of the
      ``CreateBatchPrediction`` operation.
    - ``Message`` *(string)* -- most recent processing details.
    - ``ComputeTime`` *(integer)* -- approximate normalized CPU time in
      milliseconds; only available in the ``COMPLETED`` state.
    - ``FinishedAt`` *(datetime)* -- epoch time the job reached
      ``COMPLETED`` or ``FAILED``; only available in those states.
    - ``StartedAt`` *(datetime)* -- epoch time the job entered
      ``INPROGRESS``; unavailable while still ``PENDING``.
    - ``TotalRecordCount`` / ``InvalidRecordCount`` *(integer)* -- total and
      invalid records seen while processing the ``BatchPrediction``.

    :type BatchPredictionId: string
    :param BatchPredictionId: **[REQUIRED]**
        An ID assigned to the ``BatchPrediction`` at creation.
    :rtype: dict
    :returns: The batch prediction's metadata, status, and file details.
    """
    pass
def get_data_source(self, DataSourceId: str, Verbose: bool = None) -> Dict:
    """
    Return a ``DataSource`` with metadata, data file information, and its
    current status.

    ``GetDataSource`` provides results in normal or verbose format. The
    verbose format adds the schema description and the list of files
    pointed to by the ``DataSource`` to the normal format.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/GetDataSource>`_

    Example request::

        response = client.get_data_source(
            DataSourceId='string',
            Verbose=True|False
        )

    The response dict describes the data source. Notable fields:

    - ``DataSourceId`` *(string)* -- ID assigned at creation; matches the
      ``DataSourceId`` supplied in the request.
    - ``DataLocationS3`` *(string)* -- Amazon S3 location of the data file
      or directory.
    - ``DataRearrangement`` *(string)* -- JSON string describing the
      splitting and rearrangement used when the ``DataSource`` was created.
    - ``CreatedByIamUser`` *(string)* -- AWS account (root or IAM user)
      from which the ``DataSource`` was created.
    - ``CreatedAt`` / ``LastUpdatedAt`` *(datetime)* -- creation and most
      recent edit times, expressed in epoch time.
    - ``DataSizeInBytes`` *(integer)* -- total size of the observations.
    - ``NumberOfFiles`` *(integer)* -- number of referenced data files.
    - ``Name`` *(string)* -- user-supplied name or description.
    - ``Status`` *(string)* -- one of ``PENDING``, ``INPROGRESS``,
      ``FAILED`` (did not run to completion; not usable), ``COMPLETED``,
      or ``DELETED`` (marked deleted; not usable).
    - ``LogUri`` *(string)* -- link to the log file of the
      ``CreateDataSourceFrom*`` operations.
    - ``Message`` *(string)* -- most recent creation details.
    - ``RedshiftMetadata`` *(dict)* -- Amazon Redshift specifics:
      ``RedshiftDatabase`` (``DatabaseName``, ``ClusterIdentifier``),
      ``DatabaseUserName`` (must be able to run the
      ``RedshiftSelectSqlQuery`` and be a valid Redshift
      `USER <http://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_USER.html>`__),
      and ``SelectSqlQuery`` (returned only when ``Verbose`` is true).
    - ``RDSMetadata`` *(dict)* -- Amazon RDS specifics: ``Database``
      (``InstanceIdentifier``, ``DatabaseName``), ``DatabaseUserName``,
      ``SelectSqlQuery`` (returned only when ``Verbose`` is true),
      ``ResourceRole`` and ``ServiceRole`` (see `Role templates
      <http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-iam-roles.html>`__
      for data pipelines), and ``DataPipelineId`` of the copy task from
      Amazon RDS to Amazon S3.
    - ``RoleARN`` *(string)* -- ARN of an `AWS IAM Role
      <http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html#roles-about-termsandconcepts>`__,
      such as arn:aws:iam::account:role/rolename.
    - ``ComputeStatistics`` *(boolean)* -- ``true`` if statistics are to be
      generated from the observation data.
    - ``ComputeTime`` *(integer)* -- approximate normalized CPU time in
      milliseconds; only available when the ``DataSource`` is
      ``COMPLETED`` and ``ComputeStatistics`` is true.
    - ``FinishedAt`` / ``StartedAt`` *(datetime)* -- epoch times the
      ``DataSource`` reached ``COMPLETED``/``FAILED`` and entered
      ``INPROGRESS`` respectively.
    - ``DataSourceSchema`` *(string)* -- schema used by all of the data
      files; provided only as part of the verbose format.

    :type DataSourceId: string
    :param DataSourceId: **[REQUIRED]**
        The ID assigned to the ``DataSource`` at creation.
    :type Verbose: boolean
    :param Verbose:
        Whether ``GetDataSource`` should return ``DataSourceSchema``.
        If true, ``DataSourceSchema`` is returned; if false, it is not.
    :rtype: dict
    :returns: The data source's metadata, file details, and status.
    """
    pass
def get_evaluation(self, EvaluationId: str) -> Dict:
    """
    Return an ``Evaluation`` that includes metadata as well as the current
    status of the ``Evaluation``.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/GetEvaluation>`_

    Example request::

        response = client.get_evaluation(
            EvaluationId='string'
        )

    The response dict describes the evaluation. Notable fields:

    - ``EvaluationId`` *(string)* -- same as the ``EvaluationId`` in the
      request.
    - ``MLModelId`` *(string)* -- ID of the ``MLModel`` that was the focus
      of the evaluation.
    - ``EvaluationDataSourceId`` *(string)* -- the ``DataSource`` used for
      this evaluation.
    - ``InputDataLocationS3`` *(string)* -- Amazon S3 location of the input
      data file or directory.
    - ``CreatedByIamUser`` *(string)* -- AWS account (root or IAM user)
      that invoked the evaluation.
    - ``CreatedAt`` / ``LastUpdatedAt`` *(datetime)* -- creation and most
      recent edit times, expressed in epoch time.
    - ``Name`` *(string)* -- user-supplied name or description.
    - ``Status`` *(string)* -- one of ``PENDING``, ``INPROGRESS``,
      ``FAILED`` (did not run to completion; not usable), ``COMPLETED``,
      or ``DELETED`` (marked deleted; not usable).
    - ``PerformanceMetrics`` *(dict)* -- how well the ``MLModel`` performed
      on the observations referenced by the ``DataSource``. One metric is
      returned based on the model type: BinaryAUC (Area Under the Curve)
      for binary models, RegressionRMSE (Root Mean Square Error) for
      regression models, or MulticlassAvgFScore (F1 score) for multiclass
      models. Metrics live in the nested ``Properties`` string-to-string
      map. See the `Amazon Machine Learning Developer Guide
      <http://docs.aws.amazon.com/machine-learning/latest/dg>`__ for more
      about performance metrics.
    - ``LogUri`` *(string)* -- link to the log file of the
      ``CreateEvaluation`` operation.
    - ``Message`` *(string)* -- most recent evaluation details.
    - ``ComputeTime`` *(integer)* -- approximate normalized CPU time in
      milliseconds; only available in the ``COMPLETED`` state.
    - ``FinishedAt`` *(datetime)* -- epoch time the evaluation reached
      ``COMPLETED`` or ``FAILED``; only available in those states.
    - ``StartedAt`` *(datetime)* -- epoch time the evaluation entered
      ``INPROGRESS``; unavailable while still ``PENDING``.

    :type EvaluationId: string
    :param EvaluationId: **[REQUIRED]**
        The ID of the ``Evaluation`` to retrieve. The evaluation of each
        ``MLModel`` is recorded and cataloged; the ID provides the means
        to access the information.
    :rtype: dict
    :returns: The evaluation's metadata, performance metrics, and status.
    """
    pass
def get_ml_model(self, MLModelId: str, Verbose: bool = None) -> Dict:
    """
    Return an ``MLModel`` with detailed metadata, data source information,
    and the current status of the ``MLModel``.

    ``GetMLModel`` provides results in normal or verbose format.

    See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/GetMLModel>`_

    Example request::

        response = client.get_ml_model(
            MLModelId='string',
            Verbose=True|False
        )

    The response dict describes the model. Notable fields:

    - ``MLModelId`` *(string)* -- same as the ``MLModelId`` in the request.
    - ``TrainingDataSourceId`` *(string)* -- ID of the training
      ``DataSource``.
    - ``CreatedByIamUser`` *(string)* -- AWS account (root or IAM user)
      from which the ``MLModel`` was created.
    - ``CreatedAt`` / ``LastUpdatedAt`` *(datetime)* -- creation and most
      recent edit times, expressed in epoch time.
    - ``Name`` *(string)* -- user-supplied name or description.
    - ``Status`` *(string)* -- one of ``PENDING``, ``INPROGRESS``,
      ``FAILED`` (did not run to completion; not usable), ``COMPLETED``,
      or ``DELETED`` (marked deleted; not usable).
    - ``SizeInBytes`` *(integer)* -- 64-bit signed long.
    - ``EndpointInfo`` *(dict)* -- the current real-time endpoint of the
      ``MLModel``: ``PeakRequestsPerSecond`` (maximum processing rate in
      incoming requests per second), ``CreatedAt`` (epoch time the
      endpoint-creation request was received), ``EndpointUrl`` (URI for
      real-time prediction requests; the application must wait until the
      endpoint is ready before using it), and ``EndpointStatus`` (one of
      ``NONE``, ``READY``, ``UPDATING``, or ``FAILED``).
    - ``TrainingParameters`` *(dict)* -- the training parameters as a map
      of key-value pairs. The current set is:

      * ``sgd.maxMLModelSizeInBytes`` - maximum allowed model size; an
        integer from ``100000`` to ``2147483648``, default ``33554432``.
        Depending on the input data, model size might affect performance.
      * ``sgd.maxPasses`` - number of times the training process traverses
        the observations; an integer from ``1`` to ``10000``, default
        ``10``.
      * ``sgd.shuffleType`` - whether Amazon ML shuffles the training
        data; ``auto`` or ``none``, default ``none``. Shuffling improves a
        model's ability to find the optimal solution for a variety of data
        types; shuffling your data is strongly recommended.
      * ``sgd.l1RegularizationAmount`` - L1-norm coefficient
        regularization; controls overfitting by penalizing large
        coefficients, driving them to zero and producing a sparse feature
        set. Start small (e.g. ``1.0E-08``); a double from ``0`` to
        ``MAX_DOUBLE``, default off. Cannot be combined with ``L2``; use
        sparingly.
      * ``sgd.l2RegularizationAmount`` - L2-norm coefficient
        regularization; controls overfitting by driving coefficients to
        small, nonzero values. Start small (e.g. ``1.0E-08``); a double
        from ``0`` to ``MAX_DOUBLE``, default off. Cannot be combined with
        ``L1``; use sparingly.
    - ``InputDataLocationS3`` *(string)* -- Amazon S3 location of the data
      file or directory.
    - ``MLModelType`` *(string)* -- the model category: REGRESSION
      (numeric result, e.g. "What price should a house be listed at?"),
      BINARY (one of two results, e.g. "Is this an e-commerce website?"),
      or MULTICLASS (one of several results, e.g. "Is this a HIGH, LOW or
      MEDIUM risk trade?").
    - ``ScoreThreshold`` *(float)* -- used in binary classification models
      to mark the boundary between positive and negative predictions:
      outputs >= the threshold yield a positive result (e.g. ``true``),
      outputs below it a negative one (e.g. ``false``).
    - ``ScoreThresholdLastUpdatedAt`` *(datetime)* -- epoch time of the
      most recent edit to ``ScoreThreshold``.
    - ``LogUri`` *(string)* -- link to the log file of the
      ``CreateMLModel`` operation.
    - ``Message`` *(string)* -- most recent access details.
    - ``ComputeTime`` *(integer)* -- approximate normalized CPU time in
      milliseconds; only available in the ``COMPLETED`` state.
    - ``FinishedAt`` / ``StartedAt`` *(datetime)* -- epoch times the model
      reached ``COMPLETED``/``FAILED`` and entered ``INPROGRESS``
      respectively.
    - ``Recipe`` *(string)* -- the recipe used when training: detailed
      information about the observation data and the manipulations to
      perform on it during training. Verbose format only.
    - ``Schema`` *(string)* -- schema used by all data files referenced by
      the ``DataSource``. Verbose format only.

    :type MLModelId: string
    :param MLModelId: **[REQUIRED]**
        The ID assigned to the ``MLModel`` at creation.
    :type Verbose: boolean
    :param Verbose:
        Whether ``GetMLModel`` should return ``Recipe``.
        If true, ``Recipe`` is returned; if false, it is not.
    :rtype: dict
    :returns: The model's metadata, endpoint info, training parameters,
        and status.
    """
    pass
def get_paginator(self, operation_name: str = None) -> Paginator:
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object.
"""
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters
section of the service docs for a list of available waiters.
:returns: The specified waiter object.
:rtype: botocore.waiter.Waiter
"""
pass
def predict(self, MLModelId: str, Record: Dict, PredictEndpoint: str) -> Dict:
"""
Generates a prediction for the observation using the specified ``ML Model`` .
.. note::
Note
Not all response parameters will be populated. Whether a response parameter is populated depends on the type of model requested.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/Predict>`_
**Request Syntax**
::
response = client.predict(
MLModelId='string',
Record={
'string': 'string'
},
PredictEndpoint='string'
)
**Response Syntax**
::
{
'Prediction': {
'predictedLabel': 'string',
'predictedValue': ...,
'predictedScores': {
'string': ...
},
'details': {
'string': 'string'
}
}
}
**Response Structure**
- *(dict) --*
- **Prediction** *(dict) --*
The output from a ``Predict`` operation:
* ``Details`` - Contains the following attributes: ``DetailsAttributes.PREDICTIVE_MODEL_TYPE - REGRESSION | BINARY | MULTICLASS`` ``DetailsAttributes.ALGORITHM - SGD``
* ``PredictedLabel`` - Present for either a ``BINARY`` or ``MULTICLASS`` ``MLModel`` request.
* ``PredictedScores`` - Contains the raw classification score corresponding to each label.
* ``PredictedValue`` - Present for a ``REGRESSION`` ``MLModel`` request.
- **predictedLabel** *(string) --*
The prediction label for either a ``BINARY`` or ``MULTICLASS`` ``MLModel`` .
- **predictedValue** *(float) --* The prediction value for ``REGRESSION`` ``MLModel`` .
- **predictedScores** *(dict) --* Provides the raw classification score corresponding to each label.
- *(string) --*
- *(float) --*
- **details** *(dict) --* Provides any additional details regarding the prediction.
- *(string) --* Contains the key values of ``DetailsMap`` : ``PredictiveModelType`` - Indicates the type of the ``MLModel`` . ``Algorithm`` - Indicates the algorithm that was used for the ``MLModel`` .
- *(string) --*
:type MLModelId: string
:param MLModelId: **[REQUIRED]**
A unique identifier of the ``MLModel`` .
:type Record: dict
:param Record: **[REQUIRED]**
A map of variable name-value pairs that represent an observation.
- *(string) --*
The name of a variable. Currently it\'s used to specify the name of the target value, label, weight, and tags.
- *(string) --*
The value of a variable. Currently it\'s used to specify values of the target value, weights, and tag variables and for filtering variable values.
:type PredictEndpoint: string
:param PredictEndpoint: **[REQUIRED]**
:rtype: dict
:returns:
"""
pass
def update_batch_prediction(self, BatchPredictionId: str, BatchPredictionName: str) -> Dict:
"""
Updates the ``BatchPredictionName`` of a ``BatchPrediction`` .
You can use the ``GetBatchPrediction`` operation to view the contents of the updated data element.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/UpdateBatchPrediction>`_
**Request Syntax**
::
response = client.update_batch_prediction(
BatchPredictionId='string',
BatchPredictionName='string'
)
**Response Syntax**
::
{
'BatchPredictionId': 'string'
}
**Response Structure**
- *(dict) --*
Represents the output of an ``UpdateBatchPrediction`` operation.
You can see the updated content by using the ``GetBatchPrediction`` operation.
- **BatchPredictionId** *(string) --*
The ID assigned to the ``BatchPrediction`` during creation. This value should be identical to the value of the ``BatchPredictionId`` in the request.
:type BatchPredictionId: string
:param BatchPredictionId: **[REQUIRED]**
The ID assigned to the ``BatchPrediction`` during creation.
:type BatchPredictionName: string
:param BatchPredictionName: **[REQUIRED]**
A new user-supplied name or description of the ``BatchPrediction`` .
:rtype: dict
:returns:
"""
pass
def update_data_source(self, DataSourceId: str, DataSourceName: str) -> Dict:
"""
Updates the ``DataSourceName`` of a ``DataSource`` .
You can use the ``GetDataSource`` operation to view the contents of the updated data element.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/UpdateDataSource>`_
**Request Syntax**
::
response = client.update_data_source(
DataSourceId='string',
DataSourceName='string'
)
**Response Syntax**
::
{
'DataSourceId': 'string'
}
**Response Structure**
- *(dict) --*
Represents the output of an ``UpdateDataSource`` operation.
You can see the updated content by using the ``GetBatchPrediction`` operation.
- **DataSourceId** *(string) --*
The ID assigned to the ``DataSource`` during creation. This value should be identical to the value of the ``DataSourceID`` in the request.
:type DataSourceId: string
:param DataSourceId: **[REQUIRED]**
The ID assigned to the ``DataSource`` during creation.
:type DataSourceName: string
:param DataSourceName: **[REQUIRED]**
A new user-supplied name or description of the ``DataSource`` that will replace the current description.
:rtype: dict
:returns:
"""
pass
def update_evaluation(self, EvaluationId: str, EvaluationName: str) -> Dict:
"""
Updates the ``EvaluationName`` of an ``Evaluation`` .
You can use the ``GetEvaluation`` operation to view the contents of the updated data element.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/UpdateEvaluation>`_
**Request Syntax**
::
response = client.update_evaluation(
EvaluationId='string',
EvaluationName='string'
)
**Response Syntax**
::
{
'EvaluationId': 'string'
}
**Response Structure**
- *(dict) --*
Represents the output of an ``UpdateEvaluation`` operation.
You can see the updated content by using the ``GetEvaluation`` operation.
- **EvaluationId** *(string) --*
The ID assigned to the ``Evaluation`` during creation. This value should be identical to the value of the ``Evaluation`` in the request.
:type EvaluationId: string
:param EvaluationId: **[REQUIRED]**
The ID assigned to the ``Evaluation`` during creation.
:type EvaluationName: string
:param EvaluationName: **[REQUIRED]**
A new user-supplied name or description of the ``Evaluation`` that will replace the current content.
:rtype: dict
:returns:
"""
pass
def update_ml_model(self, MLModelId: str, MLModelName: str = None, ScoreThreshold: float = None) -> Dict:
"""
Updates the ``MLModelName`` and the ``ScoreThreshold`` of an ``MLModel`` .
You can use the ``GetMLModel`` operation to view the contents of the updated data element.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/machinelearning-2014-12-12/UpdateMLModel>`_
**Request Syntax**
::
response = client.update_ml_model(
MLModelId='string',
MLModelName='string',
ScoreThreshold=...
)
**Response Syntax**
::
{
'MLModelId': 'string'
}
**Response Structure**
- *(dict) --*
Represents the output of an ``UpdateMLModel`` operation.
You can see the updated content by using the ``GetMLModel`` operation.
- **MLModelId** *(string) --*
The ID assigned to the ``MLModel`` during creation. This value should be identical to the value of the ``MLModelID`` in the request.
:type MLModelId: string
:param MLModelId: **[REQUIRED]**
The ID assigned to the ``MLModel`` during creation.
:type MLModelName: string
:param MLModelName:
A user-supplied name or description of the ``MLModel`` .
:type ScoreThreshold: float
:param ScoreThreshold:
The ``ScoreThreshold`` used in binary classification ``MLModel`` that marks the boundary between a positive prediction and a negative prediction.
Output values greater than or equal to the ``ScoreThreshold`` receive a positive result from the ``MLModel`` , such as ``true`` . Output values less than the ``ScoreThreshold`` receive a negative response from the ``MLModel`` , such as ``false`` .
:rtype: dict
:returns:
"""
pass
| 69.180519 | 2,268 | 0.601651 |
5a90f3689f6e6afeb739ef85f9dba3f9098fa2f6 | 3,636 | py | Python | Phase_4/ds-clustering_kvo32-main/src/k_means_plotter.py | VaneezaAhmad/ds-east-042621-lectures | 334f98bb4bd4f8020055e95994764b1587a809c0 | [
"MIT"
] | 1 | 2021-08-12T21:48:21.000Z | 2021-08-12T21:48:21.000Z | Phase_4/ds-clustering_kvo32-main/src/k_means_plotter.py | VaneezaAhmad/ds-east-042621-lectures | 334f98bb4bd4f8020055e95994764b1587a809c0 | [
"MIT"
] | null | null | null | Phase_4/ds-clustering_kvo32-main/src/k_means_plotter.py | VaneezaAhmad/ds-east-042621-lectures | 334f98bb4bd4f8020055e95994764b1587a809c0 | [
"MIT"
] | 20 | 2021-04-27T19:27:58.000Z | 2021-06-16T15:08:50.000Z | def k_means(X, Y, k=2):
"""This function will calculate and plot k-means clusters for two-dimensional data input as X and Y"""
from matplotlib import pyplot as plt
import random
import numpy as np
import pandas as pd
fig, ax = plt.subplots(1, 5, figsize = (15, 3))
ax[0].set_title('the data')
ax[0].scatter(X, Y, c='k')
all_pts = list(zip(X, Y))
pts = random.sample(all_pts, k)
arr_pts = np.array(pts)
ax[1].set_title('initialize centroids')
ax[1].scatter(X, Y, c='k')
ax[1].scatter(arr_pts[:, 0], arr_pts[:, 1], c='r')
clusts = []
for pt in pts:
clusts.append([pt])
for pt in all_pts:
dists = []
for c_pt in pts:
dists.append(np.linalg.norm(np.array(pt) - np.array(c_pt)))
dist_min = dists.index(min(dists))
clusts[dist_min].append(pt)
clusts = [list(set(clust)) for clust in clusts]
# Calculate centroids
centers = []
for clust in clusts:
arr_clust = np.array(clust)
centers.append((arr_clust[:, 0].mean(), arr_clust[:, 1].mean()))
arr_centers = np.array(centers)
ax[2].set_title('first cluster assignments')
ax[2].scatter(X, Y, c='k')
ax[2].scatter(arr_centers[:, 0], arr_centers[:, 1], c='r')
df = pd.DataFrame(clusts).T
for j in range(len(df.T)):
points = df[j].dropna()
ax[2].scatter([i[0] for i in points], [i[1] for i in points])
new_clusts = []
for cen in centers:
new_clusts.append([cen])
for pt in all_pts:
dists = []
for c_pt in centers:
dists.append(np.linalg.norm(np.array(pt) - np.array(c_pt)))
dist_min = dists.index(min(dists))
new_clusts[dist_min].append(pt)
for i in range(len(centers)):
if centers[i] not in all_pts:
new_clusts[i].remove(centers[i])
# Are the new clusters different? If so, recalculate centroids!
verdict = 'done'
for i in range(len(clusts)):
if set(clusts[i]) != set(new_clusts[i]):
verdict = 'not done'
break
else:
continue
while verdict == 'not done':
old_clusts = new_clusts
centers = []
for clust in new_clusts:
arr_clust = np.array(clust)
centers.append((arr_clust[:, 0].mean(), arr_clust[:, 1].mean()))
new_clusts = []
for cen in centers:
new_clusts.append([cen])
for pt in all_pts:
dists = []
for c_pt in centers:
dists.append(np.linalg.norm(np.array(pt) - np.array(c_pt)))
dist_min = dists.index(min(dists))
new_clusts[dist_min].append(pt)
for i in range(len(centers)):
if centers[i] not in all_pts:
new_clusts[i].remove(centers[i])
verdict = 'done'
for i in range(len(clusts)):
if set(old_clusts[i]) != set(new_clusts[i]):
verdict = 'not done'
break
else:
continue
last_centers = centers
arr_last_centers = np.array(last_centers)
ax[3].set_title('final centroids')
ax[3].scatter(X, Y, c='k')
ax[3].scatter(arr_last_centers[:, 0], arr_last_centers[:, 1], c='r')
df = pd.DataFrame(new_clusts).T
ax[4].set_title('final clusters')
ax[4].scatter(arr_last_centers[:, 0], arr_last_centers[:, 1], c='r')
for j in range(len(df.T)):
points = df[j].dropna()
ax[4].scatter([i[0] for i in points], [i[1] for i in points])
return df | 31.894737 | 106 | 0.55033 |
a4f6f54c3ace898914053146339372793c9e15e8 | 2,498 | py | Python | .venv/lib/python2.7/site-packages/gitdb/db/git.py | aruneli/rancher-tests | f0ff5539420ac354fc951ed239b002cecde52505 | [
"Apache-2.0"
] | null | null | null | .venv/lib/python2.7/site-packages/gitdb/db/git.py | aruneli/rancher-tests | f0ff5539420ac354fc951ed239b002cecde52505 | [
"Apache-2.0"
] | null | null | null | .venv/lib/python2.7/site-packages/gitdb/db/git.py | aruneli/rancher-tests | f0ff5539420ac354fc951ed239b002cecde52505 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
#
# This module is part of GitDB and is released under
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
from gitdb.db.base import (
CompoundDB,
ObjectDBW,
FileDBBase
)
from gitdb.db.loose import LooseObjectDB
from gitdb.db.pack import PackedDB
from gitdb.db.ref import ReferenceDB
from gitdb.exc import InvalidDBRoot
import os
__all__ = ('GitDB', )
class GitDB(FileDBBase, ObjectDBW, CompoundDB):
    """A git-style object database, which contains all objects in the 'objects'
    subdirectory"""
    # Configuration
    # Sub-database classes used for each storage flavour; kept as class
    # attributes so subclasses can substitute their own implementations.
    PackDBCls = PackedDB
    LooseDBCls = LooseObjectDB
    ReferenceDBCls = ReferenceDB
    # Directories
    # Paths are relative to the 'objects' root handed to __init__.
    packs_dir = 'pack'
    loose_dir = ''
    alternates_dir = os.path.join('info', 'alternates')
    def __init__(self, root_path):
        """Initialize ourselves on a git objects directory"""
        super(GitDB, self).__init__(root_path)
    def _set_cache_(self, attr):
        # Lazy initializer: invoked on first access of `_dbs`/`_loose_db`
        # (presumably via a LazyMixin-style base class -- confirm against the
        # imported bases).
        if attr == '_dbs' or attr == '_loose_db':
            self._dbs = list()
            loose_db = None
            # Probe pack, loose and alternates locations, keeping only the
            # ones that actually exist on disk.
            for subpath, dbcls in ((self.packs_dir, self.PackDBCls),
                                   (self.loose_dir, self.LooseDBCls),
                                   (self.alternates_dir, self.ReferenceDBCls)):
                path = self.db_path(subpath)
                if os.path.exists(path):
                    self._dbs.append(dbcls(path))
                    if dbcls is self.LooseDBCls:
                        loose_db = self._dbs[-1]
                    # END remember loose db
                # END check path exists
            # END for each db type
            # should have at least one subdb
            if not self._dbs:
                raise InvalidDBRoot(self.root_path())
            # END handle error
            # the loose db handles all writes, hence it must exist and offer 'store'
            assert loose_db is not None and hasattr(loose_db, 'store'), "First database needs store functionality"
            # finally set the value
            self._loose_db = loose_db
        else:
            super(GitDB, self)._set_cache_(attr)
        # END handle attrs
    #{ ObjectDBW interface
    def store(self, istream):
        # Writes always go to the loose-object database.
        return self._loose_db.store(istream)
    def ostream(self):
        return self._loose_db.ostream()
    def set_ostream(self, ostream):
        return self._loose_db.set_ostream(ostream)
    #} END objectdbw interface
| 30.463415 | 114 | 0.609287 |
ffa0d5a6d88efef870e72dd4d3f1dd6571fc1f04 | 4,803 | py | Python | chat-plugin/chat/hooks.py | lyoung83/chat | b21a3255db6c825a22a4ef02642fb9c3cb72d9c3 | [
"Apache-2.0"
] | 17 | 2016-04-09T09:54:43.000Z | 2021-06-29T04:59:54.000Z | chat-plugin/chat/hooks.py | lyoung83/chat | b21a3255db6c825a22a4ef02642fb9c3cb72d9c3 | [
"Apache-2.0"
] | 210 | 2016-01-27T09:57:29.000Z | 2020-10-29T17:19:44.000Z | chat-plugin/chat/hooks.py | lyoung83/chat | b21a3255db6c825a22a4ef02642fb9c3cb72d9c3 | [
"Apache-2.0"
] | 21 | 2016-06-22T11:48:56.000Z | 2019-01-07T17:08:45.000Z | # Copyright 2017 Oursky Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from skygear.encoding import serialize_record
from .database import Database
from .decorators import (AFTER_CONVERSATION_CREATED_HOOK,
AFTER_CONVERSATION_DELETED_HOOK,
AFTER_CONVERSATION_UPDATED_HOOK,
AFTER_MESSAGE_DELETED_HOOK, AFTER_MESSAGE_SENT_HOOK,
AFTER_MESSAGE_UPDATED_HOOK,
AFTER_USERS_ADDED_TO_CONVERSATION_HOOK,
AFTER_USERS_REMOVED_FROM_CONVERSATION_HOOK,
TYPING_STARTED_HOOK)
from .predicate import Predicate
from .query import Query
from .utils import _get_container
def __send_hook(name, params):
    """Invoke the named server-side hook, wrapping *params* as the action args."""
    _get_container().send_action(name, {'args': params})
def __get_users_by_user_ids(user_ids):
    """Fetch the user records for the given ids and return them serialized."""
    db = Database(_get_container(), '')
    user_query = Query('user',
                       predicate=Predicate(_id__in=user_ids),
                       limit=10000)
    return [serialize_record(record) for record in db.query(user_query)]
def send_after_message_sent_hook(message, conversation, participant_ids):
    """Fire the after-message-sent hook with the message, its conversation
    and the resolved participant records."""
    payload = {
        'message': message,
        'conversation': conversation,
        'participants': __get_users_by_user_ids(participant_ids),
    }
    __send_hook(AFTER_MESSAGE_SENT_HOOK, payload)
def send_after_message_updated_hook(message, conversation, participant_ids):
    """Fire the after-message-updated hook with the message, its conversation
    and the resolved participant records."""
    payload = {
        'message': message,
        'conversation': conversation,
        'participants': __get_users_by_user_ids(participant_ids),
    }
    __send_hook(AFTER_MESSAGE_UPDATED_HOOK, payload)
def send_after_message_deleted_hook(message, conversation, participant_ids):
    """Fire the after-message-deleted hook with the message, its conversation
    and the resolved participant records."""
    payload = {
        'message': message,
        'conversation': conversation,
        'participants': __get_users_by_user_ids(participant_ids),
    }
    __send_hook(AFTER_MESSAGE_DELETED_HOOK, payload)
def send_typing_started_hook(conversation, participant_ids, events):
    """Fire the typing-started hook with the conversation, its resolved
    participants and the typing events."""
    payload = {
        'conversation': conversation,
        'participants': __get_users_by_user_ids(participant_ids),
        'events': events,
    }
    __send_hook(TYPING_STARTED_HOOK, payload)
def send_after_conversation_created_hook(conversation, participant_ids):
    """Fire the after-conversation-created hook."""
    __send_hook(AFTER_CONVERSATION_CREATED_HOOK, {
        'conversation': conversation,
        'participants': __get_users_by_user_ids(participant_ids),
    })
def send_after_conversation_updated_hook(conversation, participant_ids):
    """Fire the after-conversation-updated hook."""
    __send_hook(AFTER_CONVERSATION_UPDATED_HOOK, {
        'conversation': conversation,
        'participants': __get_users_by_user_ids(participant_ids),
    })
def send_after_conversation_deleted_hook(conversation, participant_ids):
    """Fire the after-conversation-deleted hook."""
    __send_hook(AFTER_CONVERSATION_DELETED_HOOK, {
        'conversation': conversation,
        'participants': __get_users_by_user_ids(participant_ids),
    })
def send_after_users_added_to_conversation_hook(conversation,
                                                participant_ids,
                                                new_user_ids):
    """Fire the hook announcing that users were added to a conversation."""
    payload = {
        'conversation': conversation,
        'participants': __get_users_by_user_ids(participant_ids),
        'new_users': __get_users_by_user_ids(new_user_ids),
    }
    __send_hook(AFTER_USERS_ADDED_TO_CONVERSATION_HOOK, payload)
def send_after_users_removed_from_conversation_hook(conversation,
                                                    participant_ids,
                                                    old_user_ids):
    """Fire the hook announcing that users were removed from a conversation."""
    payload = {
        'conversation': conversation,
        'participants': __get_users_by_user_ids(participant_ids),
        'old_users': __get_users_by_user_ids(old_user_ids),
    }
    __send_hook(AFTER_USERS_REMOVED_FROM_CONVERSATION_HOOK, payload)
| 42.504425 | 77 | 0.679367 |
12644818103b1b01403384ab7aa73f765b7be920 | 1,615 | py | Python | VBYO-2017/kodlar/YuksekBasarimliHesaplama/Theano/theano_0.py | metinuslu/VeriBilimiYazOkullari | 474c4481a779532fb667874a44bcb03f8852e8e0 | [
"MIT"
] | 3 | 2020-02-17T19:17:56.000Z | 2021-09-03T10:38:32.000Z | VBYO-2017/kodlar/YuksekBasarimliHesaplama/Theano/theano_0.py | gokhangemici/VeriBilimiYazOkullari | 474c4481a779532fb667874a44bcb03f8852e8e0 | [
"MIT"
] | null | null | null | VBYO-2017/kodlar/YuksekBasarimliHesaplama/Theano/theano_0.py | gokhangemici/VeriBilimiYazOkullari | 474c4481a779532fb667874a44bcb03f8852e8e0 | [
"MIT"
] | 3 | 2019-12-07T01:11:03.000Z | 2021-09-03T10:38:35.000Z | import numpy
import theano.tensor as T
from theano import function
from theano import pp
#In Theano, all symbols must be typed.
#In particular, T.dscalar is the type we assign to 0-dimensional arrays (scalar) of doubles (d)
#It is a Theano Type.
x = T.dscalar('x') #try the same with matrix
y = T.dscalar('y')
print '-' * 50
#what is x: it is a TensorVariable
print 'type(x):', type(x)
#their type field is assigned to T.dscalar
print 'x.type:', x.type
print 'T.dscalar:', T.dscalar
print x.type is T.dscalar
print '-' * 50
#dscalar is not a class. Therefore, neither x nor y are actually instances of dscalar.
#They are instances of TensorVariable. x and y are, however, assigned the theano Type dscalar in their type field.
z = x + y #z is another variable that represents x+y
print 'z.type:', z.type
print 'x:', pp(x)
print 'y:', pp(y)
print 'z:', pp(z)
print '-' * 50
f = function([x, y], z) #or function([x, y], x + y)
print f
print '-' * 50
print f(2, 3)
print numpy.allclose(f(16.3, 12.1), 28.4) #allclose: teturns True if two arrays are element-wise equal within a tolerance.
print '-' * 50
#now try with matrices
x = T.dmatrix('x')
y = T.dmatrix('y')
z = x + y
f = function([x, y], z)
print f([[1, 2], [3, 4]], [[10, 20], [30, 40]])
print f(numpy.array([[1, 2], [3, 4]]), numpy.array([[10, 20], [30, 40]]))
print '-' * 50
#exercise:
a = T.vector() # declare variable
out = a + a ** 10 # build symbolic expression
f = function([a], out) # compile function
print(f([0, 1, 2]))
#Modify and execute this code to compute this expression: a ** 2 + b ** 2 + 2 * a * b.
#see solution_0.py for exercise
| 27.844828 | 122 | 0.657585 |
9a7c53c7c3d063c8f4d76f87cd0de9f0ca94c98d | 399 | py | Python | eHealth_Version1.0_01_13_2022_12_47_pm_Release/BUS/BUS_ECGPatient.py | kyvipro113/Graduation_Thesis | 71e9e3aa2adf64cff7319d056592f8b6ef1fd5ec | [
"MIT"
] | null | null | null | eHealth_Version1.0_01_13_2022_12_47_pm_Release/BUS/BUS_ECGPatient.py | kyvipro113/Graduation_Thesis | 71e9e3aa2adf64cff7319d056592f8b6ef1fd5ec | [
"MIT"
] | null | null | null | eHealth_Version1.0_01_13_2022_12_47_pm_Release/BUS/BUS_ECGPatient.py | kyvipro113/Graduation_Thesis | 71e9e3aa2adf64cff7319d056592f8b6ef1fd5ec | [
"MIT"
] | null | null | null | from DAL.DAL_ECGPatient import DAL_ECGPatient
class BUS_ECGPatient():
    """Business-layer facade for ECG-link lookups, delegating to the DAL."""

    def __init__(self):
        # Data-access object that performs the actual database queries.
        self.dalECGPatient = DAL_ECGPatient()

    def firstLoadLinkECG(self, IDPatient):
        """Return the ECG link(s) for *IDPatient* on first load (no type filter)."""
        return self.dalECGPatient.selectLinkECGViaIDPatient(IDPatient=IDPatient)

    def loadLinkECG(self, IDPatient, ECGType):
        """Return the ECG link(s) for *IDPatient* filtered by *ECGType*.

        BUG FIX: the original final line had dataset-extraction garbage fused
        onto it ("... ECGType=ECGType) | 36.272727 | 85 | 0.766917 |"), which
        is a SyntaxError.
        """
        return self.dalECGPatient.selectLinkECG(IDPatient=IDPatient, ECGType=ECGType)
ef1d6f7654a23f708ab56760f21d86993e8d24bc | 510 | py | Python | awslambda_lookup/exceptions.py | ITProKyle/runway-hook-awslambda | a346465277049b004c7f3c0feb759f152369eb21 | [
"Apache-2.0"
] | 1 | 2021-09-09T14:59:53.000Z | 2021-09-09T14:59:53.000Z | awslambda_lookup/exceptions.py | ITProKyle/runway-hook-awslambda | a346465277049b004c7f3c0feb759f152369eb21 | [
"Apache-2.0"
] | 68 | 2021-07-26T15:58:22.000Z | 2022-01-10T13:09:25.000Z | awslambda_lookup/exceptions.py | ITProKyle/runway-hook-awslambda | a346465277049b004c7f3c0feb759f152369eb21 | [
"Apache-2.0"
] | null | null | null | """High-level exceptions."""
from __future__ import annotations
from runway.cfngin.exceptions import CfnginError
class CfnginOnlyLookupError(CfnginError):
    """Attempted to use a CFNgin lookup outside of CFNgin."""

    # Name of the lookup that was invoked outside of CFNgin.
    lookup_name: str

    def __init__(self, lookup_name: str) -> None:
        """Instantiate class.

        Args:
            lookup_name: Name of the offending lookup.

        """
        self.message = f"attempted to use CFNgin only lookup {lookup_name} outside of CFNgin"
        self.lookup_name = lookup_name
        super().__init__()
| 26.842105 | 82 | 0.670588 |
ea89c8b519a2feaa9c4af22e0c08dbd043b6ee19 | 12,697 | py | Python | layers/Optimizernp.py | wahyutirta/CNN-numpy | d66e10a53304a0c72c40f278486866493f573d5e | [
"MIT"
] | 3 | 2021-05-20T09:22:37.000Z | 2021-07-16T07:04:43.000Z | layers/Optimizernp.py | wahyutirta/cnn-numpy | d66e10a53304a0c72c40f278486866493f573d5e | [
"MIT"
] | null | null | null | layers/Optimizernp.py | wahyutirta/cnn-numpy | d66e10a53304a0c72c40f278486866493f573d5e | [
"MIT"
] | null | null | null | import numpy as np
#Adagrad Optimizer
class Optimizer_Adagrad:
    """Adaptive-gradient (Adagrad) optimizer.

    Each parameter's step is the global learning rate divided by the square
    root of its running sum of squared gradients, so frequently updated
    parameters receive progressively smaller steps.
    """

    def __init__(self, learning_rate=1., decay=0., epsilon=1e-7):
        """Store hyper-parameters.

        learning_rate -- fixed base step size; ``current_learning_rate``
                         holds the decayed value actually applied
        decay         -- per-iteration learning-rate decay factor
        epsilon       -- small constant preventing division by zero
        """
        self.learning_rate = learning_rate
        self.current_learning_rate = learning_rate
        self.decay = decay
        self.iterations = 0
        self.epsilon = epsilon

    def pre_update_params(self):
        """Apply learning-rate decay; call once before the parameter updates."""
        if self.decay:
            decay_factor = 1. / (1. + self.decay * self.iterations)
            self.current_learning_rate = self.learning_rate * decay_factor

    def update_params(self, layer):
        """Accumulate squared gradients and take an Adagrad step on *layer*."""
        # Lazily create the per-layer gradient-history caches on first use.
        if not hasattr(layer, 'kernel_cache'):
            layer.kernel_cache = np.zeros_like(layer.kernel)
            layer.bias_cache = np.zeros_like(layer.bias)
        # Running sum of squared gradients.
        layer.kernel_cache += layer.delta_K ** 2
        layer.bias_cache += layer.delta_b ** 2
        # SGD step normalized by the square-rooted cache; in-place update
        # keeps external references to the parameter arrays valid.
        lr = self.current_learning_rate
        layer.kernel -= lr * layer.delta_K / (np.sqrt(layer.kernel_cache) + self.epsilon)
        layer.bias -= lr * layer.delta_b / (np.sqrt(layer.bias_cache) + self.epsilon)

    def post_update_params(self):
        """Advance the iteration counter; call once after the updates."""
        self.iterations += 1
# SGD optimizer
class Optimizer_SGD:
    """Stochastic gradient descent with optional momentum and rate decay.

    With ``momentum`` the update direction is an exponential moving average
    of the gradients; with ``momentum=0`` a plain (vanilla) SGD step is
    taken.
    """

    def __init__(self, learning_rate=0.001, decay=0.0, momentum=0.9):
        """Store hyper-parameters.

        learning_rate -- fixed base step size; ``current_learning_rate``
                         holds the decayed value actually applied
        decay         -- per-iteration learning-rate decay factor
        momentum      -- EMA coefficient in [0, 1); 0 disables momentum
        """
        self.learning_rate = learning_rate
        self.current_learning_rate = learning_rate
        self.decay = decay
        self.iterations = 0
        self.momentum = momentum

    def pre_update_params(self):
        """Apply learning-rate decay; call once before the parameter updates."""
        if self.decay:
            self.current_learning_rate = self.learning_rate * \
                (1. / (1. + self.decay * self.iterations))

    def update_params(self, layer):
        """Update *layer*'s kernel and bias from its gradients delta_K/delta_b."""
        if self.momentum:
            # Lazily create per-layer momentum buffers on first use
            # (start at zero: no initial direction).
            if not hasattr(layer, 'weight_history'):
                layer.weight_history = np.zeros_like(layer.kernel)
                layer.bias_history = np.zeros_like(layer.bias)
            # EMA of gradients: previous direction blended with the new one.
            weight_updates = (self.momentum * layer.weight_history) + \
                ((1 - self.momentum) * layer.delta_K)
            layer.weight_history = weight_updates
            bias_updates = (self.momentum * layer.bias_history) + \
                ((1 - self.momentum) * layer.delta_b)
            layer.bias_history = bias_updates
        else:
            # Vanilla SGD: the raw gradients are the update direction.
            # BUG FIX: these used to be pre-multiplied by -learning_rate and
            # then scaled by the learning rate AGAIN below, which produced
            # kernel += lr**2 * gradient (wrong sign and magnitude).
            weight_updates = layer.delta_K
            bias_updates = layer.delta_b
        # Single scaling by the (possibly decayed) learning rate.
        layer.kernel -= self.current_learning_rate * weight_updates
        layer.bias -= self.current_learning_rate * bias_updates

    def post_update_params(self):
        """Advance the iteration counter; call once after the updates."""
        self.iterations += 1
# RMSprop optimizer
class Optimizer_RMSprop:
    """RMSprop optimizer.

    Keeps an exponential moving average of squared gradients (``rho`` is the
    EMA coefficient) and divides each step by its square root, adapting the
    effective learning rate per parameter.
    """

    def __init__(self, learning_rate=0.001, decay=0., epsilon=1e-7, rho=0.9):
        """Store hyper-parameters (epsilon guards against division by zero)."""
        self.learning_rate = learning_rate
        self.current_learning_rate = learning_rate
        self.decay = decay
        self.iterations = 0
        self.epsilon = epsilon
        self.rho = rho

    def pre_update_params(self):
        """Apply learning-rate decay; call once before the parameter updates."""
        if self.decay:
            self.current_learning_rate = self.learning_rate * \
                (1. / (1. + self.decay * self.iterations))

    def update_params(self, layer):
        """Update *layer*'s kernel and bias with an RMSprop step."""
        # Lazily create the per-layer squared-gradient caches on first use.
        if not hasattr(layer, 'weight_cache'):
            layer.weight_cache = np.zeros_like(layer.kernel)
            layer.bias_cache = np.zeros_like(layer.bias)
        # EMA of squared gradients (operator form of np.add/np.multiply).
        layer.weight_cache = self.rho * layer.weight_cache + \
            (1 - self.rho) * layer.delta_K ** 2
        layer.bias_cache = self.rho * layer.bias_cache + \
            (1 - self.rho) * layer.delta_b ** 2
        # SGD step normalized by the square-rooted cache; reassignment (not
        # in-place) mirrors the original implementation.
        layer.kernel = layer.kernel - self.current_learning_rate * layer.delta_K / \
            (np.sqrt(layer.weight_cache) + self.epsilon)
        layer.bias = layer.bias - self.current_learning_rate * layer.delta_b / \
            (np.sqrt(layer.bias_cache) + self.epsilon)

    def post_update_params(self):
        """Advance the iteration counter; call once after the updates."""
        self.iterations += 1
class Optimizer_Adam:
"""
Dokumentasi
input :
output :
"""
# Initialize optimizer - set settings
def __init__(self, learning_rate=0.001, decay=0., epsilon=1e-7,
beta_1=0.9, beta_2=0.999):
self.learning_rate = learning_rate
self.current_learning_rate = learning_rate
self.decay = decay
self.iterations = 0
self.epsilon = epsilon
self.beta_1 = beta_1
self.beta_2 = beta_2
# Call once before any parameter updates
def pre_update_params(self):
"""
Dokumentasi
input :
output :
"""
if self.decay:
self.current_learning_rate = self.learning_rate * \
(1. / (1. + self.decay * self.iterations))
# Update parameters
def update_params(self, layer):
"""
Dokumentasi
input :
output :
"""
# If layer does not contain cache arrays,
# create them filled with zeros
if not hasattr(layer, 'weight_cache'):
layer.weight_momentums = np.zeros_like(layer.kernel)
layer.weight_cache = np.zeros_like(layer.kernel)
layer.bias_momentums = np.zeros_like(layer.bias)
layer.bias_cache = np.zeros_like(layer.bias)
# Update momentum with current gradients
"""
layer.weight_momentums = self.beta_1 * \
layer.weight_momentums + \
(1 - self.beta_1) * layer.delta_K
layer.bias_momentums = self.beta_1 * \
layer.bias_momentums + \
(1 - self.beta_1) * layer.delta_b
"""
layer.weight_momentums = np.add(np.multiply(self.beta_1, layer.weight_momentums), \
np.multiply((1 - self.beta_1), layer.delta_K))
layer.bias_momentums = np.add(np.multiply(self.beta_1, layer.bias_momentums), \
np.multiply((1 - self.beta_1), layer.delta_b))
# Get corrected momentum
# self.iteration is 0 at first pass
# and we need to start with 1 here
weight_momentums_corrected = np.divide(layer.weight_momentums,\
(1- np.power(self.beta_1, (self.iterations + 1))))
bias_momentums_corrected = np.divide(layer.bias_momentums, \
(1- np.power(self.beta_1, (self.iterations + 1))))
# Update cache with squared current gradients
layer.weight_cache = np.add(np.multiply(self.beta_2, layer.weight_cache), \
np.multiply((1 - self.beta_2), np.power(layer.delta_K,2)))
layer.bias_cache = np.add(np.multiply(self.beta_2, layer.bias_cache), \
np.multiply((1 - self.beta_2), np.power(layer.delta_b,2)))
# Get corrected cache
weight_cache_corrected = np.divide(layer.weight_cache, \
(1 - np.power(self.beta_2, (self.iterations + 1))))
bias_cache_corrected = np.divide(layer.bias_cache, \
(1 - np.power(self.beta_2, (self.iterations + 1))))
# Vanilla SGD parameter update + normalization
# with square rooted cache
layer.kernel = np.add(layer.kernel, np.multiply(-self.current_learning_rate, \
np.divide(weight_momentums_corrected, \
(np.sqrt(weight_cache_corrected) + self.epsilon))))
layer.bias = np.add(layer.bias, np.multiply(-self.current_learning_rate, \
np.divide(bias_momentums_corrected, \
(np.sqrt(bias_cache_corrected) + self.epsilon)))
)
# Call once after any parameter updates
def post_update_params(self):
"""
Dokumentasi
input :
output :
"""
self.iterations += 1
| 34.691257 | 124 | 0.558951 |
fc9d5206cc8597b16d4e4677f1852298a20b1e8f | 4,746 | py | Python | lakesuperior/globals.py | whikloj/lakesuperior | 733ac54e9525dcb7c3161bc70f04415e81650c06 | [
"Apache-2.0"
] | null | null | null | lakesuperior/globals.py | whikloj/lakesuperior | 733ac54e9525dcb7c3161bc70f04415e81650c06 | [
"Apache-2.0"
] | null | null | null | lakesuperior/globals.py | whikloj/lakesuperior | 733ac54e9525dcb7c3161bc70f04415e81650c06 | [
"Apache-2.0"
] | null | null | null | import logging
from collections import deque
from importlib import import_module
from os import path
from lakesuperior.dictionaries.namespaces import ns_collection as nsc
RES_CREATED = '_create_'
"""A resource was created."""
RES_DELETED = '_delete_'
"""A resource was deleted."""
RES_UPDATED = '_update_'
"""A resource was updated."""
ROOT_UID = '/'
"""Root node UID."""
ROOT_RSRC_URI = nsc['fcres'][ROOT_UID]
"""Internal URI of root resource."""
class AppGlobals:
"""
Application Globals.
This class is instantiated and used as a carrier for all connections and
various global variables outside of the Flask app context.
The variables are set on initialization by passing a configuration dict.
Usually this is done when starting an application. The instance with the
loaded variables is then assigned to the :data:`lakesuperior.env`
global variable.
You can either load the default configuration::
>>>from lakesuperior import env_setup
Or set up an environment with a custom configuration::
>>> from lakesuperior import env
>>> from lakesuperior.app_globals import AppGlobals
>>> my_config = {'name': 'value', '...': '...'}
>>> env.app_globals = AppGlobals(my_config)
"""
def __init__(self, config):
"""
Generate global variables from configuration.
"""
from lakesuperior.messaging.messenger import Messenger
app_conf = config['application']
# Initialize RDF layout.
rdfly_mod_name = app_conf['store']['ldp_rs']['layout']
rdfly_mod = import_module('lakesuperior.store.ldp_rs.{}'.format(
rdfly_mod_name))
rdfly_cls = getattr(rdfly_mod, self.camelcase(rdfly_mod_name))
#logger.info('RDF layout: {}'.format(rdfly_mod_name))
# Initialize file layout.
nonrdfly_mod_name = app_conf['store']['ldp_nr']['layout']
nonrdfly_mod = import_module('lakesuperior.store.ldp_nr.{}'.format(
nonrdfly_mod_name))
nonrdfly_cls = getattr(nonrdfly_mod, self.camelcase(nonrdfly_mod_name))
#logger.info('Non-RDF layout: {}'.format(nonrdfly_mod_name))
## Initialize metadata store.
#from lakesuperior.store.metadata_store import MetadataStore
# Set up messaging.
self._messenger = Messenger(app_conf['messaging'])
# Exposed globals.
self._config = config
self._rdfly = rdfly_cls(app_conf['store']['ldp_rs'])
self._nonrdfly = nonrdfly_cls(app_conf['store']['ldp_nr'])
#self._md_store = MetadataStore(path.join(
# app_conf['data_dir'], 'metadata'), create=True)
self._changelog = deque()
@property
def config(self):
"""
Global configuration.
This is a collection of all configuration options **except** for the
WSGI configuration which is initialized at a different time and is
stored under :data:`lakesuperior.env.wsgi_options`.
*TODO:* Update class reference when interface will be separated from
implementation.
"""
return self._config
@property
def rdfly(self):
"""
Current RDF layout.
This is an instance of
:class:`~lakesuperior.store.ldp_rs.rsrc_centric_layout.RsrcCentricLayout`.
*TODO:* Update class reference when interface will be separated from
implementation.
"""
return self._rdfly
@property
def rdf_store(self):
"""
Current RDF low-level store.
This is an instance of
:class:`~lakesuperior.store.ldp_rs.lmdb_store.LmdbStore`.
"""
return self._rdfly.store
@property
def nonrdfly(self):
"""
Current non-RDF (binary contents) layout.
This is an instance of
:class:`~lakesuperior.store.ldp_nr.base_non_rdf_layout.BaseNonRdfLayout`.
"""
return self._nonrdfly
#@property
#def md_store(self):
# """
# Metadata store (LMDB).
# This is an instance of
# :class:`~lakesuperior.store.metadata_store.MetadataStore`.
# """
# return self._md_store
@property
def messenger(self):
"""
Current message handler.
This is an instance of
:class:`~lakesuperior.messaging.messenger.Messenger`.
"""
return self._messenger
@property
def changelog(self):
return self._changelog
def camelcase(self, word):
"""
Convert a string with underscores to a camel-cased one.
Ripped from https://stackoverflow.com/a/6425628
"""
return ''.join(x.capitalize() or '_' for x in word.split('_'))
| 29.296296 | 82 | 0.638643 |
a03b55c646a0c96e008a7f8985001d2c46c365b8 | 4,037 | py | Python | main/api/v1/resource.py | gae-init/gae-init-upload | 314c151b438a02724f7925f5be1af3b5ec93630d | [
"MIT"
] | 23 | 2015-01-13T19:26:04.000Z | 2019-12-21T06:23:20.000Z | main/api/v1/resource.py | gae-init/gae-init-upload | 314c151b438a02724f7925f5be1af3b5ec93630d | [
"MIT"
] | 8 | 2015-02-07T15:56:04.000Z | 2019-07-17T19:28:00.000Z | main/api/v1/resource.py | gae-init/gae-init-upload | 314c151b438a02724f7925f5be1af3b5ec93630d | [
"MIT"
] | 18 | 2015-03-13T19:33:04.000Z | 2019-06-27T04:31:44.000Z | # coding: utf-8
from __future__ import absolute_import
from google.appengine.api import images
from google.appengine.ext import blobstore
from google.appengine.ext import ndb
import flask
import flask_restful
import werkzeug
from api import helpers
import auth
import config
import model
import util
from main import api_v1
###############################################################################
# Endpoints
###############################################################################
@api_v1.resource('/resource/', endpoint='api.resource.list')
class ResourceListAPI(flask_restful.Resource):
@auth.admin_required
def get(self):
resource_keys = util.param('resource_keys', list)
if resource_keys:
resource_db_keys = [ndb.Key(urlsafe=k) for k in resource_keys]
resource_dbs = ndb.get_multi(resource_db_keys)
return helpers.make_response(resource_dbs, model.Resource.FIELDS)
resource_dbs, next_cursor = model.Resource.get_dbs()
return helpers.make_response(
resource_dbs, model.Resource.FIELDS, next_cursor,
)
@auth.admin_required
def delete(self):
resource_keys = util.param('resource_keys', list)
if not resource_keys:
helpers.make_not_found_exception(
'Resource(s) %s not found' % resource_keys
)
resource_db_keys = [ndb.Key(urlsafe=k) for k in resource_keys]
delete_resource_dbs(resource_db_keys)
return flask.jsonify({
'result': resource_keys,
'status': 'success',
})
@api_v1.resource('/resource/<string:key>/', endpoint='api.resource')
class ResourceAPI(flask_restful.Resource):
@auth.login_required
def get(self, key):
resource_db = ndb.Key(urlsafe=key).get()
if not resource_db and resource_db.user_key != auth.current_user_key():
helpers.make_not_found_exception('Resource %s not found' % key)
return helpers.make_response(resource_db, model.Resource.FIELDS)
@auth.login_required
def delete(self, key):
resource_db = ndb.Key(urlsafe=key).get()
if not resource_db or resource_db.user_key != auth.current_user_key():
helpers.make_not_found_exception('Resource %s not found' % key)
delete_resource_key(resource_db.key)
return helpers.make_response(resource_db, model.Resource.FIELDS)
@api_v1.resource('/resource/upload/', endpoint='api.resource.upload')
class ResourceUploadAPI(flask_restful.Resource):
@auth.login_required
def get(self):
count = util.param('count', int) or 1
urls = []
for i in range(count):
urls.append({'upload_url': blobstore.create_upload_url(
flask.request.path,
gs_bucket_name=config.CONFIG_DB.bucket_name or None,
)})
return flask.jsonify({
'status': 'success',
'count': count,
'result': urls,
})
@auth.login_required
def post(self):
resource_db = resource_db_from_upload()
if resource_db:
return helpers.make_response(resource_db, model.Resource.FIELDS)
flask.abort(500)
###############################################################################
# Helpers
###############################################################################
def delete_resource_dbs(resource_db_keys):
ndb.delete_multi(resource_db_keys)
def resource_db_from_upload():
try:
uploaded_file = flask.request.files['file']
except:
return None
headers = uploaded_file.headers['Content-Type']
blob_info_key = werkzeug.parse_options_header(headers)[1]['blob-key']
blob_info = blobstore.BlobInfo.get(blob_info_key)
image_url = None
if blob_info.content_type.startswith('image'):
try:
image_url = images.get_serving_url(blob_info.key())
except:
pass
resource_db = model.Resource(
user_key=auth.current_user_key(),
blob_key=blob_info.key(),
name=blob_info.filename,
content_type=blob_info.content_type,
size=blob_info.size,
image_url=image_url,
bucket_name=config.CONFIG_DB.bucket_name or None,
)
resource_db.put()
return resource_db
| 30.816794 | 79 | 0.66163 |
9fc3f5f8f145d4f85410a649dbc467dc621f2b1b | 444 | py | Python | scripts/zManager/items/bones.py | jonntd/maya-ziva-dynamics-utils | 8cc29941da49004c45beb4e9c673dd4334a8d773 | [
"MIT"
] | null | null | null | scripts/zManager/items/bones.py | jonntd/maya-ziva-dynamics-utils | 8cc29941da49004c45beb4e9c673dd4334a8d773 | [
"MIT"
] | null | null | null | scripts/zManager/items/bones.py | jonntd/maya-ziva-dynamics-utils | 8cc29941da49004c45beb4e9c673dd4334a8d773 | [
"MIT"
] | 1 | 2019-06-10T08:14:46.000Z | 2019-06-10T08:14:46.000Z | from maya import cmds
from . import base, mesh
class BonesItem(base.LabelItem):
def __init__(self, parent, solver):
super(BonesItem, self).__init__(parent, text="bones")
self.setExpanded(False)
# get bones
meshes = cmds.zQuery(solver, mesh=True, type="zBone")
meshes.sort()
# add bones
for m in meshes:
item = mesh.MeshItem(self, m)
item.setExpanded(True)
| 24.666667 | 61 | 0.603604 |
0468f66891d821e09e4ace525b13945a19ed9a59 | 656 | py | Python | fsindex/test/tests/Path/level2_path_duplicate.py | jakeogh/fsindex | 1b090089e32685f205841d9b1f9ec7365698257a | [
"MIT"
] | null | null | null | fsindex/test/tests/Path/level2_path_duplicate.py | jakeogh/fsindex | 1b090089e32685f205841d9b1f9ec7365698257a | [
"MIT"
] | null | null | null | fsindex/test/tests/Path/level2_path_duplicate.py | jakeogh/fsindex | 1b090089e32685f205841d9b1f9ec7365698257a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from fsindex.test.test_enviroment import *
with self_contained_session(CONFIG.database_timestamp) as session:
BASE.metadata.create_all(session.bind)
path = Path.construct(session=session, path=b"/level1/level2")
session.add(path)
session.commit()
path_duplicate = Path.construct(session=session, path=b"/level1/level2")
session.add(path_duplicate)
session.commit()
assert path.id == path_duplicate.id
assert id(path) == id(path_duplicate)
db_result = [('select COUNT(*) from filename;', 3),
('select COUNT(*) from path;', 3)]
check_db_result(config=CONFIG, db_result=db_result)
| 29.818182 | 76 | 0.713415 |
76b614764a55402dd85429e161ee13f1d9c70e0b | 2,609 | py | Python | entrypoint.py | algorithmiaio/build-wait-action | b9a3dddd00cda265df65e84a71a4cb32272db270 | [
"MIT"
] | null | null | null | entrypoint.py | algorithmiaio/build-wait-action | b9a3dddd00cda265df65e84a71a4cb32272db270 | [
"MIT"
] | null | null | null | entrypoint.py | algorithmiaio/build-wait-action | b9a3dddd00cda265df65e84a71a4cb32272db270 | [
"MIT"
] | 1 | 2021-03-31T23:24:23.000Z | 2021-03-31T23:24:23.000Z | #!/usr/bin/python
import Algorithmia
import os
import requests
import time
class BearerAuth(requests.auth.AuthBase):
def __init__(self, token):
self.token = token
def __call__(self, r):
r.headers["authorization"] = "Simple " + self.token
return r
# TODO: Make sure this works even when a build id isn't visible just yet
def get_build_id(api_key, api_address, algo_name, hash, marker=None):
if marker:
url = "{}/v1/algorithms/{}/builds?limit={}&marker={}".format(api_address, algo_name, 10, marker)
else:
url = "{}/v1/algorithms/{}/builds?limit={}".format(api_address, algo_name, 10)
result = get_api_request(url, api_key)
if "error" in result:
raise Exception(result['error']['message'])
else:
builds = result['results']
for build in builds:
if hash in build['commit_sha']:
build_id = build['build_id']
return build_id
marker = result['marker']
return get_build_id(api_key, api_address, algo_name, hash, marker)
def wait_for_result(api_key, api_address, algo_name, build_id):
waiting = True
url = "{}/v1/algorithms/{}/builds/{}".format(api_address, algo_name, build_id)
url_logs = "{}/v1/algorithms/{}/builds/{}/logs".format(api_address, algo_name, build_id)
while waiting:
result = get_api_request(url, api_key)
if "error" in result:
raise Exception(result['error']['message'])
else:
if result['status'] != u'in-progress':
if result['status'] == u'succeeded':
waiting = False
else:
log_data = get_api_request(url_logs, api_key)
raise Exception("build failure:\n{}".format(log_data['logs']))
else:
time.sleep(5)
def get_api_request(url, api_key):
response = requests.get(auth=BearerAuth(api_key), url=url)
if response.status_code == 200:
return response.json()
else:
raise Exception("request failed with status: {}".format(response.status_code))
if __name__ == "__main__":
api_key = os.getenv("INPUT_MGMT_API_KEY")
api_address = os.getenv("INPUT_API_ADDRESS")
algo_name = os.getenv("INPUT_ALGORITHM_NAME")
algo_hash = os.getenv("GITHUB_SHA")
print("--- Finding build in progress ---")
build_id = get_build_id(api_key, api_address, algo_name, algo_hash)
print("--- Build ID found, waiting for result ---")
wait_for_result(api_key, api_address, algo_name, build_id)
print("--- Build successful ---")
| 35.739726 | 104 | 0.627827 |
13000025a2cdb33c5bd1d009cf2b903d747c540d | 13,131 | py | Python | code/run_model.py | evgenyneu/covid19 | cecc164c5200eb788ffbe7f6a138d0f98eb4c3bd | [
"Unlicense"
] | 1 | 2020-11-21T18:08:53.000Z | 2020-11-21T18:08:53.000Z | code/run_model.py | evgenyneu/covid19 | cecc164c5200eb788ffbe7f6a138d0f98eb4c3bd | [
"Unlicense"
] | null | null | null | code/run_model.py | evgenyneu/covid19 | cecc164c5200eb788ffbe7f6a138d0f98eb4c3bd | [
"Unlicense"
] | null | null | null | # Modelling spread of infectious desease using logisic growth model
import os
import shutil
from shutil import copyfile
from pathlib import Path
import requests
import numpy as np
import pandas as pd
from scipy import stats
import seaborn as sns
from pandas.plotting import register_matplotlib_converters
from datetime import datetime, timedelta
from dateutil import rrule
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from cmdstanpy import CmdStanModel
from dataclasses import dataclass
from tarpan.cmdstanpy.analyse import save_analysis
from tarpan.shared.info_path import InfoPath
from tarpan.cmdstanpy.cache import run
import tarpan
# Parameters for data analysys
@dataclass
class AnalysisSettings:
# Data for Stan model (dictionary)
data = None
csv_path: str = "data/time_series_19-covid-Confirmed.csv"
# URL to the data
data_url: str = "https://raw.githubusercontent.com/CSSEGISandData/\
COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/\
time_series_covid19_confirmed_global.csv"
# Path to the .stan model file
stan_model_path: str = "code/stan_model/logistic.stan"
# Location of plots and summaries
info_path: InfoPath = InfoPath()
plots_dir: str = "plots"
# Stan's sampling parameter
max_treedepth: float = 10
# Number of hours to wait before downloading the data from the Web
max_hours_diff = 12
# Width of HPDI (highest posterior density interval) that is used
# to plot the shaded region around the predicted mean line.
hpdi_width: float = 0.95
# Maximum number of people that can be infected
population_size: float = 2_900_000
# Difference between the maximum number of confirmed cases
# and the actual number of confirmed cases at which we consider
# all people to be reported
tolerance_cases = 1000
marker_color: str = "#F4A92800"
marker_edgecolor: str = "#F4A928"
mu_line_color: str = "#28ADF4"
mu_hpdi_color: str = "#6ef48688"
cases_hpdi_color: str = "#e8e8f455"
# Plot's background color
background_color = '#023D45'
marker: str = "o"
grid_color: str = "#aaaaaa"
grid_alpha: float = 0.2
def download_data(settings):
"""
Downloads the CSV file containing data about convermed COVID-19 cases
"""
data_path = settings.csv_path
data_url = settings.data_url
time_now = datetime.now()
mod_time = datetime.fromtimestamp(os.path.getmtime(data_path))
delta = time_now - mod_time
delta_hours = delta.total_seconds() / 60 / 60
if delta_hours < settings.max_hours_diff:
# Data is up to date
return
# Remove data directory
shutil.rmtree(settings.info_path.dir(), ignore_errors=True)
print(f"Data last downloaded {round(delta_hours)} hours ago.")
print(f"Re-downloading data from:\n{data_url}")
# Download
response = requests.get(data_url)
try:
# Check if download was successful
response.raise_for_status()
except requests.exceptions.HTTPError:
print(f"Error downloading data from {data_url}.")
print(f"Using previous data")
return
data = response.text
# Save to file
with open(data_path, "w") as text_file:
text_file.write(data)
# Save with time stamp in archive folder
# ------
path = Path(data_path)
data_dir = path.parent
archive_dir = os.path.join(data_dir, "archive", "confirmed")
if not os.path.exists(archive_dir):
os.makedirs(archive_dir, exist_ok=True)
archive_file_name = time_now.strftime('%Y-%m-%d.csv')
archive_path = os.path.join(archive_dir, archive_file_name)
copyfile(data_path, archive_path)
def load_data(settings):
"""
Load data.
Parameters
----------
data_path : str
Path to the CSV file.
Returns
-------
list of datetime:
Days corresponding to the confirmed cases
list of float:
Cumulative number of people infected (confirmed).
"""
data_path = settings.csv_path
download_data(settings=settings)
df = pd.read_csv(data_path)
# Exclude China because its data do not show exponential growth
df = df[df['Country/Region'] != 'Mainland China']
df = df[df['Country/Region'] != 'China']
column_names = list(df)
i_first_day = column_names.index('1/22/20') # First date column
dates = []
cases = []
for i_day in range(i_first_day, len(column_names)):
column_name = column_names[i_day]
date = datetime.strptime(column_name, '%m/%d/%y')
dates.append(date)
confirmed = df[column_name].sum()
cases.append(int(confirmed))
return dates, cases
def data_for_stan(cases, settings):
"""
Returns data for the model.
Parameters
----------
list of float:
Cumulative number of people infected (confirmed).
Returns
-------
dict:
Data that is supplied to Stan model.
"""
q = -1 + settings.population_size / cases[0]
return {
"n": len(cases),
"cases": cases,
"k": settings.population_size,
"q": q
}
def run_stan(output_dir, settings: AnalysisSettings):
"""
Run Stan model and return the samples from posterior distributions.
Parameters
----------
output_dir: str
Directory where Stan's output will be created
settings: AnalysisSettings
Analysis settings.
Returns
-------
cmdstanpy.CmdStanMCMC
Stan's output containing samples of posterior distribution
of parameters.
"""
model = CmdStanModel(stan_file=settings.stan_model_path)
fit = model.sample(
data=settings.data, seed=333,
adapt_delta=0.99, max_treedepth=settings.max_treedepth,
iter_sampling=4000, iter_warmup=1000,
chains=4, cores=4,
show_progress=True,
output_dir=output_dir)
# Make summaries and plots of parameter distributions
save_analysis(fit, param_names=["r", "sigma"])
return fit
def check_all_days_present(dates):
"""
Throws exception if there are days missing in the `dates` array
(for example, if it's Sep 1, Sep 2, Sep 3, Sep 5, where Sep 4 is missing).
"""
prev_day = None
for date in dates:
if prev_day is None:
prev_day = date
continue
delta = date - prev_day
if delta.days != 1:
raise ValueError(
f'ERROR: missing days between {prev_day} and {date}')
prev_day = date
def model_function(x, k, q, b):
"""
Calculates number of infected people using logistic function.
Parameters
---------
x: numpy.ndarray
Day numbers
k, q, b: float
Parameters of logitic function.
Returns
-------
numpy.ndarray:
Cumulative number of infected people
"""
return float(k) / (1 + q * np.exp(-(b * x)))
def simulated(mu, sigma):
return stats.norm.rvs(size=len(sigma), loc=mu, scale=sigma)
def calculate_all_infected_day(k, q, b, settings):
"""
Calculates the day when almost all almost people that can be reported
are reported.
Parameters
----------
k, q, b: float
Parameters of logitic function.
Returns
-------
The day number when almost all people that can be reported as infected
are reported.
"""
day_all_infected = 0
b_mean = b.mean()
while True:
sim_confirmed = model_function(x=day_all_infected, k=k, q=q, b=b_mean)
# Stop if number of confirmed cases is almost at maximum level
if abs(sim_confirmed - k) < settings.tolerance_cases:
break
day_all_infected += 1
return day_all_infected
def plot_data_and_model(fit, dates, cases, settings):
sns.set(style="ticks")
plt.style.use('dark_background')
posterior = fit.get_drawset(params=['r', 'sigma'])
fig, ax = plt.subplots(nrows=1, ncols=1)
ax.set_facecolor(settings.background_color)
fig.set_facecolor(settings.background_color)
# Plot posterior
# ---------
# Model parameters
b = posterior["r"].to_numpy() # Growth rate
sigma = posterior["sigma"].to_numpy() # Spear of observations
k = settings.data["k"] # Maximum number cases that can be confirmed
q = settings.data["q"] # Parameter related to initial number of infected
n = settings.data['n'] # Number of data points
day_all_infected = calculate_all_infected_day(k=k, q=q, b=b,
settings=settings)
x_values = np.array(range(0, day_all_infected))
mu = [
model_function(x=x, k=k, q=q, b=b)
for x in x_values
]
mu = np.array(mu)
# Plot mean
mu_mean = mu.mean(axis=1)
x_dates = list(rrule.rrule(freq=rrule.DAILY,
count=day_all_infected, dtstart=dates[0]))
x_dates = np.array(x_dates)
ax.plot(x_dates, mu_mean, color=settings.mu_line_color, label="Model",
zorder=10)
# Plot HPDI interval
# --------
hpdi = np.apply_along_axis(tarpan.shared.stats.hpdi, 1, mu,
probability=settings.hpdi_width)
ax.fill_between(x_dates, hpdi[:, 0], hpdi[:, 1],
facecolor=settings.mu_hpdi_color, zorder=7,
linewidth=0)
# Plot simulated observations
simulated_cases = [
simulated(mu=mu[i, :], sigma=sigma)
for i in range(len(x_values))
]
simulated_cases = np.array(simulated_cases)
cases_hpdi = np.apply_along_axis(
tarpan.shared.stats.hpdi, 1, simulated_cases,
probability=settings.hpdi_width)
ax.fill_between(x_dates,
cases_hpdi[:, 0], cases_hpdi[:, 1],
facecolor=settings.cases_hpdi_color,
linewidth=0,
label=f"{round(settings.hpdi_width*100)}% HPDI", zorder=5)
# Plot data
# ----------
ax.scatter(dates, cases,
marker=settings.marker,
color=settings.marker_color,
edgecolor=settings.marker_edgecolor,
label="Reported",
zorder=9)
# Format plot
# ----------
fig.autofmt_xdate()
date_format = mdates.DateFormatter('%b %d')
ax.xaxis.set_major_formatter(date_format)
date_str = datetime.now().strftime('%b %d, %Y')
title = (
"Total confirmed cases of COVID-19 worldwide, excluding China.\n"
f"Data retrieved from Johns Hopkins University on {date_str}."
)
ax.set_title(title)
ax.set_ylabel("Total confirmed cases")
ax.grid(color=settings.grid_color, linewidth=1,
alpha=settings.grid_alpha)
# Set thousand separator
ax.get_yaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))
ax.legend(facecolor=settings.background_color)
fig.tight_layout()
# Save images
# ------
os.makedirs(settings.plots_dir, exist_ok=True)
# Plot predictions into the future
# ---------
day_margin = timedelta(days=5)
ax.set_xlim([dates[0] - day_margin, x_dates[-1] + day_margin])
info_path = InfoPath(**settings.info_path.__dict__)
filename_date = datetime.now().strftime('%Y_%m_%d')
filename = f"{filename_date}_extrapolated.png"
image_path = os.path.join(settings.plots_dir, filename)
fig.savefig(image_path, dpi=info_path.dpi, facecolor=fig.get_facecolor())
print("Created plots:")
print(image_path)
filename = f"recent_extrapolated.png"
image_path = os.path.join(settings.plots_dir, filename)
fig.savefig(image_path, dpi=info_path.dpi, facecolor=fig.get_facecolor())
# Plot at scale of observations
# ---------
last_data = cases[n - 1]
margins = last_data * 0.1
day_margin = timedelta(days=2)
ax.set_xlim([dates[0] - day_margin, dates[-1] + day_margin])
ax.set_ylim([0 - margins, last_data + margins])
filename = f"{filename_date}_observed.png"
image_path = os.path.join(settings.plots_dir, filename)
fig.savefig(image_path, dpi=info_path.dpi, facecolor=fig.get_facecolor())
print(image_path)
filename = f"recent_observed.png"
image_path = os.path.join(settings.plots_dir, filename)
fig.savefig(image_path, dpi=info_path.dpi, facecolor=fig.get_facecolor())
# plt.show()
def do_work():
register_matplotlib_converters()
settings = AnalysisSettings()
dates, cases = load_data(settings=settings)
check_all_days_present(dates)
settings.data = data_for_stan(cases, settings=settings)
output_dir = os.path.join(settings.info_path.dir(), "stan_cache")
shutil.rmtree(output_dir, ignore_errors=True)
os.makedirs(output_dir, exist_ok=True)
fit = run_stan(output_dir=output_dir, settings=settings)
# fit = run(func=run_stan, settings=settings)
plot_data_and_model(fit=fit, dates=dates, cases=cases, settings=settings)
if __name__ == '__main__':
print("Running the model...")
do_work()
print('We are done')
| 26.852761 | 78 | 0.651359 |
2324ae8c3788d9064e8bc08d8a878ad68decf84d | 941 | py | Python | networkx-d3-v2/lib/gdata/Crypto/__init__.py | suraj-testing2/Clock_Websites | 0e65331da40cfd3766f1bde17f0a9c7ff6666dea | [
"Apache-2.0"
] | 2,293 | 2015-01-02T12:46:10.000Z | 2022-03-29T09:45:43.000Z | networkx-d3-v2/lib/gdata/Crypto/__init__.py | suraj-testing2/Clock_Websites | 0e65331da40cfd3766f1bde17f0a9c7ff6666dea | [
"Apache-2.0"
] | 315 | 2015-05-31T11:55:46.000Z | 2022-01-12T08:36:37.000Z | networkx-d3-v2/lib/gdata/Crypto/__init__.py | suraj-testing2/Clock_Websites | 0e65331da40cfd3766f1bde17f0a9c7ff6666dea | [
"Apache-2.0"
] | 1,033 | 2015-01-04T07:48:40.000Z | 2022-03-24T09:34:37.000Z |
"""Python Cryptography Toolkit
A collection of cryptographic modules implementing various algorithms
and protocols.
Subpackages:
Crypto.Cipher Secret-key encryption algorithms (AES, DES, ARC4)
Crypto.Hash Hashing algorithms (MD5, SHA, HMAC)
Crypto.Protocol Cryptographic protocols (Chaffing, all-or-nothing
transform). This package does not contain any
network protocols.
Crypto.PublicKey Public-key encryption and signature algorithms
(RSA, DSA)
Crypto.Util Various useful modules and functions (long-to-string
conversion, random number generation, number
theoretic functions)
"""
__all__ = ['Cipher', 'Hash', 'Protocol', 'PublicKey', 'Util']
__version__ = '2.0.1'
__revision__ = "$Id: __init__.py,v 1.12 2005/06/14 01:20:22 akuchling Exp $"
| 36.192308 | 78 | 0.621679 |
d62174e29c99372f410874e8502e9c0488f735dd | 7,090 | py | Python | trafficrecognition.py | udaykumarjangra/algorithms | 6fbd945af1f5372689122ab9373126d2f401abd4 | [
"MIT"
] | 2 | 2020-10-28T15:02:41.000Z | 2021-10-02T13:18:24.000Z | trafficrecognition.py | udaykumarjangra/algorithms | 6fbd945af1f5372689122ab9373126d2f401abd4 | [
"MIT"
] | 4 | 2020-10-07T05:59:13.000Z | 2021-10-02T08:01:27.000Z | trafficrecognition.py | udaykumarjangra/algorithms | 6fbd945af1f5372689122ab9373126d2f401abd4 | [
"MIT"
] | 51 | 2020-10-01T03:07:30.000Z | 2021-10-05T16:25:22.000Z | import tkinter as tk
from tkinter import filedialog
from tkinter import *
from PIL import ImageTk, Image
import numpy
#To classify sign load the trained model.
from keras.models import load_model
model = load_model('traffic_classifier.h5')
#dictionary for labelling all traffic signs classes.
classes = { 1:'Speed limit (20km/h)',
2:'Speed limit (30km/h)',
3:'Speed limit (50km/h)',
4:'Speed limit (60km/h)',
5:'Speed limit (70km/h)',
6:'Speed limit (80km/h)',
7:'End of speed limit (80km/h)',
8:'Speed limit (100km/h)',
9:'Speed limit (120km/h)',
10:'No passing',
11:'No passing veh over 3.5 tons',
12:'Right-of-way at intersection',
13:'Priority road',
14:'Yield',
15:'Stop',
16:'No vehicles',
17:'Veh > 3.5 tons prohibited',
18:'No entry',
19:'General caution',
20:'Dangerous curve left',
21:'Dangerous curve right',
22:'Double curve',
23:'Bumpy road',
24:'Slippery road',
25:'Road narrows on the right',
26:'Road work',
27:'Traffic signals',
28:'Pedestrians',
29:'Children crossing',
30:'Bicycles crossing',
31:'Beware of ice/snow',
32:'Wild animals crossing',
33:'End speed + passing limits',
34:'Turn right ahead',
35:'Turn left ahead',
36:'Ahead only',
37:'Go straight or right',
38:'Go straight or left',
39:'Keep right',
40:'Keep left',
41:'End no passing veh > 3.5 tons',
42:'Roundabout mandatory',
43:'End of no passing',
#initializing GUI
top=tk.Tk()
top.geometry('800x600')
top.title('Traffic Sign Recognition')
top.configure(background='#CDCDCD')
label=Label(top,background='#CDCDCD', font=('times new roman',30,'bold'))
sign_image = Label(top)
def classify(file_path):
global label_packed
image = Image.open(file_path)
image = image.resize((30,30))
image = numpy.expand_dims(image, axis=0)
image = numpy.array(image)
print(image.shape)
pred = model.predict_classes([image])[0]
sign = classes[pred+1]
print(sign)
label.configure(foreground='#011638', text=sign)
def show_classify_button(file_path):
classify_b=Button(top,text="Classify the Sign",command=lambda: classify(file_path),padx=10,pady=5)
classify_b.configure(background='#364156', foreground='white',font=('times new roman',30,'bold'))
classify_b.place(relx=0.79,rely=0.46)
def upload_image():
try:
file_path=filedialog.askopenfilename()
uploaded=Image.open(file_path)
uploaded.thumbnail(((top.winfo_width()/2.25),(top.winfo_height()/2.25)))
im=ImageTk.PhotoImage(uploaded)
sign_image.configure(image=im)
sign_image.image=im
label.configure(text='')
show_classify_button(file_path)
except:
pass
upload=Button(top,text="Upload the traffic sign for classification/recognition",command=upload_image,padx=10,pady=5)
upload.configure(background='#364156', foreground='white',font=('times new roman',30,'bold'))
upload.pack(side=BOTTOM,pady=50)
sign_image.pack(side=BOTTOM,expand=True)
label.pack(side=BOTTOM,expand=True)
heading = Label(top, text="Know The traffic Signs",pady=30, font=('times new roman',30,'bold'))
heading.configure(background='#CDCDCD',foreground='#364156')
heading.pack()
top.mainloop()
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
import tensorflow as tf
from PIL import Image
import os
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from keras.models import Sequential, load_model
from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout

data = []
labels = []
classes = 43          # number of traffic-sign classes (one folder per class)
cur_path = os.getcwd()

# Images and their labels are retrieved in this block: ./train/<class_id>/*
for i in range(classes):
    path = os.path.join(cur_path, 'train', str(i))
    images = os.listdir(path)
    for a in images:
        try:
            # os.path.join is portable (the original hard-coded a '\\' separator).
            image = Image.open(os.path.join(path, a))
            image = image.resize((30, 30))
            image = np.array(image)
            data.append(image)
            labels.append(i)
        except Exception:
            print("Error in loading image")

# Lists conversion into numpy arrays
data = np.array(data)
labels = np.array(labels)
print(data.shape, labels.shape)

# Splitting training and testing dataset.
# BUG FIX: the original bound the image arrays to Y_* and the labels to x_*,
# then referenced the never-defined names X_train/X_test below (NameError).
X_train, X_test, y_train, y_test = train_test_split(
    data, labels, test_size=0.2, random_state=42)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)

# Converting the labels into one hot encoding
y_train = to_categorical(y_train, 43)
y_test = to_categorical(y_test, 43)

# In this block we will be building the model
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(5, 5), activation='relu',
                 input_shape=X_train.shape[1:]))
model.add(Conv2D(filters=32, kernel_size=(5, 5), activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(rate=0.5))
model.add(Dense(43, activation='softmax'))

# Model compilation
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
epochs = 15
history = model.fit(X_train, y_train, batch_size=32, epochs=epochs,
                    validation_data=(X_test, y_test))
model.save("my_model.h5")

# To easily understand the accuracy we will plot the graphs.
# BUG FIX: the x/y axis labels were swapped (epochs run along the x axis).
plt.figure(0)
plt.plot(history.history['accuracy'], label='training accuracy')
plt.plot(history.history['val_accuracy'], label='val accuracy')
plt.title('Accuracy')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend()
plt.show()
plt.figure(1)
plt.plot(history.history['loss'], label='training loss')
plt.plot(history.history['val_loss'], label='val loss')
plt.title('Loss')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend()
plt.show()

# Here we will check the accuracy on the test dataset that is available
from sklearn.metrics import accuracy_score

test_info = pd.read_csv('Test.csv')
labels = test_info["ClassId"].values
imgs = test_info["Path"].values
data = []
for img in imgs:
    image = Image.open(img)
    image = image.resize((30, 30))
    data.append(np.array(image))
X_eval = np.array(data)
# BUG FIX: the original called predict_classes on the undefined name X_test.
# NOTE(review): predict_classes is removed in TF >= 2.6; when upgrading, use
# np.argmax(model.predict(X_eval), axis=-1) instead.
pred = model.predict_classes(X_eval)
print(accuracy_score(labels, pred))
| 31.936937 | 116 | 0.643441 |
257d6e8b6dddc92559077ec2d4fad19a4351c294 | 1,314 | py | Python | 4_Tran_Thi_Nhu_Huong_LOCAL/bai3.3.py | lpython2006e/exercies | 84343eae57d86708a7984aa02f77183a4688a508 | [
"MIT"
] | null | null | null | 4_Tran_Thi_Nhu_Huong_LOCAL/bai3.3.py | lpython2006e/exercies | 84343eae57d86708a7984aa02f77183a4688a508 | [
"MIT"
] | null | null | null | 4_Tran_Thi_Nhu_Huong_LOCAL/bai3.3.py | lpython2006e/exercies | 84343eae57d86708a7984aa02f77183a4688a508 | [
"MIT"
] | 8 | 2020-07-10T14:13:54.000Z | 2020-08-03T08:17:50.000Z | """Write a program that allow user to enter classmate (name, Birthday, Email),
validate if enter values are valid format (limit length apply), else ask user to enter again per field"""
from datetime import datetime
import re
# todo: naming convention: length (120 max),first-last-middle(opt)
def _check_valid_name(a):
list = ["Huong", " Thach", "Dung", "Thien"]
if a not in list:
return False
else:
return True
# todo: y/m/d - base on locate N-US
def _check_valid_birthday(b):
try:
datetime.strptime(b, '%m/%d/%y')
except:
return False
return True
#todo: https://emailregex.com/
def _check_valid_email(c):
return bool(re.search(r"^[\w\.\+\-]+\@[\w]+\.[a-z]{2,3}$", c))
# todo: while is not valid not.
# Prompt until each field validates. The `while ... else` suites run once the
# loop condition becomes false, i.e. once the value is valid.
# NOTE(review): the prompt says DD/MM/YY but _check_valid_birthday parses
# MM/DD/YY — confirm the intended format.
name = input("Please input your classmate name: ")
while _check_valid_name(name) is False:
    name = input("Please input your classmate name again: ")
else:
    birth = input("Please input your classmate birthday with format DD/MM/YY")
    while _check_valid_birthday(birth) is False:
        birth = input("Please input your classmate birthday with format DD/MM/YY")
    else:
        email = input("please enter your email: ")
        while _check_valid_email(email) is False:
            email = input("please enter your email: ")
        else:
            print("Welcome to our class")
| 28.565217 | 105 | 0.677321 |
917957697d138e37a2e13fcd57b339d433885591 | 2,135 | py | Python | z2/part3/updated_part2_batch/jm/parser_errors_2/252921490.py | kozakusek/ipp-2020-testy | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | [
"MIT"
] | 1 | 2020-04-16T12:13:47.000Z | 2020-04-16T12:13:47.000Z | z2/part3/updated_part2_batch/jm/parser_errors_2/252921490.py | kozakusek/ipp-2020-testy | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | [
"MIT"
] | 18 | 2020-03-06T17:50:15.000Z | 2020-05-19T14:58:30.000Z | z2/part3/updated_part2_batch/jm/parser_errors_2/252921490.py | kozakusek/ipp-2020-testy | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | [
"MIT"
] | 18 | 2020-03-06T17:45:13.000Z | 2020-06-09T19:18:31.000Z | from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 252921490
"""
"""
random actions, total chaos
"""
board = gamma_new(3, 4, 2, 3)
assert board is not None
assert gamma_move(board, 1, 0, 0) == 1
assert gamma_move(board, 1, 2, 3) == 1
assert gamma_busy_fields(board, 1) == 2
assert gamma_move(board, 2, 2, 2) == 1
assert gamma_busy_fields(board, 2) == 1
assert gamma_move(board, 1, 1, 0) == 1
assert gamma_busy_fields(board, 1) == 3
assert gamma_move(board, 2, 1, 3) == 1
assert gamma_busy_fields(board, 2) == 2
assert gamma_move(board, 1, 2, 1) == 1
assert gamma_move(board, 1, 2, 0) == 1
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_move(board, 2, 0, 1) == 1
assert gamma_move(board, 1, 2, 0) == 0
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_move(board, 2, 0, 2) == 1
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 1, 1, 2) == 1
assert gamma_move(board, 1, 1, 1) == 1
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 1, 3, 0) == 0
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 2, 2, 2) == 0
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 1, 2, 2) == 0
assert gamma_move(board, 2, 0, 3) == 1
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_move(board, 1, 1, 3) == 0
assert gamma_golden_possible(board, 1) == 1
assert gamma_move(board, 1, 1, 2) == 0
assert gamma_move(board, 1, 0, 3) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 2, 0, 3) == 0
assert gamma_move(board, 1, 2, 3) == 0
assert gamma_move(board, 2, 1, 1) == 0
assert gamma_move(board, 2, 0, 0) == 0
assert gamma_move(board, 2, 0, 1) == 0
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 1, 2, 2) == 0
assert gamma_free_fields(board, 1) == 0
assert gamma_move(board, 2, 0, 1) == 0
assert gamma_busy_fields(board, 2) == 5
gamma_delete(board)
| 30.070423 | 43 | 0.673536 |
d036625c2d5b44dc65d21ef2bbdceca962371f80 | 7,552 | py | Python | backend/service/payments.py | deti/boss | bc0cfe3067bf1cbf26789f7443a36e7cdd2ac869 | [
"Apache-2.0"
] | 7 | 2018-05-20T08:56:08.000Z | 2022-03-11T15:50:54.000Z | backend/service/payments.py | deti/boss | bc0cfe3067bf1cbf26789f7443a36e7cdd2ac869 | [
"Apache-2.0"
] | 2 | 2021-06-08T21:12:51.000Z | 2022-01-13T01:25:27.000Z | backend/service/payments.py | deti/boss | bc0cfe3067bf1cbf26789f7443a36e7cdd2ac869 | [
"Apache-2.0"
] | 5 | 2016-10-09T14:52:09.000Z | 2020-12-25T01:04:35.000Z | import conf
import json
import logbook
import requests
import posixpath
from arrow import utcnow
from decimal import Decimal
from urllib.parse import urljoin
from requests.auth import HTTPBasicAuth
from utils.i18n import preferred_language, _
from model import db, MessageTemplate
from task.mail import send_email
from api import request_base_url
class CloudpaymentsClient(object):
    # Thin client for the CloudPayments "charge by stored token" endpoint.
    @staticmethod
    def payment_proceed(amount, currency, customer_id, token,
                        customer_email=None, description=None):
        """
        Charge *amount* in *currency* against the stored card *token*.

        :param amount: sum to charge, forwarded as "Amount"
        :param currency: currency code, forwarded as "Currency"
        :param customer_id: account identifier, forwarded as "AccountId"
        :param token: CloudPayments card token
        :param customer_email: optional e-mail, forwarded as "Email"
        :param description: optional text, forwarded as "Description"
        :return:
        bool, dict - Transaction success status, additional info.
        (False, {}) on transport/HTTP errors or a declared error "Message";
        (False, model) when the payment was rejected (model carries details
        such as CardHolderMessage); (True, full JSON body) on success.
        """
        payload = {
            "Amount": amount,
            "Currency": currency,
            "AccountId": customer_id,
            "Token": token}
        if customer_email:
            payload.update({"Email": customer_email})
        if description:
            payload.update({"Description": description})
        # NOTE(review): "[payment_preceed]" tag is misspelled; string kept as-is.
        logbook.info("[payment_preceed] Request payload: {}", payload)
        try:
            r = requests.post(conf.payments.cloudpayments.auto_payments_url,
                              json=payload,
                              auth=HTTPBasicAuth(conf.payments.cloudpayments.public_id,
                                                 conf.payments.cloudpayments.api_secret))
        except requests.exceptions.RequestException as e:
            logbook.error("[payment_proceed] Request exception: {}. Customer_id: {}", e, customer_id)
            return False, {}
        if r.status_code != 200:
            logbook.error("[payment_proceed] Invalid request for customer {}. Response:{}", customer_id, r.text)
            return False, {}
        response_data = r.json()
        if response_data['Success'] is not True:
            if response_data.get('Message'):
                logbook.error('[payment_proceed] Request fails for customer {}. Response: {}',
                              customer_id, response_data)
                return False, {}
            model_info = response_data.get('Model')
            if model_info:
                logbook.error('[payment_proceed] Payment rejected for customer {}. Response: {}',
                              customer_id, response_data)
                return False, model_info
            # NOTE(review): Success != True with neither 'Message' nor 'Model'
            # falls through to the success return below — confirm intended.
        logbook.info("[payment_proceed] Request status code: {}; response: {}", r.status_code, r.text)
        return True, response_data
class PaymentService(object):
    """Card charging plus the customer e-mail notifications around it."""
    @classmethod
    def withdraw(cls, card, amount, currency, customer, description):
        # Charge the customer's stored card; on a rejected transaction the
        # card is invalidated and the customer is e-mailed the reason.
        success, aux_info = CloudpaymentsClient.payment_proceed(amount, currency,
                                                               customer.customer_id, card.token,
                                                               customer.email, description)
        if not success:
            # Payment fails
            if aux_info:
                # Transaction rejected - disable this card
                card.change_status(card.STATUS_INVALID)
                # Send message to user
                cls.send_email_auto_payment_processed(customer, amount, currency, card.card_type, card.last_four,
                                                      aux_info['CardHolderMessage'], accepted=False)
            return
        # Cloudpayment should call back us for 'pay' method
        logbook.info('[withdraw] Customer:{} auto payment for {} {} successful.', customer, amount, currency)
    @classmethod
    def auto_withdraw(cls, customer, card):
        # Scheduled top-up using the amount configured on the customer.
        amount = Decimal(customer.auto_withdraw_amount)
        currency = customer.tariff.currency
        logbook.info('[auto_withdraw] Customer: {} amount: {}, currency: {}, card: {}', customer, amount, currency, card)
        request_description = _("Automated balance recharge via CloudPayments. Customer email: {}")
        PaymentService.withdraw(card, amount, currency, customer, request_description.format(customer.email))
    @classmethod
    def manual_withdraw(cls, customer, card, amount):
        # User-initiated top-up; `amount` must already be a Decimal.
        assert isinstance(amount, Decimal)
        currency = customer.tariff.currency
        logbook.info('[manual_withdraw] Customer: {} amount: {}, currency: {}, card: {}', customer, amount, currency, card)
        request_description = _("Manual balance recharge via CloudPayments. Customer email: {}")
        PaymentService.withdraw(card, amount, currency, customer, request_description.format(customer.email))
    @staticmethod
    def send_email_about_balance_modifying(customer, delta, currency, balance, comment):
        # Notify the customer of a balance change (recharge template when
        # delta > 0, withdraw template otherwise). No-op when the customer's
        # billing e-mails are disabled.
        assert isinstance(delta, Decimal)
        subscription_info = customer.subscription_info()['billing']
        if subscription_info['enable']:
            modifying_date = utcnow().datetime
            if delta > 0:
                template_id = MessageTemplate.CUSTOMER_RECHARGE
            else:
                template_id = MessageTemplate.CUSTOMER_WITHDRAW
            base_url = request_base_url()
            # Imported here rather than at module level — presumably to avoid
            # a circular import; confirm.
            from api.cabinet.customer import CustomerApi
            url = urljoin(base_url, posixpath.join(CustomerApi.CABINET_FRONTEND_PATH, "transactions"))
            subject, body = MessageTemplate.get_rendered_message(template_id, language=customer.locale_language(),
                                                                 money={'money': abs(delta), 'currency': currency},
                                                                 balance={'money': balance, 'currency': currency},
                                                                 comment=comment,
                                                                 withdraw_date=modifying_date,
                                                                 transactions_url=url)
            send_email.delay(subscription_info['email'], subject, body)
    @classmethod
    def send_email_auto_payment_processed(cls, customer, delta, currency,
                                          card_type, card_last_four, comment, accepted=True):
        # Notify the customer that an automatic card charge succeeded
        # (accepted=True) or was rejected. No-op when billing e-mails are off.
        assert isinstance(delta, Decimal)
        subscription_info = customer.subscription_info()['billing']
        if subscription_info['enable']:
            modifying_date = utcnow().datetime
            if accepted:
                template_id = MessageTemplate.CUSTOMER_RECHARGE_AUTO
            else:
                template_id = MessageTemplate.CUSTOMER_RECHARGE_AUTO_REJECT
            base_url = request_base_url()
            from api.cabinet.customer import CustomerApi
            url = urljoin(base_url, posixpath.join(CustomerApi.CABINET_FRONTEND_PATH, "transactions"))
            subject, body = MessageTemplate.get_rendered_message(template_id,
                                                                 language=customer.locale_language(),
                                                                 money={'money': abs(delta), 'currency': currency},
                                                                 withdraw_date=modifying_date,
                                                                 card_type=card_type,
                                                                 card_last_four=card_last_four,
                                                                 transactions_url=url,
                                                                 comment=comment)
            send_email.delay(subscription_info['email'], subject, body)
| 48.722581 | 123 | 0.568591 |
771656148a034efe4e7ee82f3eba07ebce235c9a | 393 | py | Python | techtest/techtest/wsgi.py | rahulkannojia07/gaming-store-adyen | 4c81bd78dce88f244a4e56784914c3cc4359b669 | [
"MIT"
] | null | null | null | techtest/techtest/wsgi.py | rahulkannojia07/gaming-store-adyen | 4c81bd78dce88f244a4e56784914c3cc4359b669 | [
"MIT"
] | null | null | null | techtest/techtest/wsgi.py | rahulkannojia07/gaming-store-adyen | 4c81bd78dce88f244a4e56784914c3cc4359b669 | [
"MIT"
] | null | null | null | """
WSGI config for techtest project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Fallback settings module; deployments may override via the environment.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'techtest.settings')
# Module-level WSGI callable picked up by servers (gunicorn, uWSGI, ...).
application = get_wsgi_application()
| 23.117647 | 78 | 0.78626 |
8373e6817b5c343661b1d5bb04e5352363060394 | 1,090 | py | Python | editor/attributes/player/player_attribute_dk_style.py | PeterC10/COFPES-OF-Editor-6 | 0a9c9b75fada8264634bdc968c9da209c44b29e2 | [
"MIT"
] | 1 | 2022-03-11T12:25:57.000Z | 2022-03-11T12:25:57.000Z | editor/attributes/player/player_attribute_dk_style.py | PeterC10/COFPES-OF-Editor-6 | 0a9c9b75fada8264634bdc968c9da209c44b29e2 | [
"MIT"
] | null | null | null | editor/attributes/player/player_attribute_dk_style.py | PeterC10/COFPES-OF-Editor-6 | 0a9c9b75fada8264634bdc968c9da209c44b29e2 | [
"MIT"
] | null | null | null | from editor.attributes.player.player_attribute import (
PlayerAttribute,
PlayerAttributeTypes,
)
class PlayerAttributeDkStyle(PlayerAttribute):
    """Dropkick-style attribute; storage and parsing are delegated to the
    composite parent attribute."""

    @classmethod
    def att_class_name(cls):
        return "Dropkick Style"

    @classmethod
    def att_class_type(cls):
        return PlayerAttributeTypes.BasicSettings

    def get_raw_value(self):
        return self.parent.get_value()

    def get_value(self):
        return self.parent.get_value()

    def get_label(self):
        """Return the dropkick-style component of the parent's label.

        The parent label is ordered (registered position, dropkick style,
        dribble style), so the dropkick style sits at index 1.
        """
        return self.parent.get_label()[1]

    def set_value(self, value):
        return self.parent.set_value(value)

    def set_value_from_label(self, label):
        """Write *label* into the dropkick-style slot of the parent's
        combined (position, dropkick, dribble) label tuple."""
        position_label = self.parent.registered_position.get_label()
        dribble_label = self.parent.dribble_style.get_label()
        return self.parent.set_value_from_label(
            (position_label, label, dribble_label))
| 27.948718 | 78 | 0.69633 |
f77cf15dfa876f2760c7b8bebd06ea0e09b92918 | 2,354 | py | Python | docs/conf.py | jojacobsen/semsquare | a0f0e9c35e853ec77ce0b7fce8d3ccf4091cc94d | [
"MIT"
] | null | null | null | docs/conf.py | jojacobsen/semsquare | a0f0e9c35e853ec77ce0b7fce8d3ccf4091cc94d | [
"MIT"
] | 6 | 2022-03-18T17:31:44.000Z | 2022-03-31T17:36:14.000Z | docs/conf.py | jojacobsen/semsquare | a0f0e9c35e853ec77ce0b7fce8d3ccf4091cc94d | [
"MIT"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
import django
# Read the Docs builds from the repo checkout; local/Docker builds mount the
# project at /app. Both branches make the package importable for autodoc.
if os.getenv("READTHEDOCS", default=False) == "True":
    sys.path.insert(0, os.path.abspath(".."))
    os.environ["DJANGO_READ_DOT_ENV_FILE"] = "True"
    os.environ["USE_DOCKER"] = "no"
else:
    sys.path.insert(0, os.path.abspath("/app"))
os.environ["DATABASE_URL"] = "sqlite:///readthedocs.db"
os.environ["CELERY_BROKER_URL"] = os.getenv("REDIS_URL", "redis://redis:6379")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
# Django must be configured before autodoc imports any project modules.
django.setup()
# -- Project information -----------------------------------------------------
project = "SEMsquare"
copyright = """2022, Johannes Jacob"""
author = "Johannes Jacob"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.napoleon",
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
| 36.78125 | 79 | 0.669074 |
d3db43a397c0ee4c8025738ce8ea6d0a86e3f27b | 11,567 | py | Python | llgeo/quad4m/db_utils.py | lauralunacabrera/LLGEO | 166daf7417071ec9a8ffb90824f5687c5d69d096 | [
"MIT"
] | 2 | 2021-01-06T16:38:52.000Z | 2021-07-06T02:16:14.000Z | llgeo/quad4m/db_utils.py | lauralunacabrera/LLGEO | 166daf7417071ec9a8ffb90824f5687c5d69d096 | [
"MIT"
] | null | null | null | llgeo/quad4m/db_utils.py | lauralunacabrera/LLGEO | 166daf7417071ec9a8ffb90824f5687c5d69d096 | [
"MIT"
] | null | null | null | ''' Utilities for handling "databases" for QUAD4M analyses.
DESCRIPTION:
This module helps create and manage "databases" of:
* Geometries (db_geoms)
* Earhtquakes (db_accs)
* Non-linear properties (db_nonlins)
* Random fields (db_rfs)
MAIN FUNCTIONS:
This module contains the following functions:
* uptdate_db_geoms
*
'''
# ------------------------------------------------------------------------------
# Import Modules
# ------------------------------------------------------------------------------
import pandas as pd
import numpy as np
import os
import warnings
# LLGEO
import llgeo.quad4m.geometry as q4m_geom
import llgeo.utilities.files as llgeo_fls
# ------------------------------------------------------------------------------
# Main Functions
# ------------------------------------------------------------------------------
def update_db_accs(path_db, file_db, acc, tstep):
    ''' Placeholder: ground-motion entries are not added programmatically.

    The motions for existing projects were processed ahead of time and are
    already saved, and a generic importer may never be practical because the
    raw text formats differ from project to project — so, for now, this is
    deliberately a stub.

    Returns
    -------
    False, always (nothing is ever added to the database).
    '''
    return False
def update_db_geoms(path_db, file_db, path_DXF, new_DXF_files, path_check):
    ''' Adds new entries to database of geometries

    Purpose
    -------
    Given a list of dxf files, this:
        * Processes new entries by generating elems and nodes dataframes and
          getting sizes of mesh.
        * Saves pkl for each new geometry with all info
        * Updates the summary file "file_db" with new entries and returns it.
        * Returns list of dict with geometry info that was saved.

    Each processed geometry dictionary contains the following keys:
        *id     | entry id
        *name   | entry name
        *fname  | name of file where dfs are saved (includes extension .pkl)
        *W      | maximum width of the overall mesh
        *H      | maximum height of the overall mesh
        *nelm   | number of elements in the mesh
        *welm   | average width of all elements in mesh
        *helm   | average height of all elements in mesh
        nodes   | dataframe with node info (see llgeo/quad4m/geometry.py)
        elems   | dataframe with element info (see llgeo/quad4m/geometry.py)
        readme  | short description of file
        (Items marked with * are included in summary file)

    Parameters
    ----------
    path_db : str
        directory containing geometry "database".
    file_db : str
        name of "database" summary file (usually ending in .pkl).
    path_DXF : str
        directory containing new DXF files to be processed
    new_DXF_files : list of str
        list of dxf file names (usually ending in .dxf)
    path_check : str
        directory where "check" DXFs will be printed out.
        If it doesn't exist, this raises an error.
        If set to False, no check DXFs are printed.

    Returns
    -------
    db_geoms : dataframe
        "database" summary file, now including information on new_DXF_files
    geom_dicts : list of dictionaries
        One element per DXF file in "new_DXF_files", each a dict with the
        geometry info described above.
    '''
    # Get the current database summary
    db_geoms = get_db(path_db, file_db, db_type = 'geoms')

    # Continue numbering entries after the current largest id
    if len(db_geoms) > 0:
        i = np.max(db_geoms['id'])
    else:
        i = 0

    # Readme to be included in new entries
    readme = ''' This geometry was processed using llgeo/quad4m/db_utils.
                 It contains dataframes of elems and nodes, and some summary
                 info. Will be used to probabilistically run ground response
                 analyses using QUAD4MU.'''

    # Loop through new files and process them
    geom_dicts = []
    for new_DXF_file in new_DXF_files:

        # Name of entry to be processed
        name = new_DXF_file.replace('.dxf', '')

        # If name already exists, read (don't recreate) and continue
        if name in db_geoms['name'].tolist():
            mssg = 'Entry alread exists: {:10s}'.format(name)
            mssg += '\n Reading (not creating) data'
            warnings.showwarning(mssg, UserWarning, 'db_utils.py', '')

            f_exist = db_geoms.loc[db_geoms['name'] == name, 'fname'].item()
            geom_dicts += [llgeo_fls.read_pkl(path_db, f_exist)]
            continue

        # Otherwise, process new entry
        i += 1  # Update entry ID
        nodes, elems = q4m_geom.dxf_to_dfs(path_DXF, new_DXF_file)
        W, H, N, w, h = q4m_geom.get_mesh_sizes(nodes, elems)

        # Save new entry to pickle in database directory
        fname = '{i:03d}_{name}.pkl'.format(i = i, name = name)
        out_data = {'id': i, 'name': name, 'fname': fname, 'W': W, 'H': H,
                    'nelm': N, 'welm': w, 'helm':h, 'nodes':nodes,
                    'elems':elems, 'readme': readme}
        llgeo_fls.save_pkl(path_db, fname, out_data, True)

        # Make sure check directory exists (if needed)
        if path_check and not os.path.exists(path_check):
            err = 'DXF check directory does not exists\n'
            err += 'Create it, or set path_check = False'
            raise Exception(err)

        # Output DXFs as a check (if path_check is not False)
        elif path_check:
            file_check = fname.replace('.pkl', '.dxf')
            q4m_geom.dfs_to_dxf(path_check, file_check, nodes, elems)

        # Add summary info to db_geoms.
        # BUG FIX: DataFrame.append was removed in pandas 2.0; pd.concat is
        # the supported equivalent.
        cols = list(db_geoms)
        new_row = pd.DataFrame([[i, name, fname, W, H, N, w, h]], columns = cols)
        db_geoms = pd.concat([db_geoms, new_row], ignore_index = True)

        # Add new data for list export
        geom_dicts += [out_data]

    # Save db_geoms summary file
    db_geoms.to_pickle(path_db + file_db)

    return db_geoms, geom_dicts
def get_unique_accs(db_accs, cols = ['T', 'type', 'name']):
    ''' Drop duplicate earthquake records in-place, keyed on *cols*.

    The accelerations database sometimes holds the same event more than once
    (same earthquake and return period, different orientation). Only the
    first occurrence of each (T, type, name) combination survives — which
    one that is, is therefore arbitrary.

    Parameters
    ----------
    db_accs : dataframe
        accelerations database summary (modified in place).
    cols : list of str, optional
        columns that define uniqueness.

    Returns
    -------
    db_accs : dataframe
        the same object, de-duplicated and re-indexed from 0.
    '''
    db_accs.drop_duplicates(subset = cols, keep = 'first',
                            inplace = True, ignore_index = True)
    return db_accs
# ------------------------------------------------------------------------------
# Helper Functions
# ------------------------------------------------------------------------------
def search(db, conditions, return_col = 'all'):
    ''' Return the db entries that satisfy every condition.

    Purpose
    -------
    Filters a "database" summary dataframe down to the rows where each
    column named in *conditions* equals the given value (equality only),
    then selects the requested output columns.

    Parameters
    ----------
    db : dataframe
        Database summary file.
    conditions : dict
        Column -> required value. Ex: {'T': 2475} keeps rows whose column T
        equals 2475. All conditions must hold simultaneously.
    return_col : list of str (or str), optional
        Columns to return. A single string yields a numpy array; a list
        (or the default 'all', meaning every column) yields a dataframe.

    Returns
    -------
    result : numpy array or dataframe
        Matching entries, shaped according to *return_col* as described
        above.
    '''
    # Boolean row mask: True only where every condition holds.
    row_mask = np.all([db[col] == val for col, val in conditions.items()],
                      axis = 0)

    # 'all' is shorthand for every column in the dataframe.
    if return_col == 'all':
        return_col = list(db)

    selected = db.loc[row_mask, return_col]

    # A single (non-list) column request comes back as a plain numpy array.
    if not isinstance(return_col, list):
        selected = selected.values

    return selected
def get_db(path_db, file_db, db_type = False, reset = False):
    ''' Gets the summary dataframe of available database entries.

    Purpose
    -------
    This function gets the dataframe that contains summary information of
    the entries available in the "database" stored in "path_db".

    If path_db + file_db does not exist:
        An empty DF will be created, saved as pkl, and returned.
    If path_db + file_db already exists and reset = False:
        Nothing is created/saved. The existing pkl is read and returned.
    If path_db + file_db already exists and reset = True:
        An empty DF will be created, saved as pkl, and returned.
        CAREFUL: this overrides the existing file.

    Parameters
    ----------
    path_db : str
        path to the "database" directory.
    file_db : str
        name of "database" summary file (usually ending in .pkl).
    db_type : str
        type of dataframe to get. One of: geoms | accs | nonlins | rfs
        Only needed when a new summary file must be created.
        Only 'geoms' is implemented so far.
    reset : bool (optional)
        set TRUE to replace the summary file with an empty one (CAREFUL!).

    Returns
    -------
    db : DataFrame
        Summary dataframe: either an empty new one, or the contents of the
        existing file_db (depends on inputs).

    Raises
    ------
    NotImplementedError
        for db_type in ('accs', 'nonlins', 'rfs') when a new file is needed.
        (BUG FIX: 'nonlins'/'rfs' previously fell through with `pass` and
        crashed later with UnboundLocalError on `cols`.)
    Exception
        for an unrecognized db_type.
    '''
    # Check whether file exists
    exists = os.path.isfile(path_db + file_db)

    # Print warning if reset = True
    if reset:
        mssg = 'db_' + db_type + ' summary file was deleted!!!'
        mssg += ' Make sure to remove pickle files as well.'
        warnings.showwarning(mssg, UserWarning, 'db_utils.py', '')

    # Create new file, if needed
    if not exists or reset:

        # Columns to include in the new summary file
        if db_type == 'geoms':
            cols = ['id', 'name', 'fname', 'W', 'H', 'nelm', 'welm', 'helm']
        elif db_type in ('accs', 'nonlins', 'rfs'):
            raise NotImplementedError(
                'db_type "{}" is not implemented yet'.format(db_type))
        else:
            raise Exception('type of db not recognized.')

        db = pd.DataFrame([], columns = cols)
        db.to_pickle(path_db + file_db)

        mssg = 'New database summary file created! :)'
        warnings.showwarning(mssg, UserWarning, 'db_utils.py', '')

    # If no new file is needed, read existing one
    else:
        db = llgeo_fls.read_pkl(path_db, file_db)

    return db
c20ba76dda60cb5f729e5367bb8fbeed1a0f2404 | 162 | py | Python | src/Cylinder/Calculations/CylinderCalcs.py | thepvguy/calctoys | f7ef4e422d8a27cc387c1a24b5fb6e318d774f57 | [
"Unlicense"
] | 7 | 2018-07-17T08:01:34.000Z | 2021-06-14T03:33:58.000Z | src/Cylinder/Calculations/CylinderCalcs.py | thepvguy/calctoys | f7ef4e422d8a27cc387c1a24b5fb6e318d774f57 | [
"Unlicense"
] | null | null | null | src/Cylinder/Calculations/CylinderCalcs.py | thepvguy/calctoys | f7ef4e422d8a27cc387c1a24b5fb6e318d774f57 | [
"Unlicense"
] | 6 | 2018-10-01T10:29:58.000Z | 2022-01-24T22:34:16.000Z | class CylinderCalcParams:
    def __init__(self):
        # No configurable parameters yet; placeholder for future inputs.
        pass
class CylinderCalcs:
    """Cylinder calculations driven by a CylinderCalcParams bundle."""

    def __init__(self, params: CylinderCalcParams):
        # Short alias `p` kept so later formulas stay compact.
        self.p = params
| 18 | 51 | 0.679012 |
0eaaebfbde8985d855c37583f2085b259f266e7c | 2,416 | py | Python | pyexfil/network/POP3/pop_exfil_client.py | goffinet/PyExfil | 8729442b6822f7587475dae7e8bd0f927c7df45c | [
"MIT"
] | 1 | 2020-03-06T09:13:59.000Z | 2020-03-06T09:13:59.000Z | pyexfil/network/POP3/pop_exfil_client.py | goffinet/PyExfil | 8729442b6822f7587475dae7e8bd0f927c7df45c | [
"MIT"
] | null | null | null | pyexfil/network/POP3/pop_exfil_client.py | goffinet/PyExfil | 8729442b6822f7587475dae7e8bd0f927c7df45c | [
"MIT"
] | null | null | null | #!/usr/bin/python
import sys
import time
import zlib
import socket
import base64
import progressbar
# Configurations
host = '127.0.0.1'
port = 1100
conns = 5
# Globals
MAX_SIZE = 4000
CHUNK = 256
ERR = 1
OKAY = 0
FLOC = "/etc/passwd"
# NOTE: Python 2 syntax (`except IOError, e`); do not run under Python 3.
def get_file(file_name):
	# Read the target file fully into memory; exits the process on I/O error.
	try:
		f = open(file_name, "rb")
		f_content = f.read()
		f.close()
	except IOError, e:
		sys.stderr.write("[-] Error reading file %s.\n" % e)
		sys.exit(ERR)
	sys.stdout.write("[+] File is ready and is in memory.\n")
	# Returns (base64-encoded payload, CRC32 checksum of the raw content).
	return base64.b64encode(f_content), zlib.crc32(f_content)
def connect_to_server():
    """Open a TCP connection to the exfiltration server (module-level host/port).

    Returns the connected socket; on failure reports to stderr and exits
    the process with ERR.
    """
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((host, port))
        sys.stdout.write("[+] Connected to the exfiltration server.\n")
        return sock
    except socket.error as e:  # `as` form works on py2.6+ and py3 alike
        sys.stderr.write("[-] Could not connect to server.\n%s\n" % str(e))
        sys.exit(ERR)
if __name__ == "__main__":
    # Load and encode the target file, then open the covert channel.
    b64_file, file_crc = get_file(FLOC)
    sock = connect_to_server()
    data = sock.recv(MAX_SIZE)
    # The server must present the expected fake-POP3 banner before we proceed.
    if data.find("+OK POP3 service") == -1:
        sys.stderr.write("[-] Server header did not match.\nHalting exfiltration.\n")
        sys.exit(ERR)
    sock.send("USER exfil\n")
    data = sock.recv(MAX_SIZE)
    if data.find("+OK password required for user exfil") == -1:
        sys.stderr.write("[-] Server did not accept the user. Something is wrong.\n")
        sys.exit(ERR)
    # Split the base64 payload into fixed-size chunks.
    all_data_packets = [b64_file[i:i+CHUNK] for i in range(0, len(b64_file), CHUNK)]
    sock.send(base64.b64encode("%s;%s;%s;0" % (FLOC, file_crc, len(all_data_packets)))) # filename, crc32, packets_count, this_packet_count
    sys.stdout.write("[+] Server passed auth and has received the header.\n")
    data = sock.recv(MAX_SIZE)
    # NOTE(review): the protocol abuses the "-ERR [AUTH]" reply as an
    # acknowledgement; its absence (find() == -1) is treated as failure.
    if data.find("-ERR [AUTH] Authentication failed") == -1:
        sys.stderr.write("[-] Did not get confirmations for file content.\n")
        sys.exit(ERR)
    progress = progressbar.ProgressBar()
    for i in progress(range(len(all_data_packets))):
        # Each packet is "<index>;<chunk>"; the delay paces the transfer.
        sock.send("%s;%s" % (i, all_data_packets[i]))
        time.sleep(0.1)
        data = sock.recv(MAX_SIZE)
        if data.find("-ERR [AUTH] Authentication failed") == -1:
            sys.stderr.write("[!] Error seding packet %s.\n" % i)
            break
    # End-of-transfer marker.
    sock.send("0000")
    sock.close()
    sys.stdout.write("[+] Finished sending file. Closing socket.\n")
| 30.582278 | 139 | 0.628311 |
a0e9b3857df9d9cc0c22b766e3d16121eea7a27e | 1,140 | bzl | Python | go/static.bzl | the-superpirate/rules_docker | 46d29e34399a992087c857b13d8dcb8ec80dfd85 | [
"Apache-2.0"
] | null | null | null | go/static.bzl | the-superpirate/rules_docker | 46d29e34399a992087c857b13d8dcb8ec80dfd85 | [
"Apache-2.0"
] | null | null | null | go/static.bzl | the-superpirate/rules_docker | 46d29e34399a992087c857b13d8dcb8ec80dfd85 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Generated file with dependencies for language rule."""
# !!!! THIS IS A GENERATED FILE TO NOT EDIT IT BY HAND !!!!
#
# To regenerate this file, run ./update_deps.sh from the root of the
# git repository.
# Map of distroless image tag -> pinned sha256 digest. Regenerate with
# ./update_deps.sh (see file header); do not edit by hand.
DIGESTS = {
    # "gcr.io/distroless/static:debug" circa 2021-09-25 14:45 -0400
    "debug": "sha256:a8fc00a6b3fe7b536b0731df01574c2113cad63c6196246126224c698265a885",
    # "gcr.io/distroless/static:latest" circa 2021-09-25 14:45 -0400
    "latest": "sha256:aadea1b1f16af043a34491eec481d0132479382096ea34f608087b4bef3634be",
}
| 42.222222 | 88 | 0.754386 |
b2f2e3c9062de980d5dcbe0547634cdd3f82261f | 320 | py | Python | casearch/log.py | openpyer/casearch | e9b7dd85275a49ba4018621b73081d8677f917df | [
"MIT"
] | 1 | 2020-08-27T09:26:07.000Z | 2020-08-27T09:26:07.000Z | casearch/log.py | openpyer/casearch | e9b7dd85275a49ba4018621b73081d8677f917df | [
"MIT"
] | 1 | 2020-05-27T12:48:03.000Z | 2020-05-27T12:52:33.000Z | casearch/log.py | EathonTnT/casearch | e9b7dd85275a49ba4018621b73081d8677f917df | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: ZhangXiaocheng
# @File: log.py
# @Time: 2019/8/27 10:13
import logging
logging.basicConfig(format='[%(asctime)s] %(levelname)s %(message)s',
datefmt="%y-%m-%d %H:%M:%S",
level=logging.INFO)
logger = logging.getLogger()
| 21.333333 | 69 | 0.571875 |
c3d419ee047550f261d26c3946541ba1b4cb36e0 | 3,127 | py | Python | kha/scraper.py | claui/kommtheuteaktenzeichen | 2afbdfd1731a8dd6e222d094b0ee26c1a1945e61 | [
"Apache-2.0"
] | 2 | 2021-06-06T15:29:08.000Z | 2021-06-07T20:37:38.000Z | kha/scraper.py | claui/kommtheuteaktenzeichen | 2afbdfd1731a8dd6e222d094b0ee26c1a1945e61 | [
"Apache-2.0"
] | null | null | null | kha/scraper.py | claui/kommtheuteaktenzeichen | 2afbdfd1731a8dd6e222d094b0ee26c1a1945e61 | [
"Apache-2.0"
] | 1 | 2021-05-31T16:48:08.000Z | 2021-05-31T16:48:08.000Z | """Scrape episodes from online sources."""
from datetime import datetime
import re
from typing import Dict, Iterable, Match, Optional, Tuple
import requests
from .episode import Episode
from .settings \
import WUNSCHLISTE_IMPLIED_TIMEZONE, \
WUNSCHLISTE_QUERY_PARAMETERS, WUNSCHLISTE_URL
WUNSCHLISTE_SELECT_EPISODE_PATTERN = r'(?ms)<li.*?</li>'
WUNSCHLISTE_PARSE_EPISODE_PATTERN = r"""(?msx)
(?:
heute|
morgen|
[A-Z][a-z],[^<]+ # Weekday
(?P<day>\d{2})\.
(?P<month>\d{2})\.
<.*?> # Multiple text nodes or tags
(?P<year>\d{4})
)
<.*?> # Multiple text nodes or tags
(?P<hour>\d{1,2}):
(?P<minute>\d{2})[^<]+h
<.*?"Episode"> # Multiple text nodes or tags
(?P<episode_number>[^<]+)
(?:<[^>]+>)+ # Multiple tags
(?P<name>[^<]+)
(?:<[^>]+>)+ # Multiple tags
(?P<rerun>(?:\s+\(Wdh.\))?)
"""
def scrape_wunschliste(html: Optional[str] = None) \
        -> Iterable[Episode]:
    """Scrape episodes from wunschliste.de.

    If *html* is given it is parsed directly; otherwise the page is fetched
    live. Yields one Episode per entry that carries an explicit date (the
    "heute"/"morgen" entries without a day group are skipped).
    """
    def get_html() -> str:
        # Fetch the listing page; raises on HTTP errors.
        response = requests.get(WUNSCHLISTE_URL,
                                params=WUNSCHLISTE_QUERY_PARAMETERS)
        response.raise_for_status()
        return response.text
    def parse_episodes(html_source: str) \
            -> Iterable[Tuple[str, Optional[Match[str]]]]:
        # Pair each raw <li> snippet with its (possibly failed) field match.
        return (
            (
                episode_html,
                re.search(WUNSCHLISTE_PARSE_EPISODE_PATTERN,
                          episode_html)
            )
            for episode_html
            in re.findall(WUNSCHLISTE_SELECT_EPISODE_PATTERN,
                          html_source)
        )
    def cleanup_html(html_dict: Dict[str, str]) -> Dict[str, str]:
        # Collapse runs of whitespace/escaped newlines in every captured field.
        return {
            key: re.sub(r'(?m)(?:\s|\\n)+(?=\s|\\n)', '', value)
            for key, value in html_dict.items()
        }
    def to_episode(raw_episode_dict: Dict[str, str]) -> Episode:
        # Build an Episode from the cleaned regex capture groups.
        return Episode(
            int(raw_episode_dict['episode_number']),
            name=raw_episode_dict['name'],
            date_published=datetime(
                int(raw_episode_dict['year']),
                int(raw_episode_dict['month']),
                int(raw_episode_dict['day']),
                hour=int(raw_episode_dict['hour']),
                minute=int(raw_episode_dict['minute']),
                tzinfo=WUNSCHLISTE_IMPLIED_TIMEZONE,
            ),
            sd_date_published=datetime.now(),
            is_rerun=bool(raw_episode_dict['rerun']),
            is_spinoff=not raw_episode_dict['name'].startswith('Folge'),
            tz=WUNSCHLISTE_IMPLIED_TIMEZONE,
        )
    for episode_html, episode_match \
            in parse_episodes(html or get_html()):
        if not episode_match:
            raise RuntimeError(
                f'Unable to parse episode from {repr(episode_html)}')
        # Only entries with an explicit date carry the 'day' group.
        if episode_match.groupdict()['day']:
            yield to_episode(
                cleanup_html(episode_match.groupdict())
            )
799dc322981d41c18d8aa0faee557414ac426ad2 | 207 | py | Python | lib/algorithms/base_algorithm.py | farfanoide/libhdd-sched | cb81d68aeb42d77357006e369caa71ee17620092 | [
"MIT"
] | null | null | null | lib/algorithms/base_algorithm.py | farfanoide/libhdd-sched | cb81d68aeb42d77357006e369caa71ee17620092 | [
"MIT"
] | null | null | null | lib/algorithms/base_algorithm.py | farfanoide/libhdd-sched | cb81d68aeb42d77357006e369caa71ee17620092 | [
"MIT"
] | null | null | null | class BaseAlgorithm(object):
""" An abstract class wich defines
functionality shared among algorithms
"""
pfs = []
reqs = []
@staticmethod
def get_pfs(a_lot):
pass
| 15.923077 | 41 | 0.589372 |
27a7e6f4037e5f19e6043c9533e9f55dee90d7c1 | 3,331 | py | Python | cscs-checks/tools/profiling_and_debugging/gdb4hpc.py | jfavre/reframe | ddcdb2e3503f28168d04867e68db5c2266619d02 | [
"BSD-3-Clause"
] | null | null | null | cscs-checks/tools/profiling_and_debugging/gdb4hpc.py | jfavre/reframe | ddcdb2e3503f28168d04867e68db5c2266619d02 | [
"BSD-3-Clause"
] | 1 | 2019-07-03T19:41:50.000Z | 2019-07-03T19:41:50.000Z | cscs-checks/tools/profiling_and_debugging/gdb4hpc.py | GiuseppeLoRe/reframe | a1e5aec54dd29925af96e4bb7095f47ea9547c5a | [
"BSD-3-Clause"
] | null | null | null | import os
import reframe as rfm
import reframe.utility.sanity as sn
class Gdb4hpcCheck(rfm.RegressionTest):
    """ReFrame check that runs the Cray gdb4hpc debugger against a Jacobi
    test program built for the given language (*lang*, *extension*)."""
    def __init__(self, lang, extension):
        super().__init__()
        self.name = type(self).__name__ + '_' + lang.replace('+', 'p')
        self.descr = 'Cray gdb4hpc check for %s' % lang
        self.lang = lang
        self.extension = extension
        self.build_system = 'Make'
        # NOTE: Restrict concurrency to allow creation of Fortran modules
        if lang == 'F90':
            self.build_system.max_concurrency = 1
        # The checked executable is gdb4hpc itself; the Jacobi binary is
        # the debuggee launched from the generated jobscript.
        self.executable = 'gdb4hpc'
        self.executable_opts = ['-v']
        self.target_executable = './jacobi'
        self.gdbcmds = './%s.in' % self.executable
        self.gdbslm = '%s.slm' % self.executable
        self.gdbrpt = '%s.rpt' % self.executable
        self.sourcesdir = os.path.join('src', lang)
        self.valid_prog_environs = ['PrgEnv-gnu']
        self.modules = ['gdb4hpc']
        self.prgenv_flags = ['-g', '-O2', '-fopenmp']
        self.build_system.cflags = self.prgenv_flags
        self.build_system.cxxflags = self.prgenv_flags
        self.build_system.fflags = self.prgenv_flags
        self.num_tasks = 1
        self.num_tasks_per_node = 1
        self.num_cpus_per_task = 4
        self.num_tasks_per_core = 1
        self.num_iterations = 5
        self.variables = {
            'CRAYPE_LINK_TYPE': 'dynamic',
            'OMP_NUM_THREADS': str(self.num_cpus_per_task),
            'ITERATIONS': str(self.num_iterations),
            'OMP_PROC_BIND': 'true',
        }
        self.maintainers = ['JG']
        self.tags = {'production'}
        # gdb4hpc has its own way to launch a debugging job and needs an
        # additional jobscript. The reframe jobscript can be copied for that
        # purpose, by adding the cray_debug_ comments around the job launch
        # command to be debugged, gdb4hpc is then activated by removing the
        # #GDB4HPC comments in the next (post_run) step.
        self.pre_run = [
            '#GDB4HPC #cray_debug_start',
            '#GDB4HPC srun %s' % self.target_executable,
            '#GDB4HPC #cray_debug_end'
        ]
    def setup(self, partition, environ, **job_opts):
        """Generate the extra gdb4hpc jobscript after the standard setup."""
        super().setup(partition, environ, **job_opts)
        # create extra jobscript for gdb4hpc:
        self.post_run = [
            'sed "s-#GDB4HPC --" %s | '
            'egrep -v "output=|error=|^gdb4hpc" &> %s' %
            (self.job.script_filename, self.gdbslm),
            'gdb4hpc -b %s &> %s' % (self.gdbcmds, self.gdbrpt)
        ]
@rfm.required_version('>=2.14')
@rfm.parameterized_test(['F90', 'F90'], ['C++', 'cc'])
class Gdb4hpcCpuCheck(Gdb4hpcCheck):
    """CPU variant of the gdb4hpc check, parameterized over (lang, extension).

    Validates the numeric result extracted from the gdb4hpc report plus the
    debugger banner and clean-shutdown message.
    """
    def __init__(self, lang, extension):
        super().__init__(lang, extension)
        self.valid_systems = ['dom:gpu', 'dom:mc']
        self.sanity_patterns = sn.all([
            # Residual printed by the Jacobi run, within +/-10% of reference.
            sn.assert_reference(sn.extractsingle(
                r'^tst\{0\}:\s+(?P<result>\d+.\d+[eE]-\d+)',
                'gdb4hpc.rpt', 'result', float),
                2.572e-6, -1e-1, 1.0e-1),
            sn.assert_found(r'gdb4hpc \d\.\d - Cray Line Mode Parallel Debug',
                            'gdb4hpc.rpt'),
            sn.assert_found(r'Shutting down debugger and killing application',
                            'gdb4hpc.rpt')
        ])
| 39.188235 | 78 | 0.580907 |
66c02f4da73f50199911b55b42157976f30761bf | 4,361 | py | Python | parse_daily_report.py | guardiantest/dailyreport | 60a075432f3356d4a4a75568205e683fee58466d | [
"Unlicense"
] | null | null | null | parse_daily_report.py | guardiantest/dailyreport | 60a075432f3356d4a4a75568205e683fee58466d | [
"Unlicense"
] | 6 | 2020-03-24T16:08:06.000Z | 2021-06-10T20:53:08.000Z | parse_daily_report.py | guardiantest/dailyreport | 60a075432f3356d4a4a75568205e683fee58466d | [
"Unlicense"
] | null | null | null | import csv
import os
from pymongo import MongoClient
import operator
from datetime import datetime
# Connect to the local MongoDB and pick the collections this script updates.
client = MongoClient('localhost', 27017)
db = client['stock']
collectReport = db['daily_report']
collectStock = db['stock']
folder = 'daily/TWSE'
# Outer loop: one sub-folder per trading day (folder name == date string).
for f in os.listdir(folder):
    stock_no = f.split('.')[0]
    result = {}
    if '.DS_Store' in f:
        continue
    dateTime = f
    folder_path = 'daily/TWSE/{0}'.format(f)
    # Inner loop: one CSV per stock for that day.
    for file in os.listdir(folder_path):
        stock_no = file.split('.')[0]
        if '.DS_Store' in file:
            continue
        with open(folder_path + '/{0}'.format(file)) as csv_file:
            csv_data = csv.reader(csv_file)
            data_list = []
            columnList = []
            # Rows 0-2 are headers; data rows are split into two half-rows
            # (columns 0-4 and 6+) that are processed independently.
            for index, item in enumerate(csv_data):
                try:
                    if index < 3:
                        item = [value.strip().replace(',', '') for value in item]
                    else:
                        item = [value.strip().replace('\u3000', '') for value in item]
                    column1 = item[:5]
                    column2 = item[6:]
                    columnList.append(column1)
                    columnList.append(column2)
                except Exception as e:
                    print("except on {0}".format(e))
                    continue
            sellList = dict()
            buyList = dict()
            totalBuy = 0
            totalSell = 0
            # Aggregate buy/sell volume per brokerage (first 4 chars = id).
            for item in columnList:
                try:
                    # NOTE(review): len(item) < 0 can never be true; this was
                    # presumably meant to skip empty rows (len(item) == 0).
                    if len(item) < 0:
                        continue
                    if item[0] == '':
                        continue
                    item = [value.strip().replace('\u3000', '') for value in item]
                    brokerageId = str(item[1])[:4]
                    brokerageName = str(item[1])[4:]
                    price = float(item[2])
                    buy = float(item[3])
                    sell = float(item[4])
                    # NOTE(review): these totals are overwritten below before
                    # they are ever read; the accumulation here is dead code.
                    totalBuy += buy
                    totalSell += sell
                    if buy > 0:
                        if brokerageId not in buyList:
                            buyList[brokerageId] = buy
                        else:
                            newBuy = buyList[brokerageId] + buy
                            buyList.update({brokerageId: newBuy})
                    if sell > 0:
                        if brokerageId not in sellList:
                            sellList[brokerageId] = sell
                        else:
                            newSell = sellList[brokerageId] + sell
                            sellList.update({brokerageId: newSell})
                    data = {
                        'brokerage': brokerageId + brokerageName,
                        'price': float(price),
                        'buy': float(buy),
                        'sell': float(sell)
                    }
                    data_list.append(data)
                except Exception as e:
                    print("stock_no : {0} except {1}".format(stock_no, e))
                    continue
            # Net buy/sell from the top-15 brokerages on each side only.
            buyBrokerageCount = len(buyList) - len(sellList)
            sortedBuyList = sorted(buyList.items(), key=operator.itemgetter(1), reverse=True)[:15]
            sortedSellList = sorted(sellList.items(), key=operator.itemgetter(1), reverse=True)[:15]
            totalBuy = 0
            for buy in sortedBuyList:
                totalBuy += buy[1]
            totalSell = 0
            for sell in sortedSellList:
                totalSell += sell[1]
            buySell = (totalBuy - totalSell)
            new_date = datetime.strptime(dateTime + '00:00:00', '%Y%m%d%H:%M:%S')
            jsonData = {'date': new_date, 'list': data_list}
            # Update the matching per-day detail entry for this stock.
            collectStock.update({"stockNo": stock_no, "details.date": new_date},
                                {'$set': {"details.$.buySell": buySell,
                                          "details.$.buyBrokerageCount": buyBrokerageCount}})
            print('insert : {0} {1}'.format(stock_no, new_date))
            columnList.clear()
            # stock_collect_date = collectReport.find({'stockNo': stock_no, 'details.date': new_date})
            # if stock_collect_date.count() == 0:
            #     post_id = collectReport.update_one({"stockNo": stock_no}, {"$push": {"details": jsonData}}).upserted_id
0b603ade7b0cf0de84f09fdbc7797f3a34d87c30 | 88 | py | Python | output/models/ms_data/regex/re_i43_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/ms_data/regex/re_i43_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/ms_data/regex/re_i43_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from output.models.ms_data.regex.re_i43_xsd.re_i43 import Doc
# Explicit public API of this generated package.
__all__ = [
    "Doc",
]
| 14.666667 | 61 | 0.715909 |
965078a54541b6d83180a53fffc790557bdf1be9 | 3,699 | py | Python | envs/mujoco/swimmer.py | hyyh28/SAIL | 125ad3e64eefcf532931f567b95a5320737851e9 | [
"MIT"
] | 16 | 2020-04-29T03:25:41.000Z | 2022-03-22T02:19:38.000Z | envs/mujoco/swimmer.py | hyyh28/SAIL | 125ad3e64eefcf532931f567b95a5320737851e9 | [
"MIT"
] | null | null | null | envs/mujoco/swimmer.py | hyyh28/SAIL | 125ad3e64eefcf532931f567b95a5320737851e9 | [
"MIT"
] | 4 | 2020-04-29T03:22:53.000Z | 2021-12-01T02:40:16.000Z | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env
class DisableSwimmerEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """Swimmer MuJoCo env variant loaded from disabled_swimmer.xml."""
    def __init__(self):
        dir_path = os.path.dirname(os.path.realpath(__file__))
        mujoco_env.MujocoEnv.__init__(self, '%s/assets/disabled_swimmer.xml' % dir_path, 4)
        utils.EzPickle.__init__(self)
    def step(self, a):
        # Reward = forward x-velocity minus a small quadratic control cost.
        ctrl_cost_coeff = 0.0001
        xposbefore = self.sim.data.qpos[0]
        self.do_simulation(a, self.frame_skip)
        xposafter = self.sim.data.qpos[0]
        reward_fwd = (xposafter - xposbefore) / self.dt
        reward_ctrl = - ctrl_cost_coeff * np.square(a).sum()
        reward = reward_fwd + reward_ctrl
        ob = self._get_obs()
        # Episode never terminates from within the env (done=False).
        return ob, reward, False, dict(reward_fwd=reward_fwd, reward_ctrl=reward_ctrl)
    def _get_obs(self):
        # Observation excludes the global x/y position (qpos[2:]).
        qpos = self.sim.data.qpos
        qvel = self.sim.data.qvel
        return np.concatenate([qpos.flat[2:], qvel.flat])
    def reset_model(self):
        # Restart from the initial state plus small uniform noise.
        self.set_state(
            self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq),
            self.init_qvel + self.np_random.uniform(low=-.1, high=.1, size=self.model.nv)
        )
        return self._get_obs()
class HeavySwimmerEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """Swimmer MuJoCo env variant loaded from heavy_swimmer.xml."""
    def __init__(self):
        dir_path = os.path.dirname(os.path.realpath(__file__))
        mujoco_env.MujocoEnv.__init__(self, '%s/assets/heavy_swimmer.xml' % dir_path, 4)
        utils.EzPickle.__init__(self)
    def step(self, a):
        # Reward = forward x-velocity minus a small quadratic control cost.
        ctrl_cost_coeff = 0.0001
        xposbefore = self.sim.data.qpos[0]
        self.do_simulation(a, self.frame_skip)
        xposafter = self.sim.data.qpos[0]
        reward_fwd = (xposafter - xposbefore) / self.dt
        reward_ctrl = - ctrl_cost_coeff * np.square(a).sum()
        reward = reward_fwd + reward_ctrl
        ob = self._get_obs()
        # Episode never terminates from within the env (done=False).
        return ob, reward, False, dict(reward_fwd=reward_fwd, reward_ctrl=reward_ctrl)
    def _get_obs(self):
        # Observation excludes the global x/y position (qpos[2:]).
        qpos = self.sim.data.qpos
        qvel = self.sim.data.qvel
        return np.concatenate([qpos.flat[2:], qvel.flat])
    def reset_model(self):
        # Restart from the initial state plus small uniform noise.
        self.set_state(
            self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq),
            self.init_qvel + self.np_random.uniform(low=-.1, high=.1, size=self.model.nv)
        )
        return self._get_obs()
class LightSwimmerEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """Swimmer MuJoCo env variant loaded from light_swimmer.xml."""
    def __init__(self):
        dir_path = os.path.dirname(os.path.realpath(__file__))
        mujoco_env.MujocoEnv.__init__(self, '%s/assets/light_swimmer.xml' % dir_path, 4)
        utils.EzPickle.__init__(self)
    def step(self, a):
        # Reward = forward x-velocity minus a small quadratic control cost.
        ctrl_cost_coeff = 0.0001
        xposbefore = self.sim.data.qpos[0]
        self.do_simulation(a, self.frame_skip)
        xposafter = self.sim.data.qpos[0]
        reward_fwd = (xposafter - xposbefore) / self.dt
        reward_ctrl = - ctrl_cost_coeff * np.square(a).sum()
        reward = reward_fwd + reward_ctrl
        ob = self._get_obs()
        # Episode never terminates from within the env (done=False).
        return ob, reward, False, dict(reward_fwd=reward_fwd, reward_ctrl=reward_ctrl)
    def _get_obs(self):
        # Observation excludes the global x/y position (qpos[2:]).
        qpos = self.sim.data.qpos
        qvel = self.sim.data.qvel
        return np.concatenate([qpos.flat[2:], qvel.flat])
    def reset_model(self):
        # Restart from the initial state plus small uniform noise.
        self.set_state(
            self.init_qpos + self.np_random.uniform(low=-.1, high=.1, size=self.model.nq),
            self.init_qvel + self.np_random.uniform(low=-.1, high=.1, size=self.model.nv)
        )
        return self._get_obs()
73d14292aa1bb06f98ab823c62a9fe35fe0c40f4 | 453 | py | Python | Algorithms/TwoPointers/lcci-17-21-volume-of-histogram-lcci.py | sandychn/LeetCode-Solutions | d0dd55d62b099c5b7db947822ab2111a4ecdc941 | [
"MIT"
] | null | null | null | Algorithms/TwoPointers/lcci-17-21-volume-of-histogram-lcci.py | sandychn/LeetCode-Solutions | d0dd55d62b099c5b7db947822ab2111a4ecdc941 | [
"MIT"
] | null | null | null | Algorithms/TwoPointers/lcci-17-21-volume-of-histogram-lcci.py | sandychn/LeetCode-Solutions | d0dd55d62b099c5b7db947822ab2111a4ecdc941 | [
"MIT"
] | null | null | null | class Solution:
def trap(self, height: List[int]) -> int:
ans = 0
l, r = 0, len(height) - 1
lMax = rMax = 0
while l < r:
lMax = max(lMax, height[l])
rMax = max(rMax, height[r])
if height[l] < height[r]:
ans += lMax - height[l]
l += 1
else:
ans += rMax - height[r]
r -= 1
return ans
| 28.3125 | 46 | 0.375276 |
88ad0e0788f1920caf978bf4c8f83b3e8a3ad06f | 4,263 | py | Python | tomogram_shift_alignment/tomogram_shift_alignment.py | EuanPyle/tomogram_shift_alignment | d4d9c8a990e843c8dab252092aaec659a872c6c2 | [
"MIT"
] | null | null | null | tomogram_shift_alignment/tomogram_shift_alignment.py | EuanPyle/tomogram_shift_alignment | d4d9c8a990e843c8dab252092aaec659a872c6c2 | [
"MIT"
] | null | null | null | tomogram_shift_alignment/tomogram_shift_alignment.py | EuanPyle/tomogram_shift_alignment | d4d9c8a990e843c8dab252092aaec659a872c6c2 | [
"MIT"
] | null | null | null | from pathlib import Path
from thefuzz import process
from .run_corrsearch3d import run_corrsearch3d
from .apply_shifts import apply_shifts
from typing import Optional
import os,sys
import starfile
import typer
import numpy
cli = typer.Typer()
@cli.command()
def tomogram_shift_alignment(
    original_tomograms_dir: Path,
    new_tomograms_dir: Path,
    particles_star: Path,
    tomogram_binning: float,
    tomogram_trimming: Optional[float] = typer.Argument(40),
):
    """tomogram_shift_alignment
    Requirements
    ---------------
    Your tomogram names must match rlnTomoName in the particles star file and must use either the .mrc or .st extensions
    Parameters
    ---------------
    original_tomograms_dir : path to the directory containing the tomograms from which subtomogram averaging has already been done \n
    new_tomograms_dir : path to the directory containing the tomograms which have been generated using a 'new' tilt series alignment methods
    and are therefore shifted compared to the original tomograms \n
    particles_star : path to the star file containing subtomogram particle positions for the tomograms in original_tomograms_dir \n
    tomogram_binning : binning level (IMOD convention) of your tomograms so particle shifts can be written in unbinned coordinates for
    RELION 4.0 \n
    (Optional, default 40%) tomogram_trimming : number (in percent) to trim the tomograms by before comparing the two. Useful if there is a lot of empty space at the
    top/bottom/sides of a tomogram. Enter 0 is you want to use the whole tomogram, but sometimes this gives errors.
    Returns
    ---------------
    tomogram_coordinates_shifted.star : a star file with adjusted subtomogram coordinates which should match the tomograms in new_tomograms_dir \n
    Example Input
    ---------------
    tomogram_shift_alignment './' '../new_tomos/' './particles.star' 8
    """
    if os.path.exists('./tomogram_coordinates_shifted.star'):
        print('tomogram_coordinates_shifted.star already exists.')
        user = input('Delete this file? (y/n): ')
        # BUG FIX: the original `user == 'y' or 'Y' or ...` was always true
        # (non-empty string literals are truthy), so the file was deleted
        # regardless of the answer. Only delete on an explicit yes.
        if user.strip().lower() in ('y', 'yes'):
            os.remove('tomogram_coordinates_shifted.star')
        else:
            print('Rename or move tomogram_coordinates_shifted.star elsewhere for this program to run')
            sys.exit()
    #Test IMOD is loaded
    imod_test = os.popen('dm2mrc').read()
    if imod_test == '':
        print('Can\'t find dm2mrc, try loading imod outside of this script first. Birkbeck users type: module load imod')
        sys.exit()
    # Collect tomogram basenames, preferring .mrc and falling back to .st.
    original_tomo_list = list(Path(original_tomograms_dir).glob('*.mrc'))
    if original_tomo_list == []:
        original_tomo_list = list(Path(original_tomograms_dir).glob('*.st'))
    for idx in range(len(original_tomo_list)):
        original_tomo_list[idx] = original_tomo_list[idx].name
    new_tomo_list = list(Path(new_tomograms_dir).glob('*.mrc'))
    if new_tomo_list == []:
        new_tomo_list = list(Path(new_tomograms_dir).glob('*.st'))
    for idx in range(len(new_tomo_list)):
        new_tomo_list[idx] = new_tomo_list[idx].name
    #Match tomograms from original to new (fuzzy name matching)
    matched_new_tomograms = {}
    for tomo in original_tomo_list:
        matched_new_tomograms.update({tomo:process.extractOne(tomo,new_tomo_list)[0]})
    if not(os.path.isdir('tomogram_shifts')):
        os.mkdir('tomogram_shifts')
    for tomo in matched_new_tomograms:
        # After the first tomogram, keep accumulating shifts into the
        # star file produced by the previous iteration.
        if os.path.exists('./tomogram_coordinates_shifted.star'):
            particles_star = Path('./tomogram_coordinates_shifted.star')
        run_corrsearch3d((Path(original_tomograms_dir) / tomo),(Path(new_tomograms_dir) / matched_new_tomograms[tomo]),tomogram_trimming)
        apply_shifts((Path(original_tomograms_dir) / tomo),particles_star,tomogram_binning)
    if os.path.exists('./tomogram_coordinates_shifted.star'):
        print('\n\nProduced tomogram_coordinates_shifted.star in this directory. Import the new tomograms into RELION 4, then use tomogram_coordinates_shifted.star as input for RELION 4\'s Import Coordinates function.\n\n')
    else:
        print('No output, error somewhere')
904951bba6c53ee0d7b8085b358f9168f9cabb02 | 3,655 | py | Python | codershq/challenge/migrations/0001_initial.py | Buhannad/CodersHQ | 27b643cfab2094aa1ad1b069085c4e2d165345e9 | [
"MIT"
] | 45 | 2021-02-24T11:10:29.000Z | 2022-03-25T07:14:46.000Z | codershq/challenge/migrations/0001_initial.py | Buhannad/CodersHQ | 27b643cfab2094aa1ad1b069085c4e2d165345e9 | [
"MIT"
] | 96 | 2021-03-01T16:36:37.000Z | 2022-03-26T17:32:26.000Z | codershq/challenge/migrations/0001_initial.py | Buhannad/CodersHQ | 27b643cfab2094aa1ad1b069085c4e2d165345e9 | [
"MIT"
] | 29 | 2021-08-08T18:24:50.000Z | 2022-03-29T12:17:06.000Z | # Generated by Django 3.0.11 on 2021-08-05 04:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial auto-generated schema for the challenge app: Challenge,
    ScoreCategory, Sprint, SprintEnrollment and ChallengeScore models."""
    initial = True
    dependencies = [
        ('users', '__first__'),
    ]
    operations = [
        migrations.CreateModel(
            name='Challenge',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, verbose_name='Name of Challenge')),
                ('description', models.TextField(help_text='Describe the Challenge', max_length=5000, verbose_name='Challenge description')),
                ('logo', models.ImageField(upload_to='challenges/logo/', verbose_name='Challenge Logo')),
                ('github_link', models.TextField(default=None, verbose_name='Challenge github link')),
                ('website', models.TextField(max_length=100, verbose_name='Website link')),
                ('slack_group', models.TextField(default=None, max_length=100, verbose_name='Slack group')),
                ('cloud_provider', models.TextField(default=None, max_length=100, verbose_name='Cloud Provider')),
                ('cloud_provider_url', models.URLField(default=None, verbose_name='Cloud Provider URL')),
                ('cloud_provider_token', models.TextField(default=None, max_length=500, verbose_name='Cloud Provider token')),
            ],
        ),
        migrations.CreateModel(
            name='ScoreCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.TextField(max_length=100, verbose_name='Category of challenge scoring')),
            ],
        ),
        migrations.CreateModel(
            name='Sprint',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('start_date', models.DateField(verbose_name='Sprint start date')),
                ('end_date', models.DateField(verbose_name='Sprint end date')),
                ('title', models.TextField(max_length=100, verbose_name='Sprint title')),
                ('description', models.TextField(max_length=5000, verbose_name='Sprint Description')),
                ('challenge', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='challenge.Challenge')),
            ],
        ),
        migrations.CreateModel(
            name='SprintEnrollment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sprint', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='challenge.Sprint')),
                ('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Team')),
            ],
        ),
        migrations.CreateModel(
            name='ChallengeScore',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('score', models.PositiveIntegerField(verbose_name='Sprint score')),
                ('score_category', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='challenge.ScoreCategory')),
                ('sprints', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='challenge.Sprint')),
                ('team', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='users.Team')),
            ],
        ),
    ]
241452ffb8eaf6befb418a52f586338129ddb423 | 347 | py | Python | airbnb-kaggle/kaggle-airbnb-master/setup.py | raafatzahran/Udacity-DataScience | a27eb164d840fb72fb9ab5f021e43856e60cf243 | [
"MIT"
] | 56 | 2016-02-17T23:29:04.000Z | 2021-10-18T21:37:37.000Z | airbnb-kaggle/kaggle-airbnb-master/setup.py | raafatzahran/Udacity-DataScience | a27eb164d840fb72fb9ab5f021e43856e60cf243 | [
"MIT"
] | 11 | 2016-02-16T21:38:18.000Z | 2016-08-05T14:52:57.000Z | airbnb-kaggle/kaggle-airbnb-master/setup.py | raafatzahran/Udacity-DataScience | a27eb164d840fb72fb9ab5f021e43856e60cf243 | [
"MIT"
] | 34 | 2016-03-04T22:04:10.000Z | 2022-01-04T16:30:10.000Z | """Standard setup file."""
from setuptools import setup, find_packages

# Packaging metadata for the kairbnb project (Airbnb Kaggle competition code).
setup(
    name='kairbnb',
    version='1.0',
    description='Airbnb Kaggle competition code.',
    author='David Gasquez',
    author_email='davidgasquez@gmail.com',
    license='MIT',
    url='https://github.com/davidgasquez/airbnb-kaggle',
    packages=find_packages()
)
| 24.785714 | 56 | 0.691643 |
0ef0299af0be6f4403ddbf6bc9801b26ba188122 | 1,657 | py | Python | scripts/01_deploy_data_types.py | LaMemeBete/nodys-smart-contract | f67b88d98ebf7063b72f46cb2b014d5de96eb56d | [
"MIT",
"Unlicense"
] | null | null | null | scripts/01_deploy_data_types.py | LaMemeBete/nodys-smart-contract | f67b88d98ebf7063b72f46cb2b014d5de96eb56d | [
"MIT",
"Unlicense"
] | null | null | null | scripts/01_deploy_data_types.py | LaMemeBete/nodys-smart-contract | f67b88d98ebf7063b72f46cb2b014d5de96eb56d | [
"MIT",
"Unlicense"
] | null | null | null | #!/usr/bin/python3
import time
from brownie import (
DataTypes,
TransparentUpgradeableProxy,
ProxyAdmin,
config,
network,
Contract,
)
from scripts.helpful_scripts import get_account, encode_function_data
def main():
    """Deploy DataTypes behind a TransparentUpgradeableProxy with a ProxyAdmin.

    Deployment order matters: implementation -> admin -> proxy (which is
    initialized with setDataTypes(10) via the encoded initializer call).
    """
    account = get_account()
    print(config["networks"][network.show_active()])
    print(f"Deploying to {network.show_active()}")
    data_types = DataTypes.deploy(
        {"from": account},
        publish_source=config["networks"][network.show_active()]["verify"],
    )
    # Optional, deploy the ProxyAdmin and use that as the admin contract
    proxy_admin = ProxyAdmin.deploy(
        {"from": account},
        publish_source=config["networks"][network.show_active()]["verify"],
    )
    # If we want an initializer function we can add
    # `initializer=box.store, 1`
    # to simulate the initializer being the `store` function
    # with a `newValue` of 1
    # data_types_encoded_initializer_function = encode_function_data(data_types.setDataTypes)
    data_types_encoded_initializer_function = encode_function_data(
        data_types.setDataTypes, 10
    )
    proxy = TransparentUpgradeableProxy.deploy(
        data_types.address,
        proxy_admin.address,
        data_types_encoded_initializer_function,
        # gas limit removed for an issue not very clear
        # {"from": account, "gas_limit": 100000000000},
        {"from": account},
        publish_source=config["networks"][network.show_active()]["verify"],
    )
    print(f"Proxy deployed to {proxy} ! You can now upgrade it to dataTypesV2!")
    # Wrap the proxy address with the implementation ABI for later calls.
    proxy_data_types = Contract.from_abi("DataTypes", proxy.address, DataTypes.abi)
03e9629c81e3cbe71e138fa6f593bcbfaa37b7d9 | 3,912 | py | Python | align_rudder/run_four_bc.py | ml-jku/align-rudder | 26cf4b62a713e180063cefc2921981484ebb9165 | [
"MIT"
] | 12 | 2020-09-30T08:15:44.000Z | 2021-12-22T03:36:33.000Z | align_rudder/run_four_bc.py | ml-jku/align-rudder | 26cf4b62a713e180063cefc2921981484ebb9165 | [
"MIT"
] | null | null | null | align_rudder/run_four_bc.py | ml-jku/align-rudder | 26cf4b62a713e180063cefc2921981484ebb9165 | [
"MIT"
] | 1 | 2020-12-09T21:33:28.000Z | 2020-12-09T21:33:28.000Z | import os
import pkg_resources
import numpy as np
import random
from ray import tune
import ray
import gym
from align_rudder.learning.q_learning import Qlearning
import shutil
# Hyper-parameter grid for the FourRooms behavioural-cloning experiment;
# ray.tune expands every tune.grid_search entry into separate trials.
config = {
    'env_id': 'align_rudder:FourRooms-v0',  # environment for the experiment
    'exp_name': 'align-rudder-bc',  # name of the experiment
    'gamma': 1.0,  # Discount factor for q learning algorithm
    'total_timesteps': 10000000,
    'max_episodes': 100000,
    'learning_rate': 0.01,
    'epsilon': 0.2,  # exploration constant
    'num_seq_store': 10,  # max sequences to use for alignment or storing
    'num_clusters': 10,  # Number of clusters to use in k-means
    'consensus_thresh': 0.9,  # Threshold for consensus
    'eval': 40,
    'top_n': 12,
    'rudder': False,  # Use rudder or not
    'mode': 'log',
    'stop_criteria': '80opt',
    'enough_seq': 3,  # How many sequences are enough for sequence alignment
    'num_demo_use': tune.grid_search([2, 5, 10, 50, 100]),  # number of demonstrations
    'consensus_type': 'all',  # Select between most common or threshold all sequences: all, most_common
    'cluster_type': 'AP',  # Use default clustering, SpectralClustering, AffinityPropogation: default, SC, AP
    'seed': tune.grid_search([i for i in range(10)]),  # Seed for experiment
    'anneal_eps': 1.0,  # annealing rate for exploration
    'eps_lb': 0.0,  # eps anneal lower bound
    'rr_thresh': 0.005,  # Inverse visitation freq below thresh, set rr to zero
    'log_every': 10,  # log every timesteps
    'normalise_rr_by_max': True,  # normalize rr by maximum reward in rr
    'normalisation_scale': 10,  # scale factor compared to original reward
    'use_succ': False,
    'use_demo': True,
    'demo_path': "demonstrations/four_rooms.npy",
    'update_cluster_every': 500,
    # NOTE(review): the trailing colon in this key looks like a typo
    # ('update_alignment:' vs 'update_alignment') — verify against consumers.
    'update_alignment:': False,
    'max_reward': 1,
    'use_exp_replay': False,
    'memory_len': 30000,
    'init_mean': False
}
def run(config):
    """Trainable entry point for ray.tune: build the environment, seed the
    RNGs, and train a tabular Q-learning agent on one config sample."""
    work_dir = os.getcwd()
    environment = gym.make(config['env_id'])
    demos = pkg_resources.resource_filename("align_rudder", config["demo_path"])

    # Seed numpy first, then python's RNG, for reproducible trials.
    np.random.seed(config['seed'])
    random.seed(config['seed'])

    # Map the flat config dict onto the Qlearning constructor arguments.
    agent_kwargs = dict(
        env=environment,
        eps=config['epsilon'],
        alpha=config['learning_rate'],
        total_timesteps=config['total_timesteps'],
        num_store_seq=config['num_seq_store'],
        rudder=config['rudder'],
        enough_seq=config['enough_seq'],
        num_clusters=config['num_clusters'],
        top_n=config['top_n'],
        consensus_type=config['consensus_type'],
        consensus_thresh=config['consensus_thresh'],
        cluster_type=config['cluster_type'],
        run_path=work_dir,
        anneal_eps=config['anneal_eps'],
        eps_lb=config['eps_lb'],
        rr_thresh=config['rr_thresh'],
        log_every=config['log_every'],
        normalise_rr_by_max=config['normalise_rr_by_max'],
        normalisation_scale=config['normalisation_scale'],
        eval=config['eval'],
        use_succ=config['use_succ'],
        use_demo=config['use_demo'],
        demo_path=demos,
        num_demo_use=config['num_demo_use'],
        max_episodes=config['max_episodes'],
        max_reward=config['max_reward'],
        mode=config['mode'],
        gamma=config['gamma'],
        stop_criteria=config['stop_criteria'],
        seed=config['seed'],
        init_mean=config['init_mean'],
    )
    Qlearning(**agent_kwargs).learn()
if __name__ == "__main__":
    # Remove stale results from a previous run before launching the grid.
    results_dir = os.path.join("results", "four_rooms_bc")
    if os.path.exists(results_dir):
        shutil.rmtree(results_dir)

    ray.init(temp_dir='/tmp/ray-four-bc', log_to_driver=False)
    print("Starting Runs...")
    # One CPU per trial; tune expands the grid_search entries in `config`.
    tune.run(run, config=config, local_dir="results/", name="four_rooms_bc",
             resources_per_trial={'cpu': 1})
    print("Finished!")
| 44.454545 | 119 | 0.652607 |
2168592e2454ca9d4a0a17ad0e7a6578fdc2e548 | 34,952 | py | Python | ubuntu20/projects/libRadtran-2.0.4/src_py/molecular_options.py | AmberCrafter/docker-compose_libRadtran | 0182f991db6a13e0cacb3bf9f43809e6850593e4 | [
"MIT"
] | null | null | null | ubuntu20/projects/libRadtran-2.0.4/src_py/molecular_options.py | AmberCrafter/docker-compose_libRadtran | 0182f991db6a13e0cacb3bf9f43809e6850593e4 | [
"MIT"
] | null | null | null | ubuntu20/projects/libRadtran-2.0.4/src_py/molecular_options.py | AmberCrafter/docker-compose_libRadtran | 0182f991db6a13e0cacb3bf9f43809e6850593e4 | [
"MIT"
] | null | null | null | """--------------------------------------------------------------------
* $Id$
*
* This file is part of libRadtran.
* Copyright (c) 1997-2012 by Arve Kylling, Bernhard Mayer,
* Claudia Emde, Robert Buras
*
* ######### Contact info: http://www.libradtran.org #########
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*--------------------------------------------------------------------"""
from option_definition import *
class setup_molecular_group():
    """Definition of the 'Molecular atmosphere' option group for uvspec.

    __init__ builds one `option` object (from option_definition) per
    uvspec input-file keyword of this group — its tokens, LaTeX
    documentation, GUI widgets, and parent/child relations — and collects
    them in self.options. Iterating an instance yields those options.
    """
    group_name = 'Molecular atmosphere'

    def __init__(self):
        # LaTeX documentation strings for each option, keyed by option name.
        documentation = get_molecular_documentation()

        # 1D atmospheric profile file (or one of the six named standard atmospheres).
        atmosphere_file = option(
            name='atmosphere_file',
            group='atmosphere',
            helpstr='Location of the atmosphere data file.',
            documentation=documentation['atmosphere_file'],
            tokens= addToken(name='Input.atmosphere_filename', datatype=str, valid_range=['subarctic_winter', 'subarctic_summer', 'midlatitude_summer', 'midlatitude_winter', 'tropics', 'US-standard', file]),
            parents=['uvspec'],
            plot = {'plot_type': '2D',
                'optional_args': {'column_names': (
                    "altitude",
                    "pressure",
                    "temperature",
                    "air",
                    "ozone",
                    "oxygen",
                    "water vapour",
                    "CO2",
                    "NO2",)
                }
            },
            mandatory=True,
        )

        # 3D molecular atmosphere (MYSTIC); sets up a "molecular_3d" caoth entry.
        # Hidden from the GUI; experimental/developer option.
        atmosphere_file_3D = option(
            name='atmosphere_file_3D',
            group='atmosphere',
            helpstr='Location of the 3D atmosphere data file.',
            documentation=documentation['atmosphere_file_3D'],
            tokens= [addToken(name='Input.atmosphere3d_filename', datatype=str),
                     addSetting(name='Input.atmosphere3d', setting='True'),
                     addSetting(name='isp', setting='get_caoth_index(&Input.caoth,&Input.n_caoth,"molecular_3d",0)'),
                     addSetting(name='Input.caoth[isp].source', setting='CAOTH_FROM_3D'),
                     addSetting(name='Input.caoth[isp].filename', setting='"profile_mol3d_dummy.dat"'),
                     addSetting(name='Input.caoth[isp].properties', setting='PROP_HU')],
            parents=['uvspec'],
            showInGui = False,
            developer = False,
            threedmystic =True
        )

        # Radiosonde profile input; not yet converted to the lex2py token scheme.
        radiosonde = not_yet_lex2py_option(
            name='radiosonde',
            group='atmosphere',
            helpstr='Specify density profiles.',
            documentation=documentation['radiosonde'],
            gui_inputs=(TextInput(name=''),),
            parents=['uvspec'],
        )

        # Restrict the model atmosphere to the levels present in the radiosonde file.
        radiosonde_levels_only = option(
            name='radiosonde_levels_only',
            group='atmosphere',
            helpstr='',
            documentation=documentation['radiosonde_levels_only'],
            tokens=addSetting(name='Input.atm.rs_add_upper_levels',setting='FALSE'),
            parents=['uvspec'],
        )

        # Per-species trace-gas profile file with an optional density unit;
        # may appear once per species (non_unique).
        mol_file = option(
            name='mol_file',
            group='molecular',
            documentation=documentation['mol_file'],
            gui_inputs=(ListInput(name='mol_id',
                                  valid_range=['O3', 'O2', 'H2O',
                                               'CO2', 'NO2', 'BRO',
                                               'OCLO', 'HCHO', 'O4',
                                               'SO2', 'CH4', 'N2O',
                                               'CO', 'N2'],
                                  optional=False),
                        FileInput(name='Input.atm.filename[mol_id]'),
                        ListInput(name='Input.atm.unit_profile[mol_id]', valid_range=['','cm_3', 'm_3', 'MMR', 'VMR', 'RH'], optional=True),),
            tokens = [ addLogical( name='mol_id', logicals=['O3', 'O2', 'H2O', 'CO2', 'NO2', 'BRO', 'OCLO', 'HCHO', 'O4', 'SO2', 'CH4', 'N2O', 'CO', 'N2'], setting='MOL_' ) ,
                       addToken( name='Input.atm.filename[mol_id]', datatype=file ),
                       addLogical(name='Input.atm.unit_profile[mol_id]', logicals=['cm_3', 'm_3', 'MMR', 'VMR', 'RH'], optional=True) ],
            parents=['uvspec'],
            non_unique=True,
        )

        # Surface pressure in hPa at the user-defined altitude.
        pressure = option(
            name='pressure',
            group='atmosphere',
            helpstr='Surface pressure',
            documentation=documentation['pressure'],
            gui_inputs=(FloatInput(name='Input.pressure', default='NOT_DEFINED_FLOAT', valid_range=[0, 1000000.0]),),
            tokens=addToken(name='Input.pressure', datatype=float, default='NOT_DEFINED_FLOAT', valid_range=[0,1e6]),
            parents=['uvspec'],
        )

        # File with the refractive index profile.
        refractive_index_file = option(
            name='refractive_index_file',
            group='atmosphere',
            helpstr='',
            documentation=documentation['refractive_index_file'],
            gui_inputs=(TextInput(name='Input.filename[FN_REFIND]'),),
            tokens=addToken(name='Input.filename[FN_REFIND]', datatype=str),
            parents=['uvspec'],
        )

        # Select the cross-section model per species (Rayleigh, O3, NO2, O4);
        # one entry per species allowed (non_unique).
        crs_model = option(
            name='crs_model',
            group='molecular',
            helpstr='Specify cross section.',
            documentation=documentation['crs_model'],
            gui_inputs=(ListInput(name='mol_id', valid_range=['no2', 'o3', 'o4', 'rayleigh'], optional=False), ListInput(name='crs_model', valid_range=['Bass_and_Paur', 'Molina', 'Daumont', 'Serdyuchenko', 'Bogumil', 'Bodhaine', 'Bodhaine29', 'Nicolet', 'Penndorf', 'Burrows', 'Vandaele', 'Greenblatt', 'Thalman'], optional=False),),
            tokens= [ addLogical( name='mol_id', logicals=['no2', 'o3', 'o4', 'rayleigh'], setting='CRS_MOL_' ),
                      addLogical( name='Input.crs_model[mol_id]', logicals=[ 'Bodhaine', 'Nicolet', 'Penndorf', 'Bodhaine29', 'Bass_and_Paur', 'Molina', 'Daumont', 'Serdyuchenko', 'Bogumil', 'Burrows', 'Vandaele', 'Greenblatt', 'Thalman'], setting='CRS_MODEL_') ],
            parents=['uvspec'],
            non_unique=True,
        )

        # User-supplied cross-section file per species (replaces built-in data).
        crs_file = option(
            name='crs_file',
            group='molecular',
            documentation=documentation['crs_file'],
            gui_inputs=(ListInput(name='mol_id', valid_range=['O3', 'O2', 'H2O', 'CO2', 'NO2', 'BRO', 'OCLO', 'HCHO', 'O4', 'SO2', 'CH4', 'N2O', 'CO', 'N2'], optional=False), FileInput(name='Output.crs.filename[mol_id]'),),
            tokens = [ addLogical( name='mol_id', logicals=['O3', 'O2', 'H2O', 'CO2', 'NO2', 'BRO', 'OCLO', 'HCHO', 'O4', 'SO2', 'CH4', 'N2O', 'CO', 'N2'], setting='MOL_' ) ,
                       addToken( name='Output.crs.filename[mol_id]', datatype=file ) ],
            parents=['uvspec'],
            non_unique=True,
        )

        # Rayleigh depolarization factor (overrides the Bodhaine default).
        rayleigh_depol = option(
            name='rayleigh_depol',
            group='atmosphere',
            helpstr='Rayleigh depolarization factor.',
            documentation=documentation['rayleigh_depol'],
            gui_inputs=(FloatInput(name='Input.rayleigh_depol'),),
            tokens=addToken(name='Input.rayleigh_depol', datatype=float, default='NOT_DEFINED_FLOAT'),
            parents=['uvspec'],
        )

        # Molecular absorption parameterization (band scheme) selection, with an
        # optional extra argument (e.g. reptran resolution / channel name).
        mol_abs_param = option(
            name='mol_abs_param',
            group='spectral',
            helpstr='Set correlated_k scheme. ',
            documentation=documentation['mol_abs_param'],
            tokens = [ addLogical( name='Input.ck_scheme', logicals=['kato', 'kato2', 'kato2.96','kato2andwandji','fu','avhrr_kratz','sbdart','lowtran','reptran','reptran_channel','crs',file], setting='CK_'),
                       addToken(name='Input.ck_reptran_arg', datatype=str, optional=True)],
            parents=['uvspec'],
            childs= ['ck_lowtran_absorption','ck_fu_h2o_continuum'],
            continious_update=True,
        )

        # Custom representative-wavelengths file for reptran; GUI-hidden.
        reptran_file = option(
            name='reptran_file',
            group='spectral',
            helpstr='File containing representative wavelengths.',
            documentation=documentation['reptran_file'],
            tokens=addToken(name='Input.filename[FN_REPTRAN]', datatype=file),
            parents=['uvspec'],
            showInGui = False,
        )

        # Toggle absorption of individual minor trace gases; only enabled in the
        # GUI when mol_abs_param is 'lowtran'. One entry per species (non_unique).
        ck_lowtran_absorption = option(
            name='ck_lowtran_absorption',
            group='molecular',
            helpstr='Switch off absorption by individual minor trace gases.',
            documentation=documentation['ck_lowtran_absorption'],
            gui_inputs=(ListInput(name='Input.absorption_gas', valid_range=['O4', 'N2', 'CO', 'SO2', 'NH3', 'NO', 'HNO3']),
                        ListInput(name='On/Off',valid_range=['on','off'], default='On'),),
            tokens = [ addLogical(name='mol_id', logicals=['O4','N2','CO','SO2','NH3','NO','HNO3'], setting='CK_ABS_' ) ,
                       addLogical(name='Input.ck_abs[mol_id]', logicals=['on','off'], setting='SWITCH_') ],
            parents=['mol_abs_param'],
            speaker="mol_abs_param",
            enable_values=("lowtran",),
            non_unique=True,
        )

        # Undocumented developer switch for the H2O continuum version.
        # NOTE(review): parent is 'uvspec' although mol_abs_param lists this
        # option as a child — confirm whether 'mol_abs_param' was intended.
        ck_fu_h2o_continuum = option(
            name='ck_fu_h2o_continuum',
            group='molecular',
            helpstr='', #TODO
            documentation=documentation['ck_fu_h2o_continuum'],
            tokens=addLogical(name='Input.ck_h2ocont',logicals=['on','off','v2.1','v2.4'],setting='CK_H2OCONT_'),
            parents=['uvspec'],
            speaker='mol_abs_param',
            enable_values=('fu',), #TODO: is that correct?
            developer=True,
        )

        # Directly specify molecular scattering ('sca') or absorption ('abs')
        # optical depth from a file; one entry per kind (non_unique).
        mol_tau_file = option(
            name='mol_tau_file',
            group='molecular',
            helpstr='Location of Molecular optical depth file.',
            documentation=documentation['mol_tau_file'],
            gui_inputs=(ListInput(name='id', valid_range=['sca', 'abs'], optional=False), FileInput(name='Input.filename[id]'),),
            tokens = [ addLogical(name='id', logicals=[ 'sca','abs' ], setting='FN_MOL_TAU_'),
                       addToken(name='Input.filename[id]', datatype=file) ],
            parents=['uvspec'],
            non_unique=True,
        )

        # Scale a species' profile to a given total column (DU, cm^-2 or mm).
        mol_modify = option(
            name='mol_modify',
            group='molecular',
            helpstr='Modify column of molecular specie',
            documentation=documentation['mol_modify'],
            gui_inputs = ( ListInput(name='moltype', valid_range=[ 'O3','O2','H2O','CO2','NO2','BRO','OCLO','HCHO','O4','SO2','CH4','N2O','CO','N2' ], optional=False),
                           FloatInput(name='value', valid_range=[0, 1000000.0]),
                           ListInput(name='unit', valid_range=[ 'DU', 'CM_2', 'MM' ], optional=False)),
            tokens = [ addLogical(name='id', logicals=[ 'O3','O2','H2O','CO2','NO2','BRO','OCLO','HCHO','O4','SO2','CH4','N2O','CO','N2' ], setting='MOL_'),
                       addToken(name='Input.atm.column[id]', datatype=float),
                       addLogical( name='Input.atm.unit_column[id]', logicals=[ 'DU', 'CM_2', 'MM' ], setting='MOL_UNIT_') ],
            parents=['uvspec'],
            non_unique=True,
        )

        # Set a species' mixing ratio in ppm; one entry per species (non_unique).
        mixing_ratio = option(
            name='mixing_ratio',
            group='molecular',
            helpstr='Mixing ratio of molecular specie',
            documentation=documentation['mixing_ratio'],
            gui_inputs = ( ListInput(name='moltype', valid_range=[ 'O2','H2O','CO2','NO2','CH4','N2O','F11','F12','F22' ], optional=False),
                           FloatInput(name='value', valid_range=[0, 1000000.0])),
            tokens = [ addLogical(name='id', logicals=[ 'O2','H2O','CO2','NO2','CH4','N2O','F11','F12','F22' ], setting='MX_'),
                       addToken(name='Input.mixing_ratio[id]', datatype=float, valid_range=[0,1e6]) ],
            parents=['uvspec'],
            non_unique=True,
        )

        # All options of this group, in the order they are presented.
        self.options = [atmosphere_file, atmosphere_file_3D,
                        radiosonde, radiosonde_levels_only,
                        mol_file, mixing_ratio, mol_modify,
                        pressure,
                        refractive_index_file,
                        crs_model,
                        crs_file,
                        rayleigh_depol,
                        mol_abs_param,
                        ck_lowtran_absorption,
                        ck_fu_h2o_continuum,
                        mol_tau_file,
                        reptran_file,
                        ]

    def __iter__(self):
        # Allow iterating the group directly over its option objects.
        return iter(self.options)
def get_molecular_documentation():
return {
'ck_lowtran_absorption' : r'''
Switch off absorption by individual minor trace gases which are currently only
included when \code{mol\_abs\_param lowtran} is chosen. The syntax is
\fcode{
ck\_lowtran\_absorption species on/off
}
where species may be one of O4, N2, CO, SO2, NH3, NO, HNO3. By default all
are switched on.
This option may also be used to turn on/off absorption by O4 in spectral
resolution. It is on by default.
''',
'atmosphere_file' : r'''
Location of the atmospheric data file.
\fcode{
atmosphere\_file file
}
The file must have at least three columns containing the altitude,
pressure, and temperature. Missing profiles are filled with 0 (e.g., if you did not specify
the ozone profile, there will be no ozone absorption!), with exception of the air density which
is calculated from pressure and temperature. Other trace gases may be set by \code{mol\_file}.
The columns are interpreted as follows:
\begin{description}
\item[1] Altitude above sea level in km
\item[2] Pressure in hPa
\item[3] Temperature in K
\item[4] air density in cm$^{-3}$
\item[5] Ozone density in cm$^{-3}$
\item[6] Oxygen density in cm$^{-3}$
\item[7] Water vapour density in cm$^{-3}$
\item[8] CO2 density in cm$^{-3}$
\item[9] NO2 density in cm$^{-3}$
\end{description}
The atmosphere is specified
top-down, that is, the top level is the first line in the file, the bottom
(surface) level the last line. All properties refer to model \emph{level} z,
not to model \emph{layer}. It is important that the correct units are
used, otherwise unpredictable results are guaranteed.
Comments start with \code{\#}. Empty lines are ignored. Please note that there
is some redundancy: For air as an ideal gas the density $\rho$, can be
calculated from pressure and temperature, $\rho = p / kT$. \code{uvspec} will check
if this relation is fulfilled and will stop if it is not.
{\sl libRadtran} provides the six standard atmospheres by \citet{Anderson1986}:
\begin{description}
\item[afglt] Tropical (\code{tropics})
\item[afglms] Midlatitude Summer (\code{midlatitude\_summer})
\item[afglmw] Midlatitude Winter (\code{midlatitude\_winter})
\item[afglss] Subarctic Summer (\code{subarctic\_summer})
\item[afglsw] Subarctic Winter (\code{subarctic\_winter})
\item[afglus] U.S. Standard (\code{US-standard})
\end{description}
which may be chosen by for example
\fcode{
atmosphere\_file tropics
}
or by specifying the full file name. These atmosphere files are found in
\file{data/atmmod}.
If no \code{atmosphere\_file} is defined, {\sl uvspec} will automatically select one.
If the information \code{time}, \code{latitude} and \code{longitude} are provided in
the input file {\sl uvspec} will choose from the first 5 files, otherwise it takes
the U.S. Standard atmosphere.
''',
'atmosphere_file_3D' : r'''
Specify filename for a 3D molecular atmosphere. The file includes 3D fields of pressure, temperature and water vapor. Other species are not yet implemented in 3D and are specified as 1D altitude profiles using the option \code{atmosphere\_file}. The format of the 3D atmosphere file is as follows:
\fcode{Nx Ny Nz flag \\
dx dy z(1) z(2) z(3) ... z(n) \\
ix iy iz p T H2O \\
...
}
where \code{Nx}, \code{Ny} and \code{Nz} are the number of grid boxes in
\code{x}, \code{y}, and \code{z}-direction.
The parameter \code{flag} is not yet used.
In the second line \code{dx} and \code{dy} are the sizes of the boxes in x-
and y-direction in km. In the third and following lines the indices \code{ix}, \code{iy}, and \code{iz} specify atmosphere pixels. \code{p} is the pressure in hPa, \code{T} the temperature in K and \code{H2O} the water vapor concentration in kg/kg.
See also the examples \code{examples/UVSPEC_MC_ABS3D.INP} and \code{examples/UVSPEC_MC_ABS3D_THERMAL.INP} which use the 3D atmosphere file \code{examples/UVSPEC_MC_ABS3D_AFGLUS3D.INP} as input. This atmosphere file includes the same atmospheric profile (US standard) in 3$\times$2 pixels and the examples check for solar and thermal radiation, whether the 3D input yields the same results as 1D calculations.
Currently the implementation has some restrictions:
\begin{itemize}
\item the altitude profiles of the 1D atmosphere file, the 3D atmosphere file and also other 3D profile files (including \code{wc_file 3D} and \code{ic_file 3D}) must include the same vertical grids
\item the conversion of heating rates to K/d is only approximate, because it uses pressure and temperature of the first pixel, rather than the 3D fields
\item ... may be more?
\end{itemize}
{\sl \code{atmosphere_file_3D} is an experimental option! Please check your results carefully and contact Claudia Emde in case you find any bugs or inconsistencies.}
''',
'radiosonde' : r'''
This option allows to change the temperature and pressure profile, and optionally to
specify one or more density profiles. The entry in the input file looks like this:
\fcode{
radiosonde filename [gas\_species] [unit] ...
}
Currently the following gas\_species are included: ozone (O3), nitrogen dioxide (NO2),
water vapor (H2O), bromine oxide (BRO), chlorine dioxide (OCLO), formaldehyde (HCHO),
carbon dioxide (CO2), sulphur dioxide (SO2), and the oxygen dimer (O4).
Each gas species is identified by its abbrevations given in parentheses above.
Unit is an optional argument to defines the unit of the density. The profiles can
be given in particles per cm$^3$ (CM-3), in particles per m$^3$ (M-3), as volume
mixing ratio (VMR), as mass mixing ratio in kg/kg (MMR), or as relative humidity
(RH) (only for water). The default unit is RH for water vapour,
MMR for ozone, and CM3 for all other gases.
The radiosonde file must have (2 + number of gases) columns:
\begin{description}
\item[1] pressure in hPa
\item[2] temperature in Kelvin
\item[3, 4, ...] density of trace gas in the specified unit
\end{description}
A new z-grid will be calculated, starting at \code{altitude} and assuming a linear temperature variation
between levels. The air density will be recalculated according to the ideal gas law, and the density of
the well mixed gases O2 and CO2 will be scaled accordingly.
The atmospheric data above the radiosonde data is taken from the \code{atmosphere\_file} level by level, starting
at the first pressure level above the radiosonde data. The z-grid of the \code{atmosphere\_file} in
this height region is shifted accordingly.
Also if the density in the radiosonde file is specified as -1 at a level,
the value from the \code{atmosphere\_file} is used.
Possible calls are
\fcode{
radiosonde ../examples/radiosonde.dat
}
just in order to change the temperature and pressure profile, or
\fcode{
radiosonde ../examples/radiosonde2.dat H2O RH O3 MMR NO2
}
where water vapour density will be given as relative humidity, ozone as mass mixing ratio,
and NO2 in cm$^{-3}$ (default).
''',
'radiosonde_levels_only' : r'''
The atmosphere considered in the simulation has the same height range as the data in
the \code{radiosonde}-file. No further levels are added above those.
This option has only an effect in combination with \code{radiosonde}.
''',
'mol_file' : r'''
Specify density profiles (or matrix, see below) of various trace gases to be included in the radiative
transfer calculation.
\fcode{
mol\_file gas\_species filename [unit]
}
At the moment following \code{gas\_species} are included: ozone (O3), nitrogen dioxide (NO2), water vapor (H2O),
bromine oxide (BRO), chlorine dioxide (OCLO), formaldehyde (HCHO), carbon dioxide (CO2),
sulphur dioxide (SO2), oxygen (O2), the oxygen dimer (O4), methane (CH4), nitrous oxide (N20),
carbon monoxide (CO), and nitrogen (N2).
The gas species is identified
by their abbrevations given in the parenthesis above.
The model expects a density file with two columns:
\begin{description}
\item[1] Altitude above sea level in km.
\item[2] The density of trace gas [in the specified unit]
\end{description}
The altitude grid may be different from that in \code{atmosphere\_file}. All densities inside the range
of the \code{mol\_file} are replaced. For all other altitudes the values from the
\code{atmosphere\_file} are used. If the density is specified as -1 at a level,
the value from \code{atmosphere\_file} is used.
Altitude ranges not covered by the \code{atmosphere\_file} are ignored.
\code{unit} is an optional argument to define the unit of the density. The profiles can
be given in particles per cm$^{3}$ (\code{cm\_3}), in particles per m$^{3}$ (\code{m\_3}), as volume mixing ratio (\code{vmr}), as mass mixing
ratio (\code{mmr}), or as relative humidity (\code{rh}) (only for water). The default for \code{unit} is cm$^{-3}$.
To scale the profile to a total column value use \code{mol\_modify}.
For airmass factor calculations it is for some species necessary to account for the
variation of the profile with sza. This may be accomplished by specifying a \code{mol\_file}
in the following format:
\fcode{
0.0 SZA1 SZA2 ...\\
z(1) dens(1,1) ...\\
z(2) . .\\
. . .
}
where z(i) are the altitude levels above sea level in km, SZA is the solar zenith
angle in degrees, and dens is the density [in the specified unit] of the trace gases as
function of solar zenith angle and altitude.
The matrix may only be specified for one species. It may however be combined with profiles
of other species.
A density matrix can only be used in connection with \code{rte\_solver sdisort}!
''',
'pressure' : r'''
The surface pressure (at the user-defined \code{altitude}) in hPa.
\fcode{
pressure value
}
The pressure profile as well as air, O2 and CO2 density profiles
are scaled accordingly.
''',
'refractive_index_file' : r'',
'crs_model' : r'''
Choose between various cross sections.
\fcode{
crs\_model species crs
}
Following \code{species} are included:
\begin{description}
\parameter{rayleigh} Specify the Rayleigh cross section.
Choose between the following Rayleigh scattering cross sections (\code{crs}):
\begin{description}
\item[Bodhaine] \citet{Bodhaine1999} Rayleigh scattering cross section using their Eqs. 22-23.
\item[Bodhaine29] \citet{Bodhaine1999} Rayleigh scattering cross section using their Eq. 29.
\item[Nicolet] \citet{Nicolet1984} Rayleigh scattering cross section.
\item[Penndorf] \citet{Penndorf1957} Rayleigh scattering cross section.
\end{description}
\citet{Bodhaine1999} is default.
\parameter{o3} Choose ozone cross section.
\code{crs} can be one of the following:
\begin{description}
\item[Bass\_and\_Paur] \citet{Bass1985} ozone cross section.
\item[Molina] \citet{Molina1986} ozone cross section.
\item[Daumont] Ozone cross section by \citet{Daumont1992}, \citet{Malicet1995}.
\item[Bogumil] Ozone cross section from \citet{Bogumil2003}.
\item[Serdyuchenko] Ozone cross section from Serdyuchenko.
\end{description}
\citet{Molina1986} is default.
\parameter{no2} Choose between the various NO2 cross sections.
\code{crs} is one of:
\begin{description}
\item[Burrows] \citet{Burrows1998} NO2 cross section.
\item[Bogumil] NO2 cross section from \citet{Bogumil2003}.
\item[Vandaele] NO2 cross section from Vandaele et al.
\end{description}
\citet{Burrows1998} is default.
\parameter{o4} Choose between the various O4 cross sections.
\code{crs} is one of:
\begin{description}
\item[Greenblatt] O4 cross section by \citet{greenblatt1990}.
\item[Thalman] O4 cross section by \citet{thalman2013}.
\end{description}
\end{description}
''',
'crs_file' : r'''
May be used to specify cross sections of O3, O2, H2O, CO2, NO2, BRO, OCLO, HCHO,
O4, SO2, CH4, N2O, CO, or N2 to be used instead of those supplied with
{\sl libRadtran}. No temperature dependence may be specified. Use as follows:
\fcode{
crs\_file NO2 ../examples/no2\_crs.dat
}
The species, e.g. \code{NO2}, must be specified to identify the
species for which the cross section applies.
The cross section file has two columns:
\begin{description}
\item[1] wavelength (nm)
\item[2] cross section (cm$^2$)
\end{description}
''',
'rh_file' : r'''
File that defines a profile of relative humidity.
\fcode{
rh\_file file
}
If specified, the water vapour
profile in \code{atmosphere\_file} is over-written. If -1 is specified at a level, the value
from \code{atmosphere\_file} is used.
''',
'ck_fu_h2o_continuum' : r'''
Undocumented option to switch the H2O continuum on or off or select a specific
version of the continuum.
''',
'mixing_ratio' : r'''
Mixing ratio in ppm.
\fcode{
mixing\_ratio species value
}
\code{species} can be one of the following:
\begin{description}
\item[O2] The mixing ratio of O2 in ppm. Scale the profile so that the mixing
ratio at the user-defined \code{altitude} assumes the specified value.
\item[H2O] The mixing ratio of H2O in ppm. Scale the profile so that the mixing
ratio at the user-define \code{altitude} assumes the specified value.
\item[CO2] The mixing ratio of CO2 in ppm. Scale the profile so that the mixing
ratio at the user-defined \code{altitude} assumes the specified value.
\item[NO2] The mixing ratio of NO2 in ppm. Scale the profile so that the mixing
ratio at the user-defined \code{altitude} assumes the specified value.
\item[CH4] The mixing ratio of CH4 in ppm (default: 1.6 ppm).
\item[N2O] The mixing ratio of N2O in ppm (default: 0.28 ppm).
\item[F11] The mixing ratio of F11 in ppm (default: 0.000268 ppm).
\item[F12] The mixing ratio of F12 in ppm (default: 0.000503 ppm).
\item[F22] The mixing ratio of F22 in ppm (default: 0.000105 ppm).
\end{description}
The \code{mixing_ratio} of F11, F12, and F22 and the default values for CH4 and N2O are ignored in case of \code{mol_abs_param reptran}.
''',
'mol_modify' : r'''
Set the total column of a density profile. The column is integrated between the
user-defined \code{altitude} and TOA (top of atmosphere). The syntax is
\fcode{
mol\_modify species column unit
}
where \code{species} is one of O3, O2, H2O, CO2, NO2, BRO, OCLO, HCHO, O4, SO2,
CH4, N2O, CO, or N2, see also \code{mol\_file}.
The second argument is the total column value, and the third argument is the unit,
in which the column is given. The unit can be DU (Dobson units), CM\_2 (molecules/cm$^2$) or MM.
Please note that the unit MM is only valid for species H2O and specifies the precipitable water
in kg / m2 (which is approximately 1mm).The water vapor profile is scaled accordingly. The precipitable water
is integrated from the user-defined \code{altitude} to TOA (top of atmosphere).
The default units are DU for O3, and CM\_2 for all other gases. It is possible to have
several \code{mol\_modify} commands in the input file (maximum one per species). The following sets
the NO$_2$ total column to 1.2 DU.
\fcode{
mol\_modify NO2 1.2 DU
}
''',
'rayleigh_depol' : r'''
Rayleigh depolarization factor.
\fcode{
rayleigh\_depol value
}
The Rayleigh scattering phase function is
$p(\mu) = a + b \mu^2$ where $a = 1.5{(1+\texttt{depol})/(2+\texttt{depol})}$ and
$b = 1.5{(1-\texttt{depol})/(2+\texttt{depol})}$. By default the depolarization is calculated
using the expressions from \citet{Bodhaine1999}.
''',
'mol_abs_param' : r'''
To calculate integrated shortwave or longwave irradiance, or to simulate
satellite instrument channels, use
\fcode{
mol\_abs\_param type
}
to choose between the following types of schemes:
\begin{description}
\item[reptran]
Representative wavelengths parameterization adapted for spectral bands.
This parameterization is used by default if no \code{mol\_abs\_param} option is given
in the {\sl uvspec} input file.
Different band widths may be selected by
\fcode{
mol\_abs\_param reptran [fine|medium|coarse]
}
(fine: 1cm$^{-1}$; medium: 5cm$^{-1}$; coarse: 15cm$^{-1}$; coarse is default).
The data files for coarse resolution are included in the libRadtran package.
The files required for fine and medium resolution can be downloaded from the libRadtran homepage.
Absorption data is mainly based on HITRAN 2004. Absorption by H2O, CO2, O3, N2O, CO, CH4, O2, N2, and NO2
is considered, and absorption by all other gases is zero.
By default volume mixing ratios of N2O, CO, CH4, and N2 (those are not in the
\code{atmosphere\_file}) from the US standard atmosphere are applied.
Use \code{mol\_file} or \code{mol\_modify} to change the gas profiles.
In case of radiative transfer problems with solar source, the extraterrestrial spectrum from
Kurudz is applied by default. This parameterization is described in detail by \citet{gasteiger2014}.
\item[reptran\_channel]
Representative wavelengths parameterization for satellite channels. Usage
\fcode{
mol\_abs\_param reptran\_channel channel\_name
}
Channel-integrated quantities are obtained using \code{output\_process per\_band}.
The file \file{data/correlated\_k/reptran/channel\_list.txt} provides a list of available channels;
more information on the channels is provided in \file{data/filter/}.
See \citet{gasteiger2014} for details about the approach.
\item[crs]
Switch off spectral parameterizations. Only molecular absorption cross sections from
\code{crs_file} (including the default ones) are considered.
\item[kato]
\citet{Kato1999b} correlated-k distribution, shortwave; based on HITRAN 96. Please note that the
bands above 2.5 micrometer are not very reliable which, however, this has only little impact
on integrated shortwave radiation.
\item[kato2]
\citet{Kato1999b}, shortwave; optimized version (Seiji Kato, personal communication, 2003);
please note that \code{kato2} only has 148 subbands (that is, calls to the \code{rte\_solver})
compared to 575 for \code{kato} which translates to a increase in computational speed by
up to a factor of 4 with only little increase in uncertainty. The absorption data are
based on HITRAN 2000. Please note that the bands above 2.5 micrometer are not very reliable which,
however, this has only little impact on integrated shortwave radiation.
\item[kato2andwandji]
Similar to \code{kato2}, but the UV bands \#3 and \#4 use the improved parameterization
by \citet{WandjiNyamsi2015}.
\item[kato2.96]
\citet{Kato1999b}, shortwave; optimized version (Seiji Kato, personal communication, 2003);
similar to \code{kato2} but based on HITRAN96. Please note that the bands above 2.5 micrometer
are not very reliable which, however, has only little impact on integrated shortwave radiation.
\item[fu]
\citet{fu92,fu93}, shortwave and longwave; fast parameterization, developed for climate models.
\item[avhrr\_kratz]
\citet{Kratz1995}, AVHRR instrument channels
\item[lowtran]
Gas absorption parameterization from LOWTRAN; code adopted from SBDART \citep{Ricchiazzi1998b};
please see the section on "Spectral resolution".
\item[sbdart]
Identical to LOWTRAN.
\end{description}
If \code{mol\_abs\_param} kato/kato2/kato2.96/fu/avhrr\_kratz is specified, the extraterrestrial
flux is taken from
internally defined files specific for each parameterization, not
from \code{source solar file}. The output is the integrated irradiance for
each band. To get e.g. integrated shortwave irradiance, simply add all
bands of the \citet{Kato1999b} or the \citet{fu92,fu93}
parameterization. The five AVHRR channels are weighted sums of the
libRadtran output. Examples of how to integrate the output in the
\code{avhrr\_kratz} case are included in the {\sl uvspec} self check
which is initiated with
\code{make check}.
''',
'reptran_file' : r'''
Location of the representative wavelengths file.
\fcode{
reptran\_file file
}
This option is useful together with 'mol\_abs\_param reptran' and 'mol\_abs\_param reptran\_channel'
options, if you want to use your own representative wavelengths parameterization.
''',
'mol_tau_file' : r'''
Location of molecular scattering or absorption optical depth file.
\fcode{
mol\_tau\_file sca/abs filename
}
\begin{description}
\parameter{sca} Usually, the Rayleigh scattering
cross section is calculated from the air pressure provided in \code{atmosphere\_file}
(scaled with \code{pressure}). Use this parameter only if you really want to specify
the optical depth directly (e.g. for a model intercomparison). The
optical thickness profile may be either monochromatic or spectral.
\parameter{abs} Usually, molecular absorption
is calculated from trace gas concentrations provided in \code{atmosphere\_file}
(scaled with \code{mol\_modify O3}, etc.). Use this option only if you want to specify
the optical depth directly (e.g. for a model intercomparison) or for a line-by-line
calculation. If a spectral \code{mol\_tau\_file} is specified, the wavelength
grid defined there is used as the internal wavelength grid for the radiative transfer
calculation, if not defined otherwise with \code{wavelength\_grid\_file}.
\end{description}
The file can be either of the following three formats:
\begin{description}
\parameter{Monochromatic}
Column 1 is the altitude in km
%AK: This is probably old stuff prior to redistribute
%where the altitude grid must be exactly equal
%to the altitude grid specified in \code{atmosphere\_file}.
Column 2 is the absorption optical depth of each layer.
\parameter{Spectral, ASCII}
The first line contains the level altitudes in decreasing order; the following lines
contain the wavelength [nm] in the first column and then the absorption optical depths
of each layer.
\parameter{Spectral, netcdf}
An example is available at the libRadtran homepage,
the file \file{UVSPEC.O2A.afglms.cdf} is a line-by-line spectrum of the oxygen A-Band
around 760nm, calculated for the mid-latitude summer
atmosphere. The advantage of
netcdf compared to ASCII is that it is much faster to read, and that the file
is self-contained, including data and a description of the variables and arrays.
It is therefore particularly useful for line-by-line calculations where usually
many spectral data points are involved.
%netcdf is a common platform independent format; the description, a library to read and
%write netcdf including some tools to generate netcdf is available at
%http://www.unidata.ucar.edu/packages/netcdf/. A \code{mol\_tau\_file abs} must obey
%certain rules;
\end{description}
Comments start with \code{\#}. Empty lines are ignored.
''',
}
| 46.789826 | 416 | 0.689288 |
dea649b7209e4155083581caa5ba75cefc3ad96f | 4,504 | py | Python | scripts/github.clone_repo_teams.py | uniqueg/git-management | b672afa7063fc50b58e492683a12c3552514eddd | [
"Apache-2.0"
] | null | null | null | scripts/github.clone_repo_teams.py | uniqueg/git-management | b672afa7063fc50b58e492683a12c3552514eddd | [
"Apache-2.0"
] | null | null | null | scripts/github.clone_repo_teams.py | uniqueg/git-management | b672afa7063fc50b58e492683a12c3552514eddd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""Sets teams associated with a source repository for a destination
repository.
A personal GitHub access token with write access to source and destination
repositories is required to be defined in environment variable 'GITHUB_TOKEN'.
Source and destination repositories must be associated with an organization,
and the organization must be the same for both repositories. Note that all
team permissions are set to their default value.
"""
__version__ = '0.1.0'
import argparse
import logging
import os
import sys
from github import Github
from github.GithubException import (GithubException, UnknownObjectException)
logger = logging.getLogger()
def parse_args():
    """Build the CLI parser and return the parsed command-line arguments."""
    cli = argparse.ArgumentParser(
        description=sys.modules[__name__].__doc__,
    )
    # Positional arguments: organization plus source/destination repo names.
    cli.add_argument(
        'org',
        type=str,
        action='store',
        metavar='STR',
        help=(
            "organization under which the source and destination "
            "repositories are hosted"
        ),
    )
    cli.add_argument(
        'source',
        type=str,
        action='store',
        metavar='SOURCE',
        help="name of the source repository",
    )
    cli.add_argument(
        'dest',
        type=str,
        action='store',
        metavar='DEST',
        help="name of the destination repository",
    )
    # Optional flags controlling log verbosity and version output.
    cli.add_argument(
        '--verbose', '-v',
        action='store_true',
        default=False,
        help="print logging messages to STDERR",
    )
    cli.add_argument(
        '--debug',
        action='store_true',
        default=False,
        help="also print debugging messages to STDERR",
    )
    cli.add_argument(
        '--version',
        action='version',
        version='%(prog)s {version}'.format(version=__version__),
        help="show version information and exit",
    )
    return cli.parse_args()
def setup_logging(
    logger: logging.Logger,
    verbose: bool = False,
    debug: bool = False,
):
    """Set the level of *logger* and attach a stderr stream handler.

    debug wins over verbose: DEBUG > INFO > WARNING (the default).
    """
    if debug:
        level = logging.DEBUG
    else:
        level = logging.INFO if verbose else logging.WARNING
    logger.setLevel(level)

    stream_handler = logging.StreamHandler()
    fmt = "[%(asctime)-15s: %(levelname)-8s @ %(funcName)s] %(message)s"
    stream_handler.setFormatter(logging.Formatter(fmt))
    logger.addHandler(stream_handler)
def main():
    """Copy the team associations of a source repo onto a destination repo.

    Reads a GitHub token from the GITHUB_TOKEN environment variable,
    resolves both repositories under the given organization, and adds every
    team of the source repository to the destination repository.  Raises on
    any unrecoverable error so the caller can exit non-zero.
    """
    # Parse CLI arguments
    args = parse_args()

    # Set up logging
    setup_logging(
        logger=logger,
        verbose=args.verbose,
        debug=args.debug,
    )

    # Set up GitHub API client
    try:
        g = Github(os.environ['GITHUB_TOKEN'])
    except KeyError:
        logger.error("Environment variable 'GITHUB_TOKEN' not defined.")
        raise

    # Get repo owner
    try:
        org = g.get_organization(args.org)
    except Exception as e:
        logger.error(f"Could not connect to GitHub. Error: {e}")
        raise

    # Get repos.
    # BUG FIX: the error messages below previously mixed org.slug and
    # org.login; PyGithub's Organization exposes 'login' (slug is a Team
    # attribute), so org.slug raised AttributeError while reporting the
    # original failure.  org.login is now used consistently.
    try:
        repo_source = org.get_repo(args.source)
    except UnknownObjectException:
        logger.error(
            f"Source repo '{args.source}' could not be found at org "
            f"'{org.login}'."
        )
        raise
    try:
        repo_dest = org.get_repo(args.dest)
    except UnknownObjectException:
        logger.error(
            f"Destination repo '{args.dest}' could not be found at org "
            f"'{org.login}'."
        )
        raise

    # Get source teams
    try:
        teams_source = repo_source.get_teams()
    except GithubException:
        logger.error(
            f"Could not get teams for repo '{args.source}' at org "
            f"'{org.login}'."
        )
        raise

    # Add source teams to destination; remember the last failure so all
    # teams are attempted before the function raises.
    last_error = None
    for team in teams_source:
        try:
            logger.info(
                f"Adding team '{team.name}' to repo '{args.dest}' at org "
                f"'{org.login}'..."
            )
            team.add_to_repos(repo_dest)
        except Exception as e:
            logger.warning(
                f"Could not add team '{team.name}' to repo '{args.dest}' at "
                f"org '{org.login}'. Error: {e}"
            )
            last_error = e

    # Raise if errors occurred for any team
    if last_error:
        logger.error("One or more teams could not be added.")
        raise last_error
# Script entry point: convert any unhandled error into exit status 1.
# NOTE(review): the traceback is suppressed here; main() is expected to have
# logged the error before re-raising.
if __name__ == '__main__':
try:
main()
except Exception:
sys.exit(1)
| 25.162011 | 78 | 0.588588 |
fc3271f99ce9aeea7b1037f8660de4ba2c854a16 | 6,069 | py | Python | example_scripts/edge_tpu/detector/detector_video.py | joaopdss/aXeleRate | 791c8b29056ed11bd0ed306e620664577ec9724c | [
"MIT"
] | 148 | 2020-03-18T01:36:20.000Z | 2022-03-24T08:56:45.000Z | example_scripts/edge_tpu/detector/detector_video.py | joaopdss/aXeleRate | 791c8b29056ed11bd0ed306e620664577ec9724c | [
"MIT"
] | 55 | 2020-03-29T14:36:44.000Z | 2022-02-17T22:35:03.000Z | example_scripts/edge_tpu/detector/detector_video.py | joaopdss/aXeleRate | 791c8b29056ed11bd0ed306e620664577ec9724c | [
"MIT"
] | 57 | 2020-04-01T14:22:53.000Z | 2022-01-31T13:09:49.000Z | import argparse
import io
import time
import numpy as np
import cv2
from box import BoundBox, nms_boxes, boxes_to_array, to_minmax, draw_boxes
#from tflite_runtime.interpreter import Interpreter
import tflite_runtime.interpreter as tflite
class Detector(object):
    """YOLO-style object detector running a TFLite model on an Edge TPU.

    The model output is decoded as a 7x7 grid with 5 anchor boxes per cell
    and 6 values per box (x, y, w, h, confidence, class score) -- see
    detect_objects()/run().
    """

    def __init__(self, label_file, model_file, threshold):
        """Load labels, build the Edge TPU interpreter, read the input shape.

        label_file -- comma-separated class names (single line)
        model_file -- path to the compiled .tflite model
        threshold  -- minimum class confidence kept during decoding
        """
        self._threshold = float(threshold)
        self.labels = self.load_labels(label_file)
        self.interpreter = tflite.Interpreter(
            model_file,
            experimental_delegates=[tflite.load_delegate('libedgetpu.so.1')])
        self.interpreter.allocate_tensors()
        # Input tensor layout is NHWC: (batch, height, width, channels).
        _, self.input_height, self.input_width, _ = \
            self.interpreter.get_input_details()[0]['shape']
        self.tensor_index = self.interpreter.get_input_details()[0]['index']

    def load_labels(self, path):
        """Return {index: label} parsed from a comma-separated label file."""
        with open(path, 'r') as f:
            # Drop double quotes from the whole file, then split on commas.
            return {i: line.strip()
                    for i, line in enumerate(
                        f.read().replace('"', '').split(','))}

    def preprocess(self, img):
        """Resize and normalize a BGR frame for the model's input tensor."""
        img = cv2.resize(img, (self.input_width, self.input_height))
        img = img.astype(np.float32)
        # Scale pixel values from [0, 255] to [-1, 1].
        img = img / 255.
        img = img - 0.5
        img = img * 2.
        # OpenCV delivers BGR; reverse the channel axis to get RGB.
        img = img[:, :, ::-1]
        # Add the batch dimension.
        img = np.expand_dims(img, 0)
        return img

    def get_output_tensor(self, index):
        """Returns the output tensor at the given index."""
        output_details = self.interpreter.get_output_details()[index]
        tensor = np.squeeze(self.interpreter.get_tensor(output_details['index']))
        return tensor

    def detect_objects(self, image):
        """Run inference and return the raw grid output, shape (7, 7, 5, 6)."""
        img = self.preprocess(image)
        self.interpreter.set_tensor(self.tensor_index, img)
        self.interpreter.invoke()
        # Get all output details
        raw_detections = self.get_output_tensor(0)
        # (grid_h, grid_w, boxes per cell, 4 coords + confidence + class)
        output_shape = [7, 7, 5, 6]
        output = np.reshape(raw_detections, output_shape)
        return output

    def detect(self, original_image):
        """Detect objects in a frame and return it with boxes drawn on."""
        self.output_height, self.output_width = original_image.shape[0:2]
        start_time = time.time()
        results = self.detect_objects(original_image)
        elapsed_ms = (time.time() - start_time) * 1000
        fps = 1 / elapsed_ms * 1000
        print("Estimated frames per second : {0:.2f} Inference time: {1:.2f}".format(fps, elapsed_ms))

        def _to_original_scale(boxes):
            # Convert normalized boxes to pixel min/max corner coordinates.
            minmax_boxes = to_minmax(boxes)
            minmax_boxes[:, 0] *= self.output_width
            minmax_boxes[:, 2] *= self.output_width
            minmax_boxes[:, 1] *= self.output_height
            minmax_boxes[:, 3] *= self.output_height
            # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in
            # 1.24; the builtin int is the exact equivalent alias.
            return minmax_boxes.astype(int)

        boxes, probs = self.run(results)
        print(boxes)
        if len(boxes) > 0:
            boxes = _to_original_scale(boxes)
            original_image = draw_boxes(original_image, boxes, probs, self.labels)
        return original_image

    def run(self, netout):
        """Convert Yolo network output to bounding box
        # Args
            netout : 4d-array, shape of (grid_h, grid_w, num of boxes per grid, 5 + n_classes)
                YOLO neural network output array
        # Returns
            boxes : array, shape of (N, 4)
                coordinate scale is normalized [0, 1]
            probs : array, shape of (N, nb_classes)
        """
        # Anchor priors as (w, h) pairs, in grid-cell units.
        anchors = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434,
                   7.88282, 3.52778, 9.77052, 9.16828]
        nms_threshold = 0.2
        grid_h, grid_w, nb_box = netout.shape[:3]

        boxes = []
        # Decode objectness, weight class scores by it, zero out weak ones.
        netout[..., 4] = _sigmoid(netout[..., 4])
        netout[..., 5:] = netout[..., 4][..., np.newaxis] * _softmax(netout[..., 5:])
        netout[..., 5:] *= netout[..., 5:] > self._threshold

        for row in range(grid_h):
            for col in range(grid_w):
                for b in range(nb_box):
                    # from 5th element onwards are confidence and class scores
                    classes = netout[row, col, b, 5:]
                    if np.sum(classes) > 0:
                        # first 4 elements are x, y, w, and h
                        x, y, w, h = netout[row, col, b, :4]
                        x = (col + _sigmoid(x)) / grid_w  # center position, unit: image width
                        y = (row + _sigmoid(y)) / grid_h  # center position, unit: image height
                        w = anchors[2 * b + 0] * np.exp(w) / grid_w  # unit: image width
                        h = anchors[2 * b + 1] * np.exp(h) / grid_h  # unit: image height
                        confidence = netout[row, col, b, 4]
                        box = BoundBox(x, y, w, h, confidence, classes)
                        boxes.append(box)

        # 'classes' is the last cell's score vector; only its length (the
        # number of classes) is used by NMS, so the leak is intentional.
        boxes = nms_boxes(boxes, len(classes), nms_threshold, self._threshold)
        boxes, probs = boxes_to_array(boxes)
        return boxes, probs
def _sigmoid(x):
return 1. / (1. + np.exp(-x))
def _softmax(x, axis=-1, t=-100.):
x = x - np.max(x)
if np.min(x) < t:
x = x/np.min(x)*t
e_x = np.exp(x)
return e_x / e_x.sum(axis, keepdims=True)
# Command-line interface: model/label paths are required, threshold optional.
parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--model', help='File path of .tflite file.', required=True)
parser.add_argument('--labels', help='File path of labels file.', required=True)
parser.add_argument('--threshold', help='Confidence threshold.', default=0.3)
args = parser.parse_args()

detector = Detector(args.labels, args.model, args.threshold)

# Capture from V4L2 device index 2; adjust to match the attached camera.
camera = cv2.VideoCapture(2)

while camera.isOpened():
    ret, frame = camera.read()
    if not ret:
        # End of stream or camera failure: stop the loop.
        break
    # BUG FIX: detect() was previously called before checking 'ret'; a
    # failed grab returns frame=None and would crash inside detect().
    image = detector.detect(frame)
    # Display the resulting frame
    cv2.imshow('Frame', image)
    # Press Q on keyboard to exit
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break

# When everything done, release the video capture object
camera.release()
# Closes all the frames
cv2.destroyAllWindows()
| 37.695652 | 123 | 0.598286 |
9da91d8ec464cad5b643c09ea2e2adcff3f6ac92 | 8,500 | py | Python | examples/run_dqn.py | BetsyHJ/SOFA | a80e684b8047496dac6a164893b9e0666fa474a7 | [
"MIT"
] | 20 | 2020-07-28T09:57:26.000Z | 2022-02-13T12:42:12.000Z | examples/run_dqn.py | BetsyHJ/SOFA | a80e684b8047496dac6a164893b9e0666fa474a7 | [
"MIT"
] | null | null | null | examples/run_dqn.py | BetsyHJ/SOFA | a80e684b8047496dac6a164893b9e0666fa474a7 | [
"MIT"
] | 3 | 2021-03-24T08:33:28.000Z | 2022-01-23T03:14:22.000Z | import sys
sys.path.append('../src/')
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
import numpy as np
from time import time, localtime, strftime
import configparser
# from env.env import SOFAEnv, simulated_data
from env.env_multiusers import SOFAEnv, simulated_data
from train import train
from evaluation import evaluate, eval_yahoo_sinTurn, yahoo_eval_1, yahoo_eval_1_calu_itemset
# np.random.seed(2020)
def _get_conf(conf_name):
    """Load ../conf/<conf_name>.properties, seed NumPy's RNG, and build the
    simulator configuration dict (rating matrix, episode length, RL mode).

    The rating file chosen depends on dataset/debiasing and on whether the
    run is an evaluation; see the branches below.
    """
config = configparser.ConfigParser()
config.read("../conf/"+conf_name+".properties")
conf=dict(config.items("default"))
# Seed the global RNG from the config so runs are reproducible.
np.random.seed(int(conf['seed']))
# # for multiple jobs in
# args = set_hparams()
# conf["data.debiasing"] = args.debiasing
# conf["seed"] = str(args.seed)
evalProcess = conf['evaluation']
# Training run: pick the rating matrix generated by the chosen
# gen_model/debiasing combination (or the GT/pseudo-GT variants).
if evalProcess.lower() == 'false':
if ((conf["data.input.dataset"] == 'sim4') or (conf["data.input.dataset"] == 'sim5')) and (conf["data.debiasing"] == 'GT'):
rating_file = conf["data.input.path"] + conf["data.input.dataset"] + "_GT_ratingM.ascii"
else:
rating_file = conf["data.input.path"] + conf["data.input.dataset"] + '_' + \
conf["data.gen_model"] + '_' + conf["data.debiasing"] + "_ratingM.ascii"
if conf["data.debiasing"] == 'GT':
rating_file = conf["data.input.path"] + conf["data.input.dataset"] + "_pseudoGT_ratingM.ascii"
print("we use a pseudo GT for yahoo, which is generated by MF on unbiased testset:", rating_file)
# Evaluation run: simulated datasets use the ground-truth matrix;
# yahoo/coat fall back to a pseudo ground truth.
else:
if conf["data.input.dataset"].lower() == "sim4" or conf["data.input.dataset"].lower() == "sim5":
print('now evaluation process only for simulated dataset which has the groundTruth')
rating_file = conf["data.input.path"] + conf["data.input.dataset"] + "_GT_ratingM.ascii"
# rating_file = conf["data.input.path"] + conf["data.input.dataset"] + '_' + \
# conf["data.gen_model"] + '_' + conf["data.debiasing"] + "_ratingM.ascii"
elif conf["data.input.dataset"].lower() in ["yahoo", "coat"]:
rating_file = conf["data.input.path"] + conf["data.input.dataset"] + '_' + \
conf["data.gen_model"] + '_' + conf["data.debiasing"] + "_ratingM.ascii" # this simulator is not for evaluation directly, but for several interaction to generate states
# solution-2 with pseudo GT
rating_file = conf["data.input.path"] + conf["data.input.dataset"] + "_pseudoGT_ratingM.ascii"
print("we use a pseudo GT for yahoo, which is generated by MF on unbiased testset:", rating_file)
else:
print("check data")
conf["RATING_TYPE"] = conf["rating_type"]
# # conf['RATINGS'] = np.clip(np.loadtxt(rating_file), 1.0, 5.0)
# # print("rating of simulator is float")
# if evalProcess.lower() == 'true':
# ratingM = np.clip(np.round(np.loadtxt(rating_file)).astype('int'), 1, 5)
# if conf["data.input.dataset"].lower() == "sim5":
# trainM = np.loadtxt('../../../sigir2020/data/simulated5/train.ascii', dtype=int)
# conf['RATINGS'] = np.where(trainM>0, 1, ratingM)
# else:
# print("non for other dataset, only for sim5")
# exit(0)
# else:
# conf["RATINGS"] = np.clip(np.round(np.loadtxt(rating_file)).astype('int'), 1, 5)
# Load the rating matrix and clamp ratings to the integer range [1, 5].
conf["RATINGS"] = np.clip(np.round(np.loadtxt(rating_file)).astype('int'), 1, 5)
# item_emb_file = conf["data.input.path"] + conf["data.input.dataset"] + '_' + \
# conf["data.gen_model"] + '_' + conf["data.debiasing"] + "_item.emb"
# conf["ITEM_VEC"] = np.loadtxt(item_emb_file)
# print(conf["ITEM_VEC"].shape, item_emb_file)
conf["EPISODE_LENGTH"] = conf["episode_length"]
# Normalize the RL algorithm name; DoubleDQN keeps its camel-case spelling.
conf['mode'] = conf['mode'].upper()
if conf['mode'] == 'DOUBLEDQN':
conf['mode'] = 'DoubleDQN'
return conf
def _logging_(basis_conf, params_conf):
now = localtime(time())
now = strftime("%Y-%m-%d %H:%M:%S", now)
origin_data_name = basis_conf["data.input.dataset"]
gen_model = basis_conf["data.gen_model"]
debiasing = basis_conf["data.debiasing"]
print(now + " - data:%s" % origin_data_name)
print(now + " - gen_model:%s, debiasing:%s" % (gen_model, debiasing))
print(now + " - RL Algo: %s, state_encoder: %s" % (basis_conf['mode'], params_conf['state_encoder']))
print("conf : " + str(params_conf), flush=True)
def run_dqn():
    """Entry point: build the SOFA environment, configure the DQN agent,
    and either train it or run the evaluation protocol depending on the
    'evaluation' flag in the config.
    """
# Load run configuration and the simulator environment.
conf = _get_conf('yahoo')
# conf['RATINGS'], item_vec = simulated_data(10, 20)
# item_vec = conf["ITEM_VEC"]
sofa = SOFAEnv(conf)
action_space = sofa.num_items
num_users = sofa.num_users
# init DQN
config = load_parameters(conf['mode'])
config['STATE_MAXLENGTH'] = int(conf["episode_length"])
config['ACTION_SPACE'] = action_space
# tuning = 'learning_rate'.upper()
# tuning = 'memory_size'.upper()
# tuning = 'batch_size'.upper()
# tuning = 'gamma'.upper()
# tuning = 'optimizer'.upper()
# tuning = 'replace_targetnet'.upper()
# tuning = 'epsilon_decay_step'
# tuning = 'lr_decay_step'
# tuning = "state_encoder"
# tuning = 'action_dim'.upper()
# tuning = 'RNN_STATE_DIM'
# print("tuning:",tuning)
# Checkpoint file name encodes dataset, simulator, debiasing, algorithm,
# encoder, reward variant and seed.
config['SAVE_MODEL_FILE'] = conf["data.input.dataset"] + '_' + \
conf["data.gen_model"] + '_' + conf["data.debiasing"] + '_' + \
conf['mode'] + '_' + config["state_encoder"] + '_' + 'r-12_SmoothL1_' + 'nohuman_' + "seed" + conf["seed"] + '_'#+ tuning + str(config[tuning]) + '_' \
#+ "actiondim500_" #"rnn_state_10_"
# 'episode20_' or 'nohuman_' or 'trick1_' 'r-12_SmoothL1_'
#+ "seed" + conf["seed"] + '_'
# if conf["data.input.dataset"].lower() == 'yahoo':
# config['SAVE_MODEL_FILE'] += 'le-1_'
# 'testuser_'
# config['SAVE_MODEL_FILE'] = 'sim_random_' + str(num_users) + '_' + str(action_space) + '_' + config["state_encoder"] + '_'
_logging_(conf, config)
# config['ACTION_DIM'] = item_vec.shape[1]
# config['ACTION_FEATURE'] = item_vec
## train process
evalProcess = conf['evaluation']
# Train, or evaluate with the dataset-appropriate protocol.
if evalProcess.lower() == 'false':
train(conf, config, sofa)
else:
if conf["data.input.dataset"].lower() in ['yahoo', 'coat']:
test_file = conf["data.input.path"] + conf["data.input.dataset"] + "_test.ascii"
# eval_yahoo_sinTurn(conf, config, sofa, test_file)
yahoo_eval_1(conf, config, sofa, test_file)
# yahoo_eval_1_calu_itemset(conf, config, sofa, test_file)
# # solution-2 with a pseudo GT
# evaluate(conf, config, sofa, test_file)
else:
evaluate(conf, config, sofa)
def load_parameters(mode):
    """Read the DQN hyperparameters for *mode* from its .properties file.

    Parses the [hyperparameters] section of ../conf/<mode>.properties and
    returns a dict with numeric fields already converted to int/float.
    """
    config = configparser.ConfigParser()
    config.read("../conf/" + mode + ".properties")
    raw = dict(config.items("hyperparameters"))
    params = {
        'ACTION_DIM': int(raw['action_dim']),
        'MEMORY_SIZE': int(raw['memory_size']),
        'GAMMA': float(raw['gamma']),  # reward decay
        'LEARNING_RATE': float(raw['learning_rate']),
        'EPSILON': float(raw['epsilon']),
        'BATCH_SIZE': int(raw['batch_size']),
        'REPLACE_TARGETNET': int(raw['replace_targetnet']),
        'OPTIMIZER': raw['optimizer'],
        'RNN_STATE_DIM': int(raw['rnn_state_dim']),
        'state_encoder': raw['state_encoder'],
        'lr_decay_step': raw['lr_decay_step'],
        'epsilon_decay_step': raw['epsilon_decay_step'],
    }
    return params
def set_hparams():
    """Parse --seed/--debiasing from the CLI and seed NumPy's global RNG."""
    import argparse
    cli = argparse.ArgumentParser()
    cli.add_argument('--seed', type=int)
    cli.add_argument('--debiasing', type=str)
    parsed = cli.parse_args()
    print("now the seed is", parsed.seed, flush=True)
    np.random.seed(parsed.seed)
    return parsed
if __name__ == "__main__":
run_dqn()
print("End. " + strftime("%Y-%m-%d %H:%M:%S", localtime(time())))
# print("checkpoint") | 44.502618 | 180 | 0.619294 |
f008bc1f17d52ade63a8efe2bda9c206815a896c | 5,202 | py | Python | superset/tasks/async_queries.py | fx19880617/superset | 0dfb32cf08c2ec0d9a635f8dedcbf3f41aed1b35 | [
"Apache-2.0"
] | 7 | 2020-07-31T04:50:01.000Z | 2021-12-08T07:56:42.000Z | superset/tasks/async_queries.py | fx19880617/superset | 0dfb32cf08c2ec0d9a635f8dedcbf3f41aed1b35 | [
"Apache-2.0"
] | 77 | 2020-02-02T07:54:13.000Z | 2022-03-23T18:22:04.000Z | superset/tasks/async_queries.py | fx19880617/superset | 0dfb32cf08c2ec0d9a635f8dedcbf3f41aed1b35 | [
"Apache-2.0"
] | 6 | 2020-03-25T01:02:29.000Z | 2021-05-12T17:11:19.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from typing import Any, cast, Dict, Optional
from celery.exceptions import SoftTimeLimitExceeded
from flask import current_app, g
from superset import app
from superset.exceptions import SupersetVizException
from superset.extensions import (
async_query_manager,
cache_manager,
celery_app,
security_manager,
)
from superset.utils.cache import generate_cache_key, set_and_log_cache
from superset.views.utils import get_datasource_info, get_viz
logger = logging.getLogger(__name__)
query_timeout = current_app.config[
"SQLLAB_ASYNC_TIME_LIMIT_SEC"
] # TODO: new config key
def ensure_user_is_set(user_id: Optional[int]) -> None:
    """Attach a user object to flask.g when one is not already present.

    Looks up *user_id* through the security manager; does nothing when a
    user is already set on g or when *user_id* is None.
    """
    current_user = getattr(g, "user", None)
    if current_user is None and user_id is not None:
        g.user = security_manager.get_user_by_id(user_id)
@celery_app.task(name="load_chart_data_into_cache", soft_time_limit=query_timeout)
def load_chart_data_into_cache(
job_metadata: Dict[str, Any], form_data: Dict[str, Any],
) -> None:
    """Celery task: run a chart-data query, cache the result, and update
    the async job's status with the URL where the result can be fetched.
    Errors are recorded on the job before being re-raised.
    """
from superset.charts.commands.data import ChartDataCommand
with app.app_context(): # type: ignore
try:
# Run the query with the requesting user attached, caching the payload.
ensure_user_is_set(job_metadata.get("user_id"))
command = ChartDataCommand()
command.set_query_context(form_data)
result = command.run(cache=True)
# Point the job at the cached result's REST endpoint.
cache_key = result["cache_key"]
result_url = f"/api/v1/chart/data/{cache_key}"
async_query_manager.update_job(
job_metadata, async_query_manager.STATUS_DONE, result_url=result_url,
)
except SoftTimeLimitExceeded as exc:
logger.warning(
"A timeout occurred while loading chart data, error: %s", exc
)
raise exc
except Exception as exc:
# TODO: QueryContext should support SIP-40 style errors
error = exc.message if hasattr(exc, "message") else str(exc) # type: ignore # pylint: disable=no-member
errors = [{"message": error}]
async_query_manager.update_job(
job_metadata, async_query_manager.STATUS_ERROR, errors=errors
)
raise exc
return None
@celery_app.task(name="load_explore_json_into_cache", soft_time_limit=query_timeout)
def load_explore_json_into_cache( # pylint: disable=too-many-locals
job_metadata: Dict[str, Any],
form_data: Dict[str, Any],
response_type: Optional[str] = None,
force: bool = False,
) -> None:
    """Celery task: run an explore_json visualization query, cache both the
    payload and the originating form_data, and update the async job with
    the retrieval URL.  Errors are recorded on the job before re-raising.
    """
with app.app_context(): # type: ignore
cache_key_prefix = "ejr-" # ejr: explore_json request
try:
ensure_user_is_set(job_metadata.get("user_id"))
datasource_id, datasource_type = get_datasource_info(None, None, form_data)
viz_obj = get_viz(
datasource_type=cast(str, datasource_type),
datasource_id=datasource_id,
form_data=form_data,
force=force,
)
# run query & cache results
payload = viz_obj.get_payload()
if viz_obj.has_error(payload):
raise SupersetVizException(errors=payload["errors"])
# cache form_data for async retrieval
cache_value = {"form_data": form_data, "response_type": response_type}
cache_key = generate_cache_key(cache_value, cache_key_prefix)
set_and_log_cache(cache_manager.cache, cache_key, cache_value)
result_url = f"/superset/explore_json/data/{cache_key}"
async_query_manager.update_job(
job_metadata, async_query_manager.STATUS_DONE, result_url=result_url,
)
except SoftTimeLimitExceeded as ex:
logger.warning(
"A timeout occurred while loading explore json, error: %s", ex
)
raise ex
except Exception as exc:
# Viz exceptions already carry structured errors; others are wrapped.
if isinstance(exc, SupersetVizException):
errors = exc.errors # pylint: disable=no-member
else:
error = (
exc.message if hasattr(exc, "message") else str(exc) # type: ignore # pylint: disable=no-member
)
errors = [error]
async_query_manager.update_job(
job_metadata, async_query_manager.STATUS_ERROR, errors=errors
)
raise exc
return None
| 39.112782 | 116 | 0.662438 |
20f3291534cf33a7651ff42e32740e0a210f59e6 | 2,492 | py | Python | projects/migrations/0001_initial.py | TheDim0n/ProjectManager | 50d36e7e3fc71655aa5a82bb19eacc07172ba5e4 | [
"MIT"
] | null | null | null | projects/migrations/0001_initial.py | TheDim0n/ProjectManager | 50d36e7e3fc71655aa5a82bb19eacc07172ba5e4 | [
"MIT"
] | 1 | 2020-09-08T11:10:53.000Z | 2020-09-08T11:10:53.000Z | projects/migrations/0001_initial.py | TheDim0n/ProjectManager | 50d36e7e3fc71655aa5a82bb19eacc07172ba5e4 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.8 on 2020-08-10 14:20
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates Project and Level models.

    NOTE(review): the DateField defaults below are concrete datetimes baked
    in at makemigrations time (datetime.now() evaluated once), not dynamic
    defaults -- presumably intentional for the historical schema; do not
    edit, migrations must match their recorded state.
    """

# First migration for this app.
initial = True

# Depends on the status app's initial migration and the (swappable)
# user model.
dependencies = [
('status', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]

operations = [
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('start_date', models.DateField(default=datetime.datetime(2020, 8, 10, 14, 20, 49, 998643, tzinfo=utc))),
('finish_date', models.DateField(default=datetime.datetime(2020, 8, 11, 14, 20, 49, 998643, tzinfo=utc))),
('description', models.TextField(blank=True, max_length=1000)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
('status', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='status.Status')),
],
),
migrations.CreateModel(
name='Level',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('start_date', models.DateField(default=datetime.datetime(2020, 8, 10, 14, 20, 49, 999638, tzinfo=utc))),
('finish_date', models.DateField(default=datetime.datetime(2020, 8, 11, 14, 20, 49, 999638, tzinfo=utc))),
('is_zero', models.BooleanField(blank=True, default=False)),
('description', models.TextField(blank=True, max_length=1000)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
('project', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='levels', to='projects.Project')),
('root_level', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='levels', to='projects.Level')),
('status', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='status.Status')),
],
),
]
| 51.916667 | 150 | 0.634831 |
cdafbaa76eb5fe331b4cf5d0b43379de78f85066 | 1,728 | py | Python | dephell/networking.py | OliverHofkens/dephell | 6303f416018910668f1635b70cd828a2fd2b2d9e | [
"MIT"
] | null | null | null | dephell/networking.py | OliverHofkens/dephell | 6303f416018910668f1635b70cd828a2fd2b2d9e | [
"MIT"
] | null | null | null | dephell/networking.py | OliverHofkens/dephell | 6303f416018910668f1635b70cd828a2fd2b2d9e | [
"MIT"
] | null | null | null | # built-in
from functools import partial, update_wrapper
from logging import getLogger
from ssl import create_default_context
from time import sleep
# external
import certifi
import requests
from aiohttp import ClientError, ClientSession, TCPConnector
# app
from . import __version__
USER_AGENT = 'DepHell/{version}'.format(version=__version__)
logger = getLogger('dephell.networking')
def aiohttp_session(*, auth=None, **kwargs):
    """Build an aiohttp ClientSession with certifi-backed TLS verification.

    *auth* (optional) is attached as an Authorization header; any extra
    keyword arguments are forwarded to ClientSession.
    """
    headers = {}
    if auth:
        headers['Authorization'] = auth.encode()
    context = create_default_context(cafile=certifi.where())
    try:
        connector = TCPConnector(ssl=context)
    except TypeError:
        # Older aiohttp releases take ssl_context= instead of ssl=.
        connector = TCPConnector(ssl_context=context)
    return ClientSession(headers=headers, connector=connector, **kwargs)
def requests_session(*, auth=None, headers=None, **kwargs):
    """Build a requests.Session with auth, headers and extra attributes.

    A DepHell User-Agent is added unless the caller already supplied one;
    remaining keyword arguments are set directly on the session object.
    """
    session = requests.Session()
    if auth:
        session.auth = auth
    request_headers = dict() if headers is None else headers
    request_headers.setdefault('User-Agent', USER_AGENT)
    session.headers = request_headers
    if kwargs:
        session.__dict__.update(kwargs)
    return session
def aiohttp_repeat(func=None, *, count: int = 4):
    """Decorator retrying an async aiohttp call up to *count* times.

    Usable bare (@aiohttp_repeat) or with arguments
    (@aiohttp_repeat(count=8)).  Retries on ClientError with an increasing
    pause; the last attempt re-raises the error.
    """
    if func is None:
        # Called with arguments: return the parameterized decorator.
        # BUG FIX: this previously returned partial(func, count=count),
        # i.e. partial(None, ...), which is not callable as a decorator.
        return partial(aiohttp_repeat, count=count)

    async def wrapper(*args, **kwargs):
        for pause in range(1, count + 1):
            try:
                return await func(*args, **kwargs)
            except ClientError:
                if pause == count:
                    raise
                logger.debug('aiohttp payload error, repeating...', exc_info=True)
                # NOTE(review): time.sleep blocks the event loop here;
                # asyncio.sleep would be the non-blocking choice — confirm
                # callers tolerate the change before switching.
                sleep(pause)
        raise RuntimeError('unreachable')

    wrapper = update_wrapper(wrapper=wrapper, wrapped=func)
    return wrapper
| 27.870968 | 82 | 0.665509 |
4dca4f6577a24e547b9562717c8ec9f3496b161b | 9,407 | py | Python | docs/examples/use_cases/tensorflow/resnet-n/nvutils/hvd_patch.py | cyyever/DALI | e2b2d5a061da605e3e9e681017a7b2d53fe41a62 | [
"ECL-2.0",
"Apache-2.0"
] | 3,967 | 2018-06-19T04:39:09.000Z | 2022-03-31T10:57:53.000Z | docs/examples/use_cases/tensorflow/resnet-n/nvutils/hvd_patch.py | cyyever/DALI | e2b2d5a061da605e3e9e681017a7b2d53fe41a62 | [
"ECL-2.0",
"Apache-2.0"
] | 3,494 | 2018-06-21T07:09:58.000Z | 2022-03-31T19:44:51.000Z | docs/examples/use_cases/tensorflow/resnet-n/nvutils/hvd_patch.py | cyyever/DALI | e2b2d5a061da605e3e9e681017a7b2d53fe41a62 | [
"ECL-2.0",
"Apache-2.0"
] | 531 | 2018-06-19T23:53:10.000Z | 2022-03-30T08:35:59.000Z | # This is a patch for Horovod 0.21.3 to work with our custom learning schedule
# used in CNN resnet50 scripts.
from tensorflow import keras
from horovod.tensorflow import Compression
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
import horovod.tensorflow as hvd
import tensorflow as tf
from nvutils import common
from distutils.version import LooseVersion
from horovod.tensorflow import Average, Compression, Sum
_PRE_TF_2_4_0 = LooseVersion(tf.__version__) < LooseVersion('2.4.0')
def create_distributed_optimizer(
        keras, optimizer, name, device_dense, device_sparse, compression,
        sparse_as_dense, gradient_predivide_factor, op,
        backward_passes_per_step=1, average_aggregated_gradients=False,
        groups=None):
    """Build a Horovod-wrapped subclass of `optimizer` and instantiate it.

    The dynamically created class allreduces gradients across workers before
    they are applied.  The instance is rebuilt from the original optimizer's
    config, with the learning-rate schedule re-created as a
    PiecewiseConstantDecayWithWarmup — the local patch this module exists for.
    """
    class _DistributedOptimizer(keras.optimizers.Optimizer):
        # Tells Keras this optimizer performs its own gradient aggregation.
        _HAS_AGGREGATE_GRAD = True

        def __init__(self, **kwargs):
            self._name = name or "Distributed%s" % self.__class__.__base__.__name__
            # Flipped to True by _allreduce(); checked in apply_gradients().
            self._aggregated_gradients = False
            self._allreduce_grads = hvd._make_allreduce_grads_fn(
                self._name,
                device_dense,
                device_sparse,
                compression,
                sparse_as_dense,
                op,
                gradient_predivide_factor,
                groups)
            self._agg_helper = None
            if backward_passes_per_step > 1:
                # NOTE(review): LocalGradientAggregationHelper[Eager] and
                # rank() are not imported at this module's top level, so this
                # branch would raise NameError — presumably it is never taken
                # (backward_passes_per_step defaults to 1); TODO confirm.
                if hvd._executing_eagerly():
                    self._agg_helper = LocalGradientAggregationHelperEager(
                        backward_passes_per_step=backward_passes_per_step,
                        allreduce_func=self._allreduce_grads,
                        sparse_as_dense=sparse_as_dense,
                        average_aggregated_gradients=average_aggregated_gradients,
                    )
                else:
                    self._agg_helper = LocalGradientAggregationHelper(
                        backward_passes_per_step=backward_passes_per_step,
                        allreduce_func=self._allreduce_grads,
                        sparse_as_dense=sparse_as_dense,
                        average_aggregated_gradients=average_aggregated_gradients,
                        rank=rank(),
                        optimizer_type=(
                            LocalGradientAggregationHelper._OPTIMIZER_TYPE_KERAS),
                    )
            super(self.__class__, self).__init__(**kwargs)

        def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None):
            """
            Compute gradients of all trainable variables.
            See Optimizer.get_gradients() for more info.
            In DistributedOptimizer, _compute_gradients() is overridden to
            also allreduce the gradients before returning them.
            """
            if _PRE_TF_2_4_0:
                return super(self.__class__, self)._compute_gradients(
                    loss, var_list, grad_loss, tape)
            # NOTE(review): `backprop` (tensorflow.python.eager.backprop) is
            # not imported in this file — this TF>=2.4 path would raise
            # NameError when tape is None; TODO confirm.
            tape = backprop.GradientTape() if tape is None else tape
            grads_and_vars = super(self.__class__, self)._compute_gradients(
                # pylint: disable=protected-access
                loss,
                var_list,
                grad_loss,
                tape=tape)
            grads, weights = list(zip(*grads_and_vars))
            allreduced_grads = self._allreduce(grads, weights)
            return list(zip(allreduced_grads, weights))

        def get_gradients(self, loss, params):
            """
            Compute gradients of all trainable variables.
            See Optimizer.get_gradients() for more info.
            In DistributedOptimizer, get_gradients() is overridden to also
            allreduce the gradients before returning them.
            """
            gradients = super(self.__class__, self).get_gradients(loss, params)
            return self._allreduce(gradients, params)

        def _aggregate_gradients(self, grads_and_vars):
            if _PRE_TF_2_4_0:
                grads, vars = list(zip(*grads_and_vars))
                aggregated_grads = self._allreduce(grads, vars)
                return aggregated_grads
            else:
                return super(self.__class__, self)._aggregate_gradients(
                    grads_and_vars)

        def _allreduce(self, grads, vars):
            # Record that aggregation ran so apply_gradients() can detect a
            # missing get_gradients()/_aggregate_gradients() call (pre-TF2.4).
            self._aggregated_gradients = True
            if self._agg_helper:
                return self._agg_helper.compute_gradients(tuple(grads), tuple(vars))
            else:
                return self._allreduce_grads(grads, vars)

        def apply_gradients(self, *args, **kwargs):
            if self._agg_helper:
                if isinstance(args[0], zip):
                    # If grad_and_vars are passed in as a zip object
                    # convert to a list. This is necessary for TF2.4+
                    # b/c args[0] is used in both conditional branches
                    # inside _agg_helper.apply_gradients().
                    args = list(args)
                    args[0] = list(args[0])
                    args = tuple(args)
                results = self._agg_helper.apply_gradients(
                    lambda: super(self.__class__, self).apply_gradients(*args, **kwargs),
                    self,
                    *args,
                    **kwargs,
                )
            else:
                results = super(self.__class__, self).apply_gradients(*args, **kwargs)
            if _PRE_TF_2_4_0 and not self._aggregated_gradients:
                raise Exception('`apply_gradients()` was called without a call to '
                                '`get_gradients()` or `_aggregate_gradients`. If '
                                'you\'re using TensorFlow 2.0, please specify '
                                '`experimental_run_tf_function=False` in `compile()`.')
            return results

    # We dynamically create a new class that inherits from the optimizer that was
    # passed in. The goal is to override get_gradients() method with an allreduce
    # implementation. This class will have the same name as the optimizer it's
    # wrapping, so that the saved model could be easily restored without Horovod.
    cls = type(optimizer.__class__.__name__, (optimizer.__class__,),
               dict(_DistributedOptimizer.__dict__))
    # This is the patch to allow the hovorod DistributedOptimizer recognize the
    # custom learning rate schedule we have used in CNN resnet50 scripts.
    config = optimizer.get_config()
    config['learning_rate'] = \
        common.PiecewiseConstantDecayWithWarmup.from_config(
            config['learning_rate']['config'])
    return cls.from_config(config)
def DistributedOptimizer(optimizer, name=None,
                         device_dense='', device_sparse='',
                         compression=Compression.none,
                         sparse_as_dense=False,
                         gradient_predivide_factor=1.0,
                         op=Average,
                         backward_passes_per_step=1,
                         average_aggregated_gradients=False):
    """
    An optimizer that wraps another keras.optimizers.Optimizer, using an allreduce
    to average gradient values before applying gradients to model weights.
    Args:
        optimizer: Optimizer to use for computing gradients and applying updates.
        name: Optional name prefix for the operations created when applying
              gradients. Defaults to "Distributed" followed by the provided
              optimizer type.
        device_dense: Device to be used for dense tensors. Uses GPU by default
                      if Horovod was build with HOROVOD_GPU_OPERATIONS.
        device_sparse: Device to be used for sparse tensors. Uses GPU by default
                       if Horovod was build with HOROVOD_GPU_OPERATIONS.
        compression: Compression algorithm used to reduce the amount of data
                     sent and received by each worker node. Defaults to not
                     using compression.
        sparse_as_dense: Treat all sparse gradients as dense tensors. This can
                         help improve performance and memory utilization if
                         the original sparse gradient has high density.
                         Defaults to false.
        gradient_predivide_factor: gradient_predivide_factor splits the averaging
                                   before and after the sum. Gradients are scaled
                                   by 1.0 / gradient_predivide_factor before the
                                   sum and gradient_predivide_factor / size after
                                   the sum.
        op: The reduction operation to use when combining gradients across
            different ranks. Defaults to Average.
        backward_passes_per_step: Number of backward passes to perform before
                                  calling hvd.allreduce. This allows accumulating
                                  updates over multiple mini-batches before
                                  reducing and applying them.
        average_aggregated_gradients: Whether to average the aggregated gradients
                                      that have been accumulated over multiple
                                      mini-batches. If true divides gradient
                                      updates by backward_passes_per_step.
                                      Only applicable for backward_passes_per_step
                                      > 1.
    """
    # BUGFIX: `rocm_built` is not imported at this module's top level, so the
    # original bare `rocm_built()` call raised NameError whenever a predivide
    # factor was supplied.  Access it through the imported `hvd` namespace.
    if gradient_predivide_factor != 1.0 and hvd.rocm_built():
        raise ValueError('gradient_predivide_factor not supported yet with ROCm')
    if op != Average and op != Sum:
        raise ValueError('op currently only supports Average and Sum')
    return create_distributed_optimizer(
        keras=keras,
        optimizer=optimizer,
        name=name,
        device_dense=device_dense,
        device_sparse=device_sparse,
        compression=compression,
        sparse_as_dense=sparse_as_dense,
        gradient_predivide_factor=gradient_predivide_factor,
        op=op,
        backward_passes_per_step=backward_passes_per_step,
        average_aggregated_gradients=average_aggregated_gradients,
    )
| 43.151376 | 81 | 0.65515 |
46c06c984e87ca8196d117ce3501059054f4b852 | 147 | py | Python | reddit2telegram/channels/r_fallguysgame/app.py | mainyordle/reddit2telegram | 1163e15aed3b6ff0fba65b222d3d9798f644c386 | [
"MIT"
] | 187 | 2016-09-20T09:15:54.000Z | 2022-03-29T12:22:33.000Z | reddit2telegram/channels/r_fallguysgame/app.py | mainyordle/reddit2telegram | 1163e15aed3b6ff0fba65b222d3d9798f644c386 | [
"MIT"
] | 84 | 2016-09-22T14:25:07.000Z | 2022-03-19T01:26:17.000Z | reddit2telegram/channels/r_fallguysgame/app.py | mainyordle/reddit2telegram | 1163e15aed3b6ff0fba65b222d3d9798f644c386 | [
"MIT"
] | 172 | 2016-09-21T15:39:39.000Z | 2022-03-16T15:15:58.000Z | #encoding:utf-8
# Subreddit this channel mirrors.
subreddit = 'FallGuysGame'
# Telegram channel that receives the reposts.
t_channel = '@r_FallGuysGame'
def send_post(submission, r2t):
    """Forward a reddit submission to the channel via the plain formatter."""
    result = r2t.send_simple(submission)
    return result
| 16.333333 | 38 | 0.755102 |
257ec9bd188bbbfc734b5030ec50c1a76a5b5936 | 9,262 | py | Python | tests/test_data.py | mlfpm/deepof | 634ef2856d278f0f34f9c38346a28a40410571f8 | [
"MIT"
] | 4 | 2020-09-23T12:35:08.000Z | 2022-02-21T07:40:45.000Z | tests/test_data.py | mlfpm/deepof | 634ef2856d278f0f34f9c38346a28a40410571f8 | [
"MIT"
] | null | null | null | tests/test_data.py | mlfpm/deepof | 634ef2856d278f0f34f9c38346a28a40410571f8 | [
"MIT"
] | null | null | null | # @author lucasmiranda42
# encoding: utf-8
# module deepof
"""
Testing module for deepof.preprocess
"""
from hypothesis import given
from hypothesis import settings
from hypothesis import strategies as st
from collections import defaultdict
import deepof.utils
import deepof.data
import matplotlib.figure
import numpy as np
import os
import pytest
@settings(deadline=None)
@given(
    table_type=st.integers(min_value=0, max_value=1),
    arena_type=st.integers(min_value=0, max_value=1),
)
def test_project_init(table_type, arena_type):
    """Project construction works for supported arenas and table formats,
    and raises NotImplementedError for unsupported arena shapes."""
    fmt = [".h5", ".csv"][table_type]
    arena = ["circular", "foo"][arena_type]
    project_kwargs = dict(
        path=os.path.join(".", "tests", "test_examples", "test_single_topview"),
        arena=arena,
        arena_dims=tuple([380]),
        video_format=".mp4",
        table_format=fmt,
    )
    if arena == "foo":
        # Unsupported arena shapes must be rejected when the pipeline runs.
        with pytest.raises(NotImplementedError):
            deepof.data.Project(**project_kwargs).run()
    else:
        project = deepof.data.Project(**project_kwargs)
        # fmt is always one of ".h5"/".csv" here, so both checks apply.
        assert isinstance(project, deepof.data.Project)
        assert isinstance(project.load_tables(verbose=True), tuple)
def test_project_properties():
    """The Project property accessors expose sane defaults and accept writes."""
    project = deepof.data.Project(
        path=os.path.join(".", "tests", "test_examples", "test_single_topview"),
        arena="circular",
        arena_dims=tuple([380]),
        video_format=".mp4",
        table_format=".h5",
    )
    # subset_condition: defaults to None, settable.
    assert project.subset_condition is None
    project.subset_condition = "testing"
    assert project.subset_condition == "testing"
    # distances: defaults to "all", settable.
    assert project.distances == "all"
    project.distances = "testing"
    assert project.distances == "testing"
    # ego: defaults to falsy, settable.
    assert not project.ego
    project.ego = "testing"
    assert project.ego == "testing"
    # angles: defaults to True, settable.
    assert project.angles
    project.angles = False
    assert not project.angles
@settings(deadline=None)
@given(
    nodes=st.integers(min_value=0, max_value=1),
    ego=st.integers(min_value=0, max_value=2),
)
def test_get_distances(nodes, ego):
    """get_distances returns a per-video dictionary for every node/ego setting.

    Fix: the original loaded the tracking tables twice (once for get_arena and
    again inline in the get_distances call); load once and reuse — same data,
    half the I/O.
    """
    nodes = ["all", ["Center", "Nose", "Tail_base"]][nodes]
    ego = [False, "Center", "Nose"][ego]
    prun = deepof.data.Project(
        path=os.path.join(".", "tests", "test_examples", "test_single_topview"),
        arena="circular",
        arena_dims=tuple([380]),
        video_format=".mp4",
        table_format=".h5",
    )
    tables, _ = prun.load_tables(verbose=True)
    prun.scales, prun.arena_params, prun.video_resolution = prun.get_arena(
        tables=tables
    )
    prun.distances = nodes
    prun.ego = ego
    # Reuse the already-loaded tables (assumes load_tables is deterministic,
    # which the repeated call in the original relied on anyway).
    distances = prun.get_distances(tables, verbose=True)
    assert isinstance(distances, dict)
@settings(deadline=None)
@given(
    nodes=st.integers(min_value=0, max_value=1),
    ego=st.integers(min_value=0, max_value=2),
)
def test_get_angles(nodes, ego):
    """get_angles returns a per-video dictionary for every node/ego setting."""
    node_selection = ["all", ["Center", "Nose", "Tail_base"]][nodes]
    ego_selection = [False, "Center", "Nose"][ego]
    project = deepof.data.Project(
        path=os.path.join(".", "tests", "test_examples", "test_single_topview"),
        arena="circular",
        arena_dims=tuple([380]),
        video_format=".mp4",
        table_format=".h5",
    )
    project.distances = node_selection
    project.ego = ego_selection
    angles = project.get_angles(project.load_tables()[0], verbose=True)
    assert isinstance(angles, dict)
@settings(deadline=None)
@given(
    nodes=st.integers(min_value=0, max_value=1),
    ego=st.integers(min_value=0, max_value=2),
)
def test_run(nodes, ego):
    """Running the full pipeline yields a Coordinates object for every
    node/ego combination."""
    node_selection = ["all", ["Center", "Nose", "Tail_base"]][nodes]
    ego_selection = [False, "Center", "Nose"][ego]
    project = deepof.data.Project(
        path=os.path.join(".", "tests", "test_examples", "test_single_topview"),
        arena="circular",
        arena_dims=tuple([380]),
        video_format=".mp4",
        table_format=".h5",
    )
    project.distances = node_selection
    project.ego = ego_selection
    coordinates = project.run(verbose=True)
    assert isinstance(coordinates, deepof.data.Coordinates)
def test_get_rule_based_annotation():
    """Supervised annotation produces a rule-based TableDict."""
    coordinates = deepof.data.Project(
        path=os.path.join(".", "tests", "test_examples", "test_single_topview"),
        arena="circular",
        arena_dims=tuple([380]),
        video_format=".mp4",
        table_format=".h5",
    ).run()
    annotations = coordinates.supervised_annotation()
    assert isinstance(annotations, deepof.data.TableDict)
    assert annotations._type == "rule-based"
@settings(max_examples=10, deadline=None)
@given(
    nodes=st.integers(min_value=0, max_value=1),
    mode=st.one_of(
        st.just("single"), st.just("single")
    ),  # TODO: every setting should be transferrable to the multi-mice setting
    ego=st.integers(min_value=0, max_value=2),
    exclude=st.one_of(st.just(tuple([""])), st.just(["Tail_tip"])),
    sampler=st.data(),
)
def test_get_table_dicts(nodes, mode, ego, exclude, sampler):
    """End-to-end smoke test over the TableDict API: coords/speeds/
    distances/angles extraction, training-set assembly, preprocessing and
    dimensionality reduction, with hypothesis drawing most parameters."""
    nodes = ["all", ["Center", "Nose", "Tail_base"]][nodes]
    ego = [False, "Center", "Nose"][ego]
    prun = deepof.data.Project(
        path=os.path.join(
            ".", "tests", "test_examples", "test_{}_topview".format(mode)
        ),
        arena="circular",
        arena_dims=380,
        video_format=".mp4",
        animal_ids=(["B", "W"] if mode == "multi" else [""]),
        table_format=".h5",
        exclude_bodyparts=exclude,
        exp_conditions={"test": "test_cond"},
    )
    prun.distances = nodes
    prun.ego = ego
    prun = prun.run(verbose=False)
    algn = sampler.draw(st.one_of(st.just(False), st.just("Nose")))
    inplace = sampler.draw(st.booleans())
    # NOTE(review): `polar` is assigned a hypothesis *strategy*, not a drawn
    # value (no sampler.draw) — so get_coords receives a strategy object and
    # the `polar is False` check below is always False. Likely a missing
    # sampler.draw; TODO confirm intent.
    polar = st.one_of(st.just(True), st.just(False))
    speed = sampler.draw(st.integers(min_value=0, max_value=5))
    propagate = sampler.draw(st.booleans())
    propagate_annots = sampler.draw(
        st.one_of(st.just(prun.supervised_annotation()), st.just(False))
    )
    coords = prun.get_coords(
        center=sampler.draw(st.one_of(st.just("arena"), st.just("Center"))),
        polar=polar,
        align=algn,
        align_inplace=inplace,
        propagate_labels=propagate,
        propagate_annotations=propagate_annots,
    )
    speeds = prun.get_coords(
        center=sampler.draw(st.one_of(st.just("arena"), st.just("Center"))),
        polar=sampler.draw(st.booleans()),
        speed=speed,
        propagate_labels=propagate,
        propagate_annotations=propagate_annots,
    )
    distances = prun.get_distances(
        speed=sampler.draw(st.integers(min_value=0, max_value=5)),
        propagate_labels=propagate,
        propagate_annotations=propagate_annots,
    )
    angles = prun.get_angles(
        degrees=sampler.draw(st.booleans()),
        speed=sampler.draw(st.integers(min_value=0, max_value=5)),
        propagate_labels=propagate,
        propagate_annotations=propagate_annots,
    )
    # deepof.coordinates testing
    assert isinstance(coords, deepof.data.TableDict)
    assert isinstance(speeds, deepof.data.TableDict)
    assert isinstance(distances, deepof.data.TableDict)
    assert isinstance(angles, deepof.data.TableDict)
    assert isinstance(prun.get_videos(), list)
    assert prun.get_exp_conditions is not None
    assert isinstance(prun.get_quality(), defaultdict)
    assert isinstance(prun.get_arenas, tuple)
    # deepof.table_dict testing
    # NOTE(review): sampler.draw's second positional argument is a *label*;
    # the merged-tables strategy passed here is never sampled from — possibly
    # it was meant to be inside the one_of. TODO confirm.
    table = sampler.draw(
        st.one_of(
            st.just(coords), st.just(speeds), st.just(distances), st.just(angles)
        ),
        st.just(deepof.data.merge_tables(coords, speeds, distances, angles)),
    )
    assert table.filter_videos(["test"]) == table
    tset = table.get_training_set(
        test_videos=sampler.draw(st.integers(min_value=0, max_value=len(table) - 1))
    )
    # Training set is (X_train, y_train, X_test, y_test)-style 4-tuple.
    assert len(tset) == 4
    assert isinstance(tset[0], np.ndarray)
    if table._type == "coords" and algn == "Nose" and polar is False and speed == 0:
        assert isinstance(
            table.plot_heatmaps(bodyparts=["Spine_1"]), matplotlib.figure.Figure
        )
        align = sampler.draw(
            st.one_of(st.just(False), st.just("all"), st.just("center"))
        )
    else:
        align = False
    prep = table.preprocess(
        window_size=11,
        window_step=1,
        scale=sampler.draw(st.one_of(st.just("standard"), st.just("minmax"))),
        test_videos=sampler.draw(st.integers(min_value=0, max_value=len(table) - 1)),
        verbose=True,
        conv_filter=sampler.draw(st.one_of(st.just(None), st.just("gaussian"))),
        sigma=sampler.draw(st.floats(min_value=0.5, max_value=5.0)),
        shift=sampler.draw(st.floats(min_value=-1.0, max_value=1.0)),
        shuffle=sampler.draw(st.booleans()),
        align=align,
    )
    assert isinstance(prep[0], np.ndarray)
    # deepof dimensionality reduction testing
    table = deepof.data.TableDict(
        dict(table, **{"test1": table["test"]}), typ=table._type
    )
    print(table)
    assert isinstance(table.random_projection(n_components=2), tuple)
    assert isinstance(table.pca(n_components=2), tuple)
    assert isinstance(table.tsne(n_components=2), tuple)
| 29.685897 | 88 | 0.636472 |
a0bbfbaea9793d7d94ac4c379d7650aeda1402d2 | 3,221 | py | Python | Autocoders/Python/src/fprime_ac/generators/StartSource.py | sommercharles/fprime | f28c92e31d58e7e44bff09ad57d574ca5d5e91c7 | [
"Apache-2.0"
] | 1 | 2022-02-15T19:28:01.000Z | 2022-02-15T19:28:01.000Z | Autocoders/Python/src/fprime_ac/generators/StartSource.py | abcouwer-jpl/fprime | f28c92e31d58e7e44bff09ad57d574ca5d5e91c7 | [
"Apache-2.0"
] | 5 | 2020-07-13T16:56:33.000Z | 2020-07-23T20:38:13.000Z | Autocoders/Python/src/fprime_ac/generators/StartSource.py | abcouwer-jpl/fprime | f28c92e31d58e7e44bff09ad57d574ca5d5e91c7 | [
"Apache-2.0"
] | null | null | null | #===============================================================================
# NAME: StartSource
#
# DESCRIPTION: The StartSource class is the main entry point
# for generation of beginning and header comments code
# in code generation.
#
# USAGE: Nominally the StartSource.__call__ is called by using the
# instance name. The instance name is the function
# called with a suitable argument object containing
# all needed model information to generate the code.
#
# AUTHOR: reder
# EMAIL: reder@jpl.nasa.gov
# DATE CREATED : Feb. 5, 2013
#
# Copyright 2013, California Institute of Technology.
# ALL RIGHTS RESERVED. U.S. Government Sponsorship acknowledged.
#===============================================================================
#
# Python standard modules
#
import logging
#import os
#import sys
#import time
#
# Python extention modules and custom interfaces
#
from fprime_ac.generators.visitors import AbstractVisitor
#
# Universal globals used within module go here.
# (DO NOT USE MANY!)
#
# Global logger init. below.
PRINT = logging.getLogger('output')
DEBUG = logging.getLogger('debug')
#
# Module class or classes go here.
class StartSource:
    """
    Defines the interface concrete class implementation that drives code generation.

    Holds a list of registered visitors; calling the instance stores the model
    argument object and dispatches each visitor's startSourceFilesVisit().
    """

    def __init__(self):
        """Constructor: start with no visitors and no model object.

        Fix: the original declared ``__visitor_list = []`` and ``__obj = None``
        as *class* attributes — a mutable-shared-state pitfall.  All state is
        now initialized per instance.
        """
        self.__visitor_list = []
        self.__obj = None

    def __call__(self, args):
        """
        Main execution point.
        Stores the argument object and calls accept() on each registered
        visitor to generate the code.
        """
        self.__obj = args
        for v in self.__visitor_list:
            self.accept(v)

    def accept(self, visitor):
        """
        The operation in Visitor design pattern that takes a visitor as an argument
        and calls the visitor's method that corresponds to this element.
        @raise Exception: if the given visitor is not a subclass of AbstractVisitor
        """
        # visitor should be extended from the AbstractVisitor class
        if issubclass(visitor.__class__, AbstractVisitor.AbstractVisitor):
            visitor.startSourceFilesVisit(self.__obj)
        else:
            DEBUG.error('StartSource.accept() - the given visitor is not a subclass of AbstractVisitor!')
            raise Exception('StartSource.accept() - the given visitor is not a subclass of AbstractVisitor!')

    def addVisitor(self, visitor):
        """
        Add a visitor to the list of visitors.
        @param visitor: the visitor to add, must be derived from AbstractVisitor.
        """
        if issubclass(visitor.__class__, AbstractVisitor.AbstractVisitor):
            self.__visitor_list.append(visitor)
        else:
            DEBUG.error('StartSource.addVisitor(v) - the given visitor is not a subclass of AbstractVisitor!')
            raise Exception('StartSource.addVisitor(v) - the given visitor is not a subclass of AbstractVisitor!')

    def getObj(self):
        """
        Return the object to the visitor.
        """
        return self.__obj
| 32.535354 | 114 | 0.632723 |
6bdfd24de0d344e30d7d77b05c14e6175604549e | 6,795 | py | Python | goji/full_node/mempool_check_conditions.py | zcomputerwiz/gojiv2-blockchain | 3be896d4dcb48a734f8d2a901ab5648201fbd4d7 | [
"Apache-2.0"
] | 2 | 2022-02-09T04:30:19.000Z | 2022-03-19T14:01:43.000Z | goji/full_node/mempool_check_conditions.py | zcomputerwiz/goji-blockchain | 3be896d4dcb48a734f8d2a901ab5648201fbd4d7 | [
"Apache-2.0"
] | 1 | 2021-12-30T09:17:47.000Z | 2021-12-30T09:17:47.000Z | goji/full_node/mempool_check_conditions.py | zcomputerwiz/gojiv2-blockchain | 3be896d4dcb48a734f8d2a901ab5648201fbd4d7 | [
"Apache-2.0"
] | 1 | 2022-03-15T08:42:52.000Z | 2022-03-15T08:42:52.000Z | import logging
import time
from typing import Dict, List, Optional
from clvm_rs import STRICT_MODE
from goji.consensus.cost_calculator import NPCResult
from goji.full_node.generator import create_generator_args, setup_generator_args
from goji.types.blockchain_format.program import NIL
from goji.types.coin_record import CoinRecord
from goji.types.condition_with_args import ConditionWithArgs
from goji.types.generator_types import BlockGenerator
from goji.types.name_puzzle_condition import NPC
from goji.util.clvm import int_from_bytes
from goji.util.condition_tools import ConditionOpcode
from goji.util.errors import Err
from goji.util.ints import uint32, uint64, uint16
from goji.wallet.puzzles.generator_loader import GENERATOR_FOR_SINGLE_COIN_MOD
from goji.wallet.puzzles.rom_bootstrap_generator import get_generator
GENERATOR_MOD = get_generator()
log = logging.getLogger(__name__)
def mempool_assert_absolute_block_height_exceeds(
    condition: ConditionWithArgs, prev_transaction_block_height: uint32
) -> Optional[Err]:
    """Validate ASSERT_HEIGHT_ABSOLUTE: the chain must have reached the block
    height encoded in the condition.

    Returns None on success, otherwise the specific Err value.
    """
    try:
        required_height = int_from_bytes(condition.vars[0])
    except ValueError:
        # The condition argument is not a well-formed integer.
        return Err.INVALID_CONDITION
    if prev_transaction_block_height >= required_height:
        return None
    return Err.ASSERT_HEIGHT_ABSOLUTE_FAILED
def mempool_assert_relative_block_height_exceeds(
    condition: ConditionWithArgs, unspent: CoinRecord, prev_transaction_block_height: uint32
) -> Optional[Err]:
    """Validate ASSERT_HEIGHT_RELATIVE: the spent coin must be old enough —
    the chain height must have reached confirmation height + required age.

    Returns None on success, otherwise the specific Err value.
    """
    try:
        required_age = int_from_bytes(condition.vars[0])
        unlock_height = required_age + unspent.confirmed_block_index
    except ValueError:
        return Err.INVALID_CONDITION
    if prev_transaction_block_height >= unlock_height:
        return None
    return Err.ASSERT_HEIGHT_RELATIVE_FAILED
def mempool_assert_absolute_time_exceeds(condition: ConditionWithArgs, timestamp: uint64) -> Optional[Err]:
    """Validate ASSERT_SECONDS_ABSOLUTE: the supplied timestamp (or the wall
    clock, when none is given) must have reached the seconds value encoded in
    the condition.

    Returns None on success, otherwise the specific Err value.
    """
    try:
        required_seconds = int_from_bytes(condition.vars[0])
    except ValueError:
        return Err.INVALID_CONDITION
    # Fall back to the current wall-clock time when no timestamp was supplied.
    now = timestamp if timestamp is not None else uint64(int(time.time()))
    if now >= required_seconds:
        return None
    return Err.ASSERT_SECONDS_ABSOLUTE_FAILED
def mempool_assert_relative_time_exceeds(
    condition: ConditionWithArgs, unspent: CoinRecord, timestamp: uint64
) -> Optional[Err]:
    """Validate ASSERT_SECONDS_RELATIVE: enough seconds must have elapsed
    since the spent coin's recorded timestamp.

    Returns None on success, otherwise the specific Err value.
    """
    try:
        required_delta = int_from_bytes(condition.vars[0])
    except ValueError:
        return Err.INVALID_CONDITION
    # Fall back to the current wall-clock time when no timestamp was supplied.
    now = timestamp if timestamp is not None else uint64(int(time.time()))
    if now >= required_delta + unspent.timestamp:
        return None
    return Err.ASSERT_SECONDS_RELATIVE_FAILED
def get_name_puzzle_conditions(
    generator: BlockGenerator, max_cost: int, *, cost_per_byte: int, safe_mode: bool
) -> NPCResult:
    """Run the block generator and collect per-coin (name, puzzle, conditions).

    Charges the serialized generator's byte cost against `max_cost` up front,
    then executes it (in strict mode when `safe_mode` is set) and converts the
    raw CLVM result into a list of NPC records.  All failures are reported via
    the NPCResult error field rather than raised.
    """
    block_program, block_program_args = setup_generator_args(generator)
    # Pay for the generator bytes first; a negative remainder means the block
    # cannot possibly fit its own program.
    max_cost -= len(bytes(generator.program)) * cost_per_byte
    if max_cost < 0:
        return NPCResult(uint16(Err.INVALID_BLOCK_COST.value), [], uint64(0))
    flags = STRICT_MODE if safe_mode else 0
    try:
        err, result, clvm_cost = GENERATOR_MOD.run_as_generator(max_cost, flags, block_program, block_program_args)
        if err is not None:
            return NPCResult(uint16(err), [], uint64(0))
        else:
            npc_list = []
            for r in result:
                conditions = []
                for c in r.conditions:
                    cwa = []
                    for cond_list in c[1]:
                        # Opcodes come back as raw ints; re-wrap as enum bytes.
                        cwa.append(ConditionWithArgs(ConditionOpcode(bytes([cond_list.opcode])), cond_list.vars))
                    conditions.append((ConditionOpcode(bytes([c[0]])), cwa))
                npc_list.append(NPC(r.coin_name, r.puzzle_hash, conditions))
            return NPCResult(None, npc_list, uint64(clvm_cost))
    except BaseException as e:
        # Deliberately broad: any runtime failure of untrusted generator code
        # is mapped to GENERATOR_RUNTIME_ERROR instead of crashing the node.
        log.debug(f"get_name_puzzle_condition failed: {e}")
        return NPCResult(uint16(Err.GENERATOR_RUNTIME_ERROR.value), [], uint64(0))
def get_puzzle_and_solution_for_coin(generator: BlockGenerator, coin_name: bytes, max_cost: int):
    """Run the single-coin generator to recover the puzzle and solution that
    spend `coin_name`.

    Returns an (error, puzzle, solution) triple: (None, puzzle, solution) on
    success, or (exception, None, None) on any failure — errors are returned,
    not raised.
    """
    try:
        block_program = generator.program
        if not generator.generator_args:
            # No back-references: the generator takes a single NIL argument.
            block_program_args = [NIL]
        else:
            block_program_args = create_generator_args(generator.generator_refs())
        cost, result = GENERATOR_FOR_SINGLE_COIN_MOD.run_with_cost(
            max_cost, block_program, block_program_args, coin_name
        )
        # Result layout: first element is the puzzle, second the solution.
        puzzle = result.first()
        solution = result.rest().first()
        return None, puzzle, solution
    except Exception as e:
        return e, None, None
def mempool_check_conditions_dict(
    unspent: CoinRecord,
    conditions_dict: Dict[ConditionOpcode, List[ConditionWithArgs]],
    prev_transaction_block_height: uint32,
    timestamp: uint64,
) -> Optional[Err]:
    """
    Check all conditions against current state.
    Returns None when every condition passes, otherwise the first Err found.
    """
    for con_list in conditions_dict.values():
        cvp: ConditionWithArgs
        for cvp in con_list:
            error: Optional[Err] = None
            if cvp.opcode is ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE:
                error = mempool_assert_absolute_block_height_exceeds(cvp, prev_transaction_block_height)
            elif cvp.opcode is ConditionOpcode.ASSERT_HEIGHT_RELATIVE:
                error = mempool_assert_relative_block_height_exceeds(cvp, unspent, prev_transaction_block_height)
            elif cvp.opcode is ConditionOpcode.ASSERT_SECONDS_ABSOLUTE:
                error = mempool_assert_absolute_time_exceeds(cvp, timestamp)
            elif cvp.opcode is ConditionOpcode.ASSERT_SECONDS_RELATIVE:
                error = mempool_assert_relative_time_exceeds(cvp, unspent, timestamp)
            # The opcodes below are asserted unreachable here — presumably
            # they are validated earlier in the pipeline before the mempool
            # time/height checks run; TODO confirm against the caller.
            elif cvp.opcode is ConditionOpcode.ASSERT_MY_COIN_ID:
                assert False
            elif cvp.opcode is ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT:
                assert False
            elif cvp.opcode is ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT:
                assert False
            elif cvp.opcode is ConditionOpcode.ASSERT_MY_PARENT_ID:
                assert False
            elif cvp.opcode is ConditionOpcode.ASSERT_MY_PUZZLEHASH:
                assert False
            elif cvp.opcode is ConditionOpcode.ASSERT_MY_AMOUNT:
                assert False
            if error:
                return error
    return None
| 38.828571 | 115 | 0.707138 |
a45d04b8b5ec815dd42897f97a66ec573788249f | 4,981 | py | Python | venv1/Lib/site-packages/tensorflow/python/tools/strip_unused_lib.py | Soum-Soum/Tensorflow_Face_Finder | fec6c15d2df7012608511ad87f4b55731bf99478 | [
"Apache-2.0",
"MIT"
] | 6 | 2022-02-04T18:12:24.000Z | 2022-03-21T23:57:12.000Z | Lib/site-packages/tensorflow/python/tools/strip_unused_lib.py | shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings | 1fa4cd6a566c8745f455fc3d2273208f21f88ced | [
"bzip2-1.0.6"
] | 1 | 2021-05-20T00:58:04.000Z | 2021-05-20T00:58:04.000Z | Lib/site-packages/tensorflow/python/tools/strip_unused_lib.py | shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings | 1fa4cd6a566c8745f455fc3d2273208f21f88ced | [
"bzip2-1.0.6"
] | 1 | 2022-02-08T03:53:23.000Z | 2022-02-08T03:53:23.000Z | # pylint: disable=g-bad-file-header
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to remove unneeded nodes from a GraphDefs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from google.protobuf import text_format
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import graph_util
from tensorflow.python.platform import gfile
def strip_unused(input_graph_def, input_node_names, output_node_names,
                 placeholder_type_enum):
    """Removes unused nodes from a GraphDef.
    Args:
      input_graph_def: A graph with nodes we want to prune.
      input_node_names: A list of the nodes we use as inputs.
      output_node_names: A list of the output nodes.
      placeholder_type_enum: The AttrValue enum for the placeholder data type, or
          a list that specifies one value per input node name.
    Returns:
      A `GraphDef` with all unnecessary ops removed.
    Raises:
      ValueError: If any element in `input_node_names` refers to a tensor instead
        of an operation.
      KeyError: If any element in `input_node_names` is not found in the graph.
    """
    for name in input_node_names:
        if ":" in name:
            raise ValueError("Name '%s' appears to refer to a Tensor, "
                             "not a Operation." % name)

    # Here we replace the nodes we're going to override as inputs with
    # placeholders so that any unused nodes that are inputs to them are
    # automatically stripped out by extract_sub_graph().
    # (Idiom fix: set(...) instead of an identity set comprehension.)
    not_found = set(input_node_names)
    inputs_replaced_graph_def = graph_pb2.GraphDef()
    for node in input_graph_def.node:
        if node.name in input_node_names:
            not_found.remove(node.name)
            placeholder_node = node_def_pb2.NodeDef()
            placeholder_node.op = "Placeholder"
            placeholder_node.name = node.name
            if isinstance(placeholder_type_enum, list):
                # Per-input dtypes: pick the one matching this node's position.
                input_node_index = input_node_names.index(node.name)
                placeholder_node.attr["dtype"].CopyFrom(
                    attr_value_pb2.AttrValue(type=placeholder_type_enum[
                        input_node_index]))
            else:
                placeholder_node.attr["dtype"].CopyFrom(
                    attr_value_pb2.AttrValue(type=placeholder_type_enum))
            # Preserve shape metadata so downstream shape inference still works.
            if "_output_shapes" in node.attr:
                placeholder_node.attr["_output_shapes"].CopyFrom(node.attr[
                    "_output_shapes"])
            inputs_replaced_graph_def.node.extend([placeholder_node])
        else:
            inputs_replaced_graph_def.node.extend([copy.deepcopy(node)])

    if not_found:
        raise KeyError("The following input nodes were not found: %s\n" % not_found)

    output_graph_def = graph_util.extract_sub_graph(inputs_replaced_graph_def,
                                                    output_node_names)
    return output_graph_def
def strip_unused_from_files(input_graph, input_binary, output_graph,
                            output_binary, input_node_names, output_node_names,
                            placeholder_type_enum):
    """Removes unused nodes from a graph file.

    Reads a (binary or text) GraphDef from `input_graph`, strips everything
    not needed to compute `output_node_names` from `input_node_names`, and
    writes the result to `output_graph`.  Returns -1 on argument errors.
    """
    if not gfile.Exists(input_graph):
        print("Input graph file '" + input_graph + "' does not exist!")
        return -1

    if not output_node_names:
        print("You need to supply the name of a node to --output_node_names.")
        return -1

    input_graph_def = graph_pb2.GraphDef()
    # Binary protos must be read in binary mode; text protos in text mode.
    mode = "rb" if input_binary else "r"
    with gfile.FastGFile(input_graph, mode) as f:
        if input_binary:
            input_graph_def.ParseFromString(f.read())
        else:
            text_format.Merge(f.read(), input_graph_def)

    # Node-name arguments arrive as comma-separated CLI strings.
    output_graph_def = strip_unused(input_graph_def,
                                    input_node_names.split(","),
                                    output_node_names.split(","),
                                    placeholder_type_enum)

    if output_binary:
        with gfile.GFile(output_graph, "wb") as f:
            f.write(output_graph_def.SerializeToString())
    else:
        with gfile.GFile(output_graph, "w") as f:
            f.write(text_format.MessageToString(output_graph_def))
    print("%d ops in the final graph." % len(output_graph_def.node))
| 40.495935 | 81 | 0.678579 |
d6d8489a24cdd946d5ba543063f1cf3ea528ac45 | 766 | py | Python | alipay/aop/api/response/AlipayFundTransInvoiceSyncdataModifyResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/response/AlipayFundTransInvoiceSyncdataModifyResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/response/AlipayFundTransInvoiceSyncdataModifyResponse.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayFundTransInvoiceSyncdataModifyResponse(AlipayResponse):
    """Response model for the alipay.fund.trans.invoice.syncdata.modify API."""

    def __init__(self):
        super(AlipayFundTransInvoiceSyncdataModifyResponse, self).__init__()
        # URL of the invoice returned by the gateway; None until parsed.
        self._invoice_url = None

    @property
    def invoice_url(self):
        """The invoice URL extracted from the gateway response, or None."""
        return self._invoice_url

    @invoice_url.setter
    def invoice_url(self, value):
        self._invoice_url = value

    def parse_response_content(self, response_content):
        """Populate fields from the raw response; only sets invoice_url when
        the key is present, leaving the default otherwise."""
        response = super(AlipayFundTransInvoiceSyncdataModifyResponse, self).parse_response_content(response_content)
        if 'invoice_url' in response:
            self.invoice_url = response['invoice_url']
| 29.461538 | 117 | 0.733681 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.