hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
163c2dddfef4d93c8c4b83f43f67bab6d45d5c16 | 12,855 | py | Python | tests/core/contracts/test_contract_transact_interface.py | CPChain/fusion | 63b6913010e8e5b296a1900c59592c8fd1802c2e | [
"MIT"
] | 5 | 2018-12-19T02:37:18.000Z | 2022-01-26T02:52:50.000Z | tests/core/contracts/test_contract_transact_interface.py | CPChain/fusion | 63b6913010e8e5b296a1900c59592c8fd1802c2e | [
"MIT"
] | null | null | null | tests/core/contracts/test_contract_transact_interface.py | CPChain/fusion | 63b6913010e8e5b296a1900c59592c8fd1802c2e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pytest
from eth_utils import (
to_bytes,
)
from cpc_fusion._utils.empty import (
empty,
)
from cpc_fusion.exceptions import (
ValidationError,
)
# Ignore warning in pyethereum 1.6 - will go away with the upgrade
pytestmark = pytest.mark.filterwarnings("ignore:implicit cast from 'char *'")
@pytest.fixture()
def math_contract(web3, MathContract, address_conversion_func):
    """Deploy MathContract and return an instance bound to the new address."""
    txn_hash = MathContract.constructor().transact()
    receipt = web3.eth.waitForTransactionReceipt(txn_hash)
    assert receipt is not None
    deployed_at = address_conversion_func(receipt['contractAddress'])
    contract = MathContract(address=deployed_at)
    assert contract.address == deployed_at
    return contract
@pytest.fixture()
def string_contract(web3, StringContract, address_conversion_func):
    """Deploy StringContract initialised with "Caqalai" and return an instance."""
    txn_hash = StringContract.constructor("Caqalai").transact()
    receipt = web3.eth.waitForTransactionReceipt(txn_hash)
    assert receipt is not None
    deployed_at = address_conversion_func(receipt['contractAddress'])
    contract = StringContract(address=deployed_at)
    assert contract.address == deployed_at
    return contract
@pytest.fixture()
def fallback_function_contract(web3, FallballFunctionContract, address_conversion_func):
    """Deploy the fallback-function test contract and return an instance.

    Note: "Fallball" is the (misspelled) name of the upstream fixture and
    cannot be renamed here.
    """
    txn_hash = FallballFunctionContract.constructor().transact()
    receipt = web3.eth.waitForTransactionReceipt(txn_hash)
    assert receipt is not None
    deployed_at = address_conversion_func(receipt['contractAddress'])
    contract = FallballFunctionContract(address=deployed_at)
    assert contract.address == deployed_at
    return contract
@pytest.fixture()
def arrays_contract(web3, ArraysContract, address_conversion_func):
    """Deploy ArraysContract seeded with a bytes32 array and a byte array."""
    # bytes_32 = [keccak('0'), keccak('1')]
    bytes32_array = [
        b'\x04HR\xb2\xa6p\xad\xe5@~x\xfb(c\xc5\x1d\xe9\xfc\xb9eB\xa0q\x86\xfe:\xed\xa6\xbb\x8a\x11m',  # noqa: E501
        b'\xc8\x9e\xfd\xaaT\xc0\xf2\x0cz\xdfa(\x82\xdf\tP\xf5\xa9Qc~\x03\x07\xcd\xcbLg/)\x8b\x8b\xc6',  # noqa: E501
    ]
    byte_arr = [b'\xff', b'\xff', b'\xff', b'\xff']
    deploy_txn = ArraysContract.constructor(bytes32_array, byte_arr).transact()
    deploy_receipt = web3.eth.waitForTransactionReceipt(deploy_txn)
    assert deploy_receipt is not None
    address = address_conversion_func(deploy_receipt['contractAddress'])
    _arrays_contract = ArraysContract(address=address)
    # Consistency fix: every sibling fixture verifies the bound address;
    # this one previously skipped the check.
    assert _arrays_contract.address == address
    return _arrays_contract
@pytest.fixture()
def payable_tester_contract(web3, PayableTesterContract, address_conversion_func):
    """Deploy PayableTesterContract and return an instance bound to it."""
    txn_hash = PayableTesterContract.constructor().transact()
    receipt = web3.eth.waitForTransactionReceipt(txn_hash)
    assert receipt is not None
    deployed_at = address_conversion_func(receipt['contractAddress'])
    contract = PayableTesterContract(address=deployed_at)
    assert contract.address == deployed_at
    return contract
def test_transacting_with_contract_no_arguments(web3, math_contract, transact, call):
    """A plain increment() transaction bumps the counter by exactly one."""
    before = call(contract=math_contract, contract_function='counter')
    txn_hash = transact(contract=math_contract, contract_function='increment')
    receipt = web3.eth.waitForTransactionReceipt(txn_hash)
    assert receipt is not None
    after = call(contract=math_contract, contract_function='counter')
    assert after - before == 1
def test_transact_not_sending_ether_to_nonpayable_function(
        web3,
        payable_tester_contract,
        transact,
        call):
    # A zero-value call to a non-payable function must succeed and flip
    # the contract's wasCalled flag.
    initial_value = call(contract=payable_tester_contract,
                         contract_function='wasCalled')
    assert initial_value is False
    txn_hash = transact(contract=payable_tester_contract,
                        contract_function='doNoValueCall')
    txn_receipt = web3.eth.waitForTransactionReceipt(txn_hash)
    assert txn_receipt is not None
    final_value = call(contract=payable_tester_contract,
                       contract_function='wasCalled')
    assert final_value is True
def test_transact_sending_ether_to_nonpayable_function(
        web3,
        payable_tester_contract,
        transact,
        call):
    """Sending value with a non-payable function must raise client-side.

    The transaction is rejected by validation before being sent, so no
    receipt exists and the contract state stays untouched.
    """
    initial_value = call(contract=payable_tester_contract,
                         contract_function='wasCalled')
    assert initial_value is False
    with pytest.raises(ValidationError):
        transact(contract=payable_tester_contract,
                 contract_function='doNoValueCall',
                 tx_params={'value': 1})
    # BUGFIX: the old code waited on `txn_hash` after this block, but
    # txn_hash is never bound when ValidationError is raised, which made
    # the test die with a NameError instead of checking state.
    final_value = call(contract=payable_tester_contract,
                       contract_function='wasCalled')
    assert final_value is False
@pytest.mark.parametrize(
    'transact_args,transact_kwargs',
    (
        ((5,), {}),
        (tuple(), {'amt': 5}),
    ),
)
def test_transacting_with_contract_with_arguments(web3,
                                                  math_contract,
                                                  transact,
                                                  call,
                                                  transact_args,
                                                  transact_kwargs):
    # The increment amount may be given positionally or as the `amt`
    # keyword; both spellings must behave identically.
    initial_value = call(contract=math_contract,
                         contract_function='counter')
    txn_hash = transact(contract=math_contract,
                        contract_function='increment',
                        func_args=transact_args,
                        func_kwargs=transact_kwargs)
    txn_receipt = web3.eth.waitForTransactionReceipt(txn_hash)
    assert txn_receipt is not None
    final_value = call(contract=math_contract,
                       contract_function='counter')
    assert final_value - initial_value == 5
def test_deploy_when_default_account_is_set(web3,
                                            wait_for_transaction,
                                            STRING_CONTRACT):
    # Deployments must be sent from web3.eth.defaultAccount when one is
    # configured.
    web3.eth.defaultAccount = web3.eth.accounts[1]
    assert web3.eth.defaultAccount is not empty
    StringContract = web3.eth.contract(**STRING_CONTRACT)
    deploy_txn = StringContract.constructor("Caqalai").transact()
    web3.eth.waitForTransactionReceipt(deploy_txn)
    txn_after = web3.eth.getTransaction(deploy_txn)
    assert txn_after['from'] == web3.eth.defaultAccount
def test_transact_when_default_account_is_set(web3,
                                              wait_for_transaction,
                                              math_contract,
                                              transact):
    # Contract function transactions must also originate from the
    # configured defaultAccount.
    web3.eth.defaultAccount = web3.eth.accounts[1]
    assert web3.eth.defaultAccount is not empty
    txn_hash = transact(contract=math_contract,
                        contract_function='increment')
    wait_for_transaction(web3, txn_hash)
    txn_after = web3.eth.getTransaction(txn_hash)
    assert txn_after['from'] == web3.eth.defaultAccount
def test_transacting_with_contract_with_string_argument(web3, string_contract, transact, call):
    # cpc_abi will pass as raw bytes, no encoding
    # unless we encode ourselves
    txn_hash = transact(contract=string_contract,
                        contract_function='setValue',
                        func_args=["ÄLÄMÖLÖ".encode('utf8')])
    txn_receipt = web3.eth.waitForTransactionReceipt(txn_hash)
    assert txn_receipt is not None
    final_value = call(contract=string_contract,
                       contract_function='getValue')
    # the call layer decodes back to text, so compare against the str
    assert final_value == "ÄLÄMÖLÖ"
def test_transacting_with_contract_with_bytes32_array_argument(web3,
                                                               arrays_contract,
                                                               transact,
                                                               call):
    # A bytes32[] argument must round-trip unchanged through a transaction
    # and a subsequent call.
    # new_bytes32_array = [keccak('1'), keccak('2'), keccak('3')]
    new_bytes32_array = [
        b'\xc8\x9e\xfd\xaaT\xc0\xf2\x0cz\xdfa(\x82\xdf\tP\xf5\xa9Qc~\x03\x07\xcd\xcbLg/)\x8b\x8b\xc6', # noqa: E501
        b'\xad|[\xef\x02x\x16\xa8\x00\xda\x176DO\xb5\x8a\x80~\xf4\xc9`;xHg?~:h\xeb\x14\xa5',
        b"*\x80\xe1\xef\x1dxB\xf2\x7f.k\xe0\x97+\xb7\x08\xb9\xa15\xc3\x88`\xdb\xe7<'\xc3Hl4\xf4\xde", # noqa: E501
    ]
    txn_hash = transact(contract=arrays_contract,
                        contract_function="setBytes32Value",
                        func_args=[new_bytes32_array])
    txn_receipt = web3.eth.waitForTransactionReceipt(txn_hash)
    assert txn_receipt is not None
    final_value = call(contract=arrays_contract,
                       contract_function="getBytes32Value")
    assert final_value == new_bytes32_array
def test_transacting_with_contract_with_byte_array_argument(web3, arrays_contract, transact, call):
    # A byte[] argument must round-trip unchanged as a list of single bytes.
    new_byte_array = [b'\x03', b'\x03', b'\x03', b'\x03', b'\x03', b'\x03']
    txn_hash = transact(contract=arrays_contract,
                        contract_function='setByteValue',
                        func_args=[new_byte_array])
    txn_receipt = web3.eth.waitForTransactionReceipt(txn_hash)
    assert txn_receipt is not None
    final_value = call(contract=arrays_contract,
                       contract_function='getByteValue')
    assert final_value == new_byte_array
def test_transacting_with_contract_respects_explicit_gas(web3,
                                                         STRING_CONTRACT,
                                                         skip_if_testrpc,
                                                         wait_for_block,
                                                         call,
                                                         transact):
    # An explicit gas value passed via tx_kwargs must end up verbatim in
    # the mined transaction (no estimation/buffering applied).
    skip_if_testrpc(web3)
    wait_for_block(web3)
    StringContract = web3.eth.contract(**STRING_CONTRACT)
    deploy_txn = StringContract.constructor("Caqalai").transact()
    deploy_receipt = web3.eth.waitForTransactionReceipt(deploy_txn, 30)
    assert deploy_receipt is not None
    string_contract = StringContract(address=deploy_receipt['contractAddress'])
    # cpc_abi will pass as raw bytes, no encoding
    # unless we encode ourselves
    txn_hash = transact(contract=string_contract,
                        contract_function='setValue',
                        func_args=[to_bytes(text="ÄLÄMÖLÖ")],
                        tx_kwargs={'gas': 200000})
    txn_receipt = web3.eth.waitForTransactionReceipt(txn_hash, 30)
    assert txn_receipt is not None
    final_value = call(contract=string_contract,
                       contract_function='getValue')
    assert to_bytes(text=final_value) == to_bytes(text="ÄLÄMÖLÖ")
    txn = web3.eth.getTransaction(txn_hash)
    assert txn['gas'] == 200000
def test_auto_gas_computation_when_transacting(web3,
                                               STRING_CONTRACT,
                                               skip_if_testrpc,
                                               wait_for_block,
                                               call,
                                               transact):
    # Without an explicit gas value the transact helper estimates gas.
    skip_if_testrpc(web3)
    wait_for_block(web3)
    StringContract = web3.eth.contract(**STRING_CONTRACT)
    deploy_txn = StringContract.constructor("Caqalai").transact()
    deploy_receipt = web3.eth.waitForTransactionReceipt(deploy_txn, 30)
    assert deploy_receipt is not None
    string_contract = StringContract(address=deploy_receipt['contractAddress'])
    gas_estimate = string_contract.functions.setValue(to_bytes(text="ÄLÄMÖLÖ")).estimateGas()
    # cpc_abi will pass as raw bytes, no encoding
    # unless we encode ourselves
    txn_hash = transact(contract=string_contract,
                        contract_function="setValue",
                        func_args=[to_bytes(text="ÄLÄMÖLÖ")])
    txn_receipt = web3.eth.waitForTransactionReceipt(txn_hash, 30)
    assert txn_receipt is not None
    final_value = call(contract=string_contract,
                       contract_function='getValue')
    assert to_bytes(text=final_value) == to_bytes(text="ÄLÄMÖLÖ")
    txn = web3.eth.getTransaction(txn_hash)
    # NOTE(review): the library apparently adds a flat 100000 buffer on
    # top of the estimate — confirm against cpc_fusion's transact code.
    assert txn['gas'] == gas_estimate + 100000
def test_fallback_transacting_with_contract(web3, fallback_function_contract, call):
    # Transacting via the fallback function must bump the contract's
    # data counter by one.
    initial_value = call(contract=fallback_function_contract,
                         contract_function='getData')
    txn_hash = fallback_function_contract.fallback.transact()
    txn_receipt = web3.eth.waitForTransactionReceipt(txn_hash)
    assert txn_receipt is not None
    final_value = call(contract=fallback_function_contract,
                       contract_function='getData')
    assert final_value - initial_value == 1
| 39.675926 | 116 | 0.644963 |
1c989c93ecfc3ea1e0e9e161aff9b91a3c32bf2c | 1,128 | py | Python | release/stubs.min/System/__init___parts/MethodAccessException.py | tranconbv/ironpython-stubs | a601759e6c6819beff8e6b639d18a24b7e351851 | [
"MIT"
] | null | null | null | release/stubs.min/System/__init___parts/MethodAccessException.py | tranconbv/ironpython-stubs | a601759e6c6819beff8e6b639d18a24b7e351851 | [
"MIT"
] | null | null | null | release/stubs.min/System/__init___parts/MethodAccessException.py | tranconbv/ironpython-stubs | a601759e6c6819beff8e6b639d18a24b7e351851 | [
"MIT"
class MethodAccessException(MemberAccessException):
 """
 The exception that is thrown when there is an invalid attempt to access a method,such as accessing a private method from partially trusted code.
 MethodAccessException()
 MethodAccessException(message: str)
 MethodAccessException(message: str,inner: Exception)
 """
 # NOTE: auto-generated IronPython stub; bodies are placeholders only.
 def ZZZ(self):
  """hardcoded/mock instance of the class"""
  return MethodAccessException()
 # NOTE(review): ZZZ() is invoked without its `self` argument, so this
 # class body likely raises TypeError if the module is ever imported —
 # verify the stub is only parsed by tooling, never executed.
 instance=ZZZ()
 """hardcoded/returns an instance of the class"""
 def __init__(self,*args):
  """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
  pass
 @staticmethod
 def __new__(self,message=None,inner=None):
  """
  __new__(cls: type)
  __new__(cls: type,message: str)
  __new__(cls: type,message: str,inner: Exception)
  __new__(cls: type,info: SerializationInfo,context: StreamingContext)
  """
  pass
 def __reduce_ex__(self,*args):
  pass
 def __str__(self,*args):
  pass
 # presumably mirrors the .NET SerializeObjectState event, stubbed out
 SerializeObjectState=None
| 35.25 | 215 | 0.725177 |
c4d9102688afd3208121fa78755b587d1cc285b4 | 16,796 | py | Python | jsb/lib/boot.py | NURDspace/jsonbot | 400dce602700a93db80e10707c3785a0fbdfaaeb | [
"MIT"
] | 1 | 2019-04-12T12:20:12.000Z | 2019-04-12T12:20:12.000Z | jsb/lib/boot.py | Petraea/jsonbot | 04a3a0c586a8a1a3b0e5316d10cda16df9be415b | [
"MIT"
] | null | null | null | jsb/lib/boot.py | Petraea/jsonbot | 04a3a0c586a8a1a3b0e5316d10cda16df9be415b | [
"MIT"
] | 1 | 2020-05-06T18:46:53.000Z | 2020-05-06T18:46:53.000Z | # jsb/boot.py
#
#
""" admin related data and functions. """
## jsb imports
from jsb.utils.generic import checkpermissions, isdebian, botuser
from jsb.lib.persist import Persist
from jsb.lib.aliases import savealiases
from jsb.utils.exception import handle_exception
from jsb.lib.datadir import makedirs, getdatadir
from jsb.lib.config import Config, getmainconfig
from jsb.lib.jsbimport import _import
from jsb.utils.lazydict import LazyDict
from jsb.memcached import startmcdaemon
from jsb.lib.runner import threadrunner
## basic imports
import logging
import os
import sys
import types
import copy
## paths
sys.path.insert(0, os.getcwd())
sys.path.insert(0, os.getcwd() + os.sep + '..')
## defines
# default plugin packages scanned at boot; extended at runtime when a
# source checkout or the db layer is detected
plugin_packages = ['jsb.plugs.core', 'jsb.plugs.common', 'jsb.plugs.socket', 'myplugs', 'myplugs.socket', 'myplugs.common']
default_plugins = ['jsb.plugs.core.admin', 'jsb.plugs.core.dispatch', 'jsb.plugs.core.plug', 'jsb.lib.periodical']
default_deny = ["jsb.plugs.socket.fish", ]
logging.info("default plugins are %s" % str(default_plugins))
loaded = False
# the following tables are lazily created as Persist objects in boot()
cmndtable = None
pluginlist = None
callbacktable = None
retable = None
cmndperms = None
shorttable = None
timestamps = None
plugwhitelist = None
plugblacklist = None
# shorthand for a deep copy
cpy = copy.deepcopy
## scandir function
def scandir(d, dbenable=False):
from jsb.lib.plugins import plugs
changed = []
try:
changed = checktimestamps(d, dbenable)
mods = []
if changed:
logging.debug("files changed %s" % str(changed))
for plugfile in changed:
if not dbenable and os.sep + 'db' in plugfile: logging.warn("db not enabled .. skipping %s" % plugfile) ; continue
return changed
except Exception, ex: logging.error("boot - can't read %s dir." % d) ; handle_exception()
if changed: logging.debug("%s files changed -=- %s" % (len(changed), str(changed)))
return changed
## boot function
def boot(ddir=None, force=False, encoding="utf-8", umask=None, saveperms=True, fast=False, clear=False, loadall=False):
    """ initialize the bot.

        sets up the datadir, pid file, persisted runtime tables and the
        plugin packages, then reloads changed plugins and regenerates
        the command/callback tables when needed.
    """
    global plugin_packages
    # refuse to run as root; os.getuid is missing on some platforms
    try:
        if os.getuid() == 0:
            print "don't run the bot as root"
            os._exit(1)
    except AttributeError: pass
    logging.warn("starting!")
    from jsb.lib.datadir import getdatadir, setdatadir
    if ddir: setdatadir(ddir)
    origdir = ddir
    ddir = ddir or getdatadir()
    if not ddir: logging.error("can't determine datadir to boot from") ; raise Exception("can't determine datadir")
    if not ddir in sys.path: sys.path.append(ddir)
    makedirs(ddir)
    # pick the run directory: system-wide when running as the jsb user
    if os.path.isdir("/var/run/jsb") and botuser() == "jsb": rundir = "/var/run/jsb"
    else: rundir = ddir + os.sep + "run"
    # write a pid file (best effort)
    try:
        k = open(rundir + os.sep + 'jsb.pid','w')
        k.write(str(os.getpid()))
        k.close()
    except IOError: pass
    # python2: re-expose sys.setdefaultencoding via reload
    try:
        reload(sys)
        sys.setdefaultencoding(encoding)
    except (AttributeError, IOError): handle_exception()
    try:
        if not umask: checkpermissions(getdatadir(), 0700)
        else: checkpermissions(getdatadir(), umask)
    except: handle_exception()
    from jsb.lib.plugins import plugs
    global loaded
    global cmndtable
    global retable
    global pluginlist
    global callbacktable
    global shorttable
    global cmndperms
    global timestamps
    global plugwhitelist
    global plugblacklist
    # lazily create the persisted runtime tables; clear=True empties them
    if not retable: retable = Persist(rundir + os.sep + 'retable')
    if clear: retable.data = {}
    if not cmndtable: cmndtable = Persist(rundir + os.sep + 'cmndtable')
    if clear: cmndtable.data = {}
    if not pluginlist: pluginlist = Persist(rundir + os.sep + 'pluginlist')
    if clear: pluginlist.data = []
    if not callbacktable: callbacktable = Persist(rundir + os.sep + 'callbacktable')
    if clear: callbacktable.data = {}
    if not shorttable: shorttable = Persist(rundir + os.sep + 'shorttable')
    if clear: shorttable.data = {}
    if not timestamps: timestamps = Persist(rundir + os.sep + 'timestamps')
    #if clear: timestamps.data = {}
    if not plugwhitelist: plugwhitelist = Persist(rundir + os.sep + 'plugwhitelist')
    if not plugwhitelist.data: plugwhitelist.data = []
    if not plugblacklist: plugblacklist = Persist(rundir + os.sep + 'plugblacklist')
    if not plugblacklist.data: plugblacklist.data = []
    if not cmndperms: cmndperms = Config('cmndperms', ddir=ddir)
    changed = []
    gotlocal = False
    dosave = clear or False
    maincfg = getmainconfig(ddir=ddir)
    logging.warn("mainconfig used is %s" % maincfg.cfile)
    # running from a source checkout: pick up plugin packages from ./jsb/plugs
    if os.path.isdir('jsb'):
        gotlocal = True
        packages = find_packages("jsb" + os.sep + "plugs")
        pluglist = [x for x in packages if not 'db' in x]
        for p in pluglist:
            if p not in plugin_packages: plugin_packages.append(p)
    # core plugins are always (re)loaded
    for plug in default_plugins:
        plugs.reload(plug, showerror=True, force=True)
    # reload user plugins that changed since last boot
    changed = scandir(getdatadir() + os.sep + 'myplugs', dbenable=maincfg.dbenable)
    if changed:
        logging.debug("myplugs has changed -=- %s" % str(changed))
        for plugfile in changed:
            if "gae" in plugfile: continue
            try: plugs.reloadfile(plugfile, force=True)
            except Exception, ex: handle_exception()
        dosave = True
    configchanges = checkconfig()
    if configchanges:
        logging.info("there are configuration changes: %s" % str(configchanges))
        for f in configchanges:
            # a changed main config forces a full table rebuild below
            if 'mainconfig' in f: force = True ; dosave = True
    # reload changed core plugins when running from a checkout
    if os.path.isdir('jsb'):
        corechanges = scandir("jsb" + os.sep + "plugs", dbenable=maincfg.dbenable)
        if corechanges:
            logging.debug("core changed -=- %s" % str(corechanges))
            for plugfile in corechanges:
                if not maincfg.dbenable and "db" in plugfile: continue
                if "gae" in plugfile: continue
                try: plugs.reloadfile(plugfile, force=True)
                except Exception, ex: handle_exception()
            dosave = True
    # optional database layer and its plugin package
    if maincfg.dbenable:
        if not 'jsb.plugs.db' in plugin_packages: plugin_packages.append("jsb.plugs.db")
        try:
            from jsb.db import getmaindb
            from jsb.db.tables import tablestxt
            db = getmaindb()
            if db: db.define(tablestxt)
        except Exception, ex: logging.warn("could not initialize database %s" % str(ex))
    else:
        logging.warn("db not enabled, set dbenable = 1 in %s to enable" % getmainconfig().cfile)
        try: plugin_packages.remove("jsb.plugs.db")
        except ValueError: pass
    # rebuild the persisted tables when forced, dirty or suspiciously small
    if force or dosave or not cmndtable.data or len(cmndtable.data) < 100:
        logging.debug("using target %s" % str(plugin_packages))
        plugs.loadall(plugin_packages, force=True)
        savecmndtable(saveperms=saveperms)
        savepluginlist()
        savecallbacktable()
        savealiases()
    logging.warn("ready")
## filestamps stuff
def checkconfig():
    """ return the list of config files changed since the last scan.

        Side effect: refreshes the global timestamps cache and saves it
        when anything changed.
    """
    changed = []
    global timestamps
    d = getdatadir() + os.sep + "config"
    for f in os.listdir(d):
        m = d + os.sep + f
        if os.path.isdir(m):
            # recurse into configuration subdirectories
            changed.extend(checktimestamps(m))
            continue
        if "__init__" in f: continue
        try:
            t = os.path.getmtime(m)
            if t > timestamps.data[m]: changed.append(m) ; timestamps.data[m] = t
        except KeyError: timestamps.data[m] = os.path.getmtime(m) ; changed.append(m)
    if changed: timestamps.save()
    return changed
def checktimestamps(d=None, dbenable=False):
    """ recursively collect .py files under *d* changed since the last
        scan, registering newly found plugin packages as a side effect.
    """
    changed = []
    for f in os.listdir(d):
        if os.path.isdir(d + os.sep + f):
            if "gae" in f: continue
            if f.startswith("."): logging.warn("skipping %s" % f) ; continue
            dname = d + os.sep + f
            if not dbenable and 'db' in dname: continue
            # build the dotted package name from the path, walking up
            # until the 'jsb' or 'myplugs' root component
            splitted = dname.split(os.sep)
            target = []
            for s in splitted[::-1]:
                target.append(s)
                if 'jsb' in s: break
                elif 'myplugs' in s: break
            package = ".".join(target[::-1])
            if not "config" in dname and package not in plugin_packages: logging.warn("adding %s to plugin_packages" % package) ; plugin_packages.append(package)
            changed.extend(checktimestamps(d + os.sep + f))
        if not f.endswith(".py"): continue
        m = d + os.sep + f
        global timestamps
        # compare against (and update) the persisted mtime cache
        try:
            t = os.path.getmtime(m)
            if t > timestamps.data[m]: changed.append(m) ; timestamps.data[m] = t ;
        except KeyError: timestamps.data[m] = os.path.getmtime(m) ; changed.append(m)
    if changed: timestamps.save()
    return changed
def find_packages(d=None):
    """ recursively collect dotted plugin package names below *d* that
        are not yet registered in plugin_packages. """
    packages = []
    for entry in os.listdir(d):
        path = d + os.sep + entry
        if not os.path.isdir(path): continue
        if "gae" in entry: continue
        if entry.startswith("."):
            logging.warn("skipping %s" % entry)
            continue
        # derive the dotted package name by walking the path backwards
        # up to the 'jsb' or 'myplugs' root component
        parts = path.split(os.sep)
        tail = []
        for part in parts[::-1]:
            tail.append(part)
            if 'jsb' in part: break
            elif 'myplugs' in part: break
        package = ".".join(tail[::-1])
        if package not in plugin_packages:
            logging.info("adding %s to plugin_packages" % package)
            packages.append(package)
        packages.extend(find_packages(path))
    return packages
## commands related commands
def savecmndtable(modname=None, saveperms=True):
    """ save command -> plugin list to db backend.

        when *modname* is given only that module's entries are merged
        into the existing tables, otherwise the tables are rebuilt.
    """
    global cmndtable
    if not cmndtable.data: cmndtable.data = {}
    if modname: target = LazyDict(cmndtable.data)
    else: target = LazyDict()
    global shorttable
    if not shorttable.data: shorttable.data = {}
    if modname: short = LazyDict(shorttable.data)
    else: short = LazyDict()
    global cmndperms
    from jsb.lib.commands import cmnds
    assert cmnds
    for cmndname, c in cmnds.iteritems():
        # NOTE: `and` binds tighter than `or`, so "subs" is always skipped
        if modname and c.modname != modname or cmndname == "subs": continue
        if cmndname and c:
            target[cmndname] = c.modname
            cmndperms[cmndname] = c.perms
        # register the part after the first dash as a short alias
        try:
            s = cmndname.split("-")[1]
            if not target.has_key(s):
                if not short.has_key(s): short[s] = [cmndname, ]
                if cmndname not in short[s]: short[s].append(cmndname)
        except (ValueError, IndexError): pass
    logging.warn("saving command table")
    assert cmndtable
    assert target
    cmndtable.data = target
    cmndtable.save()
    logging.warn("saving short table")
    assert shorttable
    assert short
    shorttable.data = short
    shorttable.save()
    logging.warn("saving RE table")
    for command in cmnds.regex:
        retable.data[command.regex] = command.modname
    assert retable
    retable.save()
    if saveperms:
        logging.warn("saving command perms")
        cmndperms.save()
def removecmnds(modname):
    """ drop every command registered by *modname* from the command table. """
    global cmndtable
    assert cmndtable
    from jsb.lib.commands import cmnds
    assert cmnds
    for name, command in cmnds.iteritems():
        if command.modname == modname:
            del cmndtable.data[name]
    cmndtable.save()
def getcmndtable():
    """ return the command -> plugin mapping, booting first if needed. """
    global cmndtable
    if not cmndtable:
        boot()
    return cmndtable.data
def getshorttable():
    """ return the short-name -> commands mapping, booting first if needed. """
    global shorttable
    if not shorttable:
        boot()
    return shorttable.data
## callbacks related commands
def savecallbacktable(modname=None):
    """ save event type -> module names callback table to disk.

        when *modname* is given only that module's callbacks are merged
        into the existing table, otherwise the table is rebuilt.
    """
    if modname: logging.warn("boot - module name is %s" % modname)
    global callbacktable
    assert callbacktable
    if not callbacktable.data: callbacktable.data = {}
    if modname: target = LazyDict(callbacktable.data)
    else: target = LazyDict()
    from jsb.lib.callbacks import first_callbacks, callbacks, last_callbacks, remote_callbacks
    # walk all four callback registries and index module names per event type
    for cb in [first_callbacks, callbacks, last_callbacks, remote_callbacks]:
        for type, cbs in cb.cbs.iteritems():
            for c in cbs:
                if modname and c.modname != modname: continue
                if not target.has_key(type): target[type] = []
                if not c.modname in target[type]: target[type].append(c.modname)
    logging.warn("saving callback table")
    assert callbacktable
    assert target
    callbacktable.data = target
    callbacktable.save()
def removecallbacks(modname):
    """ remove callbacks belonging to modname from the callback table. """
    global callbacktable
    assert callbacktable
    from jsb.lib.callbacks import first_callbacks, callbacks, last_callbacks, remote_callbacks
    # walk all four callback registries and drop the module's entries
    for cb in [first_callbacks, callbacks, last_callbacks, remote_callbacks]:
        for type, cbs in cb.cbs.iteritems():
            for c in cbs:
                if not c.modname == modname: continue
                if not callbacktable.data.has_key(type): callbacktable.data[type] = []
                if c.modname in callbacktable.data[type]: callbacktable.data[type].remove(c.modname)
    logging.warn("saving callback table")
    assert callbacktable
    callbacktable.save()
def getcallbacktable():
    """ return the event type -> modules callback table, booting if needed. """
    global callbacktable
    if not callbacktable:
        boot()
    return callbacktable.data
def getretable():
    """ return the regex -> module table for RE commands, booting if needed. """
    global retable
    if not retable:
        boot()
    return retable.data
## plugin list related commands
def savepluginlist(modname=None):
    """ save a list of available plugins to db backend.

        when *modname* is given the existing list is extended, otherwise
        it is rebuilt from the registered commands.
    """
    global pluginlist
    if not pluginlist.data: pluginlist.data = []
    if modname: target = cpy(pluginlist.data)
    else: target = []
    from jsb.lib.commands import cmnds
    assert cmnds
    for cmndname, c in cmnds.iteritems():
        if modname and c.modname != modname: continue
        if c and not c.plugname: logging.info("boot - not adding %s to pluginlist" % cmndname) ; continue
        # only enabled commands contribute their plugin name
        if c and c.plugname not in target and c.enable: target.append(c.plugname)
    assert target
    target.sort()
    logging.warn("saving plugin list")
    assert pluginlist
    pluginlist.data = target
    pluginlist.save()
def remove_plugin(modname):
    """ remove a module's commands, callbacks and pluginlist entry. """
    removecmnds(modname)
    removecallbacks(modname)
    global pluginlist
    # best effort: the plugin may not be listed (ValueError) or the list
    # may be uninitialised; a bare except also trapped SystemExit and
    # KeyboardInterrupt, so narrow it to Exception
    try: pluginlist.data.remove(modname.split(".")[-1]) ; pluginlist.save()
    except Exception: pass
def clear_tables():
    """ empty the command, callback and plugin tables and persist them. """
    global cmndtable
    global callbacktable
    global pluginlist
    cmndtable.data = {}
    cmndtable.save()
    callbacktable.data = {}
    callbacktable.save()
    pluginlist.data = []
    pluginlist.save()
def getpluginlist():
    """ return enabled plugins: the whitelist (when set, otherwise the
        full plugin list) minus blacklisted plugin names. """
    global pluginlist
    if not pluginlist:
        boot()
    available = plugwhitelist.data or pluginlist.data
    # the blacklist stores dotted module paths; compare by last component
    denied = [entry.split(".")[-1] for entry in plugblacklist.data]
    return [plug for plug in available if plug not in denied]
## update_mod command
def update_mod(modname):
    """ refresh the runtime tables for a freshly (re)loaded module. """
    savecallbacktable(modname)
    savecmndtable(modname, saveperms=False)
    savepluginlist(modname)
def whatcommands(plug):
    """ list the commands whose module file name matches *plug*. """
    result = []
    for cmnd, mod in getcmndtable().iteritems():
        if not mod:
            continue
        # compare against the last component of the dotted module path
        if plug in mod.split(".")[-1]:
            result.append(cmnd)
    return result
def getcmndperms():
    """ return the command -> permissions configuration. """
    return cmndperms
def plugenable(mod):
    """ enable a plugin: add it to the whitelist (when one is active) or
        remove it from the blacklist. """
    if plugwhitelist.data and not mod in plugwhitelist.data:
        plugwhitelist.data.append(mod)
        # BUGFIX: was plugwhtelist.save() - a NameError at runtime
        plugwhitelist.save()
        return
    if mod in plugblacklist.data:
        plugblacklist.data.remove(mod)
        plugblacklist.save()
def plugdisable(mod):
    """ disable a plugin: drop it from the whitelist (when one is active)
        or add it to the blacklist. """
    if plugwhitelist.data and mod in plugwhitelist.data:
        plugwhitelist.data.remove(mod)
        # BUGFIX: was plugwhtelist.save() - a NameError at runtime
        plugwhitelist.save()
        return
    if not mod in plugblacklist.data:
        plugblacklist.data.append(mod)
        plugblacklist.save()
def isenabled(mod):
    """ whether plugin module *mod* may be loaded. """
    # default-denied plugins need an explicit whitelist entry
    if mod in default_deny and mod not in plugwhitelist.data:
        return False
    return mod not in plugblacklist.data
def size():
    """ return a human readable summary of the runtime table sizes. """
    global cmndtable
    global pluginlist
    global callbacktable
    global cmndperms
    global timestamps
    global plugwhitelist
    global plugblacklist
    return "cmndtable: %s - pluginlist: %s - callbacks: %s - timestamps: %s - whitelist: %s - blacklist: %s" % (cmndtable.size(), pluginlist.size(), callbacktable.size(), timestamps.size(), plugwhitelist.size(), plugblacklist.size())
| 36.276458 | 233 | 0.644082 |
c72425e028e57910d98cdb599e615ae8c3143de9 | 4,481 | py | Python | cgbind/geom.py | duartegroup/cgbind | 8c2369d4c49e8b008fc3951719d99e0c4f6b6b16 | [
"MIT"
] | 7 | 2020-06-08T16:18:56.000Z | 2021-01-28T09:59:16.000Z | cgbind/geom.py | duartegroup/cgbind | 8c2369d4c49e8b008fc3951719d99e0c4f6b6b16 | [
"MIT"
] | null | null | null | cgbind/geom.py | duartegroup/cgbind | 8c2369d4c49e8b008fc3951719d99e0c4f6b6b16 | [
"MIT"
] | 2 | 2020-11-16T04:52:43.000Z | 2021-06-04T05:07:29.000Z | import numpy as np
from cgbind.log import logger
from scipy.spatial.distance import cdist
from scipy.spatial import distance_matrix
from cgbind.atoms import get_atomic_mass
def calc_com(atoms):
    """
    Calculate the centre of mass for a list of xyzs

    :param atoms: (list(cgbind.atoms.Atom))
    :return: (np.ndarray) shape: 3
    """
    logger.info('Calculating centre of mass ')
    weighted_coords = np.zeros(3)
    mass_total = 0.0
    for atom in atoms:
        mass = get_atomic_mass(atom)
        mass_total += mass
        weighted_coords += mass * atom.coord
    return weighted_coords / mass_total
def calc_normalised_vector(coord1, coord2):
    """Return the unit vector pointing from coord1 to coord2."""
    difference = coord2 - coord1
    return difference / np.linalg.norm(difference)
def is_geom_reasonable(molecule):
    """
    For an xyz list check to ensure the geometry is sensible, before an
    optimisation is carried out. There should be no distances smaller than
    0.8 Å and none larger than 1000 Å

    :param molecule: (cgbind.molecule.BaseStruct)
    :return: (bool)
    """
    logger.info('Checking to see whether the geometry is reasonable')
    coords = molecule.get_coords()
    # Compute the distance matrix with all i,j pairs, thus add 1 to the
    # diagonals to remove the d(ii) = 0 components that would otherwise result
    # in an unreasonable geometry
    dist_mat = distance_matrix(coords, coords) + np.identity(len(coords))
    if np.min(dist_mat) < 0.8:
        logger.warning('There is a distance < 0.8 Å. There is likely a problem'
                       ' with the geometry')
        return False
    if np.max(dist_mat) > 1000:
        logger.warning('There is a distance > 1000 Å. There is likely a '
                       'problem with the geometry')
        return False
    return True
def rotation_matrix(axis, theta):
    """
    Return the rotation matrix associated with counterclockwise rotation about
    the given axis by theta radians (Euler-Rodrigues formula).

    :param axis: (np.ndarray) Vector in 3D to rotate around (normalised here)
    :param theta: (float) Angle in radians
    """
    unit_axis = np.asarray(axis)
    unit_axis = unit_axis / np.linalg.norm(unit_axis)
    # Quaternion components for the half-angle rotation
    a = np.cos(theta / 2.0)
    b, c, d = -unit_axis * np.sin(theta / 2.0)
    aa, bb, cc, dd = a * a, b * b, c * c, d * d
    bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
    return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
                     [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
                     [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
def get_rot_mat_kabsch(p_matrix, q_matrix):
    """
    Get the optimal rotation matrix with the Kabsch algorithm. Notation is from
    https://en.wikipedia.org/wiki/Kabsch_algorithm

    :param p_matrix: (np.ndarray)
    :param q_matrix: (np.ndarray)
    :return: (np.ndarray) rotation matrix
    """
    covariance = p_matrix.transpose().dot(q_matrix)
    u, _, vh = np.linalg.svd(covariance)
    # Correct for a possible reflection (determinant -1)
    sign = np.linalg.det(vh.transpose().dot(u.transpose()))
    correction = np.identity(3)
    correction[2, 2] = sign
    return vh.transpose().dot(correction).dot(u.transpose())
def get_centered_matrix(mat):
    """
    For a list of coordinates n.e. a n_atoms x 3 matrix as a np array
    translate to the center of the coordinates

    :param mat: (np.ndarray)
    :return: (np.ndarray) translated coordinates
    """
    # Broadcasting subtracts the centroid from every row at once
    return mat - np.average(mat, axis=0)
def spherical_to_cart(r, theta, phi):
    """Convert spherical polar coordinates (r, theta, phi) to Cartesian
    (x, y, z), with phi measured from the +z axis."""
    sin_phi = np.sin(phi)
    x = r * np.cos(theta) * sin_phi
    y = r * np.sin(theta) * sin_phi
    z = r * np.cos(phi)
    return np.array([x, y, z])
def get_max_sphere_negative_radius(theta_and_phi, r, cage_coords):
    """
    Get the maximum sphere radius that is possible at a point defined by the
    spherical polar coordinates theta, phi and r. This amounts to finding the
    minimum pairwise distance between the point and the rest of the cage. The
    negative radius is returned as it will be fed into scipy.optmize.minimise

    :param theta_and_phi: (list(float))
    :param r: (float)
    :param cage_coords: (np.ndarray) n_atoms x 3
    :return: (float)
    """
    theta, phi = theta_and_phi
    # Convert the trial point to Cartesian; cdist needs a 2D (1 x 3) array
    trial_point = np.array([spherical_to_cart(r=r, theta=theta, phi=phi)])
    pairwise_dists = cdist(trial_point, cage_coords)
    # Negated so that minimising this function maximises the sphere radius
    return -np.min(pairwise_dists)
# Cartesian unit (basis) vectors, used as rotation axes elsewhere
i = np.array([1.0, 0.0, 0.0])
j = np.array([0.0, 1.0, 0.0])
k = np.array([0.0, 0.0, 1.0])
| 30.073826 | 79 | 0.642714 |
09b60c514e5239f580d9ebb27dde640b860033a2 | 7,544 | py | Python | tests/services/test_entrez.py | jfear/sra2mongo | 7b73a2a71c9c12160da7d0aaa1537a615a356136 | [
"MIT"
] | 2 | 2017-05-18T18:05:33.000Z | 2019-03-13T13:52:39.000Z | tests/services/test_entrez.py | jfear/sra2mongo | 7b73a2a71c9c12160da7d0aaa1537a615a356136 | [
"MIT"
] | 19 | 2017-05-18T13:39:34.000Z | 2020-07-02T19:36:36.000Z | tests/services/test_entrez.py | jfear/sra2mongo | 7b73a2a71c9c12160da7d0aaa1537a615a356136 | [
"MIT"
] | 3 | 2017-01-31T21:20:57.000Z | 2021-05-20T01:05:18.000Z | import os
import datetime
from textwrap import dedent
import pytest
from sramongo import (
parsers_pubmed_xml,
parsers_biosample_xml,
parsers_bioproject_xml,
parsers_sra_xml,
)
from sramongo.services import entrez
DB = "sra"
QUERY = '"Drosophila melanogaster"[orgn]'
RETMAX = 2
API_KEY = os.environ.get("ENTREZ_API_KEY", False)
@pytest.fixture(scope="module")
def small_esearch_results() -> entrez.EsearchResult:
    """Module-scoped esearch result (hits the live NCBI Entrez API)."""
    return entrez.esearch(DB, QUERY, retmax=RETMAX, api_key=API_KEY)
@pytest.fixture(scope="module")
def experiment_set_xml() -> str:
    """Static EXPERIMENT_PACKAGE_SET XML with two experiments
    (SRX5231949/Library_2 and SRX5231948/Library_1) for parser tests."""
    return dedent(
        """<?xml version="1.0"?>
        <EXPERIMENT_PACKAGE_SET>
            <EXPERIMENT_PACKAGE>
                <EXPERIMENT accession="SRX5231949" alias="Library_2">
                    <IDENTIFIERS>
                        <PRIMARY_ID>SRX5231949</PRIMARY_ID>
                    </IDENTIFIERS>
                </EXPERIMENT>
                <SUBMISSION accession="SRA832165" alias="SUB5027898">
                    <IDENTIFIERS>
                        <PRIMARY_ID>SRA832165</PRIMARY_ID>
                    </IDENTIFIERS>
                </SUBMISSION>
            </EXPERIMENT_PACKAGE>
            <EXPERIMENT_PACKAGE>
                <EXPERIMENT accession="SRX5231948" alias="Library_1">
                    <IDENTIFIERS>
                        <PRIMARY_ID>SRX5231948</PRIMARY_ID>
                    </IDENTIFIERS>
                    <TITLE>d77ZT8</TITLE>
                </EXPERIMENT>
                <SUBMISSION accession="SRA832165" alias="SUB5027898">
                    <IDENTIFIERS>
                        <PRIMARY_ID>SRA832165</PRIMARY_ID>
                    </IDENTIFIERS>
                </SUBMISSION>
            </EXPERIMENT_PACKAGE>
        </EXPERIMENT_PACKAGE_SET>
        """
    )
def test_urlencode_query():
    """The module-level QUERY must URL-encode to the documented Entrez form,
    keeping '/' and '+' unescaped."""
    import urllib.parse

    encoded = urllib.parse.quote_plus(QUERY, safe="/+")
    expected = "%22Drosophila+melanogaster%22%5Borgn%5D"
    assert encoded == expected
@pytest.mark.skip
def test_esearch_sra_nohistory():
    """Live-API: without history, esearch returns ids but no WebEnv/query_key."""
    esearch_results = entrez.esearch(DB, QUERY, userhistory=False, retmax=RETMAX, api_key=API_KEY)
    assert len(esearch_results.ids) == RETMAX
    assert esearch_results.webenv == ""
    assert esearch_results.query_key == ""
@pytest.mark.skip
def test_esearch_sra_withhistory():
    """Live-API: with history, esearch also returns WebEnv and query_key."""
    esearch_results = entrez.esearch(DB, QUERY, userhistory=True, retmax=RETMAX, api_key=API_KEY)
    assert len(esearch_results.ids) == RETMAX
    assert esearch_results.webenv != ""
    assert esearch_results.query_key != ""
@pytest.mark.skip
def test_epost(small_esearch_results):
    """Live-API: posting two ids creates server-side history with query_key '1'."""
    ids = small_esearch_results.ids[:2]
    epost_results = entrez.epost(DB, ids=ids, api_key=API_KEY)
    assert epost_results.query_key == "1"
@pytest.mark.skip
def test_esummary_sra_no_history(small_esearch_results):
    """Live-API: esummary by explicit id list parses RETMAX docs with datetimes."""
    ids = small_esearch_results.ids
    esummary_results = []
    for docs in entrez.esummary(DB, ids, api_key=API_KEY):
        esummary_results.extend(list(parsers_sra_xml.parse_sra_esummary_result(docs)))
    assert len(esummary_results) == RETMAX
    assert esummary_results[0].accn != ""
    # NOTE(review): duplicated assertion — the second was probably meant to
    # check a different field (e.g. the last element or another attribute)
    assert esummary_results[0].accn != ""
    assert type(esummary_results[0].create_date) == datetime.datetime
    assert type(esummary_results[0].update_date) == datetime.datetime
@pytest.mark.skip
def test_esummary_sra_with_history_retmax(small_esearch_results):
    """Live-API: esummary via WebEnv history honours retmax batching (600 docs)."""
    webenv = small_esearch_results.webenv
    query_key = small_esearch_results.query_key
    esummary_results = []
    for docs in entrez.esummary(
        DB, webenv=webenv, query_key=query_key, retmax=600, api_key=API_KEY
    ):
        esummary_results.extend(list(parsers_sra_xml.parse_sra_esummary_result(docs)))
    assert len(esummary_results) == 600
    assert esummary_results[0].accn != ""
    # NOTE(review): duplicated assertion, see test_esummary_sra_no_history
    assert esummary_results[0].accn != ""
    assert type(esummary_results[0].create_date) == datetime.datetime
    assert type(esummary_results[0].update_date) == datetime.datetime
@pytest.mark.skip
def test_esummary_sra_with_history_count(small_esearch_results):
    """Live-API: esummary with an explicit total count fetches exactly 600 docs."""
    webenv = small_esearch_results.webenv
    query_key = small_esearch_results.query_key
    esummary_results = []
    for docs in entrez.esummary(DB, webenv=webenv, query_key=query_key, count=600, api_key=API_KEY):
        esummary_results.extend(list(parsers_sra_xml.parse_sra_esummary_result(docs)))
    assert len(esummary_results) == 600
def test_parse_efetch_result(experiment_set_xml):
    """Each experiment parsed from the fixture XML keeps its own alias in `.xml`."""
    for experiment in parsers_sra_xml.parse_sra_efetch_result(experiment_set_xml):
        if experiment.accn == "SRX5231949":
            assert "Library_2" in experiment.xml
        else:
            assert "Library_1" in experiment.xml
@pytest.mark.skip
def test_efetch_no_history(small_esearch_results):
    """Live-API: efetch by id returns SRA experiment accessions ({S,D,E}RX*)."""
    ids = small_esearch_results.ids
    for result in entrez.efetch(DB, ids, api_key=API_KEY):
        for experiment in parsers_sra_xml.parse_sra_efetch_result(result):
            # NOTE(review): bitwise '|' on bools works, but 'or' is idiomatic
            assert (
                experiment.accn.startswith("SRX")
                | experiment.accn.startswith("DRX")
                | experiment.accn.startswith("ERX")
            )
@pytest.mark.skip
def test_elink_no_hisotry(small_esearch_results):  # (sic: "hisotry")
    """Live-API: elink sra->biosample by id list populates query_key."""
    ids = small_esearch_results.ids
    result = entrez.elink(db="biosample", dbfrom="sra", ids=ids)
    assert result.dbfrom == "sra"
    assert result.dbto == "biosample"
    assert result.query_key == "1"
@pytest.mark.skip
def test_elink_with_hisotry(small_esearch_results):  # (sic: "hisotry")
    """Live-API: elink sra->biosample via WebEnv history resolves both dbs."""
    webenv = small_esearch_results.webenv
    query_key = small_esearch_results.query_key
    result = entrez.elink(db="biosample", dbfrom="sra", webenv=webenv, query_key=query_key)
    assert result.dbfrom == "sra"
    assert result.dbto == "biosample"
@pytest.mark.skip
def test_elink_no_hisotry_no_results(small_esearch_results):  # (sic: "hisotry")
    """Live-API: an sra->pubmed link with no hits leaves dbto/query_key empty."""
    ids = small_esearch_results.ids
    result = entrez.elink(db="pubmed", dbfrom="sra", ids=ids)
    assert result.dbfrom == "sra"
    assert result.dbto == ""
    assert result.query_key == ""
@pytest.mark.skip
def test_efetch_bioproject(small_esearch_results):
    """Live-API: sra->bioproject elink then efetch yields PRJ* accessions."""
    webenv = small_esearch_results.webenv
    query_key = small_esearch_results.query_key
    link = entrez.elink(
        "bioproject", "sra", webenv=webenv, query_key=query_key, api_key=API_KEY, retmax=RETMAX
    )
    for result in entrez.efetch(
        "bioproject", webenv=link.webenv, query_key=link.query_key, api_key=API_KEY, retmax=RETMAX
    ):
        for document in parsers_bioproject_xml.parse_bioproject_efetch_result(result):
            assert document.accn.startswith("PRJ")
@pytest.mark.skip
def test_efetch_biosample(small_esearch_results):
    """Live-API: sra->biosample elink then efetch yields SAMN* accessions."""
    webenv = small_esearch_results.webenv
    query_key = small_esearch_results.query_key
    link = entrez.elink(
        "biosample", "sra", webenv=webenv, query_key=query_key, api_key=API_KEY, retmax=RETMAX
    )
    for result in entrez.efetch(
        "biosample", webenv=link.webenv, query_key=link.query_key, api_key=API_KEY, retmax=RETMAX
    ):
        for document in parsers_biosample_xml.parse_biosample_efetch_result(result):
            assert document.accn.startswith("SAMN")
@pytest.mark.skip
def test_efetch_pubmed(small_esearch_results):
    """Live-API: sra->pubmed elink then efetch parses documents with str accns."""
    webenv = small_esearch_results.webenv
    query_key = small_esearch_results.query_key
    link = entrez.elink(
        "pubmed", "sra", webenv=webenv, query_key=query_key, api_key=API_KEY, retmax=RETMAX
    )
    for result in entrez.efetch(
        "pubmed", webenv=link.webenv, query_key=link.query_key, api_key=API_KEY, retmax=RETMAX
    ):
        for document in parsers_pubmed_xml.parse_pubmed_efetch_result(result):
            assert isinstance(document.accn, str)
| 34.764977 | 100 | 0.70334 |
8b5d372fc9dd10498f329da4432bbae1adc39eae | 3,958 | py | Python | src/pyherc/rules/inventory/equip.py | tuturto/pyherc | 4e7c72a4d80d335f7d3c48cecac96cd7105acac4 | [
"MIT"
] | 25 | 2015-07-21T12:40:42.000Z | 2021-09-23T09:00:45.000Z | src/pyherc/rules/inventory/equip.py | tuturto/pyherc | 4e7c72a4d80d335f7d3c48cecac96cd7105acac4 | [
"MIT"
] | 65 | 2015-02-15T19:42:19.000Z | 2018-01-03T10:22:35.000Z | src/pyherc/rules/inventory/equip.py | tuturto/pyherc | 4e7c72a4d80d335f7d3c48cecac96cd7105acac4 | [
"MIT"
] | 3 | 2017-06-15T13:07:49.000Z | 2019-04-15T02:18:39.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2017 Tuukka Turto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Module defining classes related to inventory actions
"""
from pyherc.data import is_armour, is_weapon, is_ammunition, is_boots
from pyherc.aspects import log_debug, log_info
from pyherc.events import new_equip_event
from pyherc.rules.factory import SubActionFactory
class EquipFactory(SubActionFactory):
    """
    Factory that builds equip actions for 'equip' inventory requests

    .. versionadded:: 0.8
    """

    @log_debug
    def __init__(self):
        """
        Set up the factory and register the sub action it serves
        """
        super().__init__()
        self.sub_action = 'equip'

    @log_debug
    def can_handle(self, parameters):
        """
        Tell whether the given parameters describe an equip request

        :param parameters: parameters to check
        :returns: True if factory is capable of handling parameters
        :rtype: Boolean
        """
        return parameters.sub_action == self.sub_action

    @log_info
    def get_action(self, parameters):
        """
        Build an equip action from the given parameters

        :param parameters: parameters used to control creation
        :type parameters: InventoryParameters
        """
        return EquipAction(parameters.character, parameters.item)
class EquipAction():
    """
    Action for equiping an item

    .. versionadded:: 0.8
    """
    @log_debug
    def __init__(self, character, item):
        """
        Default constructor

        :param character: character wearing the item
        :type character: Character
        :param item: item to equip
        :type item: Item
        """
        super().__init__()
        self.character = character
        self.item = item

    @log_info
    def execute(self):
        """
        Executes this action: places the item into the matching inventory
        slot and raises an equip event for each matching slot.
        """
        if is_armour(self.item):
            self.character.inventory.armour = self.item
            self.character.raise_event(new_equip_event(self.character,
                                                       self.item))

        if is_weapon(self.item):
            self.character.inventory.weapon = self.item
            self.character.raise_event(new_equip_event(self.character,
                                                       self.item))

        if is_ammunition(self.item):
            self.character.inventory.projectiles = self.item
            self.character.raise_event(new_equip_event(self.character,
                                                       self.item))
        # NOTE(review): this `elif` ties boots to the ammunition check, so
        # boots are only equipped when the item is NOT ammunition, while the
        # armour/weapon branches above are independent `if`s — confirm the
        # asymmetry is intentional.
        elif is_boots(self.item):
            self.character.inventory.boots = self.item
            self.character.raise_event(new_equip_event(self.character,
                                                       self.item))

    @log_debug
    def is_legal(self):
        """
        Check if the action is possible to perform

        :returns: True if move is possible, false otherwise
        :rtype: Boolean
        """
        return True
| 32.983333 | 79 | 0.635169 |
1c76807b584d441bccc1451ef5432b05c989dc31 | 719 | py | Python | articles/migrations/0005_auto_20170310_1928.py | Ilyes-Hammadi/help-js | f2e7f6688631700c4ee270276ebc7d5aae26cc76 | [
"MIT"
] | 11 | 2017-03-20T14:00:34.000Z | 2019-09-17T13:10:37.000Z | articles/migrations/0005_auto_20170310_1928.py | Ilyes-Hammadi/help-js | f2e7f6688631700c4ee270276ebc7d5aae26cc76 | [
"MIT"
] | null | null | null | articles/migrations/0005_auto_20170310_1928.py | Ilyes-Hammadi/help-js | f2e7f6688631700c4ee270276ebc7d5aae26cc76 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-10 19:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: makes `session.chat_room` an optional
    # (nullable) FK to Message and gives `session.ended` a default of False.

    dependencies = [
        ('articles', '0004_message_session'),
    ]

    operations = [
        migrations.AlterField(
            model_name='session',
            name='chat_room',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='articles.Message'),
        ),
        migrations.AlterField(
            model_name='session',
            name='ended',
            field=models.BooleanField(default=False),
        ),
    ]
| 26.62963 | 127 | 0.625869 |
651519e481d2aafe62d7106404e30dc2352cfb90 | 1,408 | py | Python | polling_stations/apps/data_importers/management/commands/import_bedford.py | danielgriffin48/UK-Polling-Stations | 0e5273357a4fdc00c2af794c71558b6f8f2a0a49 | [
"BSD-3-Clause"
] | null | null | null | polling_stations/apps/data_importers/management/commands/import_bedford.py | danielgriffin48/UK-Polling-Stations | 0e5273357a4fdc00c2af794c71558b6f8f2a0a49 | [
"BSD-3-Clause"
] | null | null | null | polling_stations/apps/data_importers/management/commands/import_bedford.py | danielgriffin48/UK-Polling-Stations | 0e5273357a4fdc00c2af794c71558b6f8f2a0a49 | [
"BSD-3-Clause"
] | null | null | null | from data_importers.management.commands import BaseDemocracyCountsCsvImporter
class Command(BaseDemocracyCountsCsvImporter):
    """Polling-station importer for Bedford (GSS E06000055) for the
    2019-12-12 UK general election, Democracy Counts CSV format."""

    council_id = "E06000055"
    addresses_name = (
        "parl.2019-12-12/Version 1/Democracy Club - Polling Districts UKPGE.csv"
    )
    stations_name = (
        "parl.2019-12-12/Version 1/Democracy Club - Polling Stations UKPGE.csv"
    )
    elections = ["parl.2019-12-12"]
    # never derive a station point from a postcode centroid for this council
    allow_station_point_from_postcode = False

    def station_record_to_dict(self, record):
        """Patch known-bad station coordinates before base-class conversion."""
        # KEMPSTON WEST METHODIST CHURCH: grid reference carried forward
        # from local.2019-05-02
        if record.stationcode == "BAS_1":
            record = record._replace(xordinate="502614")
            record = record._replace(yordinate="247440")
        return super().station_record_to_dict(record)

    def address_record_to_dict(self, record):
        """Clean bad UPRNs/postcodes and drop known-bad address rows."""
        uprn = record.uprn.strip().lstrip("0")

        # Lots of junk UPRNs in the source data, so blank the raw value and
        # only use the cleaned copy for the special case below
        record = record._replace(uprn="")
        rec = super().address_record_to_dict(record)

        if (
            uprn == "10024229957"
        ):  # MK442EY -> MK442EL : SKYLARKS, KIMBOLTON ROAD, BOLNHURST, BEDFORD
            rec["postcode"] = "MK442EL"

        if record.postcode in ["MK45 3PG", "MK45 3PW", "MK43 0BD"]:
            return None

        if record.add1 in [
            "CRIEGNESH",
            "THE SHIELING",
        ]:
            return None

        return rec
| 30.608696 | 80 | 0.62642 |
0c501e928ecff4b6715e368a6f60f6533f500f79 | 7,058 | py | Python | model.py | zhitao-wang/Hierarchical-Diffusion-Attention-Network | 62211c823dbd7ea4548b8f2e83fb11cd680389c5 | [
"MIT"
] | 12 | 2019-12-18T21:02:40.000Z | 2021-11-19T05:42:25.000Z | model.py | Toyzenk/Hierarchical-Diffusion-Attention-Network | 62211c823dbd7ea4548b8f2e83fb11cd680389c5 | [
"MIT"
] | 1 | 2020-02-12T06:10:23.000Z | 2020-04-23T15:37:42.000Z | model.py | Toyzenk/Hierarchical-Diffusion-Attention-Network | 62211c823dbd7ea4548b8f2e83fb11cd680389c5 | [
"MIT"
] | 5 | 2020-05-14T08:39:06.000Z | 2021-05-17T02:02:32.000Z | import tensorflow as tf
import numpy as np
from rank_metrics import rank_eval
import argparse
def ill_cal(pred, sl):
    """Sum over cascades of the per-cascade mean NLL.

    `pred` is a flat sequence of per-step NLL values; `sl` gives the number
    of steps belonging to each cascade, in order.
    """
    total = 0
    offset = 0
    for length in sl:
        segment = pred[offset:offset + length]
        offset += length
        total += np.sum(segment) / float(length)
    return total
# cas_emb:[b,n,d] cas_mask:[b,n,1]
def hidan(cas_emb, cas_mask, time_weight, hidden_size, keep_prob):
    """Hierarchical attention: user-to-user self-attention followed by
    time-aware user-to-cascade attention pooling; returns a [b,d] vector."""
    cas_encoding = user2user(cas_emb, cas_mask, hidden_size, keep_prob)  # [b,n,d]
    return user2cas(cas_encoding, cas_mask, time_weight, hidden_size, keep_prob)
def user2user(cas_emb, cas_mask, hidden_size, keep_prob):
    """Masked forward-directional self-attention over the users of a cascade,
    fused with the per-user hidden state through a learned gate.

    cas_emb: [b,n,d] user embeddings; cas_mask: [b,n,1] 1 for real entries.
    Returns [b,n,hidden_size].
    """
    with tf.variable_scope('user2user'):
        bs, sl = tf.shape(cas_emb)[0], tf.shape(cas_emb)[1]
        # strictly lower-triangular mask: position i attends only to j < i
        col, row = tf.meshgrid(tf.range(sl), tf.range(sl))  # [n,n]
        direction_mask = tf.greater(row, col)  # [n,n]
        direction_mask_tile = tf.tile(tf.expand_dims(direction_mask, 0), [bs, 1, 1])  # [b,n,n]
        length_mask_tile = tf.tile(tf.expand_dims(tf.squeeze(tf.cast(cas_mask, tf.bool), -1), 1), [1, sl, 1])  # [b,1,n] -> [b,n,n]
        attention_mask = tf.cast(tf.logical_and(direction_mask_tile, length_mask_tile), tf.float32)  # [b,n,n]
        cas_hidden = dense(cas_emb, hidden_size, tf.nn.elu, keep_prob, 'hidden') * cas_mask  # [b,n,d]
        head = dense(cas_hidden, hidden_size, tf.identity, keep_prob, 'head', False)  # [b,n,d]
        tail = dense(cas_hidden, hidden_size, tf.identity, keep_prob, 'tail', False)  # [b,n,d]
        # -1e30 pushes masked logits to ~0 probability after softmax
        matching_logit = tf.matmul(head, tf.transpose(tail, perm=[0, 2, 1])) + (1 - attention_mask) * (-1e30)
        attention_score = tf.nn.softmax(matching_logit, -1) * attention_mask
        depend_emb = tf.matmul(attention_score, cas_hidden)  # [b,n,d]
        # gate between each user's own state and its attention context
        fusion_gate = dense(tf.concat([cas_hidden, depend_emb], 2), hidden_size, tf.sigmoid, keep_prob, 'fusion_gate')  # [b,n,d]
        return (fusion_gate * cas_hidden + (1 - fusion_gate) * depend_emb) * cas_mask  # [b,n,d]
def user2cas(cas_encoding, cas_mask, time_weight, hidden_size, keep_prob):
    """Time-modulated attention pooling of user encodings into a single
    cascade vector: softmax over the length axis, reduced to [b,d]."""
    with tf.variable_scope('user2cas'):
        map1 = dense(cas_encoding, hidden_size, tf.nn.elu, keep_prob, 'map1')  # [b,n,d]
        # per-position time-interval embedding modulates the attention logits
        time_influence = dense(time_weight, hidden_size, tf.nn.elu, keep_prob, 'time_influence')
        map2 = dense(map1 * time_influence, 1, tf.identity, keep_prob, 'map2')
        attention_score = tf.nn.softmax(map2 + (-1e30) * (1 - cas_mask), 1) * cas_mask
        return tf.reduce_sum(attention_score * cas_encoding, 1)
def dense(input, out_size, activation, keep_prob, scope, need_bias=True):
    """Fully connected layer over the last axis of an arbitrary-rank tensor,
    followed by dropout. `need_bias=False` keeps the bias frozen at zero.

    NOTE: `input` shadows the builtin; kept for API compatibility.
    """
    with tf.variable_scope(scope):
        W = tf.get_variable('W', [input.get_shape()[-1], out_size], dtype=tf.float32)
        b = tf.get_variable('b', [out_size], tf.float32, tf.zeros_initializer(), trainable=need_bias)
        # flatten to 2D for the matmul, then restore the leading dims
        flatten = tf.matmul(tf.reshape(input, [-1, tf.shape(input)[-1]]), W) + b
        out_shape = [tf.shape(input)[i] for i in range(len(input.get_shape()) - 1)] + [out_size]
        return tf.nn.dropout(activation(tf.reshape(flatten, out_shape)), keep_prob)
class Model(object):
    """TF1 graph for the HiDAN next-activated-user prediction model:
    embeddings -> hierarchical attention encoder -> softmax over all nodes."""

    def __init__(self, config):
        """Copy hyper-parameters from `config`; the graph is built later by
        `build_model`."""
        self.num_nodes = config.num_nodes
        self.hidden_size = config.hidden_size
        self.embedding_size = config.embedding_size
        self.learning_rate = config.learning_rate
        self.l2_weight = config.l2_weight
        self.train_dropout = config.dropout
        self.n_time_interval = config.n_time_interval
        self.optimizer = config.optimizer

    def build_model(self):
        """Build placeholders, embeddings, the HiDAN encoder, the softmax
        cross-entropy loss with L2 regularisation, and the train op."""
        with tf.variable_scope("model", initializer=tf.contrib.layers.xavier_initializer()) as scope:
            self.cas = tf.placeholder(tf.int32, [None, None])  # (b,n)
            # non-padded length per cascade; padding id is 0
            self.cas_length = tf.reduce_sum(tf.sign(self.cas), 1)
            self.cas_mask = tf.expand_dims(tf.sequence_mask(self.cas_length, tf.shape(self.cas)[1], tf.float32), -1)
            self.dropout = tf.placeholder(tf.float32)
            self.labels = tf.placeholder(tf.int32, [None])  # (b,)
            self.time_interval_index = tf.placeholder(tf.int32, [None, None])  # (b,n)
            self.num_cas = tf.placeholder(tf.float32)

            with tf.device("/cpu:0"):
                self.embedding = tf.get_variable(
                    "embedding", [self.num_nodes,
                                  self.embedding_size], dtype=tf.float32)
                self.cas_emb = tf.nn.embedding_lookup(self.embedding, self.cas)  # (b,n,l)

            # learnable modulation vector per discretised time interval
            self.time_lambda = tf.get_variable('time_lambda', [self.n_time_interval + 1, self.hidden_size], dtype=tf.float32)
            self.time_weight = tf.nn.embedding_lookup(self.time_lambda, self.time_interval_index)

            with tf.variable_scope("hidan") as scope:
                self.hidan = hidan(self.cas_emb, self.cas_mask, self.time_weight, self.hidden_size, self.dropout)

            with tf.variable_scope("loss"):
                l0 = self.hidan
                self.logits = dense(l0, self.num_nodes, tf.identity, 1.0, 'logits')
                self.nll = tf.nn.softmax_cross_entropy_with_logits(labels=tf.one_hot(self.labels, self.num_nodes, dtype=tf.float32), logits=self.logits)
                self.loss = tf.reduce_mean(self.nll, -1)
                for v in tf.trainable_variables():
                    self.loss += self.l2_weight * tf.nn.l2_loss(v)
                # NOTE(review): 'adaelta' looks like a typo for 'adadelta';
                # left unchanged because configs may already use this spelling
                if self.optimizer == 'adaelta':
                    self.train_op = tf.train.AdadeltaOptimizer(self.learning_rate, rho=0.999).minimize(self.loss)
                else:
                    self.train_op = tf.train.AdamOptimizer(self.learning_rate, beta1=0.99).minimize(self.loss)

    def train_batch(self, sess, batch_data):
        """Run one optimisation step; returns the summed NLL of the batch."""
        cas, next_user, time_interval_index, seq_len = batch_data
        feed = {self.cas: cas,
                self.labels: next_user,
                self.dropout: self.train_dropout,
                self.time_interval_index: time_interval_index,
                self.num_cas: len(seq_len)
                }
        _, _, nll = sess.run([self.train_op, self.loss, self.nll], feed_dict=feed)
        batch_nll = np.sum(nll)
        return batch_nll

    def test_batch(self, sess, batch_test):
        """Evaluate one batch (dropout off); returns summed NLL, per-cascade
        mean NLL, and the ranking metrics from `rank_eval`."""
        cas, next_user, time_interval_index, seq_len = batch_test
        feed = {self.cas: cas,
                self.labels: next_user,
                self.time_interval_index: time_interval_index,
                self.dropout: 1.0
                }
        logits, nll = sess.run([self.logits, self.nll], feed_dict=feed)
        mrr, macc1, macc5, macc10, macc50, macc100 = rank_eval(logits, next_user, seq_len)
        batch_cll = np.sum(nll)
        batch_ill = ill_cal(nll, seq_len)
        return batch_cll, batch_ill, mrr, macc1, macc5, macc10, macc50, macc100
| 53.067669 | 153 | 0.613063 |
a783a8f785008715d00f946ae94b7bd75a71fd2c | 55,829 | py | Python | models/poe_multimodal_cgqn.py | lim0606/pytorch-generative-multisensory-network | 646404db3f6fdad0c6663b861be747c1032ec291 | [
"MIT"
] | 2 | 2019-11-06T14:03:52.000Z | 2019-12-25T22:35:19.000Z | models/poe_multimodal_cgqn.py | lim0606/pytorch-generative-multisensory-network | 646404db3f6fdad0c6663b861be747c1032ec291 | [
"MIT"
] | null | null | null | models/poe_multimodal_cgqn.py | lim0606/pytorch-generative-multisensory-network | 646404db3f6fdad0c6663b861be747c1032ec291 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from models.convdraw2 import ConvLSTMCell, StackedConvDrawEncoderCell, StackedConvDrawDecoderCell, ConvLSTM
from models.reparam import NormalDistributionConv2d, NormalDistributionConvTranspose2d
from models.image_representation import ContextNetwork as ImageContextNetwork
from models.image_representation import CameraEncoder
from models.image_renderer import ImageRenderer
from models.haptic_representation import ContextNetwork as HapticContextNetwork
from models.haptic_representation import HandEncoder
from models.haptic_renderer import HapticRenderer
from utils import loss_kld_gaussian_vs_gaussian, loss_recon_gaussian_w_fixed_var
from utils import logprob_gaussian, logprob_gaussian_w_fixed_var
from utils import broadcast_representation, sum_tensor_per_episode
from utils import pack_tensor_list, flatten_packed_tensor
from utils import merge_two_batch, pad_sequence, get_reversed_tensor, get_reversed_sequence, sort_padded_sequence
from utils import rgb2gray
def combine_reps(inputs):
    '''
    Input:
        inputs: a list of tuples, each of which (reps, context_sizes)
                ex) [(reps_1, context_sizes_1), (reps_2, context_sizes_2), ... ]
    Output:
        reps: per-episode concatenation of all reps
        context_sizes: per-episode sum of all context sizes
    '''
    # sanity-check: every tuple must cover the same number of episodes
    assert len(inputs) > 0
    first_reps, first_sizes = inputs[0]
    for other_reps, other_sizes in inputs:
        assert len(other_reps) == len(first_reps)
        assert len(other_sizes) == len(first_sizes)

    # interleave as [reps_1, sizes_1, reps_2, sizes_2, ...] so that zip(*)
    # walks episode-by-episode across all tuples at once
    interleaved = []
    for pair in inputs:
        interleaved += list(pair)

    reps, context_sizes = [], []
    for episode in zip(*interleaved):
        reps.append(torch.cat(episode[::2], dim=0))       # tensor entries
        context_sizes.append(sum(episode[1::2]))          # size entries
    return reps, context_sizes
class ProductOfExperts(nn.Module):
    ''' copied from https://github.com/mhw32/multimodal-vae-public/blob/master/mnist/model.py '''
    ''' Return parameters for product of independent experts.
    See https://arxiv.org/pdf/1410.7827.pdf for equations.

    @param mu: M x D for M experts
    @param logvar: M x D for M experts
    '''
    def forward(self, mu, logvar, eps=1e-8):
        # precision (inverse variance) of each expert; eps keeps it finite
        variance = torch.exp(logvar) + eps
        precision = 1. / (variance + eps)
        total_precision = torch.sum(precision, dim=0)
        # product of Gaussians: precision-weighted mean, summed precision
        pd_mu = torch.sum(mu * precision, dim=0) / total_precision
        pd_var = 1. / total_precision
        pd_logvar = torch.log(pd_var + eps)
        return pd_mu, pd_logvar
class CGQNConvDraw(nn.Module):
    def __init__(self,
                 dims,  # e.g. [(3, 64, 64, 7, 'image'), (132, 1, 1, 7, 'haptic')]
                 nc_enc,  # kernel size (number of channels) for encoder
                 nc_lstm,  # kernel size (number of channels) for lstm
                 nc_context,  # kernel size (number of channels) for representation
                 nz,  # size of latent variable
                 num_steps,  # number of steps in Draw
                 num_layers,  # number of StackedConvDrawEncoderCell/StackedConvDrawDecoderCell layers
                 ):
        """Build per-modality posterior/prior ConvDRAW cells, renderers and
        query encoders for a product-of-experts multimodal CGQN.

        `dims` is a list of 5-tuples ``(channels, height, width, nc_query,
        mtype)`` with ``mtype`` in {'image', 'haptic'}; all image modalities
        must share the same height and width.
        """
        super().__init__()
        # check conditions
        assert len(dims) > 0
        for dim in dims:
            assert len(dim) == 5, dim
            channels, height, width, nc_query, mtype = dim
            assert mtype in ['image', 'haptic']

        # find the common image height (default 64 when no image modality)
        im_heights = [height for _, height, _, _, mtype in dims if mtype == 'image']
        if len(im_heights) == 0:
            im_height = 64
        else:
            im_height = im_heights[0]
            for _im_height in im_heights[1:]:
                assert im_height == _im_height
        # find the common image width (default 64 when no image modality)
        im_widths = [width for _, _, width, _, mtype in dims if mtype == 'image']
        if len(im_widths) == 0:
            im_width = 64
        else:
            im_width = im_widths[0]
            for _im_width in im_widths[1:]:
                assert im_width == _im_width

        # init
        self.dims = dims
        self.nc_enc = nc_enc
        self.nc_lstm = nc_lstm
        self.nc_context = nc_context
        self.nz = nz
        self.num_steps = num_steps
        self.num_layers = num_layers
        self.z_height = im_height // 4  # height of feature map size for z
        self.z_width = im_width // 4  # width of feature map size for z

        # init
        self.num_multimodalities = len(dims)

        # define networks: one posterior cell, prior cell, renderer and
        # query encoder per modality
        rnn_qs, rnn_ps = [], []
        renderers, query_encoders = [], []
        for dim in dims:
            channels, height, width, nc_query, mtype = dim

            # posterior cell sees target+context reps, hence nc_context*2 input
            rnn_qs += [StackedConvDrawEncoderCell(
                nc_context*2,
                nc_lstm,
                nz=nz,
                kernel_size=5,
                padding=2,
                num_layers=num_layers,
                )]

            # prior
            rnn_ps += [StackedConvDrawDecoderCell(
                nc_context,
                nc_lstm,
                nz=nz,
                kernel_size=5,
                padding=2,
                num_layers=num_layers,
                )]

            # renderer
            if mtype == 'image':
                renderers += [ImageRenderer(
                    im_height = height,
                    im_width = width,
                    im_channels = channels,
                    nc_query = nc_query,
                    nc_enc = self.nc_enc,
                    nc_lstm = self.nc_lstm,
                    nz_per_step = self.num_layers * self.nz,
                    z_num_steps = self.num_steps,
                    z_height = self.z_height,
                    z_width = self.z_width,
                    num_steps = self.num_steps,
                    )]
                query_encoders += [CameraEncoder()]
            elif mtype == 'haptic':
                renderers += [HapticRenderer(
                    hp_height = height,
                    hp_width = width,
                    hp_channels = channels,
                    nc_query = nc_query,
                    nc_enc = self.nc_enc,
                    nc_lstm = self.nc_lstm,
                    nz_per_step = self.num_layers * self.nz,
                    z_num_steps = self.num_steps,
                    z_height = self.z_height,
                    z_width = self.z_width,
                    num_steps = self.num_steps,
                    )]
                query_encoders += [HandEncoder()]

        self.rnn_qs = nn.ModuleList(rnn_qs)
        self.rnn_ps = nn.ModuleList(rnn_ps)
        self.renderers = nn.ModuleList(renderers)
        self.query_encoders = nn.ModuleList(query_encoders)
        # combines per-modality Gaussian posteriors/priors into one Gaussian
        self.experts = ProductOfExperts()
def forward(self,
reps_context_sizes_pairs_c,
#reps_context, context_sizes,
reps_context_sizes_pairs_t,
#reps_target, target_sizes,
input_tuples,
#img_target=None, img_queries=None, img_batch_sizes=[], img_target_indices=[],
#hpt_target=None, hpt_queries=None, hpt_batch_sizes=[], hpt_target_indices=[],
num_steps=None, beta=1.0, std=1.0,
is_grayscale=False):
# init
num_episodes = len(reps_context_sizes_pairs_c[0][0])
#assert len(set([index for _, _, mod_target_indices, _ in input_tuples for index in mod_target_indices])) == num_episodes
loss_kl = 0
''' forward posterior / prior '''
# init states
states_ps, states_qs, hiddens_ps, hiddens_qs = [], [], [], []
for m in range(self.num_multimodalities):
states_ps += [self.rnn_ps[m].init_state(num_episodes, [self.z_height, self.z_width])]
states_qs += [self.rnn_qs[m].init_state(num_episodes, [self.z_height, self.z_width])]
hiddens_ps += [[state_p[0] for state_p in states_ps[m]]]
#hiddens_qs += [[state_q[0] for state_q in states_qs[m]]]
latents = []
inputs_q = []
inputs_p = []
init_input_q = False
init_input_p = False
for i in range(num_steps if num_steps is not None else self.num_steps):
means_qs, logvars_qs = [], []
for m in range(self.num_multimodalities):
# unpack states_q, hiddens_q
states_q = states_qs[m]
#hiddens_q = hiddens_qs[m]
hiddens_p = hiddens_ps[m]
# aggregate observations (posterior)
if not init_input_q:
# unpack input and combined contexts
reps_context, context_sizes = reps_context_sizes_pairs_c[m]
reps_context = pad_sequence(reps_context, context_sizes)
reps_context = torch.sum(reps_context, dim=1)
reps_context = reps_context.view(-1, self.nc_context, self.z_height, self.z_width)
input_p = reps_context
inputs_p += [input_p]
reps_target, target_sizes = reps_context_sizes_pairs_t[m]
reps_target = pad_sequence(reps_target, target_sizes)
reps_target = torch.sum(reps_target, dim=1)
reps_target = reps_target.view(-1, self.nc_context, self.z_height, self.z_width)
input_q = torch.cat([reps_target, reps_context], dim=1)
inputs_q += [input_q]
else:
input_q = inputs_q[m]
# forward posterior
means_q, logvars_q, hiddens_q, states_q = self.rnn_qs[m](input_q, states_q, hiddens_p)
# append to list
means_qs += [means_q]
logvars_qs += [logvars_q]
#hiddens_qs[m] = hiddens_q
states_qs[m] = states_q
# update flag
init_input_q = True
init_input_p = True
# get experts
# means_qs : a list num_multimodalities x num_layers x (batch, zc, zh, zw)
# -> num_layers x (batch, num_mod_data, zc, zh, zw)
means_q = [torch.cat([means_qs[m][j].unsqueeze(0) for m in range(self.num_multimodalities)], dim=0) for j in range(self.num_layers)]
logvars_q = [torch.cat([logvars_qs[m][j].unsqueeze(0) for m in range(self.num_multimodalities)], dim=0) for j in range(self.num_layers)]
for j in range(self.num_layers):
_means_q, _logvars_q = self.experts(means_q[j], logvars_q[j])
means_q[j] = _means_q
logvars_q[j] = _logvars_q
# sample z from posterior
zs = self.rnn_qs[0].sample(means_q, logvars_q)
means_ps, logvars_ps = [], []
for m in range(self.num_multimodalities):
# unpack states_p, hiddens_p
states_p = states_ps[m]
# aggregate observations (prior)
if not init_input_p:
## unpack input and combined contexts
#reps_context, context_sizes = reps_context_sizes_pairs_c[m]
#reps_context = pad_sequence(reps_context, context_sizes)
#reps_context = torch.sum(reps_context, dim=1)
#reps_context = reps_context.view(-1, self.nc_context, self.z_height, self.z_width)
#input_p = reps_context
#inputs_p += [input_p]
raise ValueError
else:
input_p = inputs_p[m]
# forward prior
_, means_p, logvars_p, hiddens_p, states_p = self.rnn_ps[m](input_p, states_p, latents_q=zs)
# append to list
means_ps += [means_p]
logvars_ps += [logvars_p]
hiddens_ps[m] = hiddens_p
states_ps[m] = states_p
## update flag
#init_input_p = True
# get experts
# means_ps : a list num_multimodalities x num_layers x (batch, zc, zh, zw)
# -> num_layers x (batch, num_mod_data, zc, zh, zw)
means_p = [torch.cat([means_ps[m][j].unsqueeze(0) for m in range(self.num_multimodalities)], dim=0) for j in range(self.num_layers)]
logvars_p = [torch.cat([logvars_ps[m][j].unsqueeze(0) for m in range(self.num_multimodalities)], dim=0) for j in range(self.num_layers)]
for j in range(self.num_layers):
_means_p, _logvars_p = self.experts(means_p[j], logvars_p[j])
means_p[j] = _means_p
logvars_p[j] = _logvars_p
# append z to latent
latents += [torch.cat(zs, dim=1).unsqueeze(1)] if len(zs) > 1 else [zs[0].unsqueeze(1)]
# update accumulated KL
for j in range(self.num_layers):
loss_kl += loss_kld_gaussian_vs_gaussian(means_q[j], logvars_q[j], means_p[j], logvars_p[j])
''' likelihood '''
info = {}
info['mod_likelihoods'] = []
loss_likelihood = 0
mean_recons = []
for idx, (dim, input_tuple) in enumerate(zip(self.dims, input_tuples)):
channels, height, width, _, mtype = dim
mod_target, mod_queries, mod_target_indices, mod_batch_sizes = input_tuple
if len(mod_queries) > 0:# is not None:
num_mod_data = len(mod_target)
assert sum(mod_batch_sizes) == num_mod_data
# run renderer (likelihood)
mod_mean_recon = self._forward_renderer(idx, mod_queries, latents, num_episodes, mod_batch_sizes, mod_target_indices)
# convert to gray scale
if mtype == 'image' and is_grayscale:
mod_mean_recon = rgb2gray(mod_mean_recon)
mod_target = rgb2gray(mod_target)
# estimate recon loss
loss_mod_likelihood = loss_recon_gaussian_w_fixed_var(mod_mean_recon, mod_target, std=std, add_logvar=False)
# estimate recon loss without std
loss_mod_likelihood_nostd = loss_recon_gaussian_w_fixed_var(mod_mean_recon.detach(), mod_target)
else:
mod_mean_recon = reps_context.new_zeros(1, channels, height, width)
loss_mod_likelihood = None
loss_mod_likelihood_nostd = None
# append to list
mean_recons += [mod_mean_recon]
info['mod_likelihoods'] += [loss_mod_likelihood_nostd]
# add to loss_likelihood
if loss_mod_likelihood is not None:
loss_likelihood += loss_mod_likelihood
''' loss '''
# sum loss
loss = loss_likelihood + beta * loss_kl
# additional loss info
info['likelihood'] = loss_likelihood.detach()
info['kl'] = loss_kl.detach()
## temporary
#img_mean_recon, hpt_mean_recon = mean_recons[0], mean_recons[1]
#info['img_likelihood'] = info['mod_likelihoods'][0]
#info['hpt_likelihood'] = info['mod_likelihoods'][1]
# return
#return img_mean_recon, hpt_mean_recon, None, loss, info
return mean_recons, latents, loss, info
def generate(self,
             reps_context_sizes_pairs_c,
             #reps_context, context_sizes,
             input_tuples,
             #img_queries, img_batch_sizes,
             #hpt_queries, hpt_batch_sizes,
             num_steps=None,
             is_grayscale=False):
    '''Generate reconstructions by sampling latents from the prior only.

    Unlike the training forward pass, no target observations are used:
    each DRAW step samples z from the (product-of-experts) prior and
    conditions the prior RNN on the aggregated context representations.

    Args:
        reps_context_sizes_pairs_c: per-modality list of
            (context representations, context sizes) pairs.
        input_tuples: per-modality list of (queries, batch_sizes) pairs;
            an empty query list skips rendering for that modality.
        num_steps: number of DRAW steps; defaults to ``self.num_steps``.
        is_grayscale: if True, image-modality outputs are converted to gray.

    Returns:
        (mean_recons, None): per-modality reconstructions; second element
        is a placeholder kept for interface compatibility.
    '''
    # init
    num_episodes = len(reps_context_sizes_pairs_c[0][0])
    # init states (only prior states are needed at generation time;
    # states_qs/hiddens_qs are created but intentionally left unused)
    #states_ps, states_qs, hiddens_ps, hiddens_qs = [], [], [], []
    states_ps, states_qs, hiddens_qs = [], [], []
    for m in range(self.num_multimodalities):
        states_ps += [self.rnn_ps[m].init_state(num_episodes, [self.z_height, self.z_width])]
        #hiddens_ps += [[state_p[0] for state_p in states_ps[m]]]
    latents = []
    inputs_p = []
    # context aggregation is performed once on the first step, then cached
    init_input_p = False
    for i in range(num_steps if num_steps is not None else self.num_steps):
        means_ps, logvars_ps = [], []
        for m in range(self.num_multimodalities):
            # unpack states_p, hiddens_p
            states_p = states_ps[m]
            # forward prior (prob): per-modality prior parameters for this step
            means_p, logvars_p = self.rnn_ps[m].forward_prob(states_p)
            # append to list
            means_ps += [means_p]
            logvars_ps += [logvars_p]
        # get experts: combine per-modality Gaussians via product-of-experts
        # means_ps : a list num_multimodalities x num_layers x (batch, zc, zh, zw)
        #         -> num_layers x (batch, num_mod_data, zc, zh, zw)
        means_p = [torch.cat([means_ps[m][j].unsqueeze(0) for m in range(self.num_multimodalities)], dim=0) for j in range(self.num_layers)]
        logvars_p = [torch.cat([logvars_ps[m][j].unsqueeze(0) for m in range(self.num_multimodalities)], dim=0) for j in range(self.num_layers)]
        for j in range(self.num_layers):
            _means_p, _logvars_p = self.experts(means_p[j], logvars_p[j])
            means_p[j] = _means_p
            logvars_p[j] = _logvars_p
        # sample z from prior (sampler is shared, so rnn_ps[0] is used)
        zs = self.rnn_ps[0].sample(means_p, logvars_p)
        # forward rnn
        for m in range(self.num_multimodalities):
            # unpack states_p, hiddens_p
            states_p = states_ps[m]
            # aggregate observations (prior)
            if not init_input_p:
                # unpack input and combined contexts
                reps_context, context_sizes = reps_context_sizes_pairs_c[m]
                reps_context = pad_sequence(reps_context, context_sizes)
                # sum-pool over the context observations of each episode
                reps_context = torch.sum(reps_context, dim=1)
                reps_context = reps_context.view(-1, self.nc_context, self.z_height, self.z_width)
                input_p = reps_context
                # cache so later steps reuse the aggregated context
                inputs_p += [input_p]
            else:
                input_p = inputs_p[m]
            # forward prior (rnn)
            hiddens_p, states_p = self.rnn_ps[m].forward_rnn(input_p, states_p, zs)
            # append to list
            #hiddens_ps[m] = hiddens_p
            states_ps[m] = states_p
        # update flag (all modality inputs are cached after the first step)
        init_input_p = True
        # append z to latent (concatenate layers along channel dim if stacked)
        latents += [torch.cat(zs, dim=1).unsqueeze(1)] if len(zs) > 1 else [zs[0].unsqueeze(1)]
    ''' forward renderers '''
    mean_recons = []
    for idx, (dim, input_tuple) in enumerate(zip(self.dims, input_tuples)):
        channels, height, width, _, mtype = dim
        mod_queries, mod_batch_sizes = input_tuple
        # forward image renderer
        if len(mod_queries) > 0:
            # forward image renderer
            mod_mean_recon = self._forward_renderer(idx, mod_queries, latents, num_episodes, mod_batch_sizes)
            # convert to gray scale
            if mtype == 'image' and is_grayscale:
                mod_mean_recon = rgb2gray(mod_mean_recon)
        else:
            # no queries for this modality: emit a zero placeholder
            # NOTE(review): batch size 1 here but 0 in predict() — confirm intended
            mod_mean_recon = reps_context.new_zeros(1, channels, height, width)
        # append to list
        mean_recons += [mod_mean_recon]
    ## temporary
    #img_mean_recon, hpt_mean_recon = mean_recons[0], mean_recons[1]
    # return
    #return img_mean_recon, hpt_mean_recon, None
    return mean_recons, None
def predict(self,
            reps_context_sizes_pairs_c,
            reps_context_sizes_pairs_t,
            input_tuples,
            num_steps=None, beta=1.0, std=1.0,
            is_grayscale=False,
            use_uint8=True):
    '''Evaluation pass: run posterior/prior and estimate a per-episode
    log-probability (importance-weighted style) instead of a training loss.

    Args:
        reps_context_sizes_pairs_c: per-modality (context reps, sizes) pairs.
        reps_context_sizes_pairs_t: per-modality (target reps, sizes) pairs.
        input_tuples: per-modality (target, queries, target_indices,
            batch_sizes) tuples.
        num_steps: number of DRAW steps; defaults to ``self.num_steps``.
        beta: KL weight (used only for the unused ``loss`` below).
        std: fixed observation std for the Gaussian likelihood.
        is_grayscale: convert image modalities to gray before scoring.
        use_uint8: if True score images in [0, 255] scale, else [0, 1].

    Returns:
        (mean_recons, latents, logprob, info): note that ``logprob`` (a
        per-episode estimate), not the ELBO-style ``loss``, is returned.
    '''
    # init
    num_episodes = len(reps_context_sizes_pairs_c[0][0])
    logprob_kl = 0
    loss_kl = 0
    ''' forward posterior / prior '''
    # init states
    states_ps, states_qs, hiddens_ps, hiddens_qs = [], [], [], []
    for m in range(self.num_multimodalities):
        states_ps += [self.rnn_ps[m].init_state(num_episodes, [self.z_height, self.z_width])]
        states_qs += [self.rnn_qs[m].init_state(num_episodes, [self.z_height, self.z_width])]
        hiddens_ps += [[state_p[0] for state_p in states_ps[m]]]
        #hiddens_qs += [[state_q[0] for state_q in states_qs[m]]]
    latents = []
    inputs_q = []
    inputs_p = []
    # first step aggregates contexts/targets once; later steps reuse caches
    init_input_q = False
    init_input_p = False
    for i in range(num_steps if num_steps is not None else self.num_steps):
        means_qs, logvars_qs = [], []
        for m in range(self.num_multimodalities):
            # unpack states_q, hiddens_q
            states_q = states_qs[m]
            #hiddens_q = hiddens_qs[m]
            hiddens_p = hiddens_ps[m]
            # aggregate observations (posterior)
            if not init_input_q:
                # unpack input and combined contexts
                reps_context, context_sizes = reps_context_sizes_pairs_c[m]
                reps_context = pad_sequence(reps_context, context_sizes)
                reps_context = torch.sum(reps_context, dim=1)
                reps_context = reps_context.view(-1, self.nc_context, self.z_height, self.z_width)
                input_p = reps_context
                # cache prior input too (consumed by the prior loop below)
                inputs_p += [input_p]
                reps_target, target_sizes = reps_context_sizes_pairs_t[m]
                reps_target = pad_sequence(reps_target, target_sizes)
                reps_target = torch.sum(reps_target, dim=1)
                reps_target = reps_target.view(-1, self.nc_context, self.z_height, self.z_width)
                # posterior sees targets concatenated with contexts
                input_q = torch.cat([reps_target, reps_context], dim=1)
                inputs_q += [input_q]
            else:
                input_q = inputs_q[m]
            # forward posterior
            means_q, logvars_q, hiddens_q, states_q = self.rnn_qs[m](input_q, states_q, hiddens_p)
            # append to list
            means_qs += [means_q]
            logvars_qs += [logvars_q]
            #hiddens_qs[m] = hiddens_q
            states_qs[m] = states_q
        # update flag
        init_input_q = True
        init_input_p = True
        # get experts (product-of-experts across modalities)
        # means_qs : a list num_multimodalities x num_layers x (batch, zc, zh, zw)
        #         -> num_layers x (batch, num_mod_data, zc, zh, zw)
        means_q = [torch.cat([means_qs[m][j].unsqueeze(0) for m in range(self.num_multimodalities)], dim=0) for j in range(self.num_layers)]
        logvars_q = [torch.cat([logvars_qs[m][j].unsqueeze(0) for m in range(self.num_multimodalities)], dim=0) for j in range(self.num_layers)]
        for j in range(self.num_layers):
            _means_q, _logvars_q = self.experts(means_q[j], logvars_q[j])
            means_q[j] = _means_q
            logvars_q[j] = _logvars_q
        # sample z from posterior
        zs = self.rnn_qs[0].sample(means_q, logvars_q)
        means_ps, logvars_ps = [], []
        for m in range(self.num_multimodalities):
            # unpack states_p, hiddens_p
            states_p = states_ps[m]
            # aggregate observations (prior)
            if not init_input_p:
                ## unpack input and combined contexts
                #reps_context, context_sizes = reps_context_sizes_pairs_c[m]
                #reps_context = pad_sequence(reps_context, context_sizes)
                #reps_context = torch.sum(reps_context, dim=1)
                #reps_context = reps_context.view(-1, self.nc_context, self.z_height, self.z_width)
                #input_p = reps_context
                #inputs_p += [input_p]
                # unreachable: the posterior loop above always fills inputs_p first
                raise ValueError
            else:
                input_p = inputs_p[m]
            # forward prior (teacher-forced with posterior samples zs)
            _, means_p, logvars_p, hiddens_p, states_p = self.rnn_ps[m](input_p, states_p, latents_q=zs)
            # append to list
            means_ps += [means_p]
            logvars_ps += [logvars_p]
            hiddens_ps[m] = hiddens_p
            states_ps[m] = states_p
        ## update flag
        #init_input_p = True
        # get experts
        # means_ps : a list num_multimodalities x num_layers x (batch, zc, zh, zw)
        #         -> num_layers x (batch, num_mod_data, zc, zh, zw)
        means_p = [torch.cat([means_ps[m][j].unsqueeze(0) for m in range(self.num_multimodalities)], dim=0) for j in range(self.num_layers)]
        logvars_p = [torch.cat([logvars_ps[m][j].unsqueeze(0) for m in range(self.num_multimodalities)], dim=0) for j in range(self.num_layers)]
        for j in range(self.num_layers):
            _means_p, _logvars_p = self.experts(means_p[j], logvars_p[j])
            means_p[j] = _means_p
            logvars_p[j] = _logvars_p
        # append z to latent
        latents += [torch.cat(zs, dim=1).unsqueeze(1)] if len(zs) > 1 else [zs[0].unsqueeze(1)]
        # update accumulated KL plus a per-episode log-ratio log p(z) - log q(z)
        for j in range(self.num_layers):
            loss_kl += loss_kld_gaussian_vs_gaussian(means_q[j], logvars_q[j], means_p[j], logvars_p[j])
            logprob_kl += logprob_gaussian(means_p[j],#.view(num_episodes, -1),
                                           logvars_p[j],#.view(num_episodes, -1),
                                           zs[j],#.view(num_episodes, -1),
                                           do_sum=False)
            logprob_kl += -logprob_gaussian(means_q[j],#.view(num_episodes, -1),
                                            logvars_q[j],#.view(num_episodes, -1),
                                            zs[j],#.view(num_episodes, -1),
                                            do_sum=False)
    ''' likelihood '''
    info = {}
    info['logprob_mod_likelihoods'] = []
    logprob_likelihood = 0
    info['mod_likelihoods'] = []
    loss_likelihood = 0
    mean_recons = []
    for idx, (dim, input_tuple) in enumerate(zip(self.dims, input_tuples)):
        channels, height, width, _, mtype = dim
        mod_target, mod_queries, mod_target_indices, mod_batch_sizes = input_tuple
        if len(mod_queries) > 0:# is not None:
            num_mod_data = len(mod_target)
            assert sum(mod_batch_sizes) == num_mod_data
            # run renderer (likelihood); detached — evaluation only
            mod_mean_recon = self._forward_renderer(idx, mod_queries, latents, num_episodes, mod_batch_sizes, mod_target_indices).detach()
            # convert to gray scale
            if mtype == 'image' and is_grayscale:
                mod_mean_recon = rgb2gray(mod_mean_recon)
                mod_target = rgb2gray(mod_target)
                if not use_uint8:
                    # rescale to [0, 1] before scoring
                    mod_mean_recon = mod_mean_recon/255
                    mod_target = mod_target/255
            elif mtype == 'image' and use_uint8:
                # rescale to [0, 255] before scoring
                mod_mean_recon = 255*mod_mean_recon
                mod_target = 255*mod_target
            # estimate recon loss
            loss_mod_likelihood = loss_recon_gaussian_w_fixed_var(mod_mean_recon, mod_target, std=std, add_logvar=False).detach()
            logprob_mod_likelihood = logprob_gaussian_w_fixed_var(
                mod_mean_recon, #.view(num_episodes, -1),
                mod_target, #.view(num_episodes, -1),
                std=std,
                do_sum=False).detach()
            # estimate recon loss without std
            loss_mod_likelihood_nostd = loss_recon_gaussian_w_fixed_var(mod_mean_recon.detach(), mod_target).detach()
            #logprob_mod_likelihood_nostd = logprob_gaussian_w_fixed_var(
            #        mod_mean_recon.detach(), #.view(num_episodes, -1),
            #        mod_target, #.view(num_episodes, -1),
            #        do_sum=False).detach()
            # sum per episode (aligns per-datum scores to their episodes)
            logprob_mod_likelihood = sum_tensor_per_episode(
                logprob_mod_likelihood,
                mod_batch_sizes,
                mod_target_indices,
                num_episodes)
        else:
            # no queries for this modality: empty placeholder, no scores
            mod_mean_recon = reps_context.new_zeros(0, channels, height, width)
            loss_mod_likelihood = None
            loss_mod_likelihood_nostd = None
            logprob_mod_likelihood = None
        # add to loss_likelihood
        if loss_mod_likelihood is not None:
            loss_likelihood += loss_mod_likelihood
        if logprob_mod_likelihood is not None:
            logprob_likelihood += logprob_mod_likelihood
        # append to list
        mean_recons += [mod_mean_recon]
        info['mod_likelihoods'] += [loss_mod_likelihood_nostd]
        info['logprob_mod_likelihoods'] += [logprob_mod_likelihood]
    ''' loss '''
    # sum loss (NOTE: computed for the info dict only; logprob is returned)
    loss = loss_likelihood + beta * loss_kl
    logprob = logprob_likelihood + logprob_kl
    # additional loss info
    info['likelihood'] = loss_likelihood.detach()
    info['kl'] = loss_kl.detach()
    # return
    #return img_mean_recon, hpt_mean_recon, None, loss, info
    #return mean_recons, latents, loss, info
    return mean_recons, latents, logprob, info
def infogain(self,
             reps_context_sizes_pairs_c,
             reps_context_sizes_pairs_t,
             input_tuples,
             num_steps=None, beta=1.0, std=1.0):
    '''Estimate per-episode information gain of the targets over the contexts.

    Runs the same posterior/prior recurrence as training, but instead of a
    KL it accumulates a Monte-Carlo estimate of log q(z) - log p(z) at the
    sampled z (``do_sum=False`` keeps it per-episode). No likelihood term
    is computed and no reconstructions are rendered.

    Args:
        reps_context_sizes_pairs_c: per-modality (context reps, sizes) pairs.
        reps_context_sizes_pairs_t: per-modality (target reps, sizes) pairs.
        input_tuples: per-modality target tuples (unused beyond interface
            symmetry with forward/predict).
        num_steps: number of DRAW steps; defaults to ``self.num_steps``.
        beta, std: accepted for interface symmetry; not used here.

    Returns:
        (None, latents, infogain_estimate, info) where the estimate is the
        detached accumulated log-ratio (named ``loss_kl`` below).
    '''
    # init
    num_episodes = len(reps_context_sizes_pairs_c[0][0])
    #assert len(set([index for _, _, mod_target_indices, _ in input_tuples for index in mod_target_indices])) == num_episodes
    loss_kl = 0
    ''' forward posterior / prior '''
    # init states
    states_ps, states_qs, hiddens_ps, hiddens_qs = [], [], [], []
    for m in range(self.num_multimodalities):
        states_ps += [self.rnn_ps[m].init_state(num_episodes, [self.z_height, self.z_width])]
        states_qs += [self.rnn_qs[m].init_state(num_episodes, [self.z_height, self.z_width])]
        hiddens_ps += [[state_p[0] for state_p in states_ps[m]]]
        #hiddens_qs += [[state_q[0] for state_q in states_qs[m]]]
    latents = []
    inputs_q = []
    inputs_p = []
    # first step aggregates contexts/targets once; later steps reuse caches
    init_input_q = False
    init_input_p = False
    for i in range(num_steps if num_steps is not None else self.num_steps):
        means_qs, logvars_qs = [], []
        for m in range(self.num_multimodalities):
            # unpack states_q, hiddens_q
            states_q = states_qs[m]
            #hiddens_q = hiddens_qs[m]
            hiddens_p = hiddens_ps[m]
            # aggregate observations (posterior)
            if not init_input_q:
                # unpack input and combined contexts
                reps_context, context_sizes = reps_context_sizes_pairs_c[m]
                reps_context = pad_sequence(reps_context, context_sizes)
                reps_context = torch.sum(reps_context, dim=1)
                reps_context = reps_context.view(-1, self.nc_context, self.z_height, self.z_width)
                input_p = reps_context
                # cache prior input too (consumed by the prior loop below)
                inputs_p += [input_p]
                reps_target, target_sizes = reps_context_sizes_pairs_t[m]
                reps_target = pad_sequence(reps_target, target_sizes)
                reps_target = torch.sum(reps_target, dim=1)
                reps_target = reps_target.view(-1, self.nc_context, self.z_height, self.z_width)
                # posterior sees targets concatenated with contexts
                input_q = torch.cat([reps_target, reps_context], dim=1)
                inputs_q += [input_q]
            else:
                input_q = inputs_q[m]
            # forward posterior
            means_q, logvars_q, hiddens_q, states_q = self.rnn_qs[m](input_q, states_q, hiddens_p)
            # append to list
            means_qs += [means_q]
            logvars_qs += [logvars_q]
            #hiddens_qs[m] = hiddens_q
            states_qs[m] = states_q
        # update flag
        init_input_q = True
        init_input_p = True
        # get experts (product-of-experts across modalities)
        # means_qs : a list num_multimodalities x num_layers x (batch, zc, zh, zw)
        #         -> num_layers x (batch, num_mod_data, zc, zh, zw)
        means_q = [torch.cat([means_qs[m][j].unsqueeze(0) for m in range(self.num_multimodalities)], dim=0) for j in range(self.num_layers)]
        logvars_q = [torch.cat([logvars_qs[m][j].unsqueeze(0) for m in range(self.num_multimodalities)], dim=0) for j in range(self.num_layers)]
        for j in range(self.num_layers):
            _means_q, _logvars_q = self.experts(means_q[j], logvars_q[j])
            means_q[j] = _means_q
            logvars_q[j] = _logvars_q
        # sample z from posterior
        zs = self.rnn_qs[0].sample(means_q, logvars_q)
        means_ps, logvars_ps = [], []
        for m in range(self.num_multimodalities):
            # unpack states_p, hiddens_p
            states_p = states_ps[m]
            # aggregate observations (prior)
            if not init_input_p:
                ## unpack input and combined contexts
                #reps_context, context_sizes = reps_context_sizes_pairs_c[m]
                #reps_context = pad_sequence(reps_context, context_sizes)
                #reps_context = torch.sum(reps_context, dim=1)
                #reps_context = reps_context.view(-1, self.nc_context, self.z_height, self.z_width)
                #input_p = reps_context
                #inputs_p += [input_p]
                # unreachable: the posterior loop above always fills inputs_p first
                raise ValueError
            else:
                input_p = inputs_p[m]
            # forward prior (teacher-forced with posterior samples zs)
            _, means_p, logvars_p, hiddens_p, states_p = self.rnn_ps[m](input_p, states_p, latents_q=zs)
            # append to list
            means_ps += [means_p]
            logvars_ps += [logvars_p]
            hiddens_ps[m] = hiddens_p
            states_ps[m] = states_p
        ## update flag
        #init_input_p = True
        # get experts
        # means_ps : a list num_multimodalities x num_layers x (batch, zc, zh, zw)
        #         -> num_layers x (batch, num_mod_data, zc, zh, zw)
        means_p = [torch.cat([means_ps[m][j].unsqueeze(0) for m in range(self.num_multimodalities)], dim=0) for j in range(self.num_layers)]
        logvars_p = [torch.cat([logvars_ps[m][j].unsqueeze(0) for m in range(self.num_multimodalities)], dim=0) for j in range(self.num_layers)]
        for j in range(self.num_layers):
            _means_p, _logvars_p = self.experts(means_p[j], logvars_p[j])
            means_p[j] = _means_p
            logvars_p[j] = _logvars_p
        # append z to latent
        latents += [torch.cat(zs, dim=1).unsqueeze(1)] if len(zs) > 1 else [zs[0].unsqueeze(1)]
        # update accumulated KL: Monte-Carlo log q(z) - log p(z) at sampled z
        for j in range(self.num_layers):
            #loss_kl += loss_kld_gaussian_vs_gaussian(means_q[j], logvars_q[j], means_p[j], logvars_p[j], do_sum=False)
            loss_kl += logprob_gaussian(means_q[j],#.view(num_episodes, -1),
                                        logvars_q[j],#.view(num_episodes, -1),
                                        zs[j],#.view(num_episodes, -1),
                                        do_sum=False)
            loss_kl += -logprob_gaussian(means_p[j],#.view(num_episodes, -1),
                                         logvars_p[j],#.view(num_episodes, -1),
                                         zs[j],#.view(num_episodes, -1),
                                         do_sum=False)
    ''' loss '''
    # additional loss info
    info = {}
    info['kl'] = loss_kl.detach()
    # return
    #return img_mean_recon, hpt_mean_recon, None, loss, info
    return None, latents, loss_kl.detach(), info
def infer(self,
          reps_context_sizes_pairs_c,
          num_steps=None,
          ):
    '''Sample a latent trajectory from the prior, conditioned on contexts.

    Same recurrence as :meth:`generate` but stops before rendering; used
    when only the latent sequence is needed.

    Args:
        reps_context_sizes_pairs_c: per-modality list of
            (context representations, context sizes) pairs.
        num_steps: number of DRAW steps; defaults to ``self.num_steps``.

    Returns:
        latents: list of per-step latent tensors, each shaped
        (batch, 1, channels, z_height, z_width).
    '''
    # init
    num_episodes = len(reps_context_sizes_pairs_c[0][0])
    # init states (only prior states are needed; states_qs/hiddens_qs unused)
    #states_ps, states_qs, hiddens_ps, hiddens_qs = [], [], [], []
    states_ps, states_qs, hiddens_qs = [], [], []
    for m in range(self.num_multimodalities):
        states_ps += [self.rnn_ps[m].init_state(num_episodes, [self.z_height, self.z_width])]
        #hiddens_ps += [[state_p[0] for state_p in states_ps[m]]]
    latents = []
    inputs_p = []
    # context aggregation is performed once on the first step, then cached
    init_input_p = False
    for i in range(num_steps if num_steps is not None else self.num_steps):
        means_ps, logvars_ps = [], []
        for m in range(self.num_multimodalities):
            # unpack states_p, hiddens_p
            states_p = states_ps[m]
            # forward prior (prob): per-modality prior parameters
            means_p, logvars_p = self.rnn_ps[m].forward_prob(states_p)
            # append to list
            means_ps += [means_p]
            logvars_ps += [logvars_p]
        # get experts (product-of-experts across modalities)
        # means_ps : a list num_multimodalities x num_layers x (batch, zc, zh, zw)
        #         -> num_layers x (batch, num_mod_data, zc, zh, zw)
        means_p = [torch.cat([means_ps[m][j].unsqueeze(0) for m in range(self.num_multimodalities)], dim=0) for j in range(self.num_layers)]
        logvars_p = [torch.cat([logvars_ps[m][j].unsqueeze(0) for m in range(self.num_multimodalities)], dim=0) for j in range(self.num_layers)]
        for j in range(self.num_layers):
            _means_p, _logvars_p = self.experts(means_p[j], logvars_p[j])
            means_p[j] = _means_p
            logvars_p[j] = _logvars_p
        # sample z from prior
        zs = self.rnn_ps[0].sample(means_p, logvars_p)
        # forward rnn
        for m in range(self.num_multimodalities):
            # unpack states_p, hiddens_p
            states_p = states_ps[m]
            # aggregate observations (prior)
            if not init_input_p:
                # unpack input and combined contexts
                reps_context, context_sizes = reps_context_sizes_pairs_c[m]
                reps_context = pad_sequence(reps_context, context_sizes)
                # sum-pool over the context observations of each episode
                reps_context = torch.sum(reps_context, dim=1)
                reps_context = reps_context.view(-1, self.nc_context, self.z_height, self.z_width)
                input_p = reps_context
                # cache so later steps reuse the aggregated context
                inputs_p += [input_p]
            else:
                input_p = inputs_p[m]
            # forward prior (rnn)
            hiddens_p, states_p = self.rnn_ps[m].forward_rnn(input_p, states_p, zs)
            # append to list
            #hiddens_ps[m] = hiddens_p
            states_ps[m] = states_p
        # update flag (all modality inputs are cached after the first step)
        init_input_p = True
        # append z to latent
        latents += [torch.cat(zs, dim=1).unsqueeze(1)] if len(zs) > 1 else [zs[0].unsqueeze(1)]
    # return
    return latents
def _forward_renderer(self, mod_idx, mod_queries, latents, num_episodes, mod_batch_sizes, mod_target_indices=None):
    '''Render mean reconstructions for one modality from the latent stack.

    Args:
        mod_idx: index of the modality (selects query encoder and renderer).
        mod_queries: list of query tensors for this modality.
        latents: list of per-step latent tensors; concatenated along dim 1.
        num_episodes: number of episodes in the batch.
        mod_batch_sizes: per-episode number of target data for this modality.
        mod_target_indices: episode indices that have targets for this
            modality; defaults to an empty list.

    Returns:
        mean_recon: renderer output (mean reconstruction) for this modality.
    '''
    # Fixed mutable-default-argument pitfall: the previous default was a
    # shared list literal. Callers see identical behavior.
    if mod_target_indices is None:
        mod_target_indices = []
    # embed queries with this modality's query encoder
    emb_mod_queries = [self.query_encoders[mod_idx](mod_query) for mod_query in mod_queries]
    # concat per-step latents into a single latent stack
    z = torch.cat(latents, dim=1)
    # run renderer (likelihood)
    mean_recon = self.renderers[mod_idx](z, emb_mod_queries, mod_batch_sizes, mod_target_indices, num_episodes)
    return mean_recon
class CGQN(nn.Module):
    '''Multimodal Consistent GQN.

    Wraps a CGQNConvDraw core with one context (representation) network per
    modality. Each modality is described by a 5-tuple
    (channels, height, width, nc_query, mtype) with mtype in
    {'image', 'haptic'}.
    '''
    def __init__(self,
                 dims=[(3, 64, 64, 7, 'image'),
                       (132, 1, 1, 7, 'haptic')],
                 #im_height, # image height
                 #im_channels, # number of channels in image
                 #nc_img_query=7, # kernel size (number of channels) for query
                 #hp_height=1, # haptic height
                 #hp_channels=132, # number of channels in haptic
                 #nc_hpt_query=7, #nc_hpt_query=20, # kernel size (number of channels) for query
                 nc_enc=32, # kernel size (number of channels) for encoder
                 nc_lstm=64, # kernel size (number of channels) for lstm
                 nc_context=256, # kernel size (number of channels) for representation
                 nz=3, # size of latent variable
                 num_steps=4, # number of steps in Draw
                 num_layers=1, # number of StackedConvDrawEncoderCell/StackedConvDrawDecoderCell layers
                 ):
        # NOTE(review): `dims` uses a mutable (list) default; it is only
        # read here, so the shared-default pitfall is benign, but a tuple
        # default would be safer.
        super().__init__()
        # check conditions
        assert len(dims) > 0
        for dim in dims:
            assert len(dim) == 5, dim
            channels, height, width, nc_query, mtype = dim
            #assert height == width
            assert mtype in ['image', 'haptic']
        # init
        self.dims = dims
        self.num_multimodalities = len(dims)
        ## temporary
        #assert len(dims) == 2
        #im_height, im_channels, nc_img_query, mtype = dims[0]
        #hp_height, hp_channels, nc_hpt_query, mtype = dims[1]
        # define networks
        self.convdraw = CGQNConvDraw(
            dims,
            #im_height, im_channels, nc_img_query,
            #hp_height, hp_channels, nc_hpt_query,
            nc_enc, nc_lstm, nc_context,
            nz,
            num_steps,
            num_layers,
        )
        # one representation (context) network per modality
        repnets = []
        for dim in dims:
            channels, height, width, nc_query, mtype = dim
            if mtype == 'image':
                repnets += [ImageContextNetwork(
                    nheight=height,
                    nwidth=width,
                    nchannels=channels,
                    nz=self.convdraw.nc_context,
                    train_init_representation=True,
                )]
            elif mtype == 'haptic':
                repnets += [HapticContextNetwork(
                    nheight=height,
                    nchannels=channels,
                    nhidden = 512, #1024,
                    nz=self.convdraw.nc_context*self.convdraw.z_height*self.convdraw.z_width,
                    train_init_representation=True,
                )]
            else:
                raise NotImplementedError
        self.repnets = nn.ModuleList(repnets)

    #def forward(self, contexts, targets, num_steps=None, beta=1.0, std=1.0):
    def _forward(self, contexts, targets, merge_cxt_tgt=False):
        '''Encode contexts and targets and package the target tensors.

        Input:
            contexts: a list, whose element is context
                      where context = (image, camera)
            targets: a list, whose element is target
                      where target = (image, camera)
            merge_cxt_tgt: if True, targets are merged with contexts before
                      encoding the "target" representations.
        Output:
            representations = batch_size x num_channels x num_height x num_width
        '''
        # init: each episode carries (data, query) pairs for every modality
        assert len(contexts[0]) == 2*self.num_multimodalities
        ''' run repnets (contexts) '''
        # run repnets (contexts)
        reps_context_sizes_pairs_c = []
        for i in range(self.num_multimodalities):
            # split contexts and targets
            mod_contexts = [(data_query_pairs[i*2], data_query_pairs[i*2+1]) for data_query_pairs in contexts] # (data, query) pair
            # get context representation
            mod_reps_c, _, mod_context_sizes_c = self.repnets[i](mod_contexts) # context of i-th modularity
            mod_reps_c = [mod_rep_c.view(-1, self.convdraw.nc_context, self.convdraw.z_height, self.convdraw.z_width) for mod_rep_c in mod_reps_c]
            # append to list
            reps_context_sizes_pairs_c += [(mod_reps_c, mod_context_sizes_c)]
        ## combine reps
        #reps_c, context_sizes_c = combine_reps(reps_context_sizes_pairs_c)
        ''' run repnets (targets) '''
        # run repnets (targets)
        if merge_cxt_tgt:
            contexts_and_targets = merge_two_batch(contexts, targets)
        else:
            contexts_and_targets = targets #merge_two_batch(contexts, targets)
        reps_context_sizes_pairs_t = []
        for i in range(self.num_multimodalities):
            # split contexts and targets
            mod_targets = [(data_query_pairs[i*2], data_query_pairs[i*2+1]) for data_query_pairs in contexts_and_targets]
            # get context representation
            mod_reps_t, _, mod_context_sizes_t = self.repnets[i](mod_targets) # context of i-th modularity
            mod_reps_t = [mod_rep_t.view(-1, self.convdraw.nc_context, self.convdraw.z_height, self.convdraw.z_width) for mod_rep_t in mod_reps_t]
            # append to list
            reps_context_sizes_pairs_t += [(mod_reps_t, mod_context_sizes_t)]
        ## combine reps
        #reps_t, context_sizes_t = combine_reps(reps_context_sizes_pairs_t)
        ''' pre-processing targets '''
        # pre-processing targets: collect per-modality raw data, queries and
        # the episode indices that actually carry data for that modality
        data_queries_target_indices_batch_sizes = []
        for i in range(self.num_multimodalities):
            mod_data, mod_queries, mod_target_indices = [], [], []
            for idx, target in enumerate(targets):
                # unpack
                mod_datum, mod_query = target[i*2], target[i*2+1]
                #assert daum is not None or haptic is not None, 'empty target'
                # add targets (episodes without data for this modality are skipped)
                mod_data += [mod_datum] if mod_datum is not None else []
                mod_queries += [mod_query] if mod_datum is not None else []
                mod_target_indices += [idx] if mod_datum is not None else []
            assert len(mod_data) == len(mod_queries)
            # concatenate
            mod_data = torch.cat(mod_data, dim=0) if len(mod_data) > 0 else None
            # get batch_sizes
            mod_batch_sizes = [targets[idx][i*2].size(0) for idx in mod_target_indices]
            # append to list
            data_queries_target_indices_batch_sizes += [(mod_data, mod_queries, mod_target_indices, mod_batch_sizes)]
        return (reps_context_sizes_pairs_c,
                reps_context_sizes_pairs_t,
                data_queries_target_indices_batch_sizes)

    def forward(self, contexts, targets, num_steps=None, beta=1.0, std=1.0, is_grayscale=False):
        '''Training pass: encode inputs, then run the conv-draw objective.

        Returns (mean_recons, latent, loss, info) from the conv-draw core.
        '''
        (reps_context_sizes_pairs_c,
         reps_context_sizes_pairs_t,
         data_queries_target_indices_batch_sizes) = self._forward(contexts, targets)
        # run conv-draw
        (mean_recons,
         latent,
         loss,
         info) = self.convdraw(
             reps_context_sizes_pairs_c,
             reps_context_sizes_pairs_t,
             data_queries_target_indices_batch_sizes,
             num_steps=num_steps, beta=beta, std=std, is_grayscale=is_grayscale)
        return mean_recons, latent, loss, info

    def predict(self, contexts, targets, num_steps=None, beta=1.0, std=1.0, is_grayscale=False, use_uint8=True):
        '''Evaluation pass: delegates to convdraw.predict (returns a
        per-episode log-probability estimate as the third element).'''
        (reps_context_sizes_pairs_c,
         reps_context_sizes_pairs_t,
         data_queries_target_indices_batch_sizes) = self._forward(contexts, targets)
        # run conv-draw
        (mean_recons,
         latent,
         loss,
         info) = self.convdraw.predict(
             reps_context_sizes_pairs_c,
             reps_context_sizes_pairs_t,
             data_queries_target_indices_batch_sizes,
             num_steps=num_steps, beta=beta, std=std, is_grayscale=is_grayscale, use_uint8=use_uint8)
        return mean_recons, latent, loss, info

    def infogain(self, contexts, targets, num_steps=None, beta=1.0, std=1.0):
        '''Estimate information gain of targets over contexts
        (delegates to convdraw.infogain; first return element is None).'''
        (reps_context_sizes_pairs_c,
         reps_context_sizes_pairs_t,
         data_queries_target_indices_batch_sizes) = self._forward(contexts, targets)
        # run conv-draw
        (mean_recons,
         latent,
         loss,
         info) = self.convdraw.infogain(
             reps_context_sizes_pairs_c,
             reps_context_sizes_pairs_t,
             data_queries_target_indices_batch_sizes,
             num_steps=num_steps, beta=beta, std=std)
        return mean_recons, latent, loss, info

    def _generate(self, contexts):
        '''Encode contexts only (no targets); mirrors the first stage of
        :meth:`_forward`.

        Input:
            contexts: a list, whose element is context
                      where context = (image, camera)
            targets: a list, whose element is target
                      where target = (image, camera)
        Output:
            representations = batch_size x num_channels x num_height x num_width
        '''
        # init
        assert len(contexts[0]) == 2*self.num_multimodalities
        ''' run repnets (contexts) '''
        # run repnets (contexts)
        reps_context_sizes_pairs_c = []
        for i in range(self.num_multimodalities):
            # split contexts and targets
            mod_contexts = [(data_query_pairs[i*2], data_query_pairs[i*2+1]) for data_query_pairs in contexts] # (data, query) pair
            # get context representation
            mod_reps_c, _, mod_context_sizes_c = self.repnets[i](mod_contexts) # context of i-th modularity
            mod_reps_c = [mod_rep_c.view(-1, self.convdraw.nc_context, self.convdraw.z_height, self.convdraw.z_width) for mod_rep_c in mod_reps_c]
            # append to list
            reps_context_sizes_pairs_c += [(mod_reps_c, mod_context_sizes_c)]
        ## combine reps
        #reps_c, context_sizes_c = combine_reps(reps_context_sizes_pairs_c)
        return reps_context_sizes_pairs_c

    def generate(self, contexts, queries, num_steps=None, is_grayscale=False):
        '''Generate reconstructions for the given queries from contexts only.

        queries: per-modality list of query lists (or None to skip a
        modality). Returns (mean_recons, latents) from convdraw.generate.
        '''
        reps_context_sizes_pairs_c = self._generate(contexts)
        ''' pre-processing targets '''
        # pre-processing img targets
        queries_batch_sizes = []
        for i in range(self.num_multimodalities):
            mod_queries = queries[i]
            # pre-processing img targets
            if mod_queries is not None:
                mod_batch_sizes = [mod_query.size(0) for mod_query in mod_queries]
            else:
                # no queries for this modality
                mod_batch_sizes = []
                mod_queries = [] #None
            # append to list
            queries_batch_sizes += [(mod_queries, mod_batch_sizes)]
        # run conv-draw
        #img_mean_recon, hpt_mean_recon, latent = self.convdraw.generate(
        mean_recons, latents = self.convdraw.generate(
            reps_context_sizes_pairs_c,
            #reps_c, context_sizes_c,
            queries_batch_sizes,
            #img_queries=img_queries, img_batch_sizes=img_batch_sizes,
            #hpt_queries=hpt_queries, hpt_batch_sizes=hpt_batch_sizes,
            num_steps=num_steps,
            is_grayscale=is_grayscale)
        #return img_mean_recon, hpt_mean_recon, latent
        return mean_recons, latents

    def infer(self, contexts, num_steps=None):
        '''Sample a latent trajectory from the prior given contexts only.'''
        reps_context_sizes_pairs_c = self._generate(contexts)
        # run conv-draw
        latents = self.convdraw.infer(
            reps_context_sizes_pairs_c,
            num_steps=num_steps)
        return latents
##############
class CGQN_v1(CGQN):
    """CGQN preset: 32-channel LSTM, nz=4 latents, 12 DRAW steps."""

    def __init__(self,
                 dims,
                 nc_enc=32, # kernel size (number of channels) for encoder
                 nc_lstm=32, # kernel size (number of channels) for lstm
                 nc_context=256, # kernel size (number of channels) for representation
                 nz=4, # size of latent variable
                 num_steps=12, # number of steps in Draw
                 num_layers=1,
                 ):
        # This subclass exists only to pin a default configuration; every
        # setting is forwarded to the CGQN base class unchanged.
        config = dict(
            dims=dims,
            nc_enc=nc_enc,
            nc_lstm=nc_lstm,
            nc_context=nc_context,
            nz=nz,
            num_steps=num_steps,
            num_layers=num_layers,
        )
        super().__init__(**config)
class CGQN_v2(CGQN):
    """CGQN preset: 64-channel LSTM, nz=3 latents, 4 DRAW steps."""

    def __init__(self,
                 dims,
                 nc_enc=32, # kernel size (number of channels) for encoder
                 nc_lstm=64, # kernel size (number of channels) for lstm
                 nc_context=256, # kernel size (number of channels) for representation
                 nz=3, # size of latent variable
                 num_steps=4, # number of steps in Draw
                 num_layers=1,
                 ):
        # This subclass exists only to pin a default configuration; every
        # setting is forwarded to the CGQN base class unchanged.
        config = dict(
            dims=dims,
            nc_enc=nc_enc,
            nc_lstm=nc_lstm,
            nc_context=nc_context,
            nz=nz,
            num_steps=num_steps,
            num_layers=num_layers,
        )
        super().__init__(**config)
class CGQN_v3(CGQN):
    """CGQN preset: 64-channel LSTM, nz=3 latents, 8 DRAW steps."""

    def __init__(self,
                 dims,
                 nc_enc=32, # kernel size (number of channels) for encoder
                 nc_lstm=64, # kernel size (number of channels) for lstm
                 nc_context=256, # kernel size (number of channels) for representation
                 nz=3, # size of latent variable
                 num_steps=8, # number of steps in Draw
                 num_layers=1,
                 ):
        # This subclass exists only to pin a default configuration; every
        # setting is forwarded to the CGQN base class unchanged.
        config = dict(
            dims=dims,
            nc_enc=nc_enc,
            nc_lstm=nc_lstm,
            nc_context=nc_context,
            nz=nz,
            num_steps=num_steps,
            num_layers=num_layers,
        )
        super().__init__(**config)
class CGQN_v4(CGQN):
    """CGQN preset: 64-channel LSTM, nz=3 latents, 12 DRAW steps."""

    def __init__(self,
                 dims,
                 nc_enc=32, # kernel size (number of channels) for encoder
                 nc_lstm=64, # kernel size (number of channels) for lstm
                 nc_context=256, # kernel size (number of channels) for representation
                 nz=3, # size of latent variable
                 num_steps=12, # number of steps in Draw
                 num_layers=1,
                 ):
        # This subclass exists only to pin a default configuration; every
        # setting is forwarded to the CGQN base class unchanged.
        config = dict(
            dims=dims,
            nc_enc=nc_enc,
            nc_lstm=nc_lstm,
            nc_context=nc_context,
            nz=nz,
            num_steps=num_steps,
            num_layers=num_layers,
        )
        super().__init__(**config)
| 43.446693 | 148 | 0.557613 |
c5ad6c220b9e43e11e12da7c93d7df4ff76c449d | 5,091 | py | Python | labscript_devices/PrawnBlaster/runviewer_parsers.py | philipstarkey/labscript-devices | 141a299024daaf9c9580ab4852e2901ecb082ea7 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | labscript_devices/PrawnBlaster/runviewer_parsers.py | philipstarkey/labscript-devices | 141a299024daaf9c9580ab4852e2901ecb082ea7 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | labscript_devices/PrawnBlaster/runviewer_parsers.py | philipstarkey/labscript-devices | 141a299024daaf9c9580ab4852e2901ecb082ea7 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | #####################################################################
# #
# /labscript_devices/PrawnBlaster/runviewer_parsers.py #
# #
# Copyright 2021, Philip Starkey #
# #
# This file is part of labscript_devices, in the labscript suite #
# (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
import labscript_utils.h5_lock # noqa: F401
import h5py
import numpy as np
import labscript_utils.properties as properties
class PrawnBlasterParser(object):
    """Runviewer parser for the PrawnBlaster Pseudoclocks."""

    def __init__(self, path, device):
        """
        Args:
            path (str): path to h5 shot file
            device (str): labscript name of PrawnBlaster device
        """
        # Shot file location and device identity, used later to look up the
        # pulse programs and device properties stored in the HDF5 shot file.
        self.path = path
        self.name = device.name
        self.device = device

    def get_traces(self, add_trace, clock=None):
        """Reads the shot file and extracts hardware instructions to produce
        runviewer traces.

        Args:
            add_trace (func): function handle that adds traces to runviewer
            clock (tuple, optional): clock times from timing device, if not
                the primary pseudoclock

        Returns:
            dict: Dictionary of clocklines and triggers derived from instructions
        """
        if clock is not None:
            # The parent clock is given as (times, values); recover the times
            # of its rising edges, which are the triggers this device sees.
            times, clock_value = clock[0], clock[1]
            clock_indices = np.where((clock_value[1:] - clock_value[:-1]) == 1)[0] + 1
            # If initial clock value is 1, then this counts as a rising edge
            # (clock should be 0 before experiment) but this is not picked up
            # by the above code. So we insert it!
            if clock_value[0] == 1:
                clock_indices = np.insert(clock_indices, 0, 0)
            clock_ticks = times[clock_indices]

        # get the pulse program
        pulse_programs = []
        with h5py.File(self.path, "r") as f:
            # Get the device properties
            device_props = properties.get(f, self.name, "device_properties")
            conn_props = properties.get(f, self.name, "connection_table_properties")

            # Timing constants written into the shot file for this device.
            self.clock_resolution = device_props["clock_resolution"]
            self.trigger_delay = device_props["trigger_delay"]
            self.wait_delay = device_props["wait_delay"]

            # Extract the pulse programs (one table per pseudoclock)
            num_pseudoclocks = conn_props["num_pseudoclocks"]
            for i in range(num_pseudoclocks):
                pulse_programs.append(f[f"devices/{self.name}/PULSE_PROGRAM_{i}"][:])

        # Generate clocklines and triggers
        clocklines_and_triggers = {}

        for pulse_program in pulse_programs:
            # NOTE(review): ``time`` shadows the stdlib module name; here it
            # is simply a local list of edge times.
            time = []
            states = []
            trigger_index = 0
            # Start at t=0 for the primary pseudoclock, otherwise at the
            # first parent-clock rising edge plus the trigger latency.
            t = 0 if clock is None else clock_ticks[trigger_index] + self.trigger_delay
            trigger_index += 1

            # half_period counts appear to be in units of
            # clock_resolution/2 seconds — units assumed, confirm upstream.
            clock_factor = self.clock_resolution / 2.0

            last_instruction_was_wait = False
            for row in pulse_program:
                if row["reps"] == 0 and not last_instruction_was_wait:  # WAIT
                    last_instruction_was_wait = True
                    if clock is not None:
                        # Resume at the next trigger from the parent device.
                        t = clock_ticks[trigger_index] + self.trigger_delay
                        trigger_index += 1
                    else:
                        t += self.wait_delay
                elif last_instruction_was_wait:
                    # two waits in a row means an indefinite wait, so we just skip this
                    # instruction.
                    last_instruction_was_wait = False
                    continue
                else:
                    last_instruction_was_wait = False
                    # Emit one high->low square pulse per repetition.
                    for i in range(row["reps"]):
                        for j in range(1, -1, -1):
                            time.append(t)
                            states.append(j)
                            t += row["half_period"] * clock_factor

            # NOTE(review): this rebinds the ``clock`` parameter. With more
            # than one pulse program, the ``clock is None`` checks above see
            # the rebound value on later iterations, and only the last
            # program's trace is used below — confirm against upstream
            # labscript_devices before relying on multi-pseudoclock output.
            clock = (np.array(time), np.array(states))

        for pseudoclock_name, pseudoclock in self.device.child_list.items():
            for clock_line_name, clock_line in pseudoclock.child_list.items():
                # Ignore the dummy internal wait monitor clockline
                if clock_line.parent_port.startswith("GPIO"):
                    clocklines_and_triggers[clock_line_name] = clock
                    add_trace(
                        clock_line_name, clock, self.name, clock_line.parent_port
                    )

        return clocklines_and_triggers
| 43.512821 | 87 | 0.52033 |
9657282962c87ac2cd87ae4ae0dd010d76c84427 | 1,800 | py | Python | WCS/WCS_test-USACE.py | petercunning/notebook | 5b26f2dc96bcb36434542b397de6ca5fa3b61a0a | [
"MIT"
] | 32 | 2015-01-07T01:48:05.000Z | 2022-03-02T07:07:42.000Z | WCS/WCS_test-USACE.py | petercunning/notebook | 5b26f2dc96bcb36434542b397de6ca5fa3b61a0a | [
"MIT"
] | 1 | 2015-04-13T21:00:18.000Z | 2015-04-13T21:00:18.000Z | WCS/WCS_test-USACE.py | petercunning/notebook | 5b26f2dc96bcb36434542b397de6ca5fa3b61a0a | [
"MIT"
] | 30 | 2015-01-28T09:31:29.000Z | 2022-03-07T03:08:28.000Z | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=1>
# Extract data from USACE WCS Service
# <codecell>
import matplotlib.pyplot as plt
from owslib.wcs import WebCoverageService
%matplotlib inline
# <codecell>
endpoint='http://gis.sam.usace.army.mil/server/services/JALBTCX/NCMP_BareEarth_1m/ImageServer/WCSServer?request=GetCapabilities&service=WCS'
# <codecell>
wcs = WebCoverageService(endpoint,version='1.0.0',timeout=60)
# <codecell>
for k,v in wcs.contents.iteritems():
print v.title
# <codecell>
wcs.contents
# <codecell>
lidar = wcs['1']
print lidar.title
print lidar.boundingBoxWGS84
print lidar.timelimits
print lidar.supportedFormats
# <codecell>
# try Plum Island Sound Region
bbox = (-70.825,42.701,-70.7526,42.762)
output = wcs.getCoverage(identifier="1",bbox=bbox,crs='EPSG:4326',format='GeoTIFF',
resx=0.0001, resy=0.0001)
# <codecell>
f=open('test.tif','wb')
f.write(output.read())
f.close()
# <codecell>
from osgeo import gdal
gdal.UseExceptions()
# <codecell>
ds = gdal.Open('test.tif')
# <codecell>
band = ds.GetRasterBand(1)
elevation = band.ReadAsArray()
nrows, ncols = elevation.shape
# I'm making the assumption that the image isn't rotated/skewed/etc.
# This is not the correct method in general, but let's ignore that for now
# If dxdy or dydx aren't 0, then this will be incorrect
x0, dx, dxdy, y0, dydx, dy = ds.GetGeoTransform()
if dxdy == 0.0:
x1 = x0 + dx * ncols
y1 = y0 + dy * nrows
# <codecell>
import cartopy.crs as ccrs
# <codecell>
print x0,x1,y1,y0
# <codecell>
plt.figure(figsize=(8,8))
ax = plt.axes(projection=ccrs.PlateCarree())
plt.imshow(elevation, cmap='jet', extent=[x0, x1, y1, y0],transform=ccrs.PlateCarree());
ax.gridlines(draw_labels=True);
# <codecell>
| 19.148936 | 140 | 0.702778 |
1a02ccb1d682654f9baa4630ec1b69658be490a7 | 20,570 | py | Python | example/psychopy_integration/audio_experiment_lastrun.py | m2march/runAudioExperiment | 642df2da9b9dea743278f89149176893a83929b9 | [
"MIT"
] | null | null | null | example/psychopy_integration/audio_experiment_lastrun.py | m2march/runAudioExperiment | 642df2da9b9dea743278f89149176893a83929b9 | [
"MIT"
] | null | null | null | example/psychopy_integration/audio_experiment_lastrun.py | m2march/runAudioExperiment | 642df2da9b9dea743278f89149176893a83929b9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This experiment was created using PsychoPy3 Experiment Builder (v2021.1.1),
on Sun 28 Feb 2021 04:15:36 PM -03
If you publish work using this script the most relevant publication is:
Peirce J, Gray JR, Simpson S, MacAskill M, Höchenberger R, Sogo H, Kastman E, Lindeløv JK. (2019)
PsychoPy2: Experiments in behavior made easy Behav Res 51: 195.
https://doi.org/10.3758/s13428-018-01193-y
"""
from __future__ import absolute_import, division
from psychopy import locale_setup
from psychopy import prefs
from psychopy import sound, gui, visual, core, data, event, logging, clock, colors
from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,
STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import (sin, cos, tan, log, log10, pi, average,
sqrt, std, deg2rad, rad2deg, linspace, asarray)
from numpy.random import random, randint, normal, shuffle, choice as randchoice
import os # handy system and path functions
import sys # to get file system encoding
from psychopy.hardware import keyboard
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__))
os.chdir(_thisDir)
# Store info about the experiment session
psychopyVersion = '2021.1.1'
expName = 'audio_experiment' # from the Builder filename that created this script
expInfo = {'participant': '', 'session': '001'}
dlg = gui.DlgFromDict(dictionary=expInfo, sortKeys=False, title=expName)
if dlg.OK == False:
core.quit() # user pressed cancel
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
expInfo['psychopyVersion'] = psychopyVersion
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + u'data/%s_%s_%s' % (expInfo['participant'], expName, expInfo['date'])
# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
extraInfo=expInfo, runtimeInfo=None,
originPath='/home/march/science/music/code/lib/runAudioExperiment/example/psychopy_integration/audio_experiment_lastrun.py',
savePickle=True, saveWideText=True,
dataFileName=filename)
# save a log file for detail verbose info
logFile = logging.LogFile(filename+'.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file
endExpNow = False # flag for 'escape' or other condition => quit the exp
frameTolerance = 0.001 # how close to onset before 'same' frame
# Start Code - component code to be run after the window creation
# Setup the Window
win = visual.Window(
size=(1024, 768), fullscr=True, screen=0,
winType='pyglet', allowGUI=False, allowStencil=False,
monitor='testMonitor', color=[0,0,0], colorSpace='rgb',
blendMode='avg', useFBO=True,
units='height')
# store frame rate of monitor if we can measure it
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] != None:
frameDur = 1.0 / round(expInfo['frameRate'])
else:
frameDur = 1.0 / 60.0 # could not measure, so guess
# create a default keyboard (e.g. to check for escape)
defaultKeyboard = keyboard.Keyboard()
# Initialize components for Routine "intro"
introClock = core.Clock()
text = visual.TextStim(win=win, name='text',
text='Audio recording experiment.',
font='Open Sans',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0.0,
color='white', colorSpace='rgb', opacity=None,
languageStyle='LTR',
depth=0.0);
# Initialize components for Routine "trial"
trialClock = core.Clock()
# Required libraries (imported here, mid-script, because this is the
# Builder-generated "Begin Experiment" code block of the trial routine).
import sounddevice
import pydub
from scipy.io import wavfile
import m2.runAudioExperiment.experiment_config as ec

# Configuration
## Sound device name as string partial match of names provided by:
## `$> runAudioExperiment -l`
sound_device = "USB Audio"

# Device name is used to find the numeric device id sounddevice expects
device_info, device_id = ec.find_sound_device(sound_device)

# Selected device information is printed (ends up in the session log)
print("Sound device selected info:", device_info)

# Selected device is configured as the default in `sounddevice`, so later
# playrec() calls use it implicitly
sounddevice.default.device = device_id

# Default stimuli, overridden by the trials loop on each repeat
stimuli = "training_1.wav"

# Creating output folders (recordings/{participant}/); mkdir is only
# attempted when the directory does not already exist
if not os.path.isdir('recordings'):
    os.mkdir('recordings')
participant = expInfo['participant']
r2 = os.path.join('recordings', participant)
if not os.path.isdir(r2):
    os.mkdir(r2)
# Function to create recordings filenames.
def recording_path(participant, stimuli):
    """Return the output path for a participant's recording of *stimuli*.

    E.g. ``stims/foo.wav`` -> ``recordings/<participant>/foo.rec.wav``.
    """
    stem, extension = os.path.splitext(os.path.basename(stimuli))
    filename = "{0}.rec{1}".format(stem, extension)
    return os.path.join('recordings', participant, filename)
# Initialize components for Routine "trial_break"
trial_breakClock = core.Clock()
text_2 = visual.TextStim(win=win, name='text_2',
text='Break',
font='Open Sans',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0.0,
color='white', colorSpace='rgb', opacity=None,
languageStyle='LTR',
depth=0.0);
# Initialize components for Routine "thanks"
thanksClock = core.Clock()
text_3 = visual.TextStim(win=win, name='text_3',
text='Thanks for participating',
font='Open Sans',
pos=(0, 0), height=0.1, wrapWidth=None, ori=0.0,
color='white', colorSpace='rgb', opacity=None,
languageStyle='LTR',
depth=0.0);
key_resp = keyboard.Keyboard()
# Create some handy timers
globalClock = core.Clock() # to track the time since experiment started
routineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine
# ------Prepare to start Routine "intro"-------
continueRoutine = True
routineTimer.add(1.000000)
# update component parameters for each repeat
# keep track of which components have finished
introComponents = [text]
for thisComponent in introComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
introClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "intro"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = introClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=introClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text* updates
if text.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
text.frameNStart = frameN # exact frame index
text.tStart = t # local t and not account for scr refresh
text.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(text, 'tStartRefresh') # time at next scr refresh
text.setAutoDraw(True)
if text.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > text.tStartRefresh + 1.0-frameTolerance:
# keep track of stop time/frame for later
text.tStop = t # not accounting for scr refresh
text.frameNStop = frameN # exact frame index
win.timeOnFlip(text, 'tStopRefresh') # time at next scr refresh
text.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in introComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "intro"-------
for thisComponent in introComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('text.started', text.tStartRefresh)
thisExp.addData('text.stopped', text.tStopRefresh)
# set up handler to look after randomisation of conditions etc
trials = data.TrialHandler(nReps=1.0, method='random',
extraInfo=expInfo, originPath=-1,
trialList=data.importConditions('stims/stims.csv', selection='0'),
seed=None, name='trials')
thisExp.addLoop(trials) # add the loop to the experiment
thisTrial = trials.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
if thisTrial != None:
for paramName in thisTrial:
exec('{} = thisTrial[paramName]'.format(paramName))
for thisTrial in trials:
currentLoop = trials
# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
if thisTrial != None:
for paramName in thisTrial:
exec('{} = thisTrial[paramName]'.format(paramName))
# ------Prepare to start Routine "trial"-------
continueRoutine = True
# update component parameters for each repeat
# from loop config: `stimuli`
# as path to audio to be played
# clear screen
win.flip()
# audio is opened
seg = pydub.AudioSegment.from_file(stimuli)
data = np.array(seg.get_array_of_samples()).reshape((-1, seg.channels))
# Audio is played while recording the input device
# This is a blocking call, meaning that any other component
# configured in the routine will execute after the audio is played
rec_data = sounddevice.playrec(data,
samplerate=seg.frame_rate,
blocking=True,
channels=2)
# keep track of which components have finished
trialComponents = []
for thisComponent in trialComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
trialClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "trial"-------
while continueRoutine:
# get current time
t = trialClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=trialClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in trialComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "trial"-------
for thisComponent in trialComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# Recorded audio is saved in the participant's folder with
# extension ".rec.wav" into `recordings/{participant}`.
output_stimuli = recording_path(expInfo['participant'], stimuli)
wavfile.write(output_stimuli, seg.frame_rate, rec_data)
# the Routine "trial" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# ------Prepare to start Routine "trial_break"-------
continueRoutine = True
routineTimer.add(2.000000)
# update component parameters for each repeat
# keep track of which components have finished
trial_breakComponents = [text_2]
for thisComponent in trial_breakComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
trial_breakClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "trial_break"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = trial_breakClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=trial_breakClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_2* updates
if text_2.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
text_2.frameNStart = frameN # exact frame index
text_2.tStart = t # local t and not account for scr refresh
text_2.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(text_2, 'tStartRefresh') # time at next scr refresh
text_2.setAutoDraw(True)
if text_2.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > text_2.tStartRefresh + 2.0-frameTolerance:
# keep track of stop time/frame for later
text_2.tStop = t # not accounting for scr refresh
text_2.frameNStop = frameN # exact frame index
win.timeOnFlip(text_2, 'tStopRefresh') # time at next scr refresh
text_2.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in trial_breakComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "trial_break"-------
for thisComponent in trial_breakComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
trials.addData('text_2.started', text_2.tStartRefresh)
trials.addData('text_2.stopped', text_2.tStopRefresh)
thisExp.nextEntry()
# completed 1.0 repeats of 'trials'
# ------Prepare to start Routine "thanks"-------
continueRoutine = True
# update component parameters for each repeat
key_resp.keys = []
key_resp.rt = []
_key_resp_allKeys = []
# keep track of which components have finished
thanksComponents = [text_3, key_resp]
for thisComponent in thanksComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
thanksClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "thanks"-------
while continueRoutine:
# get current time
t = thanksClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=thanksClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *text_3* updates
if text_3.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
text_3.frameNStart = frameN # exact frame index
text_3.tStart = t # local t and not account for scr refresh
text_3.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(text_3, 'tStartRefresh') # time at next scr refresh
text_3.setAutoDraw(True)
# *key_resp* updates
waitOnFlip = False
if key_resp.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
key_resp.frameNStart = frameN # exact frame index
key_resp.tStart = t # local t and not account for scr refresh
key_resp.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(key_resp, 'tStartRefresh') # time at next scr refresh
key_resp.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(key_resp.clock.reset) # t=0 on next screen flip
win.callOnFlip(key_resp.clearEvents, eventType='keyboard') # clear events on next screen flip
if key_resp.status == STARTED and not waitOnFlip:
theseKeys = key_resp.getKeys(keyList=['y', 'n', 'left', 'right', 'space'], waitRelease=False)
_key_resp_allKeys.extend(theseKeys)
if len(_key_resp_allKeys):
key_resp.keys = _key_resp_allKeys[-1].name # just the last key pressed
key_resp.rt = _key_resp_allKeys[-1].rt
# a response ends the routine
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in thanksComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "thanks"-------
for thisComponent in thanksComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('text_3.started', text_3.tStartRefresh)
thisExp.addData('text_3.stopped', text_3.tStopRefresh)
# check responses
if key_resp.keys in ['', [], None]: # No response was made
key_resp.keys = None
thisExp.addData('key_resp.keys',key_resp.keys)
if key_resp.keys != None: # we had a response
thisExp.addData('key_resp.rt', key_resp.rt)
thisExp.addData('key_resp.started', key_resp.tStartRefresh)
thisExp.addData('key_resp.stopped', key_resp.tStopRefresh)
thisExp.nextEntry()
# the Routine "thanks" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# Flip one final time so any remaining win.callOnFlip()
# and win.timeOnFlip() tasks get executed before quitting
win.flip()
# these shouldn't be strictly necessary (should auto-save)
thisExp.saveAsWideText(filename+'.csv', delim='auto')
thisExp.saveAsPickle(filename)
logging.flush()
# make sure everything is closed down
thisExp.abort() # or data files will save again on exit
win.close()
core.quit()
| 41.38833 | 128 | 0.68279 |
995bf67760f86e408486071e269986ab88cacdf2 | 345 | py | Python | mars/settings.py | sddyates/mars | a56735bd344b7337151fb419b1c832b0c702ea69 | [
"MIT"
] | 1 | 2019-12-20T20:29:14.000Z | 2019-12-20T20:29:14.000Z | mars/settings.py | sddyates/mars | a56735bd344b7337151fb419b1c832b0c702ea69 | [
"MIT"
] | 3 | 2019-08-30T08:12:16.000Z | 2020-05-15T16:19:53.000Z | mars/settings.py | sddyates/mars | a56735bd344b7337151fb419b1c832b0c702ea69 | [
"MIT"
] | 1 | 2019-12-21T03:51:30.000Z | 2019-12-21T03:51:30.000Z | """
Synopsis
--------
Define default global variables and indexes for element reference.
Args
----
None.
Attributes
----------
None.
TODO
----
None.
"""
# The former module-level ``global`` declarations were removed: the
# ``global`` statement is a no-op at module scope (every top-level name is
# already module-global), and ``small_pressure``/``u``/``v``/``w`` were
# only declared, never assigned, so no module attribute is lost.

# Indexes of each field within a state array (see module docstring:
# "indexes for element reference").
rho = 0   # rho — presumably the density slot
prs = 1   # prs — presumably the pressure slot
vx1 = 2   # velocity component slots
vx2 = 3
vx3 = 4

# Conserved-variable aliases: the energy index reuses the pressure slot
# and the momentum indexes reuse the velocity slots.
eng = prs
mvx1 = vx1
mvx2 = vx2
mvx3 = vx3
| 9.857143 | 66 | 0.634783 |
6c95c9695a3568f822556b38fba36c446d565b72 | 249 | py | Python | tests/user_commands/management/commands/no_translations.py | Yoann-Vie/esgi-hearthstone | 115d03426c7e8e80d89883b78ac72114c29bed12 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | tests/user_commands/management/commands/no_translations.py | Yoann-Vie/esgi-hearthstone | 115d03426c7e8e80d89883b78ac72114c29bed12 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | tests/user_commands/management/commands/no_translations.py | Yoann-Vie/esgi-hearthstone | 115d03426c7e8e80d89883b78ac72114c29bed12 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | from django.core.management.base import BaseCommand, no_translations
from django.utils import translation
class Command(BaseCommand):
    """Management command whose ``handle()`` runs under ``@no_translations``.

    ``handle()`` returns the language active while it runs, so callers can
    observe the effect of the decorator.
    """

    @no_translations
    def handle(self, *args, **options):
        # Report the currently active language back to the caller.
        active_language = translation.get_language()
        return active_language
| 24.9 | 69 | 0.73494 |
d7acaf4c90dfd48bbc7877d2f85c029f459350fe | 15,205 | py | Python | var/spack/repos/builtin/packages/mumps/package.py | bjoo/spack | 448ac2c68dc3d11331f7d20ab9b87d63fbabdb86 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/mumps/package.py | bjoo/spack | 448ac2c68dc3d11331f7d20ab9b87d63fbabdb86 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13 | 2021-05-12T06:16:20.000Z | 2022-03-11T18:39:32.000Z | var/spack/repos/builtin/packages/mumps/package.py | bjoo/spack | 448ac2c68dc3d11331f7d20ab9b87d63fbabdb86 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import sys
class Mumps(Package):
"""MUMPS: a MUltifrontal Massively Parallel sparse direct Solver"""
homepage = "http://mumps.enseeiht.fr"
url = "http://mumps.enseeiht.fr/MUMPS_5.3.5.tar.gz"
version('5.3.5', sha256='e5d665fdb7043043f0799ae3dbe3b37e5b200d1ab7a6f7b2a4e463fd89507fa4')
version('5.3.3', sha256='27e7749ac05006bf8e81a457c865402bb72a42bf3bc673da49de1020f0f32011')
version('5.2.0', sha256='41f2c7cb20d69599fb47e2ad6f628f3798c429f49e72e757e70722680f70853f')
version('5.1.2', sha256='eb345cda145da9aea01b851d17e54e7eef08e16bfa148100ac1f7f046cd42ae9')
version('5.1.1', sha256='a2a1f89c470f2b66e9982953cbd047d429a002fab9975400cef7190d01084a06')
version('5.0.2', sha256='77292b204942640256097a3da482c2abcd1e0d5a74ecd1d4bab0f5ef6e60fe45')
# Alternate location if main server is down.
# version('5.0.1', sha256='50355b2e67873e2239b4998a46f2bbf83f70cdad6517730ab287ae3aae9340a0',
# url='http://pkgs.fedoraproject.org/repo/pkgs/MUMPS/MUMPS_5.0.1.tar.gz/md5/b477573fdcc87babe861f62316833db0/MUMPS_5.0.1.tar.gz')
version('5.0.1', sha256='50355b2e67873e2239b4998a46f2bbf83f70cdad6517730ab287ae3aae9340a0')
variant('mpi', default=True,
description='Compile MUMPS with MPI support')
variant('scotch', default=False,
description='Activate Scotch as a possible ordering library')
variant('ptscotch', default=False,
description='Activate PT-Scotch as a possible ordering library')
variant('metis', default=False,
description='Activate Metis as a possible ordering library')
variant('parmetis', default=False,
description='Activate Parmetis as a possible ordering library')
variant('double', default=True,
description='Activate the compilation of dmumps')
variant('float', default=True,
description='Activate the compilation of smumps')
variant('complex', default=True,
description='Activate the compilation of cmumps and/or zmumps')
variant('int64', default=False,
description='Use int64_t/integer*8 as default index type')
variant('shared', default=True, description='Build shared libraries')
depends_on('scotch + esmumps', when='~ptscotch+scotch')
depends_on('scotch + esmumps ~ metis + mpi', when='+ptscotch')
depends_on('metis@5:', when='+metis')
depends_on('parmetis', when="+parmetis")
depends_on('blas')
depends_on('lapack')
depends_on('scalapack', when='+mpi')
depends_on('mpi', when='+mpi')
patch('examples.patch', when='@5.1.1%clang^spectrum-mpi')
patch('gfortran8.patch', when='@5.1.2')
# The following patches src/Makefile to fix some dependency
# issues in lib[cdsz]mumps.so
patch('mumps.src-makefile.5.2.patch', when='@5.2.0 +shared')
patch('mumps.src-makefile.5.3.patch', when='@5.3.0: +shared')
conflicts('+parmetis', when='~mpi',
msg="You cannot use the parmetis variant without mpi")
conflicts('+parmetis', when='~metis',
msg="You cannot use the parmetis variant without metis")
conflicts('+ptscotch', when='~mpi',
msg="You cannot use the ptscotch variant without mpi")
def write_makefile_inc(self):
    """Generate MUMPS' Makefile.inc from the spec.

    Collects compiler, ordering-library, PIC, optimization and linker
    settings into ``makefile_conf``, appends the packaged Makefile.inc
    template, and writes the result to ``Makefile.inc`` in the build dir.
    """
    # The makefile variables LIBBLAS, LSCOTCH, LMETIS, and SCALAP are only
    # used to link the examples, so if building '+shared' there is no need
    # to explicitly link with the respective libraries because we make sure
    # the mumps shared libraries are already linked with them. See also the
    # comment below about 'inject_libs'. This behaviour may cause problems
    # if building '+shared' and the used libraries were build static
    # without the PIC option.
    shared = '+shared' in self.spec

    lapack_blas = (self.spec['lapack'].libs + self.spec['blas'].libs)
    makefile_conf = ["LIBBLAS = %s" %
                     lapack_blas.ld_flags if not shared else '']

    orderings = ['-Dpord']
    # All of the lib[cdsz]mumps.* libs depend on mumps_common
    extra_libs4mumps = ['-L$(topdir)/lib', '-lmumps_common']
    # and mumps_common depends on pord
    extra_libs4mumps += ['-L$(topdir)/PORD/lib', '-lpord']

    # Scotch / PT-Scotch ordering support
    if '+ptscotch' in self.spec or '+scotch' in self.spec:
        makefile_conf.extend([
            "ISCOTCH = -I%s" % self.spec['scotch'].prefix.include,
            "LSCOTCH = {0}".format(
                self.spec['scotch'].libs.ld_flags if not shared else '')
        ])
        orderings.append('-Dscotch')
        if '+ptscotch' in self.spec:
            orderings.append('-Dptscotch')

    # (Par)Metis ordering support
    if '+parmetis' in self.spec:
        makefile_conf.extend([
            "IMETIS = -I%s" % self.spec['parmetis'].prefix.include,
            ("LMETIS = -L%s -l%s -L%s -l%s" % (
                self.spec['parmetis'].prefix.lib, 'parmetis',
                self.spec['metis'].prefix.lib, 'metis')) if not shared
            else 'LMETIS ='
        ])
        orderings.append('-Dparmetis')
    elif '+metis' in self.spec:
        makefile_conf.extend([
            "IMETIS = -I%s" % self.spec['metis'].prefix.include,
            ("LMETIS = -L%s -l%s" % (
                self.spec['metis'].prefix.lib, 'metis')) if not shared
            else 'LMETIS ='
        ])
        orderings.append('-Dmetis')

    makefile_conf.append("ORDERINGSF = %s" % (' '.join(orderings)))

    # Determine which compiler suite we are using
    using_gcc = self.compiler.name == "gcc"
    using_pgi = self.compiler.name == "pgi"
    using_nvhpc = self.compiler.name == "nvhpc"
    using_intel = self.compiler.name == "intel"
    using_oneapi = self.compiler.name == "oneapi"
    using_xl = self.compiler.name in ['xl', 'xl_r']
    using_fj = self.compiler.name == "fj"

    # The llvm compiler suite does not contain a Fortran compiler by
    # default.  Its possible that a Spack user may have configured
    # ~/.spack/<platform>/compilers.yaml for using xlf.
    using_xlf = using_xl or \
        (spack_f77.endswith('xlf') or spack_f77.endswith('xlf_r'))

    # when building shared libs need -fPIC, otherwise
    # /usr/bin/ld: graph.o: relocation R_X86_64_32 against `.rodata.str1.1'
    # can not be used when making a shared object; recompile with -fPIC
    cpic = self.compiler.cc_pic_flag if shared else ''
    fpic = self.compiler.fc_pic_flag if shared else ''

    # TODO: test this part, it needs a full blas, scalapack and
    # partitionning environment with 64bit integers

    # The mumps.src-makefile.patch wants us to set these PIC variables
    makefile_conf.append('FC_PIC_FLAG={0}'.format(fpic))
    makefile_conf.append('CC_PIC_FLAG={0}'.format(cpic))

    opt_level = '3' if using_xl else ''
    optc = ['-O{0}'.format(opt_level)]
    optf = ['-O{0}'.format(opt_level)]
    optl = ['-O{0}'.format(opt_level)]

    if shared:
        optc.append(cpic)
        optf.append(fpic)
        optl.append(cpic)

    if not using_xlf:
        optf.append('-DALLOW_NON_INIT')

    if '+int64' in self.spec:
        if not using_xlf:
            # the fortran compilation flags most probably are
            # working only for intel and gnu compilers this is
            # perhaps something the compiler should provide
            optf.append('-fdefault-integer-8' if using_gcc else '-i8')
        optc.append('-DINTSIZE64')
    else:
        if using_xlf:
            optf.append('-qfixed')

    makefile_conf.extend([
        'OPTC = {0}'.format(' '.join(optc)),
        'OPTF = {0}'.format(' '.join(optf)),
        'OPTL = {0}'.format(' '.join(optl))
    ])

    if '+mpi' in self.spec:
        # parallel build: use the MPI compiler wrappers and scalapack
        scalapack = self.spec['scalapack'].libs if not shared \
            else LibraryList([])
        makefile_conf.extend(
            ['CC = {0}'.format(self.spec['mpi'].mpicc),
             'FC = {0}'.format(self.spec['mpi'].mpifc),
             'FL = {0}'.format(self.spec['mpi'].mpifc),
             "SCALAP = %s" % scalapack.ld_flags,
             "MUMPS_TYPE = par"])
    else:
        makefile_conf.extend(
            ["CC = {0}".format(spack_cc),
             "FC = {0}".format(spack_fc),
             "FL = {0}".format(spack_fc),
             "MUMPS_TYPE = seq"])
        # For sequential MUMPS, we need to link to a fake MPI lib
        extra_libs4mumps += ['-L$(topdir)/libseq', '-lmpiseq']

    # TODO: change the value to the correct one according to the
    # compiler possible values are -DAdd_, -DAdd__ and/or -DUPPER
    if using_intel or using_oneapi or using_pgi or using_nvhpc or using_fj:
        # Intel, PGI, and Fujitsu Fortran compiler provides
        # the main() function so C examples linked with the Fortran
        # compiler require a hack defined by _DMAIN_COMP
        # (see examples/c_example.c)
        makefile_conf.append("CDEFS = -DAdd_ -DMAIN_COMP")
    else:
        if not using_xlf:
            makefile_conf.append("CDEFS = -DAdd_")

    if '+shared' in self.spec:
        # All Mumps libraries will be linked with 'inject_libs'.
        inject_libs = []
        if '+mpi' in self.spec:
            inject_libs += [self.spec['scalapack'].libs.ld_flags]
        if '+ptscotch' in self.spec or '+scotch' in self.spec:
            inject_libs += [self.spec['scotch'].libs.ld_flags]
        if '+parmetis' in self.spec and '+metis' in self.spec:
            inject_libs += [
                "-L%s -l%s -L%s -l%s" % (
                    self.spec['parmetis'].prefix.lib, 'parmetis',
                    self.spec['metis'].prefix.lib, 'metis')]
        elif '+metis' in self.spec:
            inject_libs += [
                "-L%s -l%s" % (self.spec['metis'].prefix.lib, 'metis')]
        inject_libs += [lapack_blas.ld_flags]
        inject_libs = ' '.join(inject_libs)

        if sys.platform == 'darwin':
            # Building dylibs with mpif90 causes segfaults on 10.8 and
            # 10.10. Use gfortran. (Homebrew)
            makefile_conf.extend([
                'LIBEXT=.dylib',
                'AR=%s -dynamiclib -Wl,-install_name -Wl,%s/$(notdir $@)'
                ' -undefined dynamic_lookup %s -o ' %
                (os.environ['FC'], prefix.lib, inject_libs),
                'RANLIB=echo'
            ])
        else:
            if using_xlf:
                build_shared_flag = "qmkshrobj"
            else:
                build_shared_flag = "shared"
            # AR is abused into a shell function that links a shared lib
            makefile_conf.extend([
                'LIBEXT=.so',
                'AR=link_cmd() { $(FL) -%s -Wl,-soname '
                '-Wl,$(notdir $@) -o "$$@" %s; }; link_cmd ' %
                (build_shared_flag, inject_libs),
                'RANLIB=ls'
            ])
            # When building libpord, read AR from Makefile.inc instead of
            # going through the make command line - this prevents various
            # problems with the substring "$$@".
            filter_file(r' AR="\$\(AR\)"', '', 'Makefile')
            filter_file(r'^(INCLUDES = -I../include)',
                        '\\1\ninclude ../../Makefile.inc',
                        join_path('PORD', 'lib', 'Makefile'))
    else:
        # static build: plain archiver
        makefile_conf.extend([
            'LIBEXT = .a',
            'AR = ar vr ',
            'RANLIB = ranlib'
        ])

    # The mumps.src-makefile.patch wants EXTRA_LIBS4MUMPS defined
    makefile_conf.extend([
        'EXTRA_LIBS4MUMPS = {0}'.format(' '.join(extra_libs4mumps))
    ])

    # append the packaged template, then write the combined Makefile.inc
    makefile_inc_template = join_path(
        os.path.dirname(self.module.__file__), 'Makefile.inc')
    with open(makefile_inc_template, "r") as fh:
        makefile_conf.extend(fh.read().split('\n'))

    with working_dir('.'):
        with open("Makefile.inc", "w") as fh:
            makefile_inc = '\n'.join(makefile_conf)
            fh.write(makefile_inc)
def flag_handler(self, name, flags):
    """Inject ``-fallow-argument-mismatch`` into fflags for GCC >= 10.

    GCC 10 turned Fortran argument-mismatch warnings into errors, which
    breaks the MUMPS build; the flag restores the old behaviour. All
    other flag groups pass through untouched.
    """
    if name != 'fflags' or not self.spec.satisfies('%gcc@10:'):
        return (flags, None, None)
    updated = [] if flags is None else flags
    updated.append('-fallow-argument-mismatch')
    return (updated, None, None)
def install(self, spec, prefix):
    """Build the selected precisions, install libs/headers, optionally run example checks."""
    self.write_makefile_inc()

    # Build fails in parallel
    # That is why we split the builds of 's', 'c', 'd', and/or 'z' which
    # can be build one after the other, each using a parallel build.
    letters_variants = [
        ['s', '+float'], ['c', '+complex+float'],
        ['d', '+double'], ['z', '+complex+double']]
    for ltr, v in letters_variants:
        if v in spec:
            # e.g. 'make sexamples' builds smumps plus its examples
            make(ltr + 'examples')

    install_tree('lib', prefix.lib)
    install_tree('include', prefix.include)

    if '~mpi' in spec:
        # sequential builds additionally ship the fake-MPI stub lib + headers
        lib_dsuffix = '.dylib' if sys.platform == 'darwin' else '.so'
        lib_suffix = lib_dsuffix if '+shared' in spec else '.a'
        install('libseq/libmpiseq%s' % lib_suffix, prefix.lib)
        install(join_path('libseq', '*.h'), prefix.include)

    # FIXME: extend the tests to mpirun -np 2 when build with MPI
    # FIXME: use something like numdiff to compare output files
    # Note: In some cases, when 'mpi' is enabled, the examples below cannot
    # be run without 'mpirun', so we enabled the tests only if explicitly
    # requested with the Spack '--test' option.
    if self.run_tests:
        with working_dir('examples'):
            if '+float' in spec:
                ssimpletest = Executable('./ssimpletest')
                ssimpletest(input='input_simpletest_real')
                if '+complex' in spec:
                    csimpletest = Executable('./csimpletest')
                    csimpletest(input='input_simpletest_cmplx')
            if '+double' in spec:
                dsimpletest = Executable('./dsimpletest')
                dsimpletest(input='input_simpletest_real')
                if '+complex' in spec:
                    zsimpletest = Executable('./zsimpletest')
                    zsimpletest(input='input_simpletest_cmplx')
@property
def libs(self):
    """Installed MUMPS libraries (lib*mumps* and libpord), or None if none found."""
    component_libs = ['*mumps*', 'pord']
    return find_libraries(['lib' + comp for comp in component_libs],
                          root=self.prefix.lib,
                          shared=('+shared' in self.spec),
                          recursive=False) or None
| 44.720588 | 141 | 0.568103 |
2612872d79a1cc06d2cf60d4e0e0d161f697229a | 6,135 | py | Python | scripts/flurry.py | chaturatbs/specimen-tools | 3f149800dea77ba30e00427f3c79be8e4e4b6105 | [
"MIT"
] | null | null | null | scripts/flurry.py | chaturatbs/specimen-tools | 3f149800dea77ba30e00427f3c79be8e4e4b6105 | [
"MIT"
] | null | null | null | scripts/flurry.py | chaturatbs/specimen-tools | 3f149800dea77ba30e00427f3c79be8e4e4b6105 | [
"MIT"
] | null | null | null | # Copyright 2018 Jose Cambronero and Phillip Stanley-Marbell
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject
# to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from argparse import ArgumentParser
from datetime import datetime, timedelta
import json
import os
import requests
import sys
class SpecimenDownloader:
    """
    Download daily csv reports for Specimen from Flurry.
    """

    # placeholder session id required by the Flurry auth API
    DUMMY_SESSION_ID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
    # date format accepted on the command line
    INPUT_DATE_FORMAT = "%m/%d/%Y"
    # date format used in Flurry GET parameters and output file names
    GET_DATE_FORMAT = "%Y_%m_%d"

    def __init__(self, email, password):
        """
        :param email: Flurry account email
        :param password: Flurry account password
        """
        self.email = email
        self.password = password
        self.auth_url = 'https://auth.flurry.com/auth/v1/session'
        self.login_url = 'https://login.flurry.com'
        self.auth_method = 'application/vnd.api+json'
        # one session shared across requests so auth cookies persist
        self.session = requests.Session()

    def login(self):
        """ Login in to flurry; raises on any failed request """
        opts_response = self.session.options(self.auth_url, data='')
        headers = opts_response.headers
        headers.update(
            {
                'origin': self.login_url,
                'referer': self.login_url,
                'accept': self.auth_method,
                'content-type': self.auth_method
            }
        )
        data = {'data': {'type': 'session', 'id': SpecimenDownloader.DUMMY_SESSION_ID, 'attributes': {'scopes': '', 'email': self.email, 'password': self.password, 'remember': 'false'}}}
        payload = json.dumps(data)
        login_response = self.session.post(self.auth_url, data = payload, headers = headers)
        if not login_response.ok:
            raise Exception("Unable to connect: %s" % login_response.status_code)
        # the CSV endpoint lives on the old Flurry site; visiting it primes the session
        old_site_response = self.session.get('https://dev.flurry.com/home.do')
        if not old_site_response.ok:
            raise Exception("Unable to reach old Flurry: %s" % login_response.status_code)

    def __download__(self, start_date, end_date, dir):
        """ Download a single csv file covering [start_date, end_date) into dir """
        # preparing data for GET request
        params = {'projectID': 687883, 'versionCut':'versionsAll', 'childProjectId': 0, 'stream': 'true', 'direction': 1, 'offset': 0}
        start_date = self.date_to_flurry(start_date)
        end_date = self.date_to_flurry(end_date)
        params['intervalCut'] = "customInterval%s-%s" % (start_date, end_date)
        print("Requesting %s-%s" % (start_date, end_date))
        download = self.session.get("https://dev.flurry.com/eventsLogCsv.do", params = params)
        if download.ok:
            file_name = os.path.join(dir, "specimen-%s-%s.csv" % (start_date, end_date))
            # context manager guarantees the handle is closed even if the write fails
            with open(file_name, "w") as file:
                file.write(download.text)
        else:
            raise Exception("Unable to download file for %s-%s" % (start_date, end_date))

    def __seq_dates__(self, start_date, end_date):
        """ Return list of pairs of consecutive dates between start and end dates, inclusive both ends """
        pairs = []
        if end_date < start_date:
            raise ValueError("start must be <= end: %s - %s" % (start_date.date(), end_date.date()))
        curr_date = start_date
        while curr_date <= end_date:
            next_date = curr_date + timedelta(days = 1)
            pairs.append((curr_date, next_date))
            curr_date = next_date
        return pairs

    def check_input_date(self, date):
        """ if input not a valid datetime, parse it using INPUT_DATE_FORMAT """
        if not isinstance(date, datetime):
            return datetime.strptime(date, SpecimenDownloader.INPUT_DATE_FORMAT)
        return date

    def date_to_flurry(self, date):
        """ Format a datetime the way the Flurry GET API expects """
        return datetime.strftime(date, SpecimenDownloader.GET_DATE_FORMAT)

    def download(self, start_date, end_date, dir_name):
        """ Download all csv reports between start and end date (inclusive) and store to dir_name """
        print("Downloading daily csv files [%s, %s] to %s" % (start_date, end_date, dir_name))
        start_date = self.check_input_date(start_date)
        end_date = self.check_input_date(end_date)
        self.login()
        dates = self.__seq_dates__(start_date, end_date)
        for start, end in dates:
            try:
                self.__download__(start, end, dir_name)
            except Exception as e:
                # BUG FIX: Exception has no `.message` attribute in Python 3;
                # printing the exception itself keeps the best-effort behavior
                print(e)
def main(args):
    """Entry point: build a downloader from parsed CLI args and run the download."""
    downloader = SpecimenDownloader(args.email, args.password)
    downloader.download(args.start_date, args.end_date, args.dir)
if __name__ == "__main__":
    # CLI wrapper: parse credentials, date range and output directory, then delegate to main()
    argparser = ArgumentParser(description='Download CSVs from Specimen Flurry account (requires proper authentication)')
    argparser.add_argument('email', type=str, help='Email for Flurry account')
    argparser.add_argument('password', type=str, help='Password for Flurry account')
    argparser.add_argument('start_date', type=str, help='Start date for download (mm/dd/yyyy)')
    argparser.add_argument('end_date', type=str, help='End date for download (mm/dd/yyyy)')
    argparser.add_argument('-d', '--dir', type=str, help='Directory for download, if not provided defaults to current directory', default='.')
    args = argparser.parse_args()
    main(args)
| 44.781022 | 186 | 0.669764 |
c4540f5c377a930a725b4b3109b11702a3a82731 | 3,399 | py | Python | tests/regression/test_pearson.py | bibinwils/metrics | e1c3fda24f90367803c2b04315ad7c8bced719db | [
"Apache-2.0"
] | 4 | 2021-03-22T09:02:31.000Z | 2021-03-23T07:35:39.000Z | tests/regression/test_pearson.py | bibinwils/metrics | e1c3fda24f90367803c2b04315ad7c8bced719db | [
"Apache-2.0"
] | 4 | 2021-06-14T08:40:18.000Z | 2021-07-27T20:01:08.000Z | tests/regression/test_pearson.py | bibinwils/metrics | e1c3fda24f90367803c2b04315ad7c8bced719db | [
"Apache-2.0"
] | 2 | 2021-10-16T05:02:43.000Z | 2022-02-10T16:01:52.000Z | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import pytest
import torch
from scipy.stats import pearsonr
from tests.helpers import seed_all
from tests.helpers.testers import BATCH_SIZE, NUM_BATCHES, MetricTester
from torchmetrics.functional.regression.pearson import pearson_corrcoef
from torchmetrics.regression.pearson import PearsonCorrcoef
# Make random tensor generation reproducible across the test session.
seed_all(42)

# Lightweight container pairing prediction tensors with their targets.
Input = namedtuple("Input", ["preds", "target"])

# Uniform [0, 1) inputs.
_single_target_inputs1 = Input(
    preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
    target=torch.rand(NUM_BATCHES, BATCH_SIZE),
)

# Standard-normal inputs.
_single_target_inputs2 = Input(
    preds=torch.randn(NUM_BATCHES, BATCH_SIZE),
    target=torch.randn(NUM_BATCHES, BATCH_SIZE),
)
def _sk_pearsonr(preds, target):
sk_preds = preds.view(-1).numpy()
sk_target = target.view(-1).numpy()
return pearsonr(sk_target, sk_preds)[0]
@pytest.mark.parametrize(
    "preds, target",
    [
        (_single_target_inputs1.preds, _single_target_inputs1.target),
        (_single_target_inputs2.preds, _single_target_inputs2.target),
    ],
)
class TestPearsonCorrcoef(MetricTester):
    """Check PearsonCorrcoef (module and functional APIs) against the scipy reference."""

    # looser absolute tolerance than the MetricTester default
    atol = 1e-2

    @pytest.mark.parametrize("ddp", [True, False])
    def test_pearson_corrcoef(self, preds, target, ddp):
        # module (class) interface, with and without DDP
        self.run_class_metric_test(
            ddp=ddp,
            preds=preds,
            target=target,
            metric_class=PearsonCorrcoef,
            sk_metric=_sk_pearsonr,
            dist_sync_on_step=False,
        )

    def test_pearson_corrcoef_functional(self, preds, target):
        # functional interface
        self.run_functional_metric_test(
            preds=preds, target=target, metric_functional=pearson_corrcoef, sk_metric=_sk_pearsonr
        )

    def test_pearson_corrcoef_differentiability(self, preds, target):
        # the metric must remain differentiable w.r.t. its inputs
        self.run_differentiability_test(
            preds=preds, target=target, metric_module=PearsonCorrcoef, metric_functional=pearson_corrcoef
        )

    # Pearson half + cpu does not work due to missing support in torch.sqrt
    @pytest.mark.xfail(reason="PearsonCorrcoef metric does not support cpu + half precision")
    def test_pearson_corrcoef_half_cpu(self, preds, target):
        self.run_precision_test_cpu(preds, target, PearsonCorrcoef, pearson_corrcoef)

    @pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires cuda")
    def test_pearson_corrcoef_half_gpu(self, preds, target):
        self.run_precision_test_gpu(preds, target, PearsonCorrcoef, pearson_corrcoef)
def test_error_on_different_shape():
    """Mismatched or multi-dimensional inputs must raise with a clear message."""
    metric = PearsonCorrcoef()
    with pytest.raises(RuntimeError, match="Predictions and targets are expected to have the same shape"):
        metric(torch.randn(100), torch.randn(50))

    with pytest.raises(ValueError, match="Expected both predictions and target to be 1 dimensional tensors."):
        metric(torch.randn(100, 2), torch.randn(100, 2))
| 36.159574 | 110 | 0.736393 |
1f7fac7f42f6be6eb6784c0a2456eb51222167ca | 33,059 | py | Python | ow_lander/scripts/all_action_trajectories.py | thewindsofwinter/ow_simulator | a92698a1cc7e80ac6aec17d74bcbac81edf726f7 | [
"NASA-1.3"
] | null | null | null | ow_lander/scripts/all_action_trajectories.py | thewindsofwinter/ow_simulator | a92698a1cc7e80ac6aec17d74bcbac81edf726f7 | [
"NASA-1.3"
] | null | null | null | ow_lander/scripts/all_action_trajectories.py | thewindsofwinter/ow_simulator | a92698a1cc7e80ac6aec17d74bcbac81edf726f7 | [
"NASA-1.3"
] | null | null | null | #!/usr/bin/env python3
# The Notices and Disclaimers for Ocean Worlds Autonomy Testbed for Exploration
# Research and Simulation can be found in README.md in the root directory of
# this repository.
import rospy
import math
import constants
import copy
from tf.transformations import quaternion_from_euler
from tf.transformations import euler_from_quaternion
from utils import is_shou_yaw_goal_in_range
from moveit_msgs.msg import PositionConstraint
from geometry_msgs.msg import Quaternion
from shape_msgs.msg import SolidPrimitive
from moveit_msgs.msg import RobotTrajectory
from trajectory_msgs.msg import JointTrajectory
from trajectory_msgs.msg import JointTrajectoryPoint
from std_msgs.msg import Header
def calculate_joint_state_end_pose_from_plan_arm(robot, plan, move_arm, moveit_fk):
    '''
    Calculate the end pose (position and orientation), joint states and robot
    state at the end of the given plan.
    :param robot: moveit RobotCommander for the lander
    :param plan: RobotTrajectory whose final point is inspected
    :param move_arm: arm move group interface (kept for API compatibility)
    :param moveit_fk: forward-kinematics service proxy (GetPositionFK)
    :returns: (robot_state, joint_states, goal_pose) at the end of the plan
    '''
    # arm joint order: [j_shou_yaw, j_shou_pitch, j_prox_pitch, j_dist_pitch,
    #                   j_hand_yaw, j_scoop_yaw]
    # full robot state order: [j_ant_pan, j_ant_tilt, j_shou_yaw, j_shou_pitch,
    #                          j_prox_pitch, j_dist_pitch, j_hand_yaw,
    #                          j_grinder, j_scoop_yaw]
    # joint values at the last trajectory point
    joint_states = plan.joint_trajectory.points[-1].positions
    # construct robot state at the end of the plan
    robot_state = robot.get_current_state()
    # prepend antenna joints (0, 0) and insert grinder position (-0.1),
    # neither of which is driven by arm plans
    # (fixed: original had a duplicated `new_value = new_value = ...` assignment)
    new_value = (0, 0) + joint_states[:5] + (-0.1,) + (joint_states[5],)
    # modify current state of robot to the end state of the previous plan
    robot_state.joint_state.position = new_value
    # forward kinematics of l_scoop gives the scoop pose at the end of the plan
    header = Header(0, rospy.Time.now(), "base_link")
    fkln = ['l_scoop']
    goal_pose_stamped = moveit_fk(header, fkln, robot_state)
    goal_pose = goal_pose_stamped.pose_stamped[0].pose
    return robot_state, joint_states, goal_pose
def cascade_plans(plan1, plan2):
    '''
    Join two robot motion plans into one trajectory, appending plan2 after
    plan1 with a small time gap so time stamps stay strictly increasing.
    :param plan1: RobotTrajectory executed first
    :param plan2: RobotTrajectory appended after plan1
    :returns: combined RobotTrajectory
    '''
    # (cleanup: removed dead locals from the original — unused `duration`,
    # `n_joints1/2`, and the `points1`/`points2` bookkeeping lists)
    new_traj = RobotTrajectory()
    traj_msg = JointTrajectory()
    # time gap inserted between the two plans
    time_tolerance = rospy.Duration.from_sec(0.1)

    # copy plan1 points unchanged
    for src in plan1.joint_trajectory.points:
        point = JointTrajectoryPoint()
        point.time_from_start = src.time_from_start
        point.velocities = list(src.velocities)
        point.accelerations = list(src.accelerations)
        point.positions = src.positions
        traj_msg.points.append(point)

    # shift plan2 time stamps past the end of plan1
    end_time = plan1.joint_trajectory.points[-1].time_from_start
    for src in plan2.joint_trajectory.points:
        point = JointTrajectoryPoint()
        point.time_from_start = src.time_from_start + end_time + time_tolerance
        point.velocities = list(src.velocities)
        point.accelerations = list(src.accelerations)
        point.positions = src.positions
        traj_msg.points.append(point)

    traj_msg.joint_names = plan1.joint_trajectory.joint_names
    traj_msg.header.frame_id = plan1.joint_trajectory.header.frame_id
    new_traj.joint_trajectory = traj_msg
    return new_traj
def go_to_XYZ_coordinate(move_arm, cs, goal_pose, x_start, y_start, z_start, approximate=True):
    """
    Plan a move that places the end effector at (x_start, y_start, z_start),
    keeping goal_pose's current orientation.
    :param move_arm: class 'moveit_commander.move_group.MoveGroupCommander'
    :param cs: start robot state for planning
    :param goal_pose: Pose mutated in place to hold the target position
    :type x_start: float
    :type y_start: float
    :type z_start: float
    :param approximate: use an approximate solution. default True
    :type approximate: bool
    :returns: the planned trajectory, or False if no plan was found
    """
    move_arm.set_start_state(cs)
    goal_pose.position.x = x_start
    goal_pose.position.y = y_start
    goal_pose.position.z = z_start
    # (cleanup: removed the original's four no-op self-assignments of
    # goal_pose.orientation.* — orientation is intentionally left unchanged)

    # Ask the planner to generate a plan to the approximate joint values generated
    # by kinematics builtin IK solver. For more insight on this issue refer to:
    # https://github.com/nasa/ow_simulator/pull/60
    if approximate:
        move_arm.set_joint_value_target(goal_pose, True)
    else:
        move_arm.set_pose_target(goal_pose)
    _, plan, _, _ = move_arm.plan()
    if len(plan.joint_trajectory.points) == 0:  # If no plan found, abort
        return False
    return plan
def go_to_Z_coordinate_dig_circular(move_arm, cs, goal_pose, z_start, approximate=True):
    """
    Plan a move that changes only the end effector's z coordinate, keeping
    goal_pose's x/y position and orientation.
    :param move_arm: class 'moveit_commander.move_group.MoveGroupCommander'
    :param cs: start robot state for planning
    :param goal_pose: Pose mutated in place to hold the target z
    :type z_start: float
    :param approximate: use an approximate solution. default True
    :type approximate: bool
    :returns: the planned trajectory, or False if no plan was found
    """
    move_arm.set_start_state(cs)
    goal_pose.position.z = z_start
    # (cleanup: removed the original's four no-op self-assignments of
    # goal_pose.orientation.* — orientation is intentionally left unchanged)

    # Ask the planner to generate a plan to the approximate joint values generated
    # by kinematics builtin IK solver. For more insight on this issue refer to:
    # https://github.com/nasa/ow_simulator/pull/60
    if approximate:
        move_arm.set_joint_value_target(goal_pose, True)
    else:
        move_arm.set_pose_target(goal_pose)
    _, plan, _, _ = move_arm.plan()
    if len(plan.joint_trajectory.points) == 0:  # If no plan found, abort
        return False
    return plan
def move_to_pre_trench_configuration_dig_circ(move_arm, robot, x_start, y_start):
    """
    Plan a move into the pre-trench arm configuration for circular digging,
    aligning the shoulder yaw with the (x_start, y_start) trench target.
    Hand yaw is left at 0 (unlike move_to_pre_trench_configuration).
    :param move_arm: class 'moveit_commander.move_group.MoveGroupCommander'
    :param robot: moveit RobotCommander
    :type x_start: float
    :type y_start: float
    :returns: the planned trajectory, or False if the shoulder-yaw goal is out of range
    """
    # plan from the robot's current state
    # (cleanup: removed the original's dead `joint_goal = get_current_pose()`
    # assignment, which was overwritten before use)
    robot_state = robot.get_current_state()
    move_arm.set_start_state(robot_state)

    # Compute shoulder yaw angle to trench
    alpha = math.atan2(y_start-constants.Y_SHOU, x_start-constants.X_SHOU)
    h = math.sqrt(pow(y_start-constants.Y_SHOU, 2) +
                  pow(x_start-constants.X_SHOU, 2))
    l = constants.Y_SHOU - constants.HAND_Y_OFFSET
    beta = math.asin(l/h)

    # Move to pre trench position, align shoulder yaw
    joint_goal = move_arm.get_current_joint_values()
    joint_goal[constants.J_DIST_PITCH] = 0.0
    joint_goal[constants.J_HAND_YAW] = 0.0
    joint_goal[constants.J_PROX_PITCH] = -math.pi/2
    joint_goal[constants.J_SHOU_PITCH] = math.pi/2
    joint_goal[constants.J_SHOU_YAW] = alpha + beta

    # If out of joint range, abort
    if (is_shou_yaw_goal_in_range(joint_goal) == False):
        return False

    joint_goal[constants.J_SCOOP_YAW] = 0
    move_arm.set_joint_value_target(joint_goal)
    _, plan, _, _ = move_arm.plan()
    return plan
def dig_circular(move_arm, move_limbs, robot, moveit_fk, args):
    """
    Build the full trajectory for a circular dig at (args.x_start, args.y_start),
    cascading several sub-plans (pre-trench alignment, approach, hand/scoop
    rotations, dig) into one RobotTrajectory.
    :type move_arm: class 'moveit_commander.move_group.MoveGroupCommander'
    :type args: List[bool, float, int, float, float, float]
    :returns: combined RobotTrajectory, or False if any sub-plan fails
    """
    circ_traj = None
    circ_traj = RobotTrajectory()
    x_start = args.x_start
    y_start = args.y_start
    depth = args.depth
    parallel = args.parallel
    ground_position = args.ground_position

    if not parallel:
        # dig perpendicular to the arm direction
        plan_a = move_to_pre_trench_configuration_dig_circ(
            move_arm, robot, x_start, y_start)
        if len(plan_a.joint_trajectory.points) == 0:  # If no plan found, abort
            return False

        # Once aligned to move goal and offset, place scoop tip at surface target offset
        cs, start_state, end_pose = calculate_joint_state_end_pose_from_plan_arm(
            robot, plan_a, move_arm, moveit_fk)
        z_start = ground_position + constants.R_PARALLEL_FALSE_A  # - depth
        end_pose.position.x = x_start
        end_pose.position.y = y_start
        end_pose.position.z = z_start
        move_arm.set_start_state(cs)
        move_arm.set_pose_target(end_pose)
        _, plan_b, _, _ = move_arm.plan()
        if len(plan_b.joint_trajectory.points) == 0:  # If no plan found, abort
            return False
        circ_traj = cascade_plans(plan_a, plan_b)

        # Rotate J_HAND_YAW to correct postion
        cs, start_state, end_pose = calculate_joint_state_end_pose_from_plan_arm(
            robot, circ_traj, move_arm, moveit_fk)
        plan_c = change_joint_value(
            move_arm, cs, start_state, constants.J_HAND_YAW, math.pi/2.2)
        circ_traj = cascade_plans(circ_traj, plan_c)

        cs, start_state, end_pose = calculate_joint_state_end_pose_from_plan_arm(
            robot, circ_traj, move_arm, moveit_fk)
        # if not parallel:
        # Once aligned to trench goal, place hand above trench middle point
        z_start = ground_position + constants.R_PARALLEL_FALSE_A  # - depth
        plan_d = go_to_Z_coordinate_dig_circular(
            move_arm, cs, end_pose, z_start)
        circ_traj = cascade_plans(circ_traj, plan_d)

        # Rotate hand perpendicular to arm direction
        cs, start_state, end_pose = calculate_joint_state_end_pose_from_plan_arm(
            robot, circ_traj, move_arm, moveit_fk)
        plan_e = change_joint_value(
            move_arm, cs, start_state, constants.J_HAND_YAW, -0.29*math.pi)
        circ_traj = cascade_plans(circ_traj, plan_e)
    else:
        # dig parallel to the arm direction
        plan_a = move_to_pre_trench_configuration(
            move_arm, robot, x_start, y_start)
        if len(plan_a.joint_trajectory.points) == 0:  # If no plan found, abort
            return False

        # Rotate hand so scoop is in middle point
        cs, start_state, end_pose = calculate_joint_state_end_pose_from_plan_arm(
            robot, plan_a, move_arm, moveit_fk)
        plan_b = change_joint_value(
            move_arm, cs, start_state, constants.J_HAND_YAW, 0.0)
        circ_traj = cascade_plans(plan_a, plan_b)

        # Rotate scoop
        cs, start_state, end_pose = calculate_joint_state_end_pose_from_plan_arm(
            robot, circ_traj, move_arm, moveit_fk)
        plan_c = change_joint_value(
            move_arm, cs, start_state, constants.J_SCOOP_YAW, math.pi/2)
        circ_traj = cascade_plans(circ_traj, plan_c)

        # Rotate dist so scoop is back
        cs, start_state, end_pose = calculate_joint_state_end_pose_from_plan_arm(
            robot, circ_traj, move_arm, moveit_fk)
        plan_d = change_joint_value(
            move_arm, cs, start_state, constants.J_DIST_PITCH, -19.0/54.0*math.pi)
        circ_traj = cascade_plans(circ_traj, plan_d)

        # Once aligned to trench goal, place hand above trench middle point
        cs, start_state, end_pose = calculate_joint_state_end_pose_from_plan_arm(
            robot, circ_traj, move_arm, moveit_fk)
        z_start = ground_position + constants.R_PARALLEL_FALSE_A - depth
        plan_e = go_to_XYZ_coordinate(
            move_arm, cs, end_pose, x_start, y_start, z_start)
        circ_traj = cascade_plans(circ_traj, plan_e)

        # Rotate dist to dig
        cs, start_state, end_pose = calculate_joint_state_end_pose_from_plan_arm(
            robot, circ_traj, move_arm, moveit_fk)
        # start_state[3] is the current J_DIST_PITCH value
        dist_now = start_state[3]
        plan_f = change_joint_value(
            move_arm, cs, start_state, constants.J_DIST_PITCH, dist_now + 2*math.pi/3)
        circ_traj = cascade_plans(circ_traj, plan_f)
    return circ_traj
def move_to_pre_trench_configuration(move_arm, robot, x_start, y_start):
    """
    Plan a move into the standard pre-trench arm configuration, aligning the
    shoulder yaw with the (x_start, y_start) trench target and cocking the
    hand yaw to pi/2.2.
    :param move_arm: class 'moveit_commander.move_group.MoveGroupCommander'
    :param robot: moveit RobotCommander
    :type x_start: float
    :type y_start: float
    :returns: the planned trajectory, or False if the shoulder-yaw goal is out of range
    """
    # plan from the robot's current state
    # (cleanup: removed the original's dead `joint_goal = get_current_pose()`
    # assignment, which was overwritten before use)
    robot_state = robot.get_current_state()
    move_arm.set_start_state(robot_state)

    # Compute shoulder yaw angle to trench
    alpha = math.atan2(y_start-constants.Y_SHOU, x_start-constants.X_SHOU)
    h = math.sqrt(pow(y_start-constants.Y_SHOU, 2) +
                  pow(x_start-constants.X_SHOU, 2))
    l = constants.Y_SHOU - constants.HAND_Y_OFFSET
    beta = math.asin(l/h)

    # Move to pre trench position, align shoulder yaw
    joint_goal = move_arm.get_current_joint_values()
    joint_goal[constants.J_DIST_PITCH] = 0.0
    joint_goal[constants.J_HAND_YAW] = math.pi/2.2
    joint_goal[constants.J_PROX_PITCH] = -math.pi/2
    joint_goal[constants.J_SHOU_PITCH] = math.pi/2
    joint_goal[constants.J_SHOU_YAW] = alpha + beta

    # If out of joint range, abort
    if (is_shou_yaw_goal_in_range(joint_goal) == False):
        return False

    joint_goal[constants.J_SCOOP_YAW] = 0
    move_arm.set_joint_value_target(joint_goal)
    _, plan, _, _ = move_arm.plan()
    return plan
def plan_cartesian_path(move_group, wpose, length, alpha, parallel, z_start, cs):
    """
    Plan a straight cartesian move of length `length` along heading `alpha`
    at height `z_start`. When parallel is False the heading is rotated by
    -pi/2 so the motion is perpendicular to the arm direction.
    Note: wpose is mutated in place.
    :type move_group: class 'moveit_commander.move_group.MoveGroupCommander'
    :type length: float
    :type alpha: float
    :type parallel: bool
    :returns: (plan, fraction) from compute_cartesian_path
    """
    if parallel == False:
        alpha = alpha - math.pi/2

    move_group.set_start_state(cs)
    waypoints = []
    wpose.position.z = z_start
    wpose.position.x += length*math.cos(alpha)
    wpose.position.y += length*math.sin(alpha)
    waypoints.append(copy.deepcopy(wpose))

    (plan, fraction) = move_group.compute_cartesian_path(
        waypoints,   # waypoints to follow
        0.01,        # end effector follow step (meters)
        0.0)         # jump threshold
    return plan, fraction
def plan_cartesian_path_lin(move_arm, wpose, length, alpha, z_start, cs):
    """
    Plan a straight cartesian move of `length` meters along direction `alpha`
    starting from robot state `cs`.

    :type move_arm: class 'moveit_commander.move_group.MoveGroupCommander'
    :type length: float
    :type alpha: float

    NOTE(review): unlike plan_cartesian_path, this variant never applies
    `z_start` to the waypoint; the parameter is kept for interface
    compatibility -- confirm whether that is intentional.
    """
    move_arm.set_start_state(cs)
    # Offset the current pose along the requested heading.
    wpose.position.x += length * math.cos(alpha)
    wpose.position.y += length * math.sin(alpha)
    waypoints = [copy.deepcopy(wpose)]
    # 0.01 m end-effector step, jump threshold disabled (0.0).
    plan, fraction = move_arm.compute_cartesian_path(waypoints, 0.01, 0.0)
    return plan, fraction
def change_joint_value(move_arm, cs, start_state, joint_index, target_value):
    """
    Plan a move that starts from robot state `cs`, reproduces the joint
    values in `start_state`, and drives the single joint `joint_index` to
    `target_value`.

    :type move_arm: class 'moveit_commander.move_group.MoveGroupCommander'
    :type joint_index: int
    :type target_value: float
    :return: the planned trajectory
    """
    move_arm.set_start_state(cs)
    joint_goal = move_arm.get_current_joint_values()
    # Seed the goal with the end state of the previous plan, then override
    # the one joint we want to move.
    for k, value in enumerate(start_state):
        joint_goal[k] = value
    joint_goal[joint_index] = target_value
    move_arm.set_joint_value_target(joint_goal)
    _, plan, _, _ = move_arm.plan()
    return plan
def go_to_Z_coordinate(move_arm, cs, goal_pose, x_start, y_start, z_start, approximate=True):
    """
    Plan a move from robot state `cs` to the cartesian point
    (x_start, y_start, z_start), reusing `goal_pose`'s orientation.

    :param approximate: use an approximate solution. default True
    :type move_arm: class 'moveit_commander.move_group.MoveGroupCommander'
    :type x_start: float
    :type y_start: float
    :type z_start: float
    :type approximate: bool
    :return: the planned trajectory, or False when no plan was found
    """
    move_arm.set_start_state(cs)
    goal_pose.position.x = x_start
    goal_pose.position.y = y_start
    goal_pose.position.z = z_start
    if approximate:
        # Approximate joint values from the kinematics builtin IK solver;
        # see https://github.com/nasa/ow_simulator/pull/60 for background.
        move_arm.set_joint_value_target(goal_pose, True)
    else:
        move_arm.set_pose_target(goal_pose)
    _, plan, _, _ = move_arm.plan()
    # An empty point list means the planner failed -- abort.
    return plan if plan.joint_trajectory.points else False
def dig_linear(move_arm, robot, moveit_fk, args):
    """
    Plan the full linear-trench digging trajectory as a cascade of partial
    plans: pre-trench configuration, hand/scoop rotations, lowering to the
    trench depth, digging in, a straight cartesian drag along the trench,
    and rotating back out.

    :type move_arm: class 'moveit_commander.move_group.MoveGroupCommander'
    :param moveit_fk: forward-kinematics service proxy used to compute the
                      end pose of each partial plan
    :type args: List[bool, float, int, float, float, float]
    :return: the combined trajectory; plan_a alone when the hand-yaw
             rotation could not be planned; False when no pre-trench plan
             was found
    """
    x_start = args.x_start
    y_start = args.y_start
    depth = args.depth
    length = args.length
    ground_position = args.ground_position
    plan_a = move_to_pre_trench_configuration(
        move_arm, robot, x_start, y_start)
    # NOTE(review): plan_a may be False when the shoulder-yaw goal is out of
    # range; that case would raise AttributeError on the next line -- verify.
    if len(plan_a.joint_trajectory.points) == 0:  # If no plan found, abort
        return False
    cs, start_state, current_pose = calculate_joint_state_end_pose_from_plan_arm(
        robot, plan_a, move_arm, moveit_fk)
    # Rotate hand yaw to dig in.
    plan_b = change_joint_value(
        move_arm, cs, start_state, constants.J_HAND_YAW, 0.0)
    if len(plan_b.joint_trajectory.points) == 0:  # If no plan found, send the previous plan only
        return plan_a
    dig_linear_traj = cascade_plans(plan_a, plan_b)
    # Rotate the scoop.
    cs, start_state, current_pose = calculate_joint_state_end_pose_from_plan_arm(
        robot, dig_linear_traj, move_arm, moveit_fk)
    plan_c = change_joint_value(
        move_arm, cs, start_state, constants.J_SCOOP_YAW, math.pi/2)
    dig_linear_traj = cascade_plans(dig_linear_traj, plan_c)
    # Rotate distal pitch to the pre-trenching position.
    cs, start_state, current_pose = calculate_joint_state_end_pose_from_plan_arm(
        robot, dig_linear_traj, move_arm, moveit_fk)
    plan_d = change_joint_value(
        move_arm, cs, start_state, constants.J_DIST_PITCH, -math.pi/2)
    dig_linear_traj = cascade_plans(dig_linear_traj, plan_d)
    # Once aligned to trench goal,
    # place hand above the desired start point
    alpha = math.atan2(constants.WRIST_SCOOP_PARAL, constants.WRIST_SCOOP_PERP)
    distance_from_ground = constants.ROT_RADIUS * \
        (math.cos(alpha) - math.sin(alpha))
    z_start = ground_position + constants.SCOOP_HEIGHT - depth + distance_from_ground
    cs, start_state, goal_pose = calculate_joint_state_end_pose_from_plan_arm(
        robot, dig_linear_traj, move_arm, moveit_fk)
    plan_e = go_to_Z_coordinate(
        move_arm, cs, goal_pose, x_start, y_start, z_start)
    dig_linear_traj = cascade_plans(dig_linear_traj, plan_e)
    # rotate to dig in the ground
    cs, start_state, goal_pose = calculate_joint_state_end_pose_from_plan_arm(
        robot, dig_linear_traj, move_arm, moveit_fk)
    plan_f = change_joint_value(
        move_arm, cs, start_state, constants.J_DIST_PITCH, 2.0/9.0*math.pi)
    dig_linear_traj = cascade_plans(dig_linear_traj, plan_f)
    # determine linear trenching direction (alpha) value obtained from rviz
    cs, start_state, current_pose = calculate_joint_state_end_pose_from_plan_arm(
        robot, dig_linear_traj, move_arm, moveit_fk)
    quaternion = [current_pose.orientation.x, current_pose.orientation.y,
                  current_pose.orientation.z, current_pose.orientation.w]
    current_euler = euler_from_quaternion(quaternion)
    alpha = current_euler[2]  # yaw of the scoop = trench heading
    # linear trenching
    cs, start_state, current_pose = calculate_joint_state_end_pose_from_plan_arm(
        robot, dig_linear_traj, move_arm, moveit_fk)
    cartesian_plan, fraction = plan_cartesian_path_lin(
        move_arm, current_pose, length, alpha, z_start, cs)
    dig_linear_traj = cascade_plans(dig_linear_traj, cartesian_plan)
    # rotate to dig out
    cs, start_state, current_pose = calculate_joint_state_end_pose_from_plan_arm(
        robot, dig_linear_traj, move_arm, moveit_fk)
    plan_g = change_joint_value(
        move_arm, cs, start_state, constants.J_DIST_PITCH, math.pi/2)
    dig_linear_traj = cascade_plans(dig_linear_traj, plan_g)
    return dig_linear_traj
def calculate_starting_state_grinder(plan, robot):
    """Return (robot_state, joint_states) at the end of `plan` for the grinder group."""
    # grinder group joints: [j_shou_yaw, j_shou_pitch, j_prox_pitch,
    #                        j_dist_pitch, j_hand_yaw, j_grinder]
    # full robot state:     [j_ant_pan, j_ant_tilt, <grinder joints>, j_scoop_yaw]
    start_state = plan.joint_trajectory.points[-1].positions
    cs = robot.get_current_state()
    # Prepend the antenna joints (0, 0) and append the j_scoop_yaw value
    # (obtained from rviz), which the grinder plan does not touch, so the
    # state matches the full robot layout at the end of the previous plan.
    cs.joint_state.position = (0, 0) + start_state[:6] + (0.17403329917811217,)
    return cs, start_state
def calculate_joint_state_end_pose_from_plan_grinder(robot, plan, move_arm, moveit_fk):
    '''
    calculate the end pose (position and orientation), joint states and robot states
    from the current plan
    inputs: current plan, robot, grinder interface, and moveit forward kinematics object
    outputs: robot state, joint states and goal_pose at end of the plan
    '''
    #joint_names: [j_shou_yaw, j_shou_pitch, j_prox_pitch, j_dist_pitch, j_hand_yaw, j_grinder]
    # robot full state name: [j_ant_pan, j_ant_tilt, j_shou_yaw, j_shou_pitch, j_prox_pitch, j_dist_pitch, j_hand_yaw,
    # j_grinder, j_scoop_yaw]
    # get joint states from the end of the plan
    joint_states = plan.joint_trajectory.points[-1].positions
    # construct robot state at the end of the plan
    robot_state = robot.get_current_state()
    # adding antenna (0,0) and j_scoop_yaw (0.1740) which should not change
    new_value = (0, 0) + joint_states[:6] + (0.1740,)
    # modify current state of robot to the end state of the previous plan
    robot_state.joint_state.position = new_value
    # calculate goal pose at the end of the plan using forward kinematics on
    # the grinder link (the extra get_current_pose() call of the original was
    # a dead store and has been removed)
    header = Header(0, rospy.Time.now(), "base_link")
    fkln = ['l_grinder']
    goal_pose_stamped = moveit_fk(header, fkln, robot_state)
    goal_pose = goal_pose_stamped.pose_stamped[0].pose
    return robot_state, joint_states, goal_pose
def grind(move_grinder, robot, moveit_fk, args):
    """
    Plan the full grinding trajectory as a cascade of partial plans: approach
    above the trench start, enter the terrain to the requested depth, grind
    forward, shift sideways, grind backward, and exit the terrain.

    :type move_grinder: class 'moveit_commander.move_group.MoveGroupCommander'
    :param moveit_fk: forward-kinematics service proxy used to compute the
                      end pose of each partial plan
    :type args: List[bool, float, float, float, float, bool, float, bool]
    :return: the combined trajectory, or False when no approach plan was found
    """
    x_start = args.x_start
    y_start = args.y_start
    depth = args.depth
    length = args.length
    parallel = args.parallel
    ground_position = args.ground_position
    # Compute shoulder yaw angle to trench
    alpha = math.atan2(y_start-constants.Y_SHOU, x_start-constants.X_SHOU)
    h = math.sqrt(pow(y_start-constants.Y_SHOU, 2) +
                  pow(x_start-constants.X_SHOU, 2))
    l = constants.Y_SHOU - constants.HAND_Y_OFFSET
    beta = math.asin(l/h)
    alpha = alpha+beta
    if parallel:
        R = math.sqrt(x_start*x_start+y_start*y_start)
        # adjust trench to fit scoop circular motion
        dx = 0.04*R*math.sin(alpha)  # Center dig_circular in grind trench
        dy = 0.04*R*math.cos(alpha)
        # Move starting point back to avoid scoop-terrain collision
        x_start = 0.9*(x_start + dx)
        y_start = 0.9*(y_start - dy)
    else:
        dx = 5*length/8*math.sin(alpha)
        dy = 5*length/8*math.cos(alpha)
        # Move starting point back to avoid scoop-terrain collision
        x_start = 0.97*(x_start - dx)
        y_start = 0.97*(y_start + dy)
    # Place the grinder vertical, above the desired starting point, at
    # an altitude of 0.25 meters in the base_link frame.
    robot_state = robot.get_current_state()
    move_grinder.set_start_state(robot_state)
    goal_pose = move_grinder.get_current_pose().pose
    goal_pose.position.x = x_start  # Position
    goal_pose.position.y = y_start
    goal_pose.position.z = 0.25
    goal_pose.orientation.x = 0.70616885803  # Orientation (grinder vertical)
    goal_pose.orientation.y = 0.0303977418722
    goal_pose.orientation.z = -0.706723318474
    goal_pose.orientation.w = 0.0307192507001
    move_grinder.set_pose_target(goal_pose)
    _, plan_a, _, _ = move_grinder.plan()
    if len(plan_a.joint_trajectory.points) == 0:  # If no plan found, abort
        return False
    # Entering terrain down to the requested depth.
    # NOTE(review): go_to_Z_coordinate may return False on failure; that case
    # is not checked before cascading -- verify cascade_plans tolerates it.
    z_start = ground_position + constants.GRINDER_OFFSET - depth
    cs, start_state, goal_pose = calculate_joint_state_end_pose_from_plan_grinder(
        robot, plan_a, move_grinder, moveit_fk)
    plan_b = go_to_Z_coordinate(
        move_grinder, cs, goal_pose, x_start, y_start, z_start, False)
    grind_traj = cascade_plans(plan_a, plan_b)
    # grinding ice forward
    cs, start_state, goal_pose = calculate_joint_state_end_pose_from_plan_grinder(
        robot, grind_traj, move_grinder, moveit_fk)
    cartesian_plan, fraction = plan_cartesian_path(
        move_grinder, goal_pose, length, alpha, parallel, z_start, cs)
    grind_traj = cascade_plans(grind_traj, cartesian_plan)
    # grinding sideways (shoulder-yaw shift when parallel, cartesian otherwise)
    cs, start_state, joint_goal = calculate_joint_state_end_pose_from_plan_grinder(
        robot, grind_traj, move_grinder, moveit_fk)
    if parallel:
        plan_c = change_joint_value(
            move_grinder, cs, start_state, constants.J_SHOU_YAW, start_state[0]+0.08)
    else:
        x_now = joint_goal.position.x
        y_now = joint_goal.position.y
        z_now = joint_goal.position.z
        x_goal = x_now + 0.08*math.cos(alpha)
        y_goal = y_now + 0.08*math.sin(alpha)
        plan_c = go_to_Z_coordinate(
            move_grinder, cs, joint_goal, x_goal, y_goal, z_now, False)
    grind_traj = cascade_plans(grind_traj, plan_c)
    # grinding ice backwards (negative length reverses the direction)
    cs, start_state, joint_goal = calculate_joint_state_end_pose_from_plan_grinder(
        robot, grind_traj, move_grinder, moveit_fk)
    cartesian_plan2, fraction2 = plan_cartesian_path(
        move_grinder, joint_goal, -length, alpha, parallel, z_start, cs)
    grind_traj = cascade_plans(grind_traj, cartesian_plan2)
    # exiting terrain back up to 0.22 m
    cs, start_state, joint_goal = calculate_joint_state_end_pose_from_plan_grinder(
        robot, grind_traj, move_grinder, moveit_fk)
    plan_d = go_to_Z_coordinate(
        move_grinder, cs, joint_goal, x_start, y_start, 0.22, False)
    grind_traj = cascade_plans(grind_traj, plan_d)
    return grind_traj
def guarded_move_plan(move_arm, robot, moveit_fk, args):
    """
    Plan a guarded move: align the shoulder yaw toward the surface target,
    place the scoop tip at the target offset along the surface normal
    (the "pre-guarded move"), then drive the tip along the normal vector for
    `search_distance` meters.

    :type move_arm: class 'moveit_commander.move_group.MoveGroupCommander'
    :param moveit_fk: forward-kinematics service proxy used to compute the
                      end pose of each partial plan
    :param args: message with `start` (target point), `normal` (surface
                 normal) and `search_distance` fields
    :return: (guarded_move_traj, estimated_time_ratio) where the ratio is
             the fraction of the total trajectory time spent in the
             pre-guarded move; False when any partial plan fails or the
             shoulder yaw goal is out of range
    """
    robot_state = robot.get_current_state()
    move_arm.set_start_state(robot_state)
    ### pre-guarded move starts here ###
    targ_x = args.start.x
    targ_y = args.start.y
    targ_z = args.start.z
    direction_x = args.normal.x
    direction_y = args.normal.y
    direction_z = args.normal.z
    search_distance = args.search_distance
    # STUB: GROUND HEIGHT TO BE EXTRACTED FROM DEM
    targ_elevation = -0.2
    if (targ_z+targ_elevation) == 0:
        offset = search_distance
    else:
        offset = (targ_z*search_distance)/(targ_z+targ_elevation)
    # Compute shoulder yaw angle to target
    alpha = math.atan2((targ_y+direction_y*offset)-constants.Y_SHOU,
                       (targ_x+direction_x*offset)-constants.X_SHOU)
    h = math.sqrt(pow((targ_y+direction_y*offset)-constants.Y_SHOU, 2) +
                  pow((targ_x+direction_x*offset)-constants.X_SHOU, 2))
    l = constants.Y_SHOU - constants.HAND_Y_OFFSET
    beta = math.asin(l/h)
    # Move to pre move position, align shoulder yaw
    joint_goal = move_arm.get_current_joint_values()
    joint_goal[constants.J_DIST_PITCH] = 0
    joint_goal[constants.J_HAND_YAW] = 0
    joint_goal[constants.J_PROX_PITCH] = -math.pi/2
    joint_goal[constants.J_SHOU_PITCH] = math.pi/2
    joint_goal[constants.J_SHOU_YAW] = alpha + beta
    # If out of joint range, abort
    if (is_shou_yaw_goal_in_range(joint_goal) == False):
        return False
    joint_goal[constants.J_SCOOP_YAW] = 0
    move_arm.set_joint_value_target(joint_goal)
    _, plan_a, _, _ = move_arm.plan()
    if len(plan_a.joint_trajectory.points) == 0:  # If no plan found, abort
        return False
    # Once aligned to move goal and offset, place scoop tip at surface target offset
    cs, start_state, goal_pose = calculate_joint_state_end_pose_from_plan_arm(
        robot, plan_a, move_arm, moveit_fk)
    move_arm.set_start_state(cs)
    goal_pose.position.x = targ_x
    goal_pose.position.y = targ_y
    goal_pose.position.z = targ_z
    move_arm.set_pose_target(goal_pose)
    _, plan_b, _, _ = move_arm.plan()
    if len(plan_b.joint_trajectory.points) == 0:  # If no plan found, abort
        return False
    pre_guarded_move_traj = cascade_plans(plan_a, plan_b)
    ### pre-guarded move ends here ###
    # Drive scoop tip along norm vector, distance is search_distance
    cs, start_state, goal_pose = calculate_joint_state_end_pose_from_plan_arm(
        robot, pre_guarded_move_traj, move_arm, moveit_fk)
    move_arm.set_start_state(cs)
    goal_pose.position.x = targ_x
    goal_pose.position.y = targ_y
    goal_pose.position.z = targ_z
    goal_pose.position.x -= direction_x*search_distance
    goal_pose.position.y -= direction_y*search_distance
    goal_pose.position.z -= direction_z*search_distance
    move_arm.set_pose_target(goal_pose)
    _, plan_c, _, _ = move_arm.plan()
    guarded_move_traj = cascade_plans(pre_guarded_move_traj, plan_c)
    # Ratio of the pre-guarded-move duration to the whole trajectory duration,
    # used by the caller to tell the two phases apart while executing.
    pre_guarded_move_end_time = pre_guarded_move_traj.joint_trajectory.points[len(
        pre_guarded_move_traj.joint_trajectory.points)-1].time_from_start
    guarded_move_end_time = guarded_move_traj.joint_trajectory.points[len(
        guarded_move_traj.joint_trajectory.points)-1].time_from_start
    estimated_time_ratio = pre_guarded_move_end_time/guarded_move_end_time
    return guarded_move_traj, estimated_time_ratio
def deliver_sample(move_arm, robot, moveit_fk, args):
    """
    Plan a trajectory that carries the scoop to the delivery location and
    then rotates it in place to drop the sample.

    :type move_arm: class 'moveit_commander.move_group.MoveGroupCommander'
    :type args: List[bool, float, float, float]
    :return: the combined trajectory, plan_a alone when the scoop rotation
             could not be planned, or False when no delivery plan was found
    """
    # NOTE(review): this changes the group's planner for subsequent calls as
    # well (the reset at the bottom is commented out) -- confirm intended.
    move_arm.set_planner_id("RRTstar")
    robot_state = robot.get_current_state()
    move_arm.set_start_state(robot_state)
    x_delivery = args.delivery.x
    y_delivery = args.delivery.y
    z_delivery = args.delivery.z
    # Degrees-to-radians factor (math.pi replaces the hand-rolled 3.14159;
    # the unused r2d constant has been dropped).
    d2r = math.pi/180
    goal_pose = move_arm.get_current_pose().pose
    # position was found from rviz tool
    goal_pose.position.x = x_delivery
    goal_pose.position.y = y_delivery
    goal_pose.position.z = z_delivery
    r = -179
    p = -20
    y = -90
    q = quaternion_from_euler(r*d2r, p*d2r, y*d2r)
    goal_pose.orientation = Quaternion(q[0], q[1], q[2], q[3])
    move_arm.set_pose_target(goal_pose)
    _, plan_a, _, _ = move_arm.plan()
    if len(plan_a.joint_trajectory.points) == 0:  # If no plan found, abort
        return False
    # Rotate scoop to deliver sample at current location.
    # A position constraint keeps the scoop tip from diverging while the
    # planner searches for the rotation.
    pos_constraint = PositionConstraint()
    pos_constraint.header.frame_id = "base_link"
    pos_constraint.link_name = "l_scoop"
    pos_constraint.target_point_offset.x = 0.1
    pos_constraint.target_point_offset.y = 0.1
    pos_constraint.target_point_offset.z = 0.1
    pos_constraint.constraint_region.primitives.append(
        SolidPrimitive(type=SolidPrimitive.SPHERE, dimensions=[0.01]))
    pos_constraint.weight = 1
    # using euler angles for own verification..
    r = +180
    p = 90  # 45 worked get
    y = -90
    q = quaternion_from_euler(r*d2r, p*d2r, y*d2r)
    cs, start_state, goal_pose = calculate_joint_state_end_pose_from_plan_arm(
        robot, plan_a, move_arm, moveit_fk)
    move_arm.set_start_state(cs)
    goal_pose.orientation = Quaternion(q[0], q[1], q[2], q[3])
    move_arm.set_pose_target(goal_pose)
    _, plan_b, _, _ = move_arm.plan()
    if len(plan_b.joint_trajectory.points) == 0:  # If no plan found, send the previous plan only
        return plan_a
    deliver_sample_traj = cascade_plans(plan_a, plan_b)
    # move_arm.set_planner_id("RRTconnect")
    return deliver_sample_traj
| 38.262731 | 118 | 0.709126 |
90d8b6e21a5a98ef832fd0d4d5145ea5815c4ad7 | 1,765 | py | Python | setup.py | sam-writer/fastT5 | f8db7f7d905f6c594e7a502adca465ae752e1f3d | [
"Apache-2.0"
] | null | null | null | setup.py | sam-writer/fastT5 | f8db7f7d905f6c594e7a502adca465ae752e1f3d | [
"Apache-2.0"
] | null | null | null | setup.py | sam-writer/fastT5 | f8db7f7d905f6c594e7a502adca465ae752e1f3d | [
"Apache-2.0"
] | null | null | null | import setuptools
from os import path
# Resolve paths relative to this setup script's directory.
here = path.abspath(path.dirname(__file__))
# Use the README as the long description shown on PyPI.
with open(path.join(here, "README.md"), encoding="utf-8") as f:
    long_description = f.read()
setuptools.setup(
    name="fastt5",
    version="0.1.0",
    license="apache-2.0",
    author="Kiran R",
    author_email="kiranr8k@gmail.com",
    description="boost inference speed of T5 models by 5x & reduce the model size by 3x using fastT5.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Ki6an/fastT5",
    project_urls={
        "Repo": "https://github.com/Ki6an/fastT5",
        "Bug Tracker": "https://github.com/Ki6an/fastT5/issues",
    },
    keywords=[
        "T5",
        "ONNX",
        "onnxruntime",
        "NLP",
        "transformer",
        "quantization",
        "generate text",
        "summarization",
        "translation",
        "q&a",
        "qg",
        "machine learning",
        "inference",
        "fast inference",
    ],
    packages=setuptools.find_packages(),
    python_requires=">=3.5",
    install_requires=[
        "torch>=1.7.0,!=1.8.0",  # excludes torch v1.8.0
        "onnx",
        "onnxruntime==1.7.0",
        "transformers>4.6.1",
        "progress>=1.5",
        "sentencepiece",
        "psutil",
    ],
    classifiers=[
        "Intended Audience :: Developers",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
    ],
)
| 28.934426 | 103 | 0.577904 |
e5a0e1d8476add1d1a793e7c8df2be77fe63efd2 | 1,233 | py | Python | tests/unit/utils/cache_test.py | styro/salt | d087d94dca02ca8bf53a6c21b94944bc7957522c | [
"Apache-2.0"
] | 3 | 2016-09-03T06:26:42.000Z | 2019-06-30T13:04:53.000Z | tests/unit/utils/cache_test.py | styro/salt | d087d94dca02ca8bf53a6c21b94944bc7957522c | [
"Apache-2.0"
] | null | null | null | tests/unit/utils/cache_test.py | styro/salt | d087d94dca02ca8bf53a6c21b94944bc7957522c | [
"Apache-2.0"
] | 1 | 2021-12-02T15:30:00.000Z | 2021-12-02T15:30:00.000Z | # -*- coding: utf-8 -*-
'''
tests.unit.utils.cache_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test the salt cache objects
'''
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
from salt.utils import cache
import time
class CacheDictTestCase(TestCase):
    """Exercise salt.utils.cache.CacheDict's dict behaviour and TTL expiry."""

    def test_sanity(self):
        '''
        Make sure you can instantiate etc.
        '''
        cached = cache.CacheDict(5)
        self.assertIsInstance(cached, cache.CacheDict)
        # Exercise the basic mapping protocol: set, get, delete, membership.
        self.assertNotIn('foo', cached)
        cached['foo'] = 'bar'
        self.assertEqual(cached['foo'], 'bar')
        del cached['foo']
        self.assertNotIn('foo', cached)

    def test_ttl(self):
        cached = cache.CacheDict(0.1)
        cached['foo'] = 'bar'
        self.assertIn('foo', cached)
        self.assertEqual(cached['foo'], 'bar')
        # Wait past the 0.1s TTL so the entry expires.
        time.sleep(0.2)
        self.assertNotIn('foo', cached)
        # An expired key raises a regular old KeyError on lookup.
        self.assertRaises(KeyError, cached.__getitem__, 'foo')
# Allow running this test module directly through the project's test runner.
if __name__ == '__main__':
    from integration import run_tests
    run_tests(CacheDictTestCase, needs_daemon=False)
| 24.176471 | 64 | 0.60665 |
6e963a2fd2e3ab710e291dbd5dbd8997a4dab1b9 | 2,340 | py | Python | airflow/api/common/experimental/pool.py | KarthikKothareddy/AirFlow | faaf0b8b4467bcf5bff4a5b49086a9e02cb9c112 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 6 | 2016-04-20T20:40:43.000Z | 2022-02-20T10:32:00.000Z | airflow/api/common/experimental/pool.py | KarthikKothareddy/AirFlow | faaf0b8b4467bcf5bff4a5b49086a9e02cb9c112 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 13 | 2018-11-30T18:18:32.000Z | 2021-02-19T17:04:12.000Z | airflow/api/common/experimental/pool.py | KarthikKothareddy/AirFlow | faaf0b8b4467bcf5bff4a5b49086a9e02cb9c112 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 8 | 2016-04-13T21:22:46.000Z | 2020-07-31T18:31:59.000Z | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.exceptions import AirflowException
from airflow.models import Pool
from airflow.utils.db import provide_session
class PoolBadRequest(AirflowException):
    """Raised when a pool request carries invalid parameters."""
    # HTTP status code reported by the experimental API layer.
    status = 400
class PoolNotFound(AirflowException):
    """Raised when the requested pool does not exist."""
    # HTTP status code reported by the experimental API layer.
    status = 404
@provide_session
def get_pool(name, session=None):
    """Get pool by a given name."""
    # Reject missing or blank names before touching the database.
    if not (name and name.strip()):
        raise PoolBadRequest("Pool name shouldn't be empty")
    found = session.query(Pool).filter_by(pool=name).first()
    if found is not None:
        return found
    raise PoolNotFound("Pool '%s' doesn't exist" % name)
@provide_session
def get_pools(session=None):
    """Get all pools."""
    # provide_session injects a SQLAlchemy session when none is passed.
    return session.query(Pool).all()
@provide_session
def create_pool(name, slots, description, session=None):
    """Create a pool with a given parameters."""
    # Reject missing or blank names before touching the database.
    if not (name and name.strip()):
        raise PoolBadRequest("Pool name shouldn't be empty")
    try:
        slots = int(slots)
    except ValueError:
        raise PoolBadRequest("Bad value for `slots`: %s" % slots)
    # Keep attributes readable on the returned object after commit.
    session.expire_on_commit = False
    # Upsert: update the existing pool or insert a new one.
    existing = session.query(Pool).filter_by(pool=name).first()
    if existing is not None:
        existing.slots = slots
        existing.description = description
        pool = existing
    else:
        pool = Pool(pool=name, slots=slots, description=description)
        session.add(pool)
    session.commit()
    return pool
@provide_session
def delete_pool(name, session=None):
    """Delete pool by a given name."""
    # Reject missing or blank names before touching the database.
    if not (name and name.strip()):
        raise PoolBadRequest("Pool name shouldn't be empty")
    victim = session.query(Pool).filter_by(pool=name).first()
    if victim is None:
        raise PoolNotFound("Pool '%s' doesn't exist" % name)
    session.delete(victim)
    session.commit()
    # Return the deleted (now detached) pool object.
    return victim
| 27.209302 | 74 | 0.691026 |
556294846df3addfceeb51bc92000dba63908e66 | 1,240 | py | Python | tests/test_relation.py | haginot/auto-prep | b1de3eceba5b82432e7042e7e62270df467ed828 | [
"Apache-2.0"
] | null | null | null | tests/test_relation.py | haginot/auto-prep | b1de3eceba5b82432e7042e7e62270df467ed828 | [
"Apache-2.0"
] | 4 | 2019-01-15T01:55:46.000Z | 2019-02-21T04:15:25.000Z | tests/test_relation.py | haginot/auto-prep | b1de3eceba5b82432e7042e7e62270df467ed828 | [
"Apache-2.0"
] | null | null | null | import unittest
import pandas as pd
from autoprep.relation import Relation
from autoprep.table import Table
class TestRelation(unittest.TestCase):
    """Check Relation.related on key overlap between a source and a target table."""

    def setUp(self):
        self._source = Table(title='source', data=pd.DataFrame({
            'id': [1, 2, 3], 'name': ['Ichiro', 'Harper', 'Bonds']}))
        # Every hitter_id below exists in source.id -> should be related.
        self._target_related = Table(title='target', data=pd.DataFrame({
            'hitter_id': [1, 2, 3, 1, 2, 3],
            'record': ['HIT', 'HIT', 'HR', 'HIT', '2B', 'HR']}))
        # No hitter_id below exists in source.id -> should be unrelated.
        self._target_unrelated = Table(title='target', data=pd.DataFrame({
            'hitter_id': [4, 5, 6, 7, 8, 9],
            'record': ['HIT', 'HIT', 'HR', 'HIT', '2B', 'HR']}))

    def test_related(self):
        related = Relation(source_table=self._source, source_key='id',
                           target_table=self._target_related,
                           target_key='hitter_id')
        self.assertTrue(related.related)
        unrelated = Relation(source_table=self._source, source_key='id',
                             target_table=self._target_unrelated,
                             target_key='hitter_id')
        self.assertFalse(unrelated.related)
# Allow running this test module directly with the standard unittest runner.
if __name__ == '__main__':
    unittest.main()
| 38.75 | 105 | 0.625806 |
4f51d6220a544f965294f38e33117ad8456f5427 | 3,097 | py | Python | src/pactor.py | kstrempel/pactor | bc12dd6253bec7c08f691697108dcabd2a1c0e00 | [
"MIT"
] | 1 | 2021-03-19T21:36:35.000Z | 2021-03-19T21:36:35.000Z | src/pactor.py | kstrempel/pactor | bc12dd6253bec7c08f691697108dcabd2a1c0e00 | [
"MIT"
] | null | null | null | src/pactor.py | kstrempel/pactor | bc12dd6253bec7c08f691697108dcabd2a1c0e00 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
import argparse
import sys
import logging
import importlib # needed for runtime
from pactor.compiler import load_file, load_script
from pactor.vm import VM
from pactor.ast import Ast
from pactor.repl import repl
from pactor.runtime_exceptions import InnerPactorRuntimeError
from pactor.error_listener import SyntaxException
__author__ = "kstrempel"
__copyright__ = "kstrempel"
__license__ = "mit"
__version__ = 0.2
_logger = logging.getLogger(__name__)
def parse_args(args):
    """Parse command line parameters

    Args:
      args ([str]): command line parameters as list of strings

    Returns:
      :obj:`argparse.Namespace`: command line parameters namespace
    """
    parser = argparse.ArgumentParser(
        description="Pactor Language")
    parser.add_argument(
        'file',
        metavar='FILE',
        nargs='?',
        type=str,
        default=None,
        help='starts the pactor source file')
    parser.add_argument(
        "--version",
        action="version",
        version="pactor {ver}".format(ver=__version__))
    parser.add_argument(
        "-v",
        "--verbose",
        dest="loglevel",
        help="set loglevel to INFO",
        action="store_const",
        const=logging.INFO)
    parser.add_argument(
        "-vv",
        "--very-verbose",
        dest="loglevel",
        help="set loglevel to DEBUG",
        action="store_const",
        const=logging.DEBUG)
    # BUGFIX: -s/--stack previously used action="store_const" with
    # const=logging.DEBUG (copy-pasted from the loglevel flags above).  It is
    # a plain boolean switch, so store_true is the correct action; the parsed
    # value is still truthy when the flag is given, so callers are unaffected.
    parser.add_argument(
        "-s",
        "--stack",
        dest="stack",
        help="prints the stack when script finished",
        action="store_true")
    return parser.parse_args(args)
def setup_logging(loglevel):
    """Setup basic logging

    Args:
      loglevel (int): minimum loglevel for emitting messages
    """
    # Route log records to stdout with a timestamped single-line format.
    logging.basicConfig(
        level=loglevel,
        stream=sys.stdout,
        format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s",
        datefmt="%Y-%m-%d %H:%M:%S")
def main(args):
    """Main entry point allowing external calls

    Args:
      args ([str]): command line parameter list
    """
    args = parse_args(args)
    setup_logging(args.loglevel)
    if args.file:
        # BUGFIX: vm must exist before the --stack check below; previously a
        # failure in load_file/VM construction left vm unbound and the
        # print(vm.stack) line raised a NameError.
        vm = None
        try:
            ast = load_file(args.file)
            vm = VM(ast)
            vm.run()
        except InnerPactorRuntimeError as e:
            print(f"Runtime error in {args.file} at [{e.line}:{e.column}]")
            # Echo the offending source line together with the error marker.
            with open(args.file) as f:
                line = f.readline()
                for _ in range(1, e.line):
                    line = f.readline()
                print("> " + line[:-1])
                print("> " + e.error_arrow)
                print("> " + e.message)
        except SyntaxException as e:
            print(f"Syntax Error: {e.message}")
            print(f"{e.error_arrow}")
        except Exception as e:
            print(f"Error: {e}")
        if args.stack and vm is not None:
            print(vm.stack)
    else:
        # No file given: drop into the interactive read-eval-print loop.
        repl()
def run():
    """Entry point for console_scripts
    """
    # Drop the program name; argparse only expects the arguments themselves.
    main(sys.argv[1:])
# Allow running this module directly as a script.
if __name__ == "__main__":
    run()
| 25.385246 | 75 | 0.586051 |
d3cc5593c0e29dc9c7af0608f21e517639666a8e | 2,263 | py | Python | classes/PositionMessageManager.py | ericauv/rsi_tradingview | 1e05f222aa0d230e0482f5afa5c19f696afcecbb | [
"MIT"
] | null | null | null | classes/PositionMessageManager.py | ericauv/rsi_tradingview | 1e05f222aa0d230e0482f5afa5c19f696afcecbb | [
"MIT"
] | null | null | null | classes/PositionMessageManager.py | ericauv/rsi_tradingview | 1e05f222aa0d230e0482f5afa5c19f696afcecbb | [
"MIT"
] | null | null | null | class PositionMessageManager:
# sample message
# {
# "id":3,
# "result":[
# {
# "req":"gN0SiRrevtS4O0ufdCpzd4N0MzHu2lVmwbHh6hj4g9eTT9Yfe55eUc4klmsEhnwC@position",
# "res":{
# "positions":[
# {
# "entryPrice":"12044.90000003",
# "marginType":"ISOLATED", // margin type, "CROSSED" or "ISOLATED"
# "isAutoAddMargin":false,
# "isolatedMargin":"0.00006388", // isolated margin balance
# "leverage":125, // current leverage
# "liquidationPrice":"12002.39091452", // estimated liquidation price
# "markPrice":"12046.06021667", // current mark price
# "maxQty":"50", // maximum quantity of base asset
# "positionAmt":"1", // position amount
# "symbol":"BTCUSD_200925", // symbol
# "unRealizedProfit":"0.00000079", // unrealized PnL
# "positionSide":"LONG" // position side
# },
# {
# "entryPrice":"0.0",
# "marginType":"ISOLATED",
# "isAutoAddMargin":false,
# "isolatedMargin":"0",
# "leverage":125,
# "liquidationPrice":"0",
# "markPrice":"12046.06021667",
# "maxQty":"50",
# "positionAmt":"0",
# "symbol":"BTCUSD_200925",
# "unRealizedProfit":"0.00000000",
# "positionSide":"SHORT"
# }
# ]
# }
# }
# ]
# }
def onMessage(self, message):
def onCloseMessage(self, message):
self.DataStore.storeCloseMessage(message)
closeSignal = self.Indicator.getCloseSignal(self.DataStore.data)
self.Trader.handleCloseSignal(closeSignal)
def onNonCloseMessage(self, message):
openSignal = self.Indicator.getOpenSignal(self.DataStore.getDataForLiveIndicator(message))
self.Trader.handleOpenSignal(openSignal)
| 35.920635 | 98 | 0.468405 |
daabf4f8f7031c5fbc90e8c5c6cbd7febf908e01 | 976 | py | Python | calibration_wrappers.py | samadeusfp/prescriptiveProcessMonitoring | 7b39c9b3cb20208d409e733e91cb91fb69dbf238 | [
"MIT"
] | 1 | 2018-11-09T14:09:47.000Z | 2018-11-09T14:09:47.000Z | calibration_wrappers.py | samadeusfp/prescriptiveProcessMonitoring | 7b39c9b3cb20208d409e733e91cb91fb69dbf238 | [
"MIT"
] | null | null | null | calibration_wrappers.py | samadeusfp/prescriptiveProcessMonitoring | 7b39c9b3cb20208d409e733e91cb91fb69dbf238 | [
"MIT"
] | null | null | null | from sklearn.base import BaseEstimator
import numpy as np
class LGBMCalibrationWrapper(BaseEstimator):
    """Adapt a binary model whose predict() returns positive-class scores to
    the two-column predict_proba interface expected by sklearn calibration."""

    def __init__(self, cls):
        # Wrapped model; presumably a trained LightGBM booster (per the
        # class name) -- any object with a compatible predict() works.
        self.cls = cls
        self.classes_ = [0, 1]

    def predict_proba(self, X):
        # Column 0 = P(class 0), column 1 = P(class 1).
        positive = self.cls.predict(X)
        return np.array([1 - positive, positive]).T
"""
from sklearn.base import BaseEstimator
import numpy as np
import lightgbm as lgb
class LGBMCalibrationWrapper(BaseEstimator):
def __init__(self, cls=None, param=None, n_lgbm_iter=100):
self.cls = cls
self.param = param
self.classes_ = [0,1]
self.n_lgbm_iter = n_lgbm_iter
def fit(self, X_train, y_train):
train_data = lgb.Dataset(X_train, label=y_train)
self.cls = lgb.train(self.param, train_data, self.n_lgbm_iter)
return self.cls
def predict_proba(self, X):
preds = self.cls.predict(X)
preds = np.array([1-preds, preds]).T
return preds
""" | 27.111111 | 70 | 0.640369 |
cd3fcc1416554cfa846c314879e657b313e0eae4 | 10,402 | py | Python | venv/lib/python2.7/site-packages/openpyxl/writer/theme.py | Christian-Castro/castro_odoo8 | 8247fdb20aa39e043b6fa0c4d0af509462ab3e00 | [
"Unlicense"
] | 7 | 2016-12-12T02:29:42.000Z | 2020-05-12T21:21:21.000Z | venv/lib/python2.7/site-packages/openpyxl/writer/theme.py | Christian-Castro/castro_odoo8 | 8247fdb20aa39e043b6fa0c4d0af509462ab3e00 | [
"Unlicense"
] | 31 | 2017-01-05T06:07:28.000Z | 2018-05-27T13:13:06.000Z | venv/lib/python2.7/site-packages/openpyxl/writer/theme.py | Christian-Castro/castro_odoo8 | 8247fdb20aa39e043b6fa0c4d0af509462ab3e00 | [
"Unlicense"
] | 3 | 2017-12-21T23:30:12.000Z | 2019-01-03T20:51:52.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
# Copyright (c) 2010-2016 openpyxl
"""Write the theme xml based on a fixed string."""
# package imports
theme_xml = """<?xml version="1.0"?>
<a:theme xmlns:a="http://schemas.openxmlformats.org/drawingml/2006/main" name="Office Theme">
<a:themeElements>
<a:clrScheme name="Office">
<a:dk1>
<a:sysClr val="windowText" lastClr="000000"/>
</a:dk1>
<a:lt1>
<a:sysClr val="window" lastClr="FFFFFF"/>
</a:lt1>
<a:dk2>
<a:srgbClr val="1F497D"/>
</a:dk2>
<a:lt2>
<a:srgbClr val="EEECE1"/>
</a:lt2>
<a:accent1>
<a:srgbClr val="4F81BD"/>
</a:accent1>
<a:accent2>
<a:srgbClr val="C0504D"/>
</a:accent2>
<a:accent3>
<a:srgbClr val="9BBB59"/>
</a:accent3>
<a:accent4>
<a:srgbClr val="8064A2"/>
</a:accent4>
<a:accent5>
<a:srgbClr val="4BACC6"/>
</a:accent5>
<a:accent6>
<a:srgbClr val="F79646"/>
</a:accent6>
<a:hlink>
<a:srgbClr val="0000FF"/>
</a:hlink>
<a:folHlink>
<a:srgbClr val="800080"/>
</a:folHlink>
</a:clrScheme>
<a:fontScheme name="Office">
<a:majorFont>
<a:latin typeface="Cambria"/>
<a:ea typeface=""/>
<a:cs typeface=""/>
<a:font script="Jpan" typeface="MS Pゴシック"/>
<a:font script="Hang" typeface="맑은 고딕"/>
<a:font script="Hans" typeface="宋体"/>
<a:font script="Hant" typeface="新細明體"/>
<a:font script="Arab" typeface="Times New Roman"/>
<a:font script="Hebr" typeface="Times New Roman"/>
<a:font script="Thai" typeface="Tahoma"/>
<a:font script="Ethi" typeface="Nyala"/>
<a:font script="Beng" typeface="Vrinda"/>
<a:font script="Gujr" typeface="Shruti"/>
<a:font script="Khmr" typeface="MoolBoran"/>
<a:font script="Knda" typeface="Tunga"/>
<a:font script="Guru" typeface="Raavi"/>
<a:font script="Cans" typeface="Euphemia"/>
<a:font script="Cher" typeface="Plantagenet Cherokee"/>
<a:font script="Yiii" typeface="Microsoft Yi Baiti"/>
<a:font script="Tibt" typeface="Microsoft Himalaya"/>
<a:font script="Thaa" typeface="MV Boli"/>
<a:font script="Deva" typeface="Mangal"/>
<a:font script="Telu" typeface="Gautami"/>
<a:font script="Taml" typeface="Latha"/>
<a:font script="Syrc" typeface="Estrangelo Edessa"/>
<a:font script="Orya" typeface="Kalinga"/>
<a:font script="Mlym" typeface="Kartika"/>
<a:font script="Laoo" typeface="DokChampa"/>
<a:font script="Sinh" typeface="Iskoola Pota"/>
<a:font script="Mong" typeface="Mongolian Baiti"/>
<a:font script="Viet" typeface="Times New Roman"/>
<a:font script="Uigh" typeface="Microsoft Uighur"/>
</a:majorFont>
<a:minorFont>
<a:latin typeface="Calibri"/>
<a:ea typeface=""/>
<a:cs typeface=""/>
<a:font script="Jpan" typeface="MS Pゴシック"/>
<a:font script="Hang" typeface="맑은 고딕"/>
<a:font script="Hans" typeface="宋体"/>
<a:font script="Hant" typeface="新細明體"/>
<a:font script="Arab" typeface="Arial"/>
<a:font script="Hebr" typeface="Arial"/>
<a:font script="Thai" typeface="Tahoma"/>
<a:font script="Ethi" typeface="Nyala"/>
<a:font script="Beng" typeface="Vrinda"/>
<a:font script="Gujr" typeface="Shruti"/>
<a:font script="Khmr" typeface="DaunPenh"/>
<a:font script="Knda" typeface="Tunga"/>
<a:font script="Guru" typeface="Raavi"/>
<a:font script="Cans" typeface="Euphemia"/>
<a:font script="Cher" typeface="Plantagenet Cherokee"/>
<a:font script="Yiii" typeface="Microsoft Yi Baiti"/>
<a:font script="Tibt" typeface="Microsoft Himalaya"/>
<a:font script="Thaa" typeface="MV Boli"/>
<a:font script="Deva" typeface="Mangal"/>
<a:font script="Telu" typeface="Gautami"/>
<a:font script="Taml" typeface="Latha"/>
<a:font script="Syrc" typeface="Estrangelo Edessa"/>
<a:font script="Orya" typeface="Kalinga"/>
<a:font script="Mlym" typeface="Kartika"/>
<a:font script="Laoo" typeface="DokChampa"/>
<a:font script="Sinh" typeface="Iskoola Pota"/>
<a:font script="Mong" typeface="Mongolian Baiti"/>
<a:font script="Viet" typeface="Arial"/>
<a:font script="Uigh" typeface="Microsoft Uighur"/>
</a:minorFont>
</a:fontScheme>
<a:fmtScheme name="Office">
<a:fillStyleLst>
<a:solidFill>
<a:schemeClr val="phClr"/>
</a:solidFill>
<a:gradFill rotWithShape="1">
<a:gsLst>
<a:gs pos="0">
<a:schemeClr val="phClr">
<a:tint val="50000"/>
<a:satMod val="300000"/>
</a:schemeClr>
</a:gs>
<a:gs pos="35000">
<a:schemeClr val="phClr">
<a:tint val="37000"/>
<a:satMod val="300000"/>
</a:schemeClr>
</a:gs>
<a:gs pos="100000">
<a:schemeClr val="phClr">
<a:tint val="15000"/>
<a:satMod val="350000"/>
</a:schemeClr>
</a:gs>
</a:gsLst>
<a:lin ang="16200000" scaled="1"/>
</a:gradFill>
<a:gradFill rotWithShape="1">
<a:gsLst>
<a:gs pos="0">
<a:schemeClr val="phClr">
<a:shade val="51000"/>
<a:satMod val="130000"/>
</a:schemeClr>
</a:gs>
<a:gs pos="80000">
<a:schemeClr val="phClr">
<a:shade val="93000"/>
<a:satMod val="130000"/>
</a:schemeClr>
</a:gs>
<a:gs pos="100000">
<a:schemeClr val="phClr">
<a:shade val="94000"/>
<a:satMod val="135000"/>
</a:schemeClr>
</a:gs>
</a:gsLst>
<a:lin ang="16200000" scaled="0"/>
</a:gradFill>
</a:fillStyleLst>
<a:lnStyleLst>
<a:ln w="9525" cap="flat" cmpd="sng" algn="ctr">
<a:solidFill>
<a:schemeClr val="phClr">
<a:shade val="95000"/>
<a:satMod val="105000"/>
</a:schemeClr>
</a:solidFill>
<a:prstDash val="solid"/>
</a:ln>
<a:ln w="25400" cap="flat" cmpd="sng" algn="ctr">
<a:solidFill>
<a:schemeClr val="phClr"/>
</a:solidFill>
<a:prstDash val="solid"/>
</a:ln>
<a:ln w="38100" cap="flat" cmpd="sng" algn="ctr">
<a:solidFill>
<a:schemeClr val="phClr"/>
</a:solidFill>
<a:prstDash val="solid"/>
</a:ln>
</a:lnStyleLst>
<a:effectStyleLst>
<a:effectStyle>
<a:effectLst>
<a:outerShdw blurRad="40000" dist="20000" dir="5400000" rotWithShape="0">
<a:srgbClr val="000000">
<a:alpha val="38000"/>
</a:srgbClr>
</a:outerShdw>
</a:effectLst>
</a:effectStyle>
<a:effectStyle>
<a:effectLst>
<a:outerShdw blurRad="40000" dist="23000" dir="5400000" rotWithShape="0">
<a:srgbClr val="000000">
<a:alpha val="35000"/>
</a:srgbClr>
</a:outerShdw>
</a:effectLst>
</a:effectStyle>
<a:effectStyle>
<a:effectLst>
<a:outerShdw blurRad="40000" dist="23000" dir="5400000" rotWithShape="0">
<a:srgbClr val="000000">
<a:alpha val="35000"/>
</a:srgbClr>
</a:outerShdw>
</a:effectLst>
<a:scene3d>
<a:camera prst="orthographicFront">
<a:rot lat="0" lon="0" rev="0"/>
</a:camera>
<a:lightRig rig="threePt" dir="t">
<a:rot lat="0" lon="0" rev="1200000"/>
</a:lightRig>
</a:scene3d>
<a:sp3d>
<a:bevelT w="63500" h="25400"/>
</a:sp3d>
</a:effectStyle>
</a:effectStyleLst>
<a:bgFillStyleLst>
<a:solidFill>
<a:schemeClr val="phClr"/>
</a:solidFill>
<a:gradFill rotWithShape="1">
<a:gsLst>
<a:gs pos="0">
<a:schemeClr val="phClr">
<a:tint val="40000"/>
<a:satMod val="350000"/>
</a:schemeClr>
</a:gs>
<a:gs pos="40000">
<a:schemeClr val="phClr">
<a:tint val="45000"/>
<a:shade val="99000"/>
<a:satMod val="350000"/>
</a:schemeClr>
</a:gs>
<a:gs pos="100000">
<a:schemeClr val="phClr">
<a:shade val="20000"/>
<a:satMod val="255000"/>
</a:schemeClr>
</a:gs>
</a:gsLst>
<a:path path="circle">
<a:fillToRect l="50000" t="-80000" r="50000" b="180000"/>
</a:path>
</a:gradFill>
<a:gradFill rotWithShape="1">
<a:gsLst>
<a:gs pos="0">
<a:schemeClr val="phClr">
<a:tint val="80000"/>
<a:satMod val="300000"/>
</a:schemeClr>
</a:gs>
<a:gs pos="100000">
<a:schemeClr val="phClr">
<a:shade val="30000"/>
<a:satMod val="200000"/>
</a:schemeClr>
</a:gs>
</a:gsLst>
<a:path path="circle">
<a:fillToRect l="50000" t="50000" r="50000" b="50000"/>
</a:path>
</a:gradFill>
</a:bgFillStyleLst>
</a:fmtScheme>
</a:themeElements>
<a:objectDefaults/>
<a:extraClrSchemeLst/>
</a:theme>
"""
def write_theme():
    """Return the fixed theme XML document (the module-level ``theme_xml``)."""
    return theme_xml
| 35.141892 | 100 | 0.489617 |
9a8a6aae3db8620b701a830b30fe4fd0bfdbd9d4 | 368 | py | Python | boutiques/admin.py | Amechi101/concepteur-market | 428cf9843dbe2f3fefe63fb44255d98e449d3d18 | [
"MIT"
] | null | null | null | boutiques/admin.py | Amechi101/concepteur-market | 428cf9843dbe2f3fefe63fb44255d98e449d3d18 | [
"MIT"
] | null | null | null | boutiques/admin.py | Amechi101/concepteur-market | 428cf9843dbe2f3fefe63fb44255d98e449d3d18 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.contrib import admin
from boutiques.models import Boutique
class BoutiqueAdmin(admin.ModelAdmin):
    """Django admin configuration for the Boutique model."""
    # Columns shown on the admin change-list page.
    list_display = ["name", "address", "city", "state", "zipcode", "boutique_website", "is_active"]
    # The admin search box matches against the boutique name only.
    search_fields = ["name"]
    # Paginate the change list at 10 rows per page.
    list_per_page = 10
# Register models below: attach Boutique to the admin with its custom options.
admin.site.register(Boutique, BoutiqueAdmin)
| 21.647059 | 96 | 0.769022 |
4ddbdff9908fb19fa3f53f3c97c95c6ed730d32a | 3,543 | py | Python | openxc/sources/trace.py | hopper-maker/openxc-python | 2054c3d7a7ba09b8f0eeecc2348185857dc22f5f | [
"BSD-3-Clause"
] | null | null | null | openxc/sources/trace.py | hopper-maker/openxc-python | 2054c3d7a7ba09b8f0eeecc2348185857dc22f5f | [
"BSD-3-Clause"
] | null | null | null | openxc/sources/trace.py | hopper-maker/openxc-python | 2054c3d7a7ba09b8f0eeecc2348185857dc22f5f | [
"BSD-3-Clause"
] | null | null | null | """A data source for reading from pre-recorded OpenXC trace files."""
import logging
import time
from .base import DataSourceError, BytestreamDataSource
from openxc.formats.json import JsonFormatter
LOG = logging.getLogger(__name__)
class TraceDataSource(BytestreamDataSource):
    """A class to replay a previously recorded OpenXC vehicle data trace file.

    For details on the trace file format, see
    http://openxcplatform.com/android/testing.html.
    """

    def __init__(self, filename=None, realtime=True, loop=True, **kwargs):
        """Construct the source and attempt to open the trace file.

        filename - the full absolute path to the trace file

        realtime - if ``True``, the trace will be replayed at approximately
            the same cadence as it was recorded. Otherwise, the trace file
            will be replayed as fast as possible (likely much faster than
            any vehicle).

        loop - if ``True``, the trace file will be looped and will provide
            data until the process exits or the source is stopped.
        """
        super(TraceDataSource, self).__init__(**kwargs)
        self.realtime = realtime
        self.loop = loop
        self.filename = filename
        self._reopen_file()

    def _reopen_file(self):
        # (Re)open the trace file from the top and reset the wall-clock
        # anchor used by the realtime pacing logic.
        if getattr(self, 'trace_file', None) is not None:
            self.trace_file.close()
        self.trace_file = self._open_file(self.filename)
        self.starting_time = time.time()

    def _store_timestamp(self, timestamp):
        """If not already saved, cache the first timestamp in the active trace
        file on the instance.
        """
        if getattr(self, 'first_timestamp', None) is None:
            self.first_timestamp = timestamp
            LOG.debug("Storing %d as the first timestamp of the trace file %s",
                    self.first_timestamp, self.filename)

    def read(self):
        """Read a line of data from the input source at a time."""
        line = self.trace_file.readline()
        if line == '':
            if self.loop:
                # Fix: previously the stale empty string fell through to the
                # deserializer after reopening; read the first line of the
                # freshly opened file instead.
                self._reopen_file()
                line = self.trace_file.readline()
            else:
                self.trace_file.close()
                self.trace_file = None
                raise DataSourceError()
        message = JsonFormatter.deserialize(line)
        timestamp = message.get('timestamp', None)
        if self.realtime and timestamp is not None:
            # Pace playback so messages appear at their recorded cadence.
            self._store_timestamp(timestamp)
            self._wait(self.starting_time, self.first_timestamp, timestamp)
        # Downstream byte-stream parsing expects a NUL message delimiter.
        return line + "\x00"

    @staticmethod
    def _open_file(filename):
        """Attempt to open the file at ``filename`` for reading.

        Raises:
            DataSourceError, if the file cannot be opened.
        """
        if filename is None:
            raise DataSourceError("Trace filename is not defined")
        try:
            trace_file = open(filename, "r")
        except IOError as e:
            raise DataSourceError("Unable to open trace file %s" % filename, e)
        else:
            LOG.debug("Opened trace file %s", filename)
            return trace_file

    @staticmethod
    def _wait(starting_time, first_timestamp, timestamp):
        """Given that the first timestamp in the trace file is
        ``first_timestamp`` and we started playing back the file at
        ``starting_time``, block until the current ``timestamp`` should occur.
        """
        target_time = starting_time + (timestamp - first_timestamp)
        time.sleep(max(target_time - time.time(), 0))
| 36.153061 | 79 | 0.627717 |
78a182f4375ea14e8968899ba8b22c167947fa30 | 1,724 | py | Python | poetryhub/urls.py | KadogoKenya/PoetryHub | 80769c7b7657ee03b6bdcbc420c022a8518d9fc4 | [
"MIT"
] | null | null | null | poetryhub/urls.py | KadogoKenya/PoetryHub | 80769c7b7657ee03b6bdcbc420c022a8518d9fc4 | [
"MIT"
] | null | null | null | poetryhub/urls.py | KadogoKenya/PoetryHub | 80769c7b7657ee03b6bdcbc420c022a8518d9fc4 | [
"MIT"
] | null | null | null | """poetryhub URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
# from django.urls import path
from django.contrib.auth import views
from django.urls import path, include
from django.conf.urls import url
from users import views as user_views
from django.contrib.auth import views as auth_views
# from rest_framework.authtoken.views import obtain_auth_token
from poetry import views
# from django.contrib.auth import views as auth_views
# from rest_framework.authtoken.views import obtain_auth_token
# from computersystem.views import ComputerList
# Project-level URL routing: admin, the poetry app at the site root, TinyMCE
# editor endpoints, and the user-account views.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',include('poetry.urls')),  # poetry app owns the site root
    url(r'^tinymce/', include('tinymce.urls')),  # rich-text editor endpoints
    path('register/', user_views.register, name='register'),
    path('profile/', user_views.profile, name='profile'),
    # Built-in auth views rendered with project-specific templates.
    path('login/', auth_views.LoginView.as_view(template_name='users/login.html'), name='login'),
    path('logout/', auth_views.LogoutView.as_view(template_name='users/logout.html'), name='logout'),
    path('display_profile/', user_views.display_profile, name='display_profile'),
]
| 39.181818 | 101 | 0.741879 |
f2c6489cb1d71f21799cfa6f29d5ef316ae22c1b | 143,583 | py | Python | Lib/typeworld/client/__init__.py | davelab6/typeworld | 978b4d31aa41c95729df52975864778910172935 | [
"Apache-2.0"
] | 1 | 2021-08-16T17:00:01.000Z | 2021-08-16T17:00:01.000Z | Lib/typeworld/client/__init__.py | davelab6/typeworld | 978b4d31aa41c95729df52975864778910172935 | [
"Apache-2.0"
] | null | null | null | Lib/typeworld/client/__init__.py | davelab6/typeworld | 978b4d31aa41c95729df52975864778910172935 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import sys
import json
import copy
import platform
import urllib.request
import urllib.error
import urllib.parse
import traceback
import time
import base64
import threading
import ssl
import certifi
import semver
import logging
import inspect
import re
from time import gmtime, strftime
import requests
import requests.exceptions
import typeworld.api
from typeworld.api import VERSION
from typeworld.client.helpers import (
ReadFromFile,
WriteToFile,
MachineName,
addAttributeToURL,
OSName,
Garbage,
)
WIN = platform.system() == "Windows"
MAC = platform.system() == "Darwin"
LINUX = platform.system() == "Linux"
CI = os.getenv("CI", "false").lower() == "true"
GAE = os.getenv("GAE_ENV", "").startswith("standard")
MOTHERSHIP = "https://api.type.world/v1"
if MAC:
from AppKit import NSUserDefaults
from typeworld.client.helpers import nslog
class DummyKeyring(object):
    """In-memory stand-in for a system keyring, keyed by (service, user)."""

    def __init__(self):
        # Maps (key, username) tuples to stored password strings.
        self.passwords = {}

    def set_password(self, key, username, password):
        """Store (or overwrite) the password for this key/username pair."""
        self.passwords[(key, username)] = password

    def get_password(self, key, username):
        """Return the stored password, or None if none was set."""
        return self.passwords.get((key, username))

    def delete_password(self, key, username):
        """Forget the password for this pair; a no-op when absent."""
        self.passwords.pop((key, username), None)
dummyKeyRing = DummyKeyring()
if "TRAVIS" in os.environ:
import tempfile
tempFolder = tempfile.mkdtemp()
def urlIsValid(url):
    """Lightweight sanity check of a typeworld:// subscription URL.

    Returns a ``(valid, errorMessage)`` tuple; ``errorMessage`` is None on
    success. Expected shape: ``typeworld://<protocol>+http(s)//<rest>``.
    """
    # str.find() yields strictly ascending indices only when the pieces occur
    # in this exact order: "typeworld://", then "+", then "http", then a
    # second "//" after the "http". A missing piece returns -1 and breaks the
    # chained comparison.
    if (
        not url.find("typeworld://")
        < url.find("+")
        < url.find("http")
        < url.find("//", url.find("http"))
    ):
        return False, "URL is malformed."
    # At most one "@" (the credentials separator) can be parsed.
    if url.count("@") > 1:
        return (
            False,
            "URL contains more than one @ sign, so don’t know how to parse it.",
        )
    # The custom protocol prefix (before "://") must be one registered in
    # typeworld.api.PROTOCOLS.
    found = False
    for protocol in typeworld.api.PROTOCOLS:
        if url.startswith(protocol + "://"):
            found = True
            break
    if not found:
        return (
            False,
            "Unknown custom protocol, known are: %s" % (typeworld.api.PROTOCOLS),
        )
    # The custom scheme itself contributes one "://"; any additional one makes
    # the URL ambiguous.
    if url.count("://") > 1:
        return (
            False,
            (
                "URL contains more than one :// combination, "
                "so don’t know how to parse it."
            ),
        )
    return True, None
class URL(object):
    """Decomposed typeworld:// subscription URL with re-assembly helpers.

    Parsing is delegated to splitJSONURL(); the methods below rebuild the
    URL with or without the secret credentials.
    """

    def __init__(self, url):
        (
            self.customProtocol,
            self.protocol,
            self.transportProtocol,
            self.subscriptionID,
            self.secretKey,
            self.accessToken,
            self.restDomain,
        ) = splitJSONURL(url)

    def _prefix(self):
        # Shared "typeworld://json+https//" style prefix; "://" of the
        # transport protocol is collapsed to "//" inside the compound URL.
        return "%s%s+%s" % (
            self.customProtocol,
            self.protocol,
            self.transportProtocol.replace("://", "//"),
        )

    def unsecretURL(self):
        """Display-safe URL: the secret key is masked by the literal word
        "secretKey"."""
        if self.subscriptionID and self.secretKey:
            return "%s%s:secretKey@%s" % (
                self._prefix(),
                self.subscriptionID,
                self.restDomain,
            )
        elif self.subscriptionID:
            return "%s%s@%s" % (self._prefix(), self.subscriptionID, self.restDomain)
        else:
            return "%s%s" % (self._prefix(), self.restDomain)

    def shortUnsecretURL(self):
        """Like unsecretURL(), but always omits the secret-key part."""
        if self.subscriptionID:
            return "%s%s@%s" % (self._prefix(), self.subscriptionID, self.restDomain)
        else:
            return "%s%s" % (self._prefix(), self.restDomain)

    def secretURL(self):
        """Fully credentialed URL, including the plain secret key."""
        if self.subscriptionID and self.secretKey:
            return "%s%s:%s@%s" % (
                self._prefix(),
                self.subscriptionID,
                self.secretKey,
                self.restDomain,
            )
        elif self.subscriptionID:
            return "%s%s@%s" % (self._prefix(), self.subscriptionID, self.restDomain)
        else:
            return "%s%s" % (self._prefix(), self.restDomain)

    def HTTPURL(self):
        """Plain transport URL: transport protocol plus the REST domain."""
        return "%s%s" % (self.transportProtocol, self.restDomain)
def getProtocol(url):
    """Dynamically load the protocol handler for the given subscription URL.

    Looks for ``protocols/<protocolName>.py`` (or ``.pyc``) next to this
    file, executes it, and returns ``(True, TypeWorldProtocol instance)`` on
    success, or ``(False, errorMessage)`` when no such handler ships with
    this app.
    """
    protocolName = URL(url).protocol
    for ext in (".py", ".pyc"):
        # Hoisted: the same path was previously joined twice.
        modulePath = os.path.join(
            os.path.dirname(__file__), "protocols", protocolName + ext
        )
        if os.path.exists(modulePath):
            # Fix: "import importlib" alone does not guarantee that the
            # importlib.util submodule is initialized; import it explicitly.
            import importlib.util

            # NOTE(review): the spec is registered under the name "json" —
            # presumably historical; kept unchanged for compatibility.
            spec = importlib.util.spec_from_file_location("json", modulePath)
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
            protocolObject = module.TypeWorldProtocol(url)
            return True, protocolObject
    return False, "Protocol %s doesn’t exist in this app (yet)." % protocolName
def request(url, parameters={}, method="POST"):
    """Perform request in a loop 10 times, because the central server’s instance
    might shut down unexpectedly during a request, especially longer running ones.

    Returns ``(success, content_or_message, response)``. Only transport-level
    exceptions are retried; once an HTTP response arrives (even an error
    status), it is final.
    """
    message = None
    for i in range(10):
        try:
            if method == "POST":
                response = requests.post(url, parameters, timeout=30)
            elif method == "GET":
                response = requests.get(url, timeout=30)
        except Exception:
            # Build a redacted description of the failed call; values of
            # *key/*secret parameters must never leak into error messages.
            if parameters:
                redacted = copy.copy(parameters)
                for key in redacted:
                    if key.lower().endswith("key"):
                        redacted[key] = "*****"
                    if key.lower().endswith("secret"):
                        redacted[key] = "*****"
                message = (
                    f"Response from {url} with parameters "
                    f"{redacted} after {i+1} tries: "
                    + traceback.format_exc().splitlines()[-1]
                )
            else:
                message = traceback.format_exc().splitlines()[-1]
            # Fix: the documented retry never happened because this branch
            # returned immediately; keep trying until attempts are exhausted.
            continue
        else:
            if response.status_code != 200:
                return False, f"HTTP Error {response.status_code}", response
            else:
                return True, response.content, response
    # All attempts raised; report the last failure.
    return False, message, None
def splitJSONURL(url):
    """Split a typeworld:// URL into its seven components.

    Returns (customProtocol, protocol, transportProtocol, subscriptionID,
    secretKey, accessToken, domain). Credential parts that are absent come
    back as empty strings.
    """
    customProtocol = "typeworld://"
    remainder = url.replace(customProtocol, "")

    # The subscription protocol is everything before the first "+".
    protocol = remainder.split("+")[0]
    remainder = remainder.replace(protocol + "+", "")

    # Restore the "//"-collapsed transport markers to real schemes.
    for collapsed, scheme in (
        ("http//", "http://"),
        ("https//", "https://"),
        ("HTTP//", "http://"),
        ("HTTPS//", "https://"),
    ):
        remainder = remainder.replace(collapsed, scheme)

    transportProtocol = None
    for candidate in ("https://", "http://"):
        if remainder.startswith(candidate):
            transportProtocol = candidate
            break

    urlRest = remainder[len(transportProtocol) :]

    subscriptionID = ""
    secretKey = ""
    accessToken = ""
    if "@" in urlRest:
        # Credentials present: subscriptionID[:secretKey[:accessToken]]@domain
        credentials, domain = urlRest.split("@")
        parts = credentials.split(":")
        if len(parts) == 3:
            subscriptionID, secretKey, accessToken = parts
        elif len(parts) == 2:
            subscriptionID, secretKey = parts
        elif len(parts) == 1:
            subscriptionID = parts[0]
    else:
        # No credentials given
        domain = urlRest

    return (
        customProtocol,
        protocol,
        transportProtocol,
        subscriptionID,
        secretKey,
        accessToken,
        domain,
    )
class Preferences(object):
    """Base class for key/value preference stores backed by a plain dict.

    Subclasses override save() (and possibly the accessors) to persist to
    disk (JSON) or to platform defaults (AppKitNSUserDefaults).
    """

    def __init__(self):
        self._dict = {}  # nocoverage
        # (tests exercise the JSON / AppKitNSUserDefaults subclasses, not
        # this plain base class)

    def get(self, key):
        """Return the stored value, or None for unknown keys."""
        return self._dict.get(key)

    def set(self, key, value):
        """Store the value and immediately persist via save()."""
        self._dict[key] = value
        self.save()

    def remove(self, key):
        """Drop the key if present; unknown keys are ignored."""
        self._dict.pop(key, None)

    def save(self):
        """Persistence hook; the in-memory base class has nothing to write."""
        pass

    def dictionary(self):
        """Expose the raw backing dict."""
        return self._dict  # nocoverage
class JSON(Preferences):
    """Preferences persisted as a JSON document at a fixed file path."""

    def __init__(self, path):
        # Load existing preferences when the file is already on disk;
        # otherwise start from an empty mapping.
        self.path = path
        self._dict = {}
        if self.path and os.path.exists(self.path):
            self._dict = json.loads(ReadFromFile(self.path))

    def save(self):
        """Serialize current state to self.path, creating parent folders."""
        folder = os.path.dirname(self.path)
        if not os.path.exists(folder):
            os.makedirs(folder)
        WriteToFile(self.path, json.dumps(self._dict))

    def dictionary(self):
        """Return the raw backing dict."""
        return self._dict
class AppKitNSUserDefaults(Preferences):
    """Preferences stored in macOS NSUserDefaults (a named suite), with a
    Python-side cache and conversion of bridged Objective-C values."""

    def __init__(self, name):
        # NSUserDefaults = objc.lookUpClass('NSUserDefaults')
        self.defaults = NSUserDefaults.alloc().initWithSuiteName_(name)
        # Cache so repeated get() calls skip the Objective-C bridge.
        self.values = {}

    def get(self, key):
        """Return the (converted, cached) value for ``key``, or None."""
        if key in self.values:
            return self.values[key]
        else:
            o = self.defaults.objectForKey_(key)
            if o:
                # Bridge NS containers and strings into native Python types;
                # class-name sniffing matches the PyObjC proxy class names.
                if "Array" in o.__class__.__name__:
                    o = list(o)
                elif "Dictionary" in o.__class__.__name__:
                    o = dict(o)
                elif "unicode" in o.__class__.__name__:
                    o = str(o)
            self.values[key] = o
            return self.values[key]

    def set(self, key, value):
        """Cache the value and write it through to the defaults suite."""
        self.values[key] = value
        self.defaults.setObject_forKey_(value, key)

    def remove(self, key):
        """Drop the key from the cache and from the defaults suite."""
        if key in self.values:
            del self.values[key]
        if self.defaults.objectForKey_(key):
            self.defaults.removeObjectForKey_(key)

    def convertItem(self, item):
        """Recursively convert bridged NSArray/NSDictionary/NSString values
        into plain lists, dicts and strings; other values pass through."""
        if "Array" in item.__class__.__name__ or type(item) in (list, tuple):
            _list = list(item)
            for i, _item in enumerate(_list):
                _list[i] = self.convertItem(_item)
            return _list
        elif "Dictionary" in item.__class__.__name__ or type(item) == dict:
            d = dict(item)
            for k, v in d.items():
                d[k] = self.convertItem(v)
            return d
        elif "unicode" in item.__class__.__name__:
            return str(item)
        # Fix: previously unhandled leaf values (numbers, booleans, dates, …)
        # fell off the end of the method and were silently turned into None.
        return item

    def dictionary(self):
        """Return all stored defaults as a plain Python structure."""
        d = self.defaults.dictionaryRepresentation()
        return self.convertItem(d)
class TypeWorldClientDelegate(object):
    """Event-hook object attached to an APIClient.

    Subclass and override the public methods (``fontWillInstall`` etc.) to
    observe client activity. The client itself calls only the
    underscore-prefixed wrappers; each wrapper guards the user override with
    try/except and routes any exception to ``client.handleTraceback()`` so a
    faulty delegate cannot crash the client. The default public methods only
    sanity-check argument types.
    """
    def __init__(self):
        # ``client`` is assigned by APIClient when the delegate is attached.
        self.client = None
        self.initialize()
    def initialize(self):
        # Subclass hook, called once at construction time.
        pass
    def _fontWillInstall(self, font):
        try:
            self.fontWillInstall(font)
        except Exception:  # nocoverage
            self.client.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name)
            )
    def fontWillInstall(self, font):
        assert type(font) == typeworld.api.Font
    def _fontHasInstalled(self, success, message, font):
        try:
            self.fontHasInstalled(success, message, font)
        except Exception:  # nocoverage
            self.client.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name)
            )
    def fontHasInstalled(self, success, message, font):
        if success:
            assert type(font) == typeworld.api.Font
    def _fontWillUninstall(self, font):
        try:
            self.fontWillUninstall(font)
        except Exception:  # nocoverage
            self.client.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name)
            )
    def fontWillUninstall(self, font):
        assert type(font) == typeworld.api.Font
    def _fontHasUninstalled(self, success, message, font):
        try:
            self.fontHasUninstalled(success, message, font)
        except Exception:  # nocoverage
            self.client.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name)
            )
    def fontHasUninstalled(self, success, message, font):
        if success:
            assert type(font) == typeworld.api.Font
    def _subscriptionUpdateNotificationHasBeenReceived(self, subscription):
        try:
            self.subscriptionUpdateNotificationHasBeenReceived(subscription)
        except Exception:  # nocoverage
            self.client.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name)
            )
    def subscriptionUpdateNotificationHasBeenReceived(self, subscription):
        assert type(subscription) == typeworld.client.APISubscription
        pass
    # def _subscriptionInvitationHasBeenReceived(self, invitation):
    # 	try:
    # 		self.subscriptionInvitationHasBeenReceived(invitation)
    # 	except Exception: # nocoverage
    # 		self.client.handleTraceback( # nocoverage
    # 			sourceMethod=getattr(self, sys._getframe().f_code.co_name)
    # 		)
    # def subscriptionInvitationHasBeenReceived(self, invitation):
    # 	pass
    def _userAccountUpdateNotificationHasBeenReceived(self):
        try:
            self.userAccountUpdateNotificationHasBeenReceived()
        except Exception:  # nocoverage
            self.client.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name)
            )
    def userAccountUpdateNotificationHasBeenReceived(self):
        pass
    def _userAccountHasBeenUpdated(self):
        try:
            self.userAccountHasBeenUpdated()
        except Exception:  # nocoverage
            self.client.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name)
            )
    def userAccountHasBeenUpdated(self):
        pass
    def _subscriptionHasBeenDeleted(self, subscription):
        try:
            self.subscriptionHasBeenDeleted(subscription)
        except Exception:  # nocoverage
            self.client.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name)
            )
    def subscriptionHasBeenDeleted(self, subscription):
        pass
    def _publisherHasBeenDeleted(self, publisher):
        try:
            self.publisherHasBeenDeleted(publisher)
        except Exception:  # nocoverage
            self.client.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name)
            )
    def publisherHasBeenDeleted(self, publisher):
        pass
    def _subscriptionHasBeenAdded(self, subscription):
        try:
            self.subscriptionHasBeenAdded(subscription)
        except Exception:  # nocoverage
            self.client.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name)
            )
    def subscriptionHasBeenAdded(self, subscription):
        pass
    def _subscriptionWillUpdate(self, subscription):
        try:
            self.subscriptionWillUpdate(subscription)
        except Exception:  # nocoverage
            self.client.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name)
            )
    def subscriptionWillUpdate(self, subscription):
        pass
    def _subscriptionHasBeenUpdated(self, subscription, success, message, changes):
        try:
            self.subscriptionHasBeenUpdated(subscription, success, message, changes)
        except Exception:  # nocoverage
            self.client.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name)
            )
    def subscriptionHasBeenUpdated(self, subscription, success, message, changes):
        pass
    def _clientPreferenceChanged(self, key, value):
        try:
            self.clientPreferenceChanged(key, value)
        except Exception:  # nocoverage
            self.client.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name)
            )
    def clientPreferenceChanged(self, key, value):
        pass
    def _messageQueueConnected(self):
        try:
            self.messageQueueConnected()
        except Exception:  # nocoverage
            self.client.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name)
            )
    def messageQueueConnected(self):
        pass
    def _messageQueueDisconnected(self):
        try:
            self.messageQueueDisconnected()
        except Exception:  # nocoverage
            self.client.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name)
            )
    def messageQueueDisconnected(self):
        pass
class APIInvitation(object):
    """Base for invitation records: copies each declared keyword from the
    source dict onto the instance as an attribute."""

    keywords = ()

    def __init__(self, d):
        # Every declared keyword must be present in d (KeyError otherwise);
        # keys outside ``keywords`` are ignored.
        for key in self.keywords:
            setattr(self, key, d[key])
class APIPendingInvitation(APIInvitation):
    """Invitation received by this user that is still awaiting a decision."""
    # Attributes copied from the source dict by APIInvitation.__init__().
    keywords = (
        "url",
        "ID",
        "invitedByUserName",
        "invitedByUserEmail",
        "time",
        "canonicalURL",
        "publisherName",
        "subscriptionName",
        "logoURL",
        "backgroundColor",
        "fonts",
        "families",
        "foundries",
        "websiteURL",
    )
    def accept(self):
        # NOTE(review): ``self.parent`` is attached by the owning object
        # elsewhere in the client — confirm before relying on it here.
        return self.parent.acceptInvitation(self.url)
    def decline(self):
        return self.parent.declineInvitation(self.url)
class APIAcceptedInvitation(APIInvitation):
    """Invitation this user has already accepted."""
    # Attributes copied from the source dict by APIInvitation.__init__().
    keywords = (
        "url",
        "ID",
        "invitedByUserName",
        "invitedByUserEmail",
        "time",
        "canonicalURL",
        "publisherName",
        "subscriptionName",
        "logoURL",
        "backgroundColor",
        "fonts",
        "families",
        "foundries",
        "websiteURL",
    )
class APISentInvitation(APIInvitation):
    """Invitation this user has sent to someone else."""
    # Attributes copied from the source dict by APIInvitation.__init__().
    keywords = (
        "url",
        "invitedUserName",
        "invitedUserEmail",
        "invitedTime",
        "acceptedTime",
        "confirmed",
    )
class APIClient(object):
"""\
Main Type.World client app object.
Use it to load repositories and install/uninstall fonts.
"""
    def __init__(
        self,
        preferences=None,
        secretTypeWorldAPIKey=None,
        delegate=None,
        mothership=None,
        mode="headless",
        zmqSubscriptions=False,
        online=False,
        testing=False,
        externallyControlled=False,
        secretServerAuthKey=None,
        inCompiledApp=False,
        commercial=False,
        appID="world.type.headless",
    ):
        """Set up client state.

        Any exception during construction is routed to handleTraceback()
        rather than propagated.

        preferences — Preferences store (defaults to the in-memory base)
        delegate — TypeWorldClientDelegate receiving event callbacks
        mothership — central API endpoint (defaults to MOTHERSHIP)
        mode — "gui" or "headless"
        zmqSubscriptions — enable ZeroMQ message-queue subscriptions
        online — immediately fetch settings/connect (see wentOnline())
        """
        try:
            self._preferences = preferences or Preferences()
            # if self:
            # 	self.clearPendingOnlineCommands()
            self._publishers = {}
            self._subscriptionsUpdated = []
            self.onlineCommandsQueue = []
            self._syncProblems = []
            self.secretTypeWorldAPIKey = secretTypeWorldAPIKey
            self.delegate = delegate or TypeWorldClientDelegate()
            self.delegate.client = self
            self.mothership = mothership or MOTHERSHIP
            self.mode = mode  # gui or headless
            self.zmqSubscriptions = zmqSubscriptions
            self._isSetOnline = online
            self.lastOnlineCheck = {}
            self.testing = testing
            self.externallyControlled = externallyControlled
            self.secretServerAuthKey = secretServerAuthKey
            self.inCompiledApp = inCompiledApp
            self.commercial = commercial
            self.appID = appID
            self._zmqRunning = False
            self._zmqCallbacks = {}
            if self._isSetOnline:
                # TLS context backed by certifi's CA bundle.
                self.sslcontext = ssl.create_default_context(cafile=certifi.where())
            # For Unit Testing
            self.testScenario = None
            self._systemLocale = None
            self._online = {}
            # wentOnline()
            if self._isSetOnline:
                self.wentOnline()
            # ZMQ: subscribe to this user's topic and connect the queue.
            if self._isSetOnline and self.zmqSubscriptions:
                if self.user():
                    topicID = "user-%s" % self.user()
                    self.registerZMQCallback(topicID, self.zmqCallback)
                self.manageMessageQueueConnection()
        except Exception as e:  # nocoverage
            self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
def __repr__(self):
return f'<APIClient user="{self.user()}">'
    def tracebackTest(self):
        # Deliberately raises NameError ("abc" is undefined) to exercise the
        # handleTraceback() reporting path, passing the exception object.
        try:
            assert abc  # noqa: F821
        except Exception as e:
            self.handleTraceback(
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def tracebackTest2(self):
        # Like tracebackTest(), but exercises the handleTraceback() variant
        # that is not handed the exception object explicitly.
        try:
            assert abc  # noqa: F821
        except Exception:
            self.handleTraceback(
                sourceMethod=getattr(self, sys._getframe().f_code.co_name)
            )
    def wentOnline(self):
        # Called when the client is switched online: fetch server settings
        # and verify that the fields required later (message-queue address,
        # breaking API versions) actually arrived.
        success, message = self.downloadSettings(performCommands=True)
        assert success
        assert self.get("downloadedSettings")["messagingQueue"].startswith("tcp://")
        assert self.get("downloadedSettings")["breakingAPIVersions"]
    def wentOffline(self):
        # Counterpart of wentOnline(); currently nothing needs tearing down.
        pass
    def zmqSetup(self):
        """Connect the ZeroMQ SUB socket to the messaging queue from the
        downloaded settings and start the background listener thread.
        No-op while already running."""
        import zmq
        import zmq.error
        if not self._zmqRunning:
            self._zmqctx = zmq.Context.instance()
            self.zmqSocket = self._zmqctx.socket(zmq.SUB)
            # TCP keepalives so the long-lived socket is not silently dropped;
            # https://github.com/zeromq/libzmq/issues/2882
            self.zmqSocket.setsockopt(zmq.TCP_KEEPALIVE, 1)
            self.zmqSocket.setsockopt(zmq.TCP_KEEPALIVE_CNT, 10)
            self.zmqSocket.setsockopt(zmq.TCP_KEEPALIVE_IDLE, 30)
            self.zmqSocket.setsockopt(zmq.TCP_KEEPALIVE_INTVL, 30)
            target = self.get("downloadedSettings")["messagingQueue"]
            self.zmqSocket.connect(target)
            self._zmqRunning = True
            # Daemon thread: won't keep the process alive on exit.
            self.zmqListenerThread = threading.Thread(
                target=self.zmqListener, daemon=True
            )
            self.zmqListenerThread.start()
            self.delegate._messageQueueConnected()
    def zmqListener(self):
        """Background-thread loop: poll the SUB socket every 0.1s (non-blocking)
        and dispatch each received (topic, message) pair to the callback
        registered for that topic. Exits when self._zmqRunning turns False."""
        import zmq
        import zmq.error
        while self._zmqRunning:
            time.sleep(0.1)
            try:
                topic, msg = self.zmqSocket.recv_multipart(flags=zmq.NOBLOCK)
                topic = topic.decode()
                msg = msg.decode()
                if topic in self._zmqCallbacks:
                    self._zmqCallbacks[topic](msg)
            # zmq.Again: no message pending (expected with NOBLOCK)
            except zmq.Again:
                pass
            # ZMQError: e.g. socket closed during shutdown — ignore and re-check flag
            except zmq.error.ZMQError:
                pass
    def quit(self):
        """Shut down the client; currently only tears down the ZMQ connection."""
        self.zmqQuit()
    def zmqQuit(self):
        """Stop the ZMQ listener thread, close the socket and destroy the
        context, then notify the delegate. No-op when not running."""
        if self._zmqRunning:
            # for topic in self._zmqCallbacks:
            #     self.zmqSocket.setsockopt(zmq.UNSUBSCRIBE, topic.encode("ascii"))
            # Flip the flag first so the listener loop exits before join()
            self._zmqRunning = False
            self.zmqSocket.close()
            self._zmqctx.destroy()
            self.zmqListenerThread.join()
            # self._zmqctx.term()
            self.delegate._messageQueueDisconnected()
    def registerZMQCallback(self, topic, method):
        """Register *method* to be called with the message body for *topic*.

        Subscribes the live socket immediately when one is running; the
        callback map is updated regardless, so manageMessageQueueConnection()
        can subscribe later.
        """
        import zmq
        import zmq.error
        if self.zmqSubscriptions:
            if self._zmqRunning and not self.zmqSocket.closed:
                self.zmqSocket.setsockopt(zmq.SUBSCRIBE, topic.encode("ascii"))
            self._zmqCallbacks[topic] = method
    def unregisterZMQCallback(self, topic):
        """Remove the callback for *topic* and unsubscribe the live socket
        when one is running. No-op for unknown topics."""
        import zmq
        import zmq.error
        if self.zmqSubscriptions:
            if topic in self._zmqCallbacks:
                if self._zmqRunning and not self.zmqSocket.closed:
                    self.zmqSocket.setsockopt(zmq.UNSUBSCRIBE, topic.encode("ascii"))
                del self._zmqCallbacks[topic]
    def zmqCallback(self, message):
        """Handle an incoming message on the user topic.

        Expects a JSON body; on a "pullUpdates" command that did NOT originate
        from this very app instance (sourceAnonymousAppID absent, empty, or
        different), notifies the delegate that the user account was updated.
        """
        try:
            if message:
                data = json.loads(message)
                if data["command"] == "pullUpdates" and (
                    "sourceAnonymousAppID" not in data
                    or (
                        "sourceAnonymousAppID" in data
                        and data["sourceAnonymousAppID"]
                        and data["sourceAnonymousAppID"] != self.anonymousAppID()
                    )
                ):
                    self.delegate._userAccountUpdateNotificationHasBeenReceived()
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
# def clearPendingOnlineCommands(self):
# commands = self.get('pendingOnlineCommands') or {}
# commands['acceptInvitation'] = []
# commands['declineInvitation'] = []
# commands['downloadSubscriptions'] = []
# commands['linkUser'] = []
# commands['syncSubscriptions'] = []
# commands['unlinkUser'] = []
# commands['uploadSubscriptions'] = []
# self.set('pendingOnlineCommands', commands)
def holdsSubscriptionWithLiveNotifcations(self):
for publisher in self.publishers():
for subscription in publisher.subscriptions():
success, command = subscription.protocol.endpointCommand()
if success:
if command.sendsLiveNotifications:
return True
return False
def requiresMessageQueueConnection(self):
return (
(self.user() and self.get("userAccountStatus") == "pro")
or self.holdsSubscriptionWithLiveNotifcations()
or self.testing
# or self.testScenario == "simulateProAccount"
)
    def manageMessageQueueConnection(self):
        """Reconcile the actual ZMQ connection state with
        requiresMessageQueueConnection(): connect (and subscribe all known
        topics) when needed, disconnect when no longer needed."""
        import zmq
        import zmq.error
        if self._isSetOnline and self.zmqSubscriptions:
            requiresMessageQueueConnection = self.requiresMessageQueueConnection()
            if requiresMessageQueueConnection and not self._zmqRunning:
                self.zmqSetup()
                # Subscribe topics registered while disconnected
                for topic in self._zmqCallbacks:
                    self.zmqSocket.setsockopt(zmq.SUBSCRIBE, topic.encode("ascii"))
            elif not requiresMessageQueueConnection and self._zmqRunning:
                self.zmqQuit()
def get(self, key):
try:
return self._preferences.get(
"world.type.guiapp." + key
) or self._preferences.get(key)
except Exception as e: # nocoverage
return self.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
    def set(self, key, value):
        """Write a preference under the namespaced key and notify the delegate
        of the change."""
        try:
            self._preferences.set("world.type.guiapp." + key, value)
            self.delegate._clientPreferenceChanged(key, value)
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def remove(self, key):
        """Delete a preference under both the namespaced and the plain key."""
        try:
            self._preferences.remove("world.type.guiapp." + key)
            self._preferences.remove(key)
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
def performRequest(self, url, parameters={}, method="POST"):
try:
parameters["sourceAnonymousAppID"] = self.anonymousAppID()
parameters["clientVersion"] = VERSION
if self.testScenario == "simulateFaultyClientVersion":
parameters["clientVersion"] = "abc"
elif self.testScenario == "simulateNoClientVersion":
del parameters["clientVersion"]
if self.testing:
parameters["testing"] = "true"
# if self._isSetOnline:
if self.testScenario:
parameters["testScenario"] = self.testScenario
if self.testScenario == "simulateCentralServerNotReachable":
url = "https://api.type.worlddd/api"
return request(url, parameters, method)
# else:
# return False, 'APIClient is set to work offline as set by:
# APIClient(online=False)'
except Exception as e: # nocoverage
success, message = self.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
return success, message, None
def pendingInvitations(self):
try:
_list = []
if self.get("pendingInvitations"):
for invitation in self.get("pendingInvitations"):
invitation = APIPendingInvitation(invitation)
invitation.parent = self
_list.append(invitation)
return _list
except Exception as e: # nocoverage
return self.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def acceptedInvitations(self):
try:
_list = []
if self.get("acceptedInvitations"):
for invitation in self.get("acceptedInvitations"):
invitation = APIAcceptedInvitation(invitation)
invitation.parent = self
_list.append(invitation)
return _list
except Exception as e: # nocoverage
return self.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def sentInvitations(self):
try:
_list = []
if self.get("sentInvitations"):
for invitation in self.get("sentInvitations"):
invitation = APISentInvitation(invitation)
invitation.parent = self
_list.append(invitation)
return _list
except Exception as e: # nocoverage
return self.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def secretSubscriptionURLs(self):
try:
_list = []
for publisher in self.publishers():
for subscription in publisher.subscriptions():
_list.append(subscription.protocol.secretURL())
return _list
except Exception as e: # nocoverage
return self.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def unsecretSubscriptionURLs(self):
try:
_list = []
for publisher in self.publishers():
for subscription in publisher.subscriptions():
_list.append(subscription.protocol.unsecretURL())
return _list
except Exception as e: # nocoverage
return self.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def timezone(self):
try:
return strftime("%z", gmtime())
except Exception as e: # nocoverage
return self.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
    def syncProblems(self):
        """Return the error messages collected during the last
        performCommands() run (live list, not a copy)."""
        return self._syncProblems
    def addMachineIDToParameters(self, parameters):
        """Augment (in place) and return a request-parameters dict with machine
        identification: model, human-readable name, specs, node name, and OS
        version. Missing pieces are simply omitted."""
        try:
            (
                machineModelIdentifier,
                machineHumanReadableName,
                machineSpecsDescription,
            ) = MachineName()
            if machineModelIdentifier:
                parameters["machineModelIdentifier"] = machineModelIdentifier
            if machineHumanReadableName:
                parameters["machineHumanReadableName"] = machineHumanReadableName
            if machineSpecsDescription:
                parameters["machineSpecsDescription"] = machineSpecsDescription
            import platform
            parameters["machineNodeName"] = platform.node()
            osName = OSName()
            if osName:
                parameters["machineOSVersion"] = osName
            return parameters
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def online(self, server=None):
        """Check reachability of *server* (default "type.world").

        Positive results are cached per server for 10 seconds. Returns True
        immediately inside Google App Engine deployments; the
        "simulateNotOnline" test scenario forces False. HTTP-level errors
        still count as "online"; only URL-level errors mean offline.
        """
        try:
            if self.testScenario == "simulateNotOnline":
                return False
            if "GAE_DEPLOYMENT_ID" in os.environ:
                return True  # nocoverage
            if not server:
                server = "type.world"
            if not server.startswith("http"):
                server = "http://" + server
            # Serve from the 10-second positive cache when possible
            if (
                server in self.lastOnlineCheck
                and type(self.lastOnlineCheck[server]) is float
            ):
                if time.time() - self.lastOnlineCheck[server] < 10:
                    return True
            try:
                urllib.request.urlopen(server, context=self.sslcontext)  # Python 3.x
            except urllib.error.URLError:
                return False
            # Do nothing if HTTP errors are returned, and let the subsequent methods
            # handle the details
            except urllib.error.HTTPError:
                pass
            self.lastOnlineCheck[server] = time.time()
            return True
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
def appendCommands(self, commandName, commandsList=["pending"]):
try:
# Set up data structure
commands = self.get("pendingOnlineCommands")
if not self.get("pendingOnlineCommands"):
commands = {}
# Init empty
if commandName not in commands:
commands[commandName] = []
if (
commandName in commands and len(commands[commandName]) == 0
): # set anyway if empty because NSObject immutability
commands[commandName] = []
self.set("pendingOnlineCommands", commands)
# Add commands to list
commands = self.get("pendingOnlineCommands")
if type(commandsList) in (str, int):
commandsList = [commandsList]
for commandListItem in commandsList:
if commandListItem not in commands[commandName]:
commands[commandName] = list(commands[commandName])
commands[commandName].append(commandListItem)
self.set("pendingOnlineCommands", commands)
except Exception as e: # nocoverage
return self.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
    def performCommands(self):
        """Flush the queued offline commands against the central server.

        Processes "pendingOnlineCommands" in a fixed order (unlink, link,
        sync, upload, accept/decline invitations, download subscriptions,
        download settings). Each command list is cleared only after its
        server call succeeds, so failures are retried on the next run;
        failure messages accumulate in self._syncProblems.

        Returns (True, None) on full success, otherwise (False, message).
        When offline, returns (False, [message, headline]).
        """
        try:
            success, message = True, None
            self._syncProblems = []
            if self.online():
                commands = self.get("pendingOnlineCommands") or {}
                if "unlinkUser" in commands and commands["unlinkUser"]:
                    success, message = self.performUnlinkUser()
                    if success:
                        commands["unlinkUser"] = []
                        self.set("pendingOnlineCommands", commands)
                    else:
                        self._syncProblems.append(message)
                if "linkUser" in commands and commands["linkUser"]:
                    success, message = self.performLinkUser(commands["linkUser"][0])
                    if success:
                        commands["linkUser"] = []
                        self.set("pendingOnlineCommands", commands)
                    else:
                        self._syncProblems.append(message)
                if "syncSubscriptions" in commands and commands["syncSubscriptions"]:
                    success, message = self.performSyncSubscriptions(
                        commands["syncSubscriptions"]
                    )
                    if success:
                        commands["syncSubscriptions"] = []
                        self.set("pendingOnlineCommands", commands)
                    else:
                        self._syncProblems.append(message)
                if (
                    "uploadSubscriptions" in commands
                    and commands["uploadSubscriptions"]
                ):
                    success, message = self.perfomUploadSubscriptions(
                        commands["uploadSubscriptions"]
                    )
                    if success:
                        commands["uploadSubscriptions"] = []
                        self.set("pendingOnlineCommands", commands)
                    else:
                        self._syncProblems.append(message)
                if "acceptInvitation" in commands and commands["acceptInvitation"]:
                    success, message = self.performAcceptInvitation(
                        commands["acceptInvitation"]
                    )
                    if success:
                        commands["acceptInvitation"] = []
                        self.set("pendingOnlineCommands", commands)
                    else:
                        self._syncProblems.append(message)
                if "declineInvitation" in commands and commands["declineInvitation"]:
                    success, message = self.performDeclineInvitation(
                        commands["declineInvitation"]
                    )
                    if success:
                        commands["declineInvitation"] = []
                        self.set("pendingOnlineCommands", commands)
                    else:
                        self._syncProblems.append(message)
                if (
                    "downloadSubscriptions" in commands
                    and commands["downloadSubscriptions"]
                ):
                    success, message = self.performDownloadSubscriptions()
                    if success:
                        commands["downloadSubscriptions"] = []
                        self.set("pendingOnlineCommands", commands)
                    else:
                        self._syncProblems.append(message)
                if "downloadSettings" in commands and commands["downloadSettings"]:
                    success, message = self.performDownloadSettings()
                    if success:
                        commands["downloadSettings"] = []
                        self.set("pendingOnlineCommands", commands)
                    else:
                        self._syncProblems.append(message)
                if self._syncProblems:
                    return False, self._syncProblems[0]
                else:
                    return True, None
            else:
                self._syncProblems.append("#(response.notOnline)")
                return (
                    False,
                    ["#(response.notOnline)", "#(response.notOnline.headline)"],
                )
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def uploadSubscriptions(self, performCommands=True):
        """Queue an upload of all held subscription URLs (or the "empty"
        marker) plus a follow-up download, optionally executing the queue
        immediately. Returns (success, message)."""
        try:
            self.appendCommands(
                "uploadSubscriptions", self.secretSubscriptionURLs() or ["empty"]
            )
            self.appendCommands("downloadSubscriptions")
            success, message = True, None
            if performCommands:
                success, message = self.performCommands()
            return success, message
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def perfomUploadSubscriptions(self, oldURLs):
        """Upload the given subscription URLs to the server for the linked
        user. Returns (True, None) on success, (False, message) otherwise;
        returns None implicitly when no user is linked.

        NOTE(review): the method name keeps its historical misspelling
        ("perfom") because performCommands() calls it by this name.
        """
        try:
            userID = self.user()
            if userID:
                # "pending" is a queue placeholder, not a real URL
                if oldURLs == ["pending"]:
                    oldURLs = ["empty"]
                self.set("lastServerSync", int(time.time()))
                # self.log('Uploading subscriptions: %s' % oldURLs)
                parameters = {
                    "anonymousAppID": self.anonymousAppID(),
                    "anonymousUserID": userID,
                    "subscriptionURLs": ",".join(oldURLs),
                    "secretKey": self.secretKey(),
                }
                success, response, responseObject = self.performRequest(
                    self.mothership + "/uploadUserSubscriptions", parameters
                )
                if not success:
                    return False, response
                response = json.loads(response.decode())
                if response["response"] != "success":
                    return (
                        False,
                        [
                            "#(response.%s)" % response["response"],
                            "#(response.%s.headline)" % response["response"],
                        ],
                    )
                # Success
                return True, None
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def downloadSubscriptions(self, performCommands=True):
        """Queue a subscription download for the linked user and optionally
        execute the queue immediately. Returns (success, message)."""
        try:
            if self.user():
                self.appendCommands("downloadSubscriptions")
            if performCommands:
                return self.performCommands()
            else:
                return True, None
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def performDownloadSubscriptions(self):
        """Fetch the linked user's subscriptions from the server and apply them
        via executeDownloadSubscriptions(). Returns (True, None) when no user
        is linked or on success; (False, message) otherwise."""
        try:
            userID = self.user()
            if userID:
                parameters = {
                    "anonymousAppID": self.anonymousAppID(),
                    "anonymousUserID": userID,
                    "userTimezone": self.timezone(),
                    "secretKey": self.secretKey(),
                }
                success, response, responseObject = self.performRequest(
                    self.mothership + "/downloadUserSubscriptions", parameters
                )
                if not success:
                    return False, response
                response = json.loads(response.decode())
                if response["response"] != "success":
                    return (
                        False,
                        [
                            "#(response.%s)" % response["response"],
                            "#(response.%s.headline)" % response["response"],
                        ],
                    )
                self.set("lastServerSync", int(time.time()))
                return self.executeDownloadSubscriptions(response)
            return True, None
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def executeDownloadSubscriptions(self, response):
        """Apply a server subscription payload to local state.

        Handles, in order: app-instance revocation (uninstalls protected
        fonts), account flags, website token storage, adding/updating held
        subscriptions (using serverTimestamp to decide updates), invitation
        lists, and deletion of local subscriptions no longer held on the
        server. Returns (True, None) on success, (False, message) otherwise.
        """
        try:
            oldURLs = self.secretSubscriptionURLs()
            # Uninstall all protected fonts when app instance is reported as revoked
            if response["appInstanceIsRevoked"]:
                success, message = self.uninstallAllProtectedFonts()
                if not success:
                    return False, message
            # Verified Email Address
            if "userAccountEmailIsVerified" in response:
                self.set(
                    "userAccountEmailIsVerified", response["userAccountEmailIsVerified"]
                )
            # User Account Status
            if "userAccountStatus" in response:
                self.set("userAccountStatus", response["userAccountStatus"])
            # Website Token
            if "typeWorldWebsiteToken" in response:
                keyring = self.keyring()
                keyring.set_password(
                    self.userKeychainKey(self.user()),
                    "typeWorldWebsiteToken",
                    response["typeWorldWebsiteToken"],
                )
            # Add new subscriptions
            for incomingSubscription in response["heldSubscriptions"]:
                # Incoming server timestamp
                incomingServerTimestamp = None
                if (
                    "serverTimestamp" in incomingSubscription
                    and incomingSubscription["serverTimestamp"]
                ):
                    incomingServerTimestamp = incomingSubscription["serverTimestamp"]
                # Add new subscription
                if incomingSubscription["url"] not in oldURLs:
                    success, message, publisher, subscription = self.addSubscription(
                        incomingSubscription["url"], updateSubscriptionsOnServer=False
                    )
                    if success:
                        if incomingServerTimestamp:
                            subscription.set("serverTimestamp", incomingServerTimestamp)
                        self.delegate._subscriptionHasBeenAdded(subscription)
                    else:
                        return (
                            False,
                            "Received from self.addSubscription() for %s: %s"
                            % (incomingSubscription["url"], message),
                        )
                # Update subscription
                else:
                    # NOTE(review): the inner `break` exits only the
                    # subscriptions loop; the outer publishers loop keeps
                    # running and may reassign `subscription`. Looks like it
                    # relies on the match being found in the last publisher —
                    # confirm.
                    subscription = None
                    for publisher in self.publishers():
                        for subscription in publisher.subscriptions():
                            if (
                                subscription.url
                                == URL(incomingSubscription["url"]).unsecretURL()
                            ):
                                break
                    # Update only when the server copy is newer (or local has
                    # no timestamp yet)
                    if (
                        incomingServerTimestamp
                        and subscription.get("serverTimestamp")
                        and int(incomingServerTimestamp)
                        > int(subscription.get("serverTimestamp"))
                    ) or (
                        incomingServerTimestamp
                        and not subscription.get("serverTimestamp")
                    ):
                        success, message, changes = subscription.update()
                        if success:
                            subscription.set(
                                "serverTimestamp", int(incomingServerTimestamp)
                            )
            def replace_item(obj, key, replace_value):
                # Replace all dict values equal to *key* with *replace_value*
                # (used to map JSON nulls to empty strings)
                for k, v in obj.items():
                    if v == key:
                        obj[k] = replace_value
                return obj
            # oldPendingInvitations = self.pendingInvitations()
            # Invitations
            self.set(
                "acceptedInvitations",
                [replace_item(x, None, "") for x in response["acceptedInvitations"]],
            )
            self.set(
                "pendingInvitations",
                [replace_item(x, None, "") for x in response["pendingInvitations"]],
            )
            self.set(
                "sentInvitations",
                [replace_item(x, None, "") for x in response["sentInvitations"]],
            )
            # newPendingInvitations = self.pendingInvitations()
            # TODO: trigger notification
            # import threading
            # preloadThread = threading.Thread(target=self.preloadLogos)
            # preloadThread.start()
            # Delete subscriptions
            for publisher in self.publishers():
                for subscription in publisher.subscriptions():
                    if not subscription.protocol.secretURL() in [
                        x["url"] for x in response["heldSubscriptions"]
                    ]:
                        subscription.delete(updateSubscriptionsOnServer=False)
            self.delegate._userAccountHasBeenUpdated()
            return True, None
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def acceptInvitation(self, url):
        """Queue acceptance of the invitation at *url* and execute the queue.

        Returns (success, message); returns None implicitly when no user is
        linked.
        """
        try:
            userID = self.user()
            if userID:
                self.appendCommands("acceptInvitation", [url])
            return self.performCommands()
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def performAcceptInvitation(self, urls):
        """Accept the pending invitations matching *urls* on the server, then
        apply the returned subscription payload. Returns (success, message);
        returns None implicitly when no user is linked.

        NOTE(review): asserts that every URL maps to a pending invitation —
        ``assert`` is stripped under ``python -O``.
        """
        try:
            userID = self.user()
            # Get Invitation IDs from urls
            IDs = []
            for invitation in self.pendingInvitations():
                for url in urls:
                    if invitation.url == url:
                        if invitation.ID not in IDs:
                            IDs.append(invitation.ID)
            assert len(IDs) == len(urls)
            if userID:
                self.set("lastServerSync", int(time.time()))
                parameters = {
                    "anonymousAppID": self.anonymousAppID(),
                    "anonymousUserID": userID,
                    "subscriptionIDs": ",".join(IDs),
                    "secretKey": self.secretKey(),
                }
                success, response, responseObject = self.performRequest(
                    self.mothership + "/acceptInvitations", parameters
                )
                if not success:
                    return False, response
                response = json.loads(response.decode())
                if response["response"] != "success":
                    return (
                        False,
                        [
                            "#(response.%s)" % response["response"],
                            "#(response.%s.headline)" % response["response"],
                        ],
                    )
                # Success
                return self.executeDownloadSubscriptions(response)
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def declineInvitation(self, url):
        """Queue declination of the invitation at *url* and execute the queue.

        Returns (success, message); returns None implicitly when no user is
        linked.
        """
        try:
            userID = self.user()
            if userID:
                self.appendCommands("declineInvitation", [url])
            return self.performCommands()
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def performDeclineInvitation(self, urls):
        """Decline the pending invitations matching *urls* on the server, then
        apply the returned subscription payload. Returns (success, message);
        returns None implicitly when no user is linked.

        NOTE(review): asserts that every URL maps to a pending invitation —
        ``assert`` is stripped under ``python -O``.
        """
        try:
            userID = self.user()
            # Get Invitation IDs from urls
            IDs = []
            for invitation in self.pendingInvitations():
                for url in urls:
                    if invitation.url == url:
                        if invitation.ID not in IDs:
                            IDs.append(invitation.ID)
            assert len(IDs) == len(urls)
            if userID:
                self.set("lastServerSync", int(time.time()))
                parameters = {
                    "anonymousAppID": self.anonymousAppID(),
                    "anonymousUserID": userID,
                    "subscriptionIDs": ",".join(IDs),
                    "secretKey": self.secretKey(),
                }
                success, response, responseObject = self.performRequest(
                    self.mothership + "/declineInvitations", parameters
                )
                if not success:
                    return False, response
                response = json.loads(response.decode())
                if response["response"] != "success":
                    return (
                        False,
                        [
                            "#(response.%s)" % response["response"],
                            "#(response.%s.headline)" % response["response"],
                        ],
                    )
                # Success
                return self.executeDownloadSubscriptions(response)
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def syncSubscriptions(self, performCommands=True):
        """Queue a two-way subscription sync (current URLs or the "empty"
        marker) and optionally execute the queue. Returns (success, message)."""
        try:
            self.appendCommands(
                "syncSubscriptions", self.secretSubscriptionURLs() or ["empty"]
            )
            if performCommands:
                return self.performCommands()
            else:
                return True, None
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def performSyncSubscriptions(self, oldURLs):
        """Sync the given local subscription URLs with the server and add any
        subscriptions the server knows that are missing locally.

        On success returns (True, count) where count is the number of newly
        received subscriptions (server total minus local total); returns
        (True, None) when no user is linked, (False, message) otherwise.
        """
        try:
            userID = self.user()
            if userID:
                # "pending" is a queue placeholder, not a real URL
                if oldURLs == ["pending"]:
                    oldURLs = ["empty"]
                self.set("lastServerSync", int(time.time()))
                parameters = {
                    "anonymousAppID": self.anonymousAppID(),
                    "anonymousUserID": userID,
                    "subscriptionURLs": ",".join(oldURLs),
                    "secretKey": self.secretKey(),
                }
                success, response, responseObject = self.performRequest(
                    self.mothership + "/syncUserSubscriptions", parameters
                )
                if not success:
                    return False, response
                response = json.loads(response.decode())
                if response["response"] != "success":
                    return (
                        False,
                        [
                            "#(response.%s)" % response["response"],
                            "#(response.%s.headline)" % response["response"],
                        ],
                    )
                # Add new subscriptions
                for url in response["subscriptions"]:
                    if url not in oldURLs:
                        (
                            success,
                            message,
                            publisher,
                            subscription,
                        ) = self.addSubscription(url, updateSubscriptionsOnServer=False)
                        if not success:
                            return False, message
                # Success
                return True, len(response["subscriptions"]) - len(oldURLs)
            return True, None
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def downloadSettings(self, performCommands=True):
        """Download server settings immediately, or queue the download when
        *performCommands* is False. Returns (success, message)."""
        try:
            if performCommands:
                return self.performDownloadSettings()
            else:
                self.appendCommands("downloadSettings")
                return True, None
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def performDownloadSettings(self):
        """Fetch server settings (optionally authenticated when a user is
        linked) and store them in the "downloadedSettings" preference along
        with a download timestamp. Returns (success, message)."""
        try:
            parameters = {}
            if self.user():
                parameters["anonymousUserID"] = self.user()
                parameters["secretKey"] = self.secretKey()
            success, response, responseObject = self.performRequest(
                self.mothership + "/downloadSettings", parameters
            )
            if not success:
                return False, response
            response = json.loads(response.decode())
            if response["response"] != "success":
                return (
                    False,
                    [
                        "#(response.%s)" % response["response"],
                        "#(response.%s.headline)" % response["response"],
                    ],
                )
            self.set("downloadedSettings", response["settings"])
            self.set("lastSettingsDownloaded", int(time.time()))
            return True, None
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
def user(self):
try:
return self.get("typeworldUserAccount") or ""
except Exception as e: # nocoverage
return self.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def userKeychainKey(self, ID):
try:
return "https://%s@%s.type.world" % (ID, self.anonymousAppID())
except Exception as e: # nocoverage
return self.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def secretKey(self, userID=None):
try:
keyring = self.keyring()
return keyring.get_password(
self.userKeychainKey(userID or self.user()), "secretKey"
)
except Exception as e: # nocoverage
return self.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def userName(self):
try:
keyring = self.keyring()
return keyring.get_password(self.userKeychainKey(self.user()), "userName")
except Exception as e: # nocoverage
return self.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def userEmail(self):
try:
keyring = self.keyring()
return keyring.get_password(self.userKeychainKey(self.user()), "userEmail")
except Exception as e: # nocoverage
return self.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
    def createUserAccount(self, name, email, password1, password2):
        """Create a new server-side user account and link it to this app
        instance. Validates that all fields are present and the two passwords
        match. Returns (success, message)."""
        try:
            if self.online():
                if not name or not email or not password1 or not password2:
                    return False, "#(RequiredFieldEmpty)"
                if password1 != password2:
                    return False, "#(PasswordsDontMatch)"
                parameters = {
                    "name": name,
                    "email": email,
                    "password": password1,
                }
                # Server-side shared secret for privileged account creation
                if self.secretServerAuthKey:
                    parameters["SECRETKEY"] = self.secretServerAuthKey
                success, response, responseObject = self.performRequest(
                    self.mothership + "/createUserAccount", parameters
                )
                if not success:
                    return False, response
                response = json.loads(response.decode())
                if response["response"] != "success":
                    return (
                        False,
                        [
                            "#(response.%s)" % response["response"],
                            "#(response.%s.headline)" % response["response"],
                        ],
                    )
                # success
                return self.linkUser(response["anonymousUserID"], response["secretKey"])
            else:
                return (
                    False,
                    ["#(response.notOnline)", "#(response.notOnline.headline)"],
                )
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def deleteUserAccount(self, email, password):
        """Delete the server-side user account identified by *email*/*password*,
        unlinking this app instance first when it is linked to that account.
        Returns (success, message)."""
        try:
            if self.online():
                # Required parameters
                if not email or not password:
                    return False, "#(RequiredFieldEmpty)"
                # Unlink user first
                if self.userEmail() == email:
                    success, message = self.performUnlinkUser()
                    if not success:
                        return False, message
                parameters = {
                    "email": email,
                    "password": password,
                }
                success, response, responseObject = self.performRequest(
                    self.mothership + "/deleteUserAccount", parameters
                )
                if not success:
                    return False, response
                response = json.loads(response.decode())
                if response["response"] != "success":
                    return (
                        False,
                        [
                            "#(response.%s)" % response["response"],
                            "#(response.%s.headline)" % response["response"],
                        ],
                    )
                # success
                return True, None
            else:
                return (
                    False,
                    ["#(response.notOnline)", "#(response.notOnline.headline)"],
                )
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def resendEmailVerification(self):
        """Ask the server to resend the verification email for the linked
        user's address. Returns (success, message)."""
        try:
            parameters = {
                "email": self.userEmail(),
            }
            success, response, responseObject = self.performRequest(
                self.mothership + "/resendEmailVerification", parameters
            )
            if not success:
                return False, response
            response = json.loads(response.decode())
            if response["response"] != "success":
                return (
                    False,
                    [
                        "#(response.%s)" % response["response"],
                        "#(response.%s.headline)" % response["response"],
                    ],
                )
            # success
            return True, None
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def logInUserAccount(self, email, password):
        """Authenticate against the server with *email*/*password* and, on
        success, link the returned user to this app instance.
        Returns (success, message)."""
        try:
            if not email or not password:
                return False, "#(RequiredFieldEmpty)"
            parameters = {
                "email": email,
                "password": password,
            }
            success, response, responseObject = self.performRequest(
                self.mothership + "/logInUserAccount", parameters
            )
            if not success:
                return False, response
            response = json.loads(response.decode())
            if response["response"] != "success":
                return (
                    False,
                    [
                        "#(response.%s)" % response["response"],
                        "#(response.%s.headline)" % response["response"],
                    ],
                )
            # success
            return self.linkUser(response["anonymousUserID"], response["secretKey"])
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def linkUser(self, userID, secretKey):
        """Store *secretKey* in the keychain for *userID*, then queue link +
        sync + download commands and execute them. Returns (success, message).

        NOTE(review): uses ``assert`` to verify the keychain write — stripped
        under ``python -O``.
        """
        try:
            # Set secret key now, so it doesn't show up in preferences when offline
            keyring = self.keyring()
            keyring.set_password(self.userKeychainKey(userID), "secretKey", secretKey)
            assert self.secretKey(userID) == secretKey
            self.appendCommands("linkUser", userID)
            self.appendCommands("syncSubscriptions")
            self.appendCommands("downloadSubscriptions")
            return self.performCommands()
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def performLinkUser(self, userID):
        """Register this app instance with the server for *userID* (including
        machine identification), then store the user ID in preferences,
        subscribe the user's ZMQ topic, and cache name/email in the keychain.
        Returns (success, message)."""
        try:
            parameters = {
                "anonymousAppID": self.anonymousAppID(),
                "anonymousUserID": userID,
                "secretKey": self.secretKey(userID),
            }
            parameters = self.addMachineIDToParameters(parameters)
            success, response, responseObject = self.performRequest(
                self.mothership + "/linkTypeWorldUserAccount", parameters
            )
            if not success:
                return False, response
            response = json.loads(response.decode())
            if response["response"] != "success":
                return (
                    False,
                    [
                        "#(response.%s)" % response["response"],
                        "#(response.%s.headline)" % response["response"],
                    ],
                )
            # Success
            self.set("typeworldUserAccount", userID)
            assert userID == self.user()
            # ZMQ
            topicID = "user-%s" % self.user()
            self.registerZMQCallback(topicID, self.zmqCallback)
            keyring = self.keyring()
            if "userEmail" in response:
                keyring.set_password(
                    self.userKeychainKey(userID), "userEmail", response["userEmail"]
                )
            if "userName" in response:
                keyring.set_password(
                    self.userKeychainKey(userID), "userName", response["userName"]
                )
            return True, None
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def linkedAppInstances(self):
        """Fetch all app instances linked to the user's account.

        Returns (True, instances) where each instance is a plain object with
        the server-provided fields as attributes; (False, message) on error
        or when no user is linked.
        """
        try:
            if not self.user():
                return False, "No user"
            parameters = {
                "anonymousAppID": self.anonymousAppID(),
                "anonymousUserID": self.user(),
                "secretKey": self.secretKey(),
            }
            success, response, responseObject = self.performRequest(
                self.mothership + "/userAppInstances", parameters
            )
            if not success:
                return False, response
            response = json.loads(response.decode())
            if response["response"] != "success":
                return (
                    False,
                    [
                        "#(response.%s)" % response["response"],
                        "#(response.%s.headline)" % response["response"],
                    ],
                )
            # Simple attribute container for server-provided fields
            class AppInstance(object):
                pass
            # Success
            instances = []
            for serverInstance in response["appInstances"]:
                instance = AppInstance()
                for key in serverInstance:
                    setattr(instance, key, serverInstance[key])
                instances.append(instance)
            return True, instances
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def revokeAppInstance(self, anonymousAppID=None):
        """Revoke an app instance on the server (defaults to this instance).
        Returns (success, message); (False, "No user") when no user is
        linked."""
        try:
            if not self.user():
                return False, "No user"
            parameters = {
                "anonymousAppID": anonymousAppID or self.anonymousAppID(),
                "anonymousUserID": self.user(),
                "secretKey": self.secretKey(),
            }
            success, response, responseObject = self.performRequest(
                self.mothership + "/revokeAppInstance", parameters
            )
            if not success:
                return False, response
            response = json.loads(response.decode())
            if response["response"] != "success":
                return (
                    False,
                    [
                        "#(response.%s)" % response["response"],
                        "#(response.%s.headline)" % response["response"],
                    ],
                )
            return True, None
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def reactivateAppInstance(self, anonymousAppID=None):
        """Reactivate a previously revoked app instance (this one by default)
        for the current user on the central server.

        Returns (True, None) on success or (False, error) on failure."""
        try:
            if not self.user():
                return False, "No user"
            parameters = {
                "anonymousAppID": anonymousAppID or self.anonymousAppID(),
                "anonymousUserID": self.user(),
                "secretKey": self.secretKey(),
            }
            success, response, responseObject = self.performRequest(
                self.mothership + "/reactivateAppInstance", parameters
            )
            if not success:
                return False, response
            response = json.loads(response.decode())
            if response["response"] != "success":
                return (
                    False,
                    [
                        "#(response.%s)" % response["response"],
                        "#(response.%s.headline)" % response["response"],
                    ],
                )
            return True, None
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
def unlinkUser(self):
try:
self.appendCommands("unlinkUser")
return self.performCommands()
except Exception as e: # nocoverage
return self.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
    def uninstallAllProtectedFonts(self, dryRun=False):
        """Uninstall all protected fonts across all subscriptions.

        With dryRun=True (used when the central server revokes everything),
        every protected font is collected regardless of whether it is
        installed locally. Returns (True, None) or (False, error)."""
        try:
            # Uninstall all protected fonts
            for publisher in self.publishers():
                for subscription in publisher.subscriptions():
                    (
                        success,
                        installabeFontsCommand,
                    ) = subscription.protocol.installableFontsCommand()
                    assert success
                    fontIDs = []
                    for foundry in installabeFontsCommand.foundries:
                        for family in foundry.families:
                            for font in family.fonts:
                                # Dry run from central server: add all fonts to list
                                if dryRun and font.protected:
                                    fontIDs.append(
                                        font.uniqueID
                                    )  # nocoverage (This is executed only when the
                                    # central server uninstalls *all* fonts)

                                # Run from local client, add only actually installed
                                # fonts
                                elif (
                                    not dryRun
                                    and font.protected
                                    and subscription.installedFontVersion(font=font)
                                ):
                                    fontIDs.append(font.uniqueID)
                    if fontIDs:
                        success, message = subscription.removeFonts(
                            fontIDs, dryRun=dryRun, updateSubscription=False
                        )
                        if not success:
                            return False, message
            return True, None
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def performUnlinkUser(self):
        """Unlink this app instance from the user account.

        Uninstalls all protected fonts, notifies the central server, then
        clears user-related preferences, invitations, the ZMQ user topic, and
        the keychain entries. Returns (True, None) or (False, error)."""
        try:
            userID = self.user()
            success, response = self.uninstallAllProtectedFonts()
            if not success:
                return False, response
            parameters = {
                "anonymousAppID": self.anonymousAppID(),
                "anonymousUserID": userID,
                "secretKey": self.secretKey(),
            }
            success, response, responseObject = self.performRequest(
                self.mothership + "/unlinkTypeWorldUserAccount", parameters
            )
            if not success:
                return False, response
            response = json.loads(response.decode())
            # Proceed with local cleanup even when the server doesn't know the user
            continueFor = ["userUnknown"]
            if (
                response["response"] != "success"
                and not response["response"] in continueFor
            ):
                return (
                    False,
                    [
                        "#(response.%s)" % response["response"],
                        "#(response.%s.headline)" % response["response"],
                    ],
                )
            self.set("typeworldUserAccount", "")
            self.set("userAccountEmailIsVerified", "")
            self.remove("acceptedInvitations")
            self.remove("pendingInvitations")
            self.remove("sentInvitations")

            # ZMQ
            topicID = "user-%s" % userID
            self.unregisterZMQCallback(topicID)

            keyring = self.keyring()
            keyring.delete_password(self.userKeychainKey(userID), "secretKey")
            keyring.delete_password(self.userKeychainKey(userID), "userEmail")
            keyring.delete_password(self.userKeychainKey(userID), "userName")

            # Success
            return True, None
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def systemLocale(self):
        """Return the OS user interface language as a two-letter code,
        cached after the first call."""
        try:
            if not self._systemLocale:
                if MAC:
                    from AppKit import NSLocale

                    # e.g. "en-US" / "en_US" -> "en"
                    self._systemLocale = str(
                        NSLocale.preferredLanguages()[0].split("_")[0].split("-")[0]
                    )
                else:
                    import locale

                    self._systemLocale = locale.getdefaultlocale()[0].split("_")[0]
            return self._systemLocale
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
def locale(self):
try:
"""\
Reads user locale from OS
"""
if self.get("localizationType") == "systemLocale":
_locale = [self.systemLocale()]
elif self.get("localizationType") == "customLocale":
_locale = [self.get("customLocaleChoice") or "en"]
else:
_locale = [self.systemLocale()]
if "en" not in _locale:
_locale.append("en")
return _locale
except Exception as e: # nocoverage
return self.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def expiringInstalledFonts(self):
try:
fonts = []
for publisher in self.publishers():
for subscription in publisher.subscriptions():
fonts.extend(subscription.expiringInstalledFonts())
return fonts
except Exception as e: # nocoverage
return self.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def amountOutdatedFonts(self):
try:
amount = 0
for publisher in self.publishers():
amount += publisher.amountOutdatedFonts()
return amount
except Exception as e: # nocoverage
return self.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
    def keyring(self):
        """Return the keyring module (with the platform backend selected), or
        the in-memory dummy keyring when running under CI or Google App
        Engine."""
        try:
            # Using keyring causes problems on all three MAC/WIN/LINUX
            # when used headlessly in a CI environment,
            # so we're using the dummy for CI, which sucks because
            # then you can't self-test it thoroughly during app build
            if (CI and not self.inCompiledApp) or GAE:
                keyring = dummyKeyRing
                return keyring

            import keyring  # nocoverage

            # Pin the platform-appropriate backend explicitly
            if MAC:  # nocoverage
                if self.inCompiledApp:
                    keyring.core.set_keyring(
                        keyring.core.load_keyring("keyring.backends.macOS.Keyring")
                    )  # nocoverage
            elif WIN:  # nocoverage
                keyring.core.set_keyring(
                    keyring.core.load_keyring(
                        "keyring.backends.Windows.WinVaultKeyring"
                    )
                )  # nocoverage
            elif LINUX:  # nocoverage
                keyring.core.set_keyring(
                    keyring.core.load_keyring("keyring.backends.kwallet.DBusKeyring")
                )  # nocoverage

            return keyring  # nocoverage
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def handleTraceback(self, file=None, sourceMethod=None, e=None):
        """Report an exception: normalize the current traceback, collect
        supplementary debug information, submit everything to the central
        server in a background thread, and log it locally.

        Always returns (False, payload) so callers can pass the failure
        straight through as their own return value."""
        payload = f"""\
Version: {typeworld.api.VERSION}
{traceback.format_exc()}
"""

        # Remove path parts to make tracebacks identical (so they don't re-surface)
        def removePathPrefix(_payload, _snippet, _file):
            # Strip the machine-local filesystem prefix before `_snippet`
            # from all 'File "..."' paths found in the traceback text
            m = re.search(r'File "(.+?)"', _payload, re.MULTILINE)
            if m:
                _file = m.group(1)
                index = _file.find(_snippet)
                if index != -1:
                    clientPathPrefix = _file[:index]
                    return _payload.replace(clientPathPrefix, "")
                else:
                    return _payload
            else:
                return _payload  # nocoverage (this seems to never get executed,
                # because code always contains `File "..."` like it should.
                # Leaving this here just in case) TODO

        # Normalize file paths
        if WIN:
            payload = (
                removePathPrefix(payload, "TypeWorld.exe", __file__)
                .replace("\\", "/")
                .replace("TypeWorld.exe", "app.py")
            )
        payload = removePathPrefix(payload, "typeworld/client/", __file__).replace(
            "\\", "/"
        )
        payload = removePathPrefix(payload, "app.py", file).replace("\\", "/")

        # Create supplementary information
        supplementary = {
            "os": OSName(),
            "file": file or __file__,
            "preferences": self._preferences.dictionary(),
        }
        if sourceMethod:
            # Bound method: include the class name in the signature
            if hasattr(sourceMethod, "__self__") and sourceMethod.__self__:
                supplementary["sourceMethodSignature"] = (
                    str(sourceMethod.__self__.__class__.__name__)
                    + "."
                    + str(sourceMethod.__name__)
                    + str(inspect.signature(sourceMethod))
                )
            else:
                supplementary["sourceMethodSignature"] = str(  # nocoverage
                    sourceMethod.__name__  # nocoverage
                ) + str(  # nocoverage
                    inspect.signature(sourceMethod)  # nocoverage
                )  # nocoverage
                # (currently not testing for calling this method without
                # a sourceMethod parameter)

        supplementary["traceback"] = payload
        supplementary["stack"] = []
        supplementary["trace"] = []
        for s in inspect.stack():
            supplementary["stack"].append(
                {
                    "filename": str(s.filename),
                    "lineno": str(s.lineno),
                    "function": str(s.function),
                    "code_context": str(s.code_context[0].replace("\t", " ").rstrip())
                    if s.code_context
                    else None,
                }
            )
        for s in inspect.trace():
            supplementary["trace"].append(
                {
                    "filename": str(s.filename),
                    "lineno": str(s.lineno),
                    "function": str(s.function),
                    "code_context": str(s.code_context[0].replace("\t", " ").rstrip())
                    if s.code_context
                    else None,
                }
            )

        # replace faulty line of code (some Python versions include the faulty code
        # line in the traceback output, some not)
        if supplementary["trace"] and supplementary["trace"][0]["code_context"]:
            payload = payload.replace(supplementary["trace"][0]["code_context"], "")
            payload = payload.replace("\n \n", "\n")

        parameters = {
            "payload": payload,
            "supplementary": json.dumps(supplementary),
        }

        # Submit to central server
        # if self.online(self.mothership):

        def handleTracebackWorker(self):
            # Runs on a background thread so reporting never blocks the caller
            success, response, responseObject = self.performRequest(
                self.mothership + "/handleTraceback", parameters
            )
            if success:
                response = json.loads(response.decode())
                if response["response"] != "success":
                    self.log("handleTraceback() error on server, step 2: %s" % response)
            if not success:
                self.log("handleTraceback() error on server, step 1: %s" % response)

        handleTracebackThread = threading.Thread(
            target=handleTracebackWorker, args=(self,)
        )
        handleTracebackThread.start()

        # Log
        if sourceMethod:
            self.log(
                payload
                + "\nMethod signature:\n"
                + supplementary["sourceMethodSignature"]
            )
        else:
            self.log(payload)  # nocoverage # nocoverage # nocoverage
            # (currently not testing for calling this method without a sourceMethod
            # parameter)

        return False, payload
def log(self, *arg):
string = "Type.World: %s" % " ".join(map(str, arg))
if MAC:
nslog(string)
else:
logging.debug(string)
    def prepareUpdate(self):
        # Reset the list of subscriptions updated during the current update run
        self._subscriptionsUpdated = []
def allSubscriptionsUpdated(self):
try:
for publisher in self.publishers():
for subscription in publisher.subscriptions():
if subscription.stillUpdating():
return False
return True
except Exception as e: # nocoverage
return self.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def deleteResources(self, urls):
try:
resources = self.get("resources") or {}
for url in urls:
for key in resources.keys():
if key.startswith(url):
del resources[key]
break
self.set("resources", resources)
except Exception as e: # nocoverage
return self.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
    def resourceByURL(self, url, binary=False, update=False):
        """Caches and returns content of a HTTP resource. If binary is set to True,
        content will be stored and returned as a base64-encoded string.

        Returns (success, content, mimeType). With update=True the cache is
        bypassed and refreshed."""
        try:
            resources = self.get("resources") or {}
            # One cache entry per URL *and* binary flag
            key = f"{url},binary={binary}"

            # Load fresh
            if key not in resources or update:
                if self.testScenario:
                    url = addAttributeToURL(url, "testScenario=%s" % self.testScenario)
                success, response, responseObject = request(url, method="GET")
                if not success:
                    return False, response, responseObject.headers["content-type"]
                content = responseObject.content
                if binary:
                    content = base64.b64encode(content).decode()
                else:
                    content = content.decode()
                # Stored as "mimetype,content"
                resources[key] = responseObject.headers["content-type"] + "," + content
                self.set("resources", resources)
                return True, content, responseObject.headers["content-type"]

            # Serve from cache
            else:
                response = resources[key]
                mimeType = response.split(",")[0]
                content = response[len(mimeType) + 1 :]
                return True, content, mimeType
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
def anonymousAppID(self):
try:
anonymousAppID = self.get("anonymousAppID")
if anonymousAppID is None or anonymousAppID == {}:
import uuid
anonymousAppID = str(uuid.uuid1())
self.set("anonymousAppID", anonymousAppID)
return anonymousAppID
except Exception as e: # nocoverage
return self.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def endpointCommand(self, url):
try:
# Check for URL validity
success, response = urlIsValid(url)
if not success:
return False, response
# Get subscription
success, protocol = getProtocol(url)
protocol.client = self
# Get Root Command
return protocol.endpointCommand(testScenario=self.testScenario)
except Exception as e: # nocoverage
return self.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
    def addSubscription(
        self,
        url,
        username=None,
        password=None,
        updateSubscriptionsOnServer=True,
        JSON=None,
    ):
        """Add a subscription by its typeworld:// URL.

        Performs validity/health checks, a breaking-API-version check, and a
        commercial-app permission check, then saves the publisher and
        subscription and optionally syncs the subscription list to the server.

        Returns (success, message, publisher, subscription)."""
        try:
            self._updatingProblem = None

            # Check for URL validity
            success, response = urlIsValid(url)
            if not success:
                return False, response, None, None

            # Get subscription
            success, message = getProtocol(url)
            if success:
                protocol = message
                protocol.client = self
            else:
                return False, message, None, None

            # Change secret key: URL is already known, only update its secret
            if protocol.unsecretURL() in self.unsecretSubscriptionURLs():

                # Initial endpointCommand
                success, message = self.endpointCommand(url)
                if success:
                    endpointCommand = message
                else:
                    return False, message, None, None

                protocol.setSecretKey(protocol.url.secretKey)
                publisher = self.publisher(endpointCommand.canonicalURL)
                subscription = publisher.subscription(protocol.unsecretURL(), protocol)

            else:

                # Initial Health Check
                success, response = protocol.aboutToAddSubscription(
                    anonymousAppID=self.anonymousAppID(),
                    anonymousTypeWorldUserID=self.user(),
                    accessToken=protocol.url.accessToken,
                    testScenario=self.testScenario,
                )
                if not success:
                    message = response
                    # self._updatingProblem = [
                    #     "#(response.loginRequired)",
                    #     "#(response.loginRequired.headline)",
                    # ]
                    return False, message, None, None

                # endpointCommand
                success, endpointCommand = protocol.endpointCommand(
                    testScenario=self.testScenario
                )
                assert success
                assert endpointCommand

                # Breaking API Version Check
                if "breakingAPIVersions" in self.get("downloadedSettings"):
                    breakingVersions = copy.copy(
                        self.get("downloadedSettings")["breakingAPIVersions"]
                    )
                    if self.testScenario == "simulateBreakingAPIVersion":
                        # Fabricate a next major version to trigger the check
                        versionParts = breakingVersions[-1].split(".")
                        versionParts[0] = str(int(versionParts[0]) + 1)
                        breakingVersions.append(".".join(versionParts))

                    success, rootCommand = protocol.rootCommand(
                        testScenario=self.testScenario
                    )
                    assert success
                    assert rootCommand
                    incomingVersion = rootCommand.version

                    for breakingVersion in breakingVersions:
                        # Breaking version is higher than local API version
                        if (
                            semver.VersionInfo.parse(breakingVersion).compare(
                                typeworld.api.VERSION
                            )
                            == 1
                            # Incoming version is higher than breaking
                        ) and (
                            semver.VersionInfo.parse(incomingVersion).compare(
                                breakingVersion
                            )
                            == 1
                        ):
                            return (
                                False,
                                [
                                    "#(response.appUpdateRequired)",
                                    "#(response.appUpdateRequired.headline)",
                                ],
                                None,
                                None,
                            )

                # Commercial app check
                if (
                    self.commercial
                    and self.appID not in endpointCommand.allowedCommercialApps
                ):
                    return (
                        False,
                        [
                            "#(response.commercialAppNotAllowed)",
                            "#(response.commercialAppNotAllowed.headline)",
                        ],
                        None,
                        None,
                    )

                publisher = self.publisher(endpointCommand.canonicalURL)
                subscription = publisher.subscription(protocol.unsecretURL(), protocol)

            # Success
            subscription.save()
            publisher.save()
            subscription.stillAlive()
            self.manageMessageQueueConnection()

            if updateSubscriptionsOnServer:
                success, message = self.uploadSubscriptions()
                if not success:
                    return (
                        False,
                        message,
                        None,
                        None,
                    )  # 'Response from client.uploadSubscriptions(): %s' %

            protocol.subscriptionAdded()

            return True, None, publisher, subscription
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def publisher(self, canonicalURL):
        """Return the (cached) APIPublisher for the given canonical URL,
        creating it if necessary.

        `exists` is set when the publisher is already saved in the
        preferences."""
        try:
            if canonicalURL not in self._publishers:
                e = APIPublisher(self, canonicalURL)
                self._publishers[canonicalURL] = e
            if self.get("publishers") and canonicalURL in self.get("publishers"):
                self._publishers[canonicalURL].exists = True
            return self._publishers[canonicalURL]
        except Exception as e:  # nocoverage
            return self.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
def publishers(self):
try:
if self.get("publishers"):
publishers = []
if self.get("publishers"):
for canonicalURL in self.get("publishers"):
publisher = self.publisher(canonicalURL)
if publisher.subscriptions():
publishers.append(publisher)
return publishers
else:
return []
except Exception as e: # nocoverage
return self.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
class APIPublisher(object):
    """\
    Represents an API endpoint, identified and grouped by the canonical URL attribute
    of the API responses. This API endpoint class can then hold several repositories.
    """

    def __init__(self, parent, canonicalURL):
        # parent: the APIClient instance
        self.parent = parent
        self.canonicalURL = canonicalURL
        # True once this publisher has been saved to the preferences
        self.exists = False
        self._subscriptions = {}
        self._updatingSubscriptions = []

    def folder(self):
        """Return the platform-specific folder into which fonts get
        installed."""
        try:
            if WIN:
                return os.path.join(os.environ["WINDIR"], "Fonts")
            elif MAC:
                from os.path import expanduser

                home = expanduser("~")
                folder = os.path.join(home, "Library", "Fonts", "Type.World App")
                return folder
            else:
                import tempfile

                return tempfile.gettempdir()
        except Exception as e:  # nocoverage
            self.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )

    def stillUpdating(self):
        """Return True while at least one subscription is being updated."""
        try:
            return len(self._updatingSubscriptions) > 0
        except Exception as e:  # nocoverage
            self.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )

    def updatingProblem(self):
        """Return the distinct update problems of all subscriptions as a list,
        or None when there are none."""
        try:
            problems = []
            for subscription in self.subscriptions():
                problem = subscription.updatingProblem()
                if problem and problem not in problems:
                    problems.append(problem)
            if problems:
                return problems
        except Exception as e:  # nocoverage
            self.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )

    def name(self, locale=["en"]):
        """Return (text, locale) of the publisher's name for the given locale
        preference list."""
        # NOTE(review): mutable default argument ["en"] — harmless only as
        # long as nothing mutates it; consider locale=None
        try:
            endpointCommand = self.subscriptions()[0].protocol.endpointCommand()[1]
            if endpointCommand:
                return endpointCommand.name.getTextAndLocale(locale=locale)
        except Exception as e:  # nocoverage
            self.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )

    def amountInstalledFonts(self):
        """Return the number of installed fonts across all subscriptions."""
        try:
            return len(self.installedFonts())
        except Exception as e:  # nocoverage
            self.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )

    def installedFonts(self):
        """Return the deduplicated list of installed fonts across all
        subscriptions."""
        try:
            _list = []
            for subscription in self.subscriptions():
                for font in subscription.installedFonts():
                    if font not in _list:
                        _list.append(font)
            return _list
        except Exception as e:  # nocoverage
            self.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )

    def amountOutdatedFonts(self):
        """Return the number of outdated fonts across all subscriptions."""
        try:
            return len(self.outdatedFonts())
        except Exception as e:  # nocoverage
            self.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )

    def outdatedFonts(self):
        """Return the deduplicated outdated-font entries across all
        subscriptions."""
        try:
            _list = []
            for subscription in self.subscriptions():
                for font in subscription.outdatedFonts():
                    if font not in _list:
                        _list.append(font)
            return _list
        except Exception as e:  # nocoverage
            self.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )

    # def currentSubscription(self):
    #     if self.get('currentSubscription'):
    #         subscription = self.subscription(self.get('currentSubscription'))
    #         if subscription:
    #             return subscription

    def get(self, key):
        """Read a per-publisher preference value."""
        try:
            preferences = self.parent.get("publisher(%s)" % self.canonicalURL) or {}
            if key in preferences:
                o = preferences[key]

                # Convert bridged container types (class names containing
                # "Array"/"Dictionary" — presumably Objective-C NSArray/
                # NSDictionary, TODO confirm) into native Python containers
                if "Array" in o.__class__.__name__:
                    o = list(o)
                elif "Dictionary" in o.__class__.__name__:
                    o = dict(o)

                return o
        except Exception as e:  # nocoverage
            self.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )

    def set(self, key, value):
        """Write a per-publisher preference value."""
        try:
            preferences = self.parent.get("publisher(%s)" % self.canonicalURL) or {}
            preferences[key] = value
            self.parent.set("publisher(%s)" % self.canonicalURL, preferences)
        except Exception as e:  # nocoverage
            self.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )

    # def addGitHubSubscription(self, url, commits):
    #     self.parent._subscriptions = {}
    #     subscription = self.subscription(url)
    #     subscription.set('commits', commits)
    #     self.set('currentSubscription', url)
    #     subscription.save()
    #     return True, None

    def subscription(self, url, protocol=None):
        """Return the (cached) APISubscription for `url`, creating it (and
        loading its data from the DB) if necessary."""
        try:
            if url not in self._subscriptions:

                # Load from DB
                loadFromDB = False

                if not protocol:
                    success, message = getProtocol(url)
                    if success:
                        protocol = message
                        loadFromDB = True

                e = APISubscription(self, protocol)
                if loadFromDB:
                    protocol.loadFromDB()

                self._subscriptions[url] = e

            if self.get("subscriptions") and url in self.get("subscriptions"):
                self._subscriptions[url].exists = True

            return self._subscriptions[url]
        except Exception as e:  # nocoverage
            self.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )

    def subscriptions(self):
        """Return the APISubscription objects for all saved subscription URLs
        that are (still) valid."""
        try:
            subscriptions = []
            if self.get("subscriptions"):
                for url in self.get("subscriptions"):
                    if urlIsValid(url)[0] is True:
                        subscriptions.append(self.subscription(url))
            return subscriptions
        except Exception as e:  # nocoverage
            self.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )

    def update(self):
        """Update all subscriptions of this publisher.

        Returns (success, message, changes)."""
        try:
            self.parent.prepareUpdate()
            changes = False
            if self.parent.online():
                for subscription in self.subscriptions():
                    success, message, change = subscription.update()
                    if change:
                        changes = True
                    if not success:
                        return success, message, changes
                return True, None, changes
            else:
                return (
                    False,
                    ["#(response.notOnline)", "#(response.notOnline.headline)"],
                    False,
                )
        except Exception as e:  # nocoverage
            self.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )

    def save(self):
        """Persist this publisher's canonical URL in the preferences."""
        try:
            publishers = self.parent.get("publishers") or []
            if self.canonicalURL not in publishers:
                publishers.append(self.canonicalURL)
            self.parent.set("publishers", publishers)
        except Exception as e:  # nocoverage
            self.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )

    def resourceByURL(self, url, binary=False, update=False):
        """Caches and returns content of a HTTP resource. If binary is set to True,
        content will be stored and returned as a base64-encoded string"""
        try:
            success, response, mimeType = self.parent.resourceByURL(url, binary, update)

            # Remember the URL so the resource can be purged in delete()
            if success is True:
                resourcesList = self.get("resources") or []
                if url not in resourcesList:
                    resourcesList.append(url)
                    self.set("resources", resourcesList)

            return success, response, mimeType
        except Exception as e:  # nocoverage
            self.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )

    def delete(self):
        """Delete this publisher with all its subscriptions, purge cached
        resources and preferences, and sync the change to the server."""
        try:
            for subscription in self.subscriptions():
                success, message = subscription.delete(
                    calledFromParent=True, updateSubscriptionsOnServer=False
                )
                if not success:
                    return False, message

            # Resources
            self.parent.deleteResources(self.get("resources") or [])

            self.parent.remove("publisher(%s)" % self.canonicalURL)
            publishers = self.parent.get("publishers")
            publishers.remove(self.canonicalURL)
            self.parent.set("publishers", publishers)
            # self.parent.set('currentPublisher', '')

            # Sync to server
            self.parent.uploadSubscriptions()

            self.parent.delegate._publisherHasBeenDeleted(self)
            self.parent.manageMessageQueueConnection()
            self.parent._publishers = {}

            return True, None
        except Exception as e:  # nocoverage
            self.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
class APISubscription(object):
"""\
Represents a subscription, identified and grouped by the canonical URL attribute of
the API responses.
"""
    def __init__(self, parent, protocol):
        try:
            # parent: the APIPublisher instance
            self.parent = parent
            # True once this subscription has been saved to the preferences
            self.exists = False
            self.secretKey = None
            self.protocol = protocol
            # Wire the protocol back to this subscription and the client
            self.protocol.subscription = self
            self.protocol.client = self.parent.parent
            self.url = self.protocol.unsecretURL()
            # Timestamp of the one-time stillAlive() ping (see stillAlive())
            self.stillAliveTouched = None
            self._updatingProblem = None

            # ZMQ: subscribe to update notifications for this subscription
            if self.parent.parent._isSetOnline and self.parent.parent.zmqSubscriptions:
                self.parent.parent.zmqSetup()
                self.parent.parent.registerZMQCallback(
                    self.zmqTopic(), self.zmqCallback
                )
        except Exception as e:  # nocoverage
            self.parent.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
def zmqTopic(self):
return "subscription-%s" % urllib.parse.quote_plus(
self.protocol.shortUnsecretURL()
)
def __repr__(self):
return f'<APISubscription url="{self.url}">'
    def uniqueID(self):
        """Return this subscription's random ID, creating and persisting it on
        first use."""
        try:
            uniqueID = self.get("uniqueID")
            if uniqueID is None or uniqueID == {}:
                # import uuid

                # Garbage(10) — presumably returns a 10-character random
                # string; TODO confirm against the helper's definition
                uniqueID = Garbage(10)
                self.set("uniqueID", uniqueID)
            return uniqueID
        except Exception as e:  # nocoverage
            self.parent.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def zmqCallback(self, message):
        """Handle an incoming ZMQ message for this subscription's topic.

        On a 'pullUpdates' command that did not originate from this very app
        instance, notify the delegate, update the subscription, and store the
        server timestamp on success."""
        try:
            if message:
                data = json.loads(message)
                # NOTE(review): `and` binds tighter than `or` here, so the
                # second alternative does not re-check data["command"] ==
                # "pullUpdates" — confirm this is intended
                if (
                    data["command"] == "pullUpdates"
                    and "sourceAnonymousAppID" not in data
                    or (
                        "sourceAnonymousAppID" in data
                        and data["sourceAnonymousAppID"]
                        != self.parent.parent.anonymousAppID()
                    )
                ):
                    delegate = self.parent.parent.delegate
                    delegate._subscriptionUpdateNotificationHasBeenReceived(self)
                    success, message, changes = self.update()
                    if success:
                        if "serverTimestamp" in data and data["serverTimestamp"]:
                            self.set("serverTimestamp", data["serverTimestamp"])
        except Exception as e:  # nocoverage
            self.parent.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
# TODO: Temporarily suspended because the central API updateSubscription requires
# an APIKey parameter, so is intended only for publishers atm.
# Here, this should be called after a protected font has been installed,
# as it should update the used seats for that font
# def announceChange(self):
# try:
# if not self.parent.parent.user(): return False, 'No user'
# self.set('lastServerSync', int(time.time()))
# parameters = {
# 'command': 'updateSubscription',
# 'anonymousAppID': self.parent.parent.anonymousAppID(),
# 'anonymousUserID': self.parent.parent.user(),
# 'subscriptionURL': self.protocol.url.secretURL(),
# 'secretKey': self.parent.parent.secretKey(),
# }
# success, response, responseObject =
# self.parent.parent.performRequest(self.parent.parent.
# mothership, parameters)
# if not success:
# return False, response
# response = json.loads(response.decode())
# if response['response'] != 'success':
# return False, ['#(response.%s)' % response['response'], '#(response.%s.
# headline)' % response['response']]
# # Success
# return True, None
# except Exception as e: self.parent.parent.handleTraceback(sourceMethod =
# getattr(self, sys._getframe().f_code.co_name), e = e)
def hasProtectedFonts(self):
try:
success, installabeFontsCommand = self.protocol.installableFontsCommand()
for foundry in installabeFontsCommand.foundries:
for family in foundry.families:
for font in family.fonts:
if font.protected:
return True
return False
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
    def stillAlive(self):
        """Report this subscription's API endpoint as alive to the central
        server, in a background thread, at most once per session, and only
        while no user account is linked."""
        try:

            def stillAliveWorker(self):
                # Register endpoint
                parameters = {
                    "url": "typeworld://%s+%s"
                    % (
                        self.protocol.url.protocol,
                        self.parent.canonicalURL.replace("://", "//"),
                    ),
                }
                success, response, responseObject = self.parent.parent.performRequest(
                    self.parent.parent.mothership + "/registerAPIEndpoint", parameters
                )
                if not success:
                    return False, response
                response = json.loads(response.decode())

            # Touch only once
            if not self.parent.parent.user():
                if not self.stillAliveTouched:
                    stillAliveThread = threading.Thread(
                        target=stillAliveWorker, args=(self,)
                    )
                    stillAliveThread.start()
                    self.stillAliveTouched = time.time()
        except Exception as e:  # nocoverage
            self.parent.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def inviteUser(self, targetEmail):
        """Invite another user (by email address) to this subscription via
        the central server.

        Returns (True, None) on success or (False, error) on failure."""
        try:
            if self.parent.parent.online():
                if not self.parent.parent.userEmail():
                    return False, "No source user linked."
                parameters = {
                    "targetUserEmail": targetEmail,
                    "sourceUserEmail": self.parent.parent.userEmail(),
                    "subscriptionURL": self.protocol.secretURL(),
                }
                success, response, responseObject = self.parent.parent.performRequest(
                    self.parent.parent.mothership + "/inviteUserToSubscription",
                    parameters,
                )
                if not success:
                    return False, response
                response = json.loads(response.decode())
                if response["response"] == "success":
                    return True, None
                else:
                    return (
                        False,
                        [
                            "#(response.%s)" % response["response"],
                            "#(response.%s.headline)" % response["response"],
                        ],
                    )
            else:
                return (
                    False,
                    ["#(response.notOnline)", "#(response.notOnline.headline)"],
                )
        except Exception as e:  # nocoverage
            self.parent.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def revokeUser(self, targetEmail):
        """Revoke a previously sent subscription invitation for the given
        email address via the central server.

        Returns (True, None) on success or (False, error) on failure."""
        try:
            if self.parent.parent.online():
                parameters = {
                    "targetUserEmail": targetEmail,
                    "sourceUserEmail": self.parent.parent.userEmail(),
                    "subscriptionURL": self.protocol.secretURL(),
                }
                success, response, responseObject = self.parent.parent.performRequest(
                    self.parent.parent.mothership + "/revokeSubscriptionInvitation",
                    parameters,
                )
                if not success:
                    return False, response
                response = json.loads(response.decode())
                if response["response"] == "success":
                    return True, None
                else:
                    return False, response["response"]
            else:
                return (
                    False,
                    ["#(response.notOnline)", "#(response.notOnline.headline)"],
                )
        except Exception as e:  # nocoverage
            self.parent.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
def invitationAccepted(self):
try:
if self.parent.parent.user():
acceptedInvitations = self.parent.parent.acceptedInvitations()
if acceptedInvitations:
for invitation in acceptedInvitations:
if self.protocol.unsecretURL() == invitation.url:
return True
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def stillUpdating(self):
try:
return self.url in self.parent._updatingSubscriptions
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def name(self, locale=["en"]):
try:
success, installabeFontsCommand = self.protocol.installableFontsCommand()
return installabeFontsCommand.name.getText(locale) or "#(Unnamed)"
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
    def resourceByURL(self, url, binary=False, update=False):
        """Caches and returns content of a HTTP resource. If binary is set to True,
        content will be stored and returned as a base64-encoded string.

        Delegates to the client's cache; additionally remembers the URL so the
        resource can be purged when this subscription gets deleted."""
        try:
            success, response, mimeType = self.parent.parent.resourceByURL(
                url, binary, update
            )

            # Save resource
            if success is True:
                resourcesList = self.get("resources") or []
                if url not in resourcesList:
                    resourcesList.append(url)
                    self.set("resources", resourcesList)

            return success, response, mimeType
        except Exception as e:  # nocoverage
            self.parent.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
def familyByID(self, ID):
try:
success, installabeFontsCommand = self.protocol.installableFontsCommand()
for foundry in installabeFontsCommand.foundries:
for family in foundry.families:
if family.uniqueID == ID:
return family
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def fontByID(self, ID):
try:
success, installabeFontsCommand = self.protocol.installableFontsCommand()
for foundry in installabeFontsCommand.foundries:
for family in foundry.families:
for font in family.fonts:
if font.uniqueID == ID:
return font
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def amountInstalledFonts(self):
try:
return len(self.installedFonts())
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def installedFonts(self):
try:
_list = []
# Get font
success, installabeFontsCommand = self.protocol.installableFontsCommand()
for foundry in installabeFontsCommand.foundries:
for family in foundry.families:
for font in family.fonts:
if self.installedFontVersion(font=font):
if font not in _list:
_list.append(font)
return _list
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def expiringInstalledFonts(self):
try:
fonts = []
# Get font
success, installabeFontsCommand = self.protocol.installableFontsCommand()
for foundry in installabeFontsCommand.foundries:
for family in foundry.families:
for font in family.fonts:
if self.installedFontVersion(font=font) and font.expiry:
if font not in fonts:
fonts.append(font)
return fonts
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def amountOutdatedFonts(self):
try:
return len(self.outdatedFonts())
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def outdatedFonts(self):
try:
_list = []
success, installabeFontsCommand = self.protocol.installableFontsCommand()
# Get font
for foundry in installabeFontsCommand.foundries:
for family in foundry.families:
for font in family.fonts:
installedFontVersion = self.installedFontVersion(font=font)
if (
installedFontVersion
and installedFontVersion != font.getVersions()[-1].number
):
if font not in _list:
_list.append(font.uniqueID)
return _list
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def installedFontVersion(self, fontID=None, font=None):
try:
folder = self.parent.folder()
if fontID and not font:
font = self.fontByID(fontID)
for version in font.getVersions():
path = os.path.join(
folder, self.uniqueID() + "-" + font.filename(version.number)
)
if os.path.exists(path):
return version.number
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
# def fontIsOutdated(self, fontID):
# success, installabeFontsCommand = self.protocol.installableFontsCommand()
# for foundry in installabeFontsCommand.foundries:
# for family in foundry.families:
# for font in family.fonts:
# if font.uniqueID == fontID:
# installedVersion = self.installedFontVersion(fontID)
# return installedVersion and installedVersion != font.getVersions()[-1].number
    def removeFonts(self, fonts, dryRun=False, updateSubscription=True):
        """Uninstall fonts by uniqueID.

        fonts: list of font uniqueIDs to remove.
        dryRun: when True, performs all checks but deletes nothing.
        updateSubscription: forwarded to the protocol for protected fonts.
        Returns (True, None) on success or (False, message_or_payload).
        Protected fonts require a server round-trip; unprotected fonts are
        deleted locally without contacting the server.
        """
        try:
            success, installableFontsCommand = self.protocol.installableFontsCommand()
            uninstallTheseProtectedFontIDs = []
            uninstallTheseUnprotectedFontIDs = []
            folder = self.parent.folder()
            fontIDs = []
            # Preflight: resolve each font's on-disk path and sort IDs into
            # protected (server-managed) vs. unprotected buckets.
            for fontID in fonts:
                fontIDs.append(fontID)
                path = None
                font = self.fontByID(fontID)
                installedFontVersion = self.installedFontVersion(font=font)
                if installedFontVersion:
                    path = os.path.join(
                        folder,
                        self.uniqueID() + "-" + font.filename(installedFontVersion),
                    )
                if not path and not dryRun:
                    return False, "Font path couldn’t be determined (preflight)"
                if font.protected:
                    self.parent.parent.delegate._fontWillUninstall(font)
                    # Test for permissions here by writing and removing a
                    # throwaway file next to the font
                    if not dryRun:
                        try:
                            if (
                                self.parent.parent.testScenario
                                == "simulatePermissionError"
                            ):
                                raise PermissionError
                            else:
                                if not os.path.exists(os.path.dirname(path)):
                                    os.makedirs(os.path.dirname(path))
                                f = open(path + ".test", "w")
                                f.write("test")
                                f.close()
                                os.remove(path + ".test")
                        except PermissionError:
                            self.parent.parent.delegate._fontHasInstalled(
                                False,
                                "Insufficient permission to uninstall font.",
                                font,
                            )
                            return False, "Insufficient permission to uninstall font."
                    assert os.path.exists(path + ".test") is False
                    uninstallTheseProtectedFontIDs.append(fontID)
                else:
                    uninstallTheseUnprotectedFontIDs.append(fontID)
            assert self.parent.parent == self.protocol.client
            assert self.parent.parent.testScenario == self.protocol.client.testScenario
            # Server access
            # Protected fonts
            if uninstallTheseProtectedFontIDs:
                success, payload = self.protocol.removeFonts(
                    uninstallTheseProtectedFontIDs,
                    updateSubscription=updateSubscription,
                )
                font = None
                if success:
                    # # Security check
                    # if set([x.uniqueID for x in payload.assets]) - set(fontIDs) or
                    # set(fontIDs) - set([x.uniqueID for x in payload.assets]):
                    #     return False, 'Incoming fonts’ uniqueIDs mismatch with requested
                    # font IDs.'
                    if len(payload.assets) == 0:
                        return (
                            False,
                            (
                                "No fonts to uninstall in .assets, expected "
                                f"{len(uninstallTheseProtectedFontIDs)} assets"
                            ),
                        )
                    # Process fonts
                    for incomingFont in payload.assets:
                        if incomingFont.uniqueID in fontIDs:
                            # Benign server responses: treat as already removed
                            proceed = ["unknownInstallation", "unknownFont"]
                            if incomingFont.response in proceed:
                                pass
                            # Predefined response messages
                            elif (
                                incomingFont.response != "error"
                                and incomingFont.response != "success"
                            ):
                                return (
                                    False,
                                    [
                                        "#(response.%s)" % incomingFont.response,
                                        "#(response.%s.headline)"
                                        % incomingFont.response,
                                    ],
                                )
                            elif incomingFont.response == "error":
                                return False, incomingFont.errorMessage
                            if incomingFont.response == "success":
                                path = None
                                font = self.fontByID(incomingFont.uniqueID)
                                installedFontVersion = self.installedFontVersion(
                                    font=font
                                )
                                if installedFontVersion:
                                    path = os.path.join(
                                        folder,
                                        self.uniqueID()
                                        + "-"
                                        + font.filename(installedFontVersion),
                                    )
                                if self.parent.parent.testScenario == "simulateNoPath":
                                    path = None
                                if not path and not dryRun:
                                    return (
                                        False,
                                        (
                                            "Font path couldn’t be determined "
                                            "(deleting unprotected fonts)"
                                        ),
                                    )
                                if not dryRun:
                                    os.remove(path)
                                self.parent.parent.delegate._fontHasUninstalled(
                                    True, None, font
                                )
                else:
                    # Server call failed: report payload as the error
                    self.parent.parent.delegate._fontHasUninstalled(
                        False, payload, font
                    )
                    return False, payload
            # Unprotected fonts: delete locally, no server round-trip needed
            if uninstallTheseUnprotectedFontIDs:
                for fontID in uninstallTheseUnprotectedFontIDs:
                    path = None
                    font = self.fontByID(fontID)
                    installedFontVersion = self.installedFontVersion(font=font)
                    if installedFontVersion:
                        path = os.path.join(
                            folder,
                            self.uniqueID() + "-" + font.filename(installedFontVersion),
                        )
                    if self.parent.parent.testScenario == "simulateNoPath":
                        path = None
                    if not path and not dryRun:
                        return (
                            False,
                            (
                                "Font path couldn’t be determined (deleting "
                                "unprotected fonts)"
                            ),
                        )
                    if not dryRun:
                        os.remove(path)
                    self.parent.parent.delegate._fontHasUninstalled(True, None, font)
            return True, None
        except Exception as e:  # nocoverage
            self.parent.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def installFonts(self, fonts):
        """Install fonts from the subscription.

        fonts: list of (fontID, version) pairs.
        Returns (True, None) on success or (False, message_or_payload).
        Requires the Terms of Service to have been accepted first. Font data
        arrives either inline (base64) or via a download URL.
        """
        try:
            # Terms of Service
            if self.get("acceptedTermsOfService") is not True:
                return (
                    False,
                    [
                        "#(response.termsOfServiceNotAccepted)",
                        "#(response.termsOfServiceNotAccepted.headline)",
                    ],
                )
            success, installabeFontsCommand = self.protocol.installableFontsCommand()
            installTheseFontIDs = []
            protectedFonts = False
            versionByFont = {}
            folder = self.parent.folder()
            fontIDs = []
            # Preflight: compute target paths and check write permissions
            for fontID, version in fonts:
                fontIDs.append(fontID)
                versionByFont[fontID] = version
                path = None
                font = self.fontByID(fontID)
                path = os.path.join(
                    folder, self.uniqueID() + "-" + font.filename(version)
                )
                # Any protected/expiring font upgrades the whole request
                if font.protected or font.expiry or font.expiryDuration:
                    protectedFonts = True
                assert path
                assert font
                self.parent.parent.delegate._fontWillInstall(font)
                # Test for permissions here by writing a throwaway file
                try:
                    if self.parent.parent.testScenario == "simulatePermissionError":
                        raise PermissionError
                    else:
                        if not os.path.exists(os.path.dirname(path)):
                            os.makedirs(os.path.dirname(path))
                        f = open(path + ".test", "w")
                        f.write("test")
                        f.close()
                        os.remove(path + ".test")
                except PermissionError:
                    self.parent.parent.delegate._fontHasInstalled(
                        False, "Insufficient permission to install font.", font
                    )
                    return False, "Insufficient permission to install font."
                assert os.path.exists(path + ".test") is False
                installTheseFontIDs.append(fontID)
            # Server access
            success, payload = self.protocol.installFonts(
                fonts, updateSubscription=protectedFonts
            )
            font = None
            if success:
                # Check for empty assets
                if len(payload.assets) == 0:
                    return (
                        False,
                        (
                            "No fonts to install in .assets, expected "
                            f"{len(installTheseFontIDs)} assets"
                        ),
                    )
                # Check if all requested fonts and fontVersions
                # are present in the assets
                for fontID, version in fonts:
                    if not [fontID, version] in [
                        [x.uniqueID, x.version] for x in payload.assets
                    ]:
                        return (
                            False,
                            (
                                f"Font {fontID} with version {version} "
                                "not found in assets"
                            ),
                        )
                # Process fonts
                for incomingFont in payload.assets:
                    if incomingFont.uniqueID in fontIDs:
                        if incomingFont.response == "error":
                            return False, incomingFont.errorMessage
                        # Predefined response messages
                        elif (
                            incomingFont.response != "error"
                            and incomingFont.response != "success"
                        ):
                            return (
                                False,
                                [
                                    "#(response.%s)" % incomingFont.response,
                                    "#(response.%s.headline)" % incomingFont.response,
                                ],
                            )
                        if incomingFont.response == "success":
                            path = None
                            font = self.fontByID(incomingFont.uniqueID)
                            path = os.path.join(
                                folder,
                                self.uniqueID()
                                + "-"
                                + font.filename(versionByFont[incomingFont.uniqueID]),
                            )
                            assert path
                            if not os.path.exists(os.path.dirname(path)):
                                os.makedirs(os.path.dirname(path))
                            # Font data may arrive inline (base64) ...
                            if incomingFont.data and incomingFont.encoding:
                                f = open(path, "wb")
                                f.write(base64.b64decode(incomingFont.data))
                                f.close()
                            # ... or as a URL to download separately
                            elif incomingFont.dataURL:
                                (
                                    success,
                                    response,
                                    responseObject,
                                ) = self.parent.parent.performRequest(
                                    incomingFont.dataURL, method="GET"
                                )
                                if not success:
                                    return False, response
                                else:
                                    f = open(path, "wb")
                                    f.write(response)
                                    f.close()
                            self.parent.parent.delegate._fontHasInstalled(
                                True, None, font
                            )
                # Ping
                self.stillAlive()
                return True, None
            else:
                self.parent.parent.delegate._fontHasInstalled(False, payload, font)
                return False, payload
        except Exception as e:  # nocoverage
            self.parent.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
    def update(self):
        """Refresh the subscription data from its server.

        Returns a (success, message, changes) triple. When the server is
        unreachable, records the problem in self._updatingProblem and reports
        failure without raising. Bookkeeping lists on the parent track which
        subscription URLs are mid-update / already updated.
        """
        try:
            self.parent._updatingSubscriptions.append(self.url)
            # Only attempt the update when the REST host is reachable
            if self.parent.parent.online(self.protocol.url.restDomain.split("/")[0]):
                self.parent.parent.delegate._subscriptionWillUpdate(self)
                self.stillAlive()
                success, message, changes = self.protocol.update()
                if self.url in self.parent._updatingSubscriptions:
                    self.parent._updatingSubscriptions.remove(self.url)
                self._updatingProblem = None
                self.parent.parent._subscriptionsUpdated.append(self.url)
                if not success:
                    self.parent.parent.delegate._subscriptionHasBeenUpdated(
                        self, success, message, changes
                    )
                    return success, message, changes
                # Persist only when the server data actually changed
                if changes:
                    self.save()
                # Success
                self.parent.parent.delegate._subscriptionHasBeenUpdated(
                    self, True, None, changes
                )
                return True, None, changes
            else:
                # Offline: record a user-presentable problem and bail out
                self.parent._updatingSubscriptions.remove(self.url)
                self.parent.parent._subscriptionsUpdated.append(self.url)
                self._updatingProblem = [
                    "#(response.serverNotReachable)",
                    "#(response.serverNotReachable.headline)",
                ]
                self.parent.parent.delegate._subscriptionHasBeenUpdated(
                    self, False, self._updatingProblem, False
                )
                return False, self._updatingProblem, False
        except Exception as e:  # nocoverage
            success, message = self.parent.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
            return False, message, False
    def updatingProblem(self):
        """Return the problem recorded by the last update() attempt, or None.

        The value is a list of localized message keys (e.g.
        ["#(response.serverNotReachable)", ...]); update() resets it to None
        after a successful server contact.
        """
        try:
            return self._updatingProblem
        except Exception as e:  # nocoverage
            self.parent.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
def get(self, key):
try:
preferences = dict(
self.parent.parent.get("subscription(%s)" % self.protocol.unsecretURL())
or {}
)
if key in preferences:
o = preferences[key]
if "Array" in o.__class__.__name__:
o = list(o)
elif "Dictionary" in o.__class__.__name__:
o = dict(o)
return o
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def set(self, key, value):
try:
preferences = dict(
self.parent.parent.get("subscription(%s)" % self.protocol.unsecretURL())
or {}
)
preferences[key] = value
self.parent.parent.set(
"subscription(%s)" % self.protocol.unsecretURL(), preferences
)
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
def save(self):
try:
subscriptions = self.parent.get("subscriptions") or []
if not self.protocol.unsecretURL() in subscriptions:
subscriptions.append(self.protocol.unsecretURL())
self.parent.set("subscriptions", subscriptions)
self.protocol.save()
except Exception as e: # nocoverage
self.parent.parent.handleTraceback( # nocoverage
sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
)
    def delete(self, calledFromParent=False, updateSubscriptionsOnServer=True):
        """Tear down this subscription completely.

        Removes all installed fonts, the secret key, ZMQ callbacks, cached
        resources, and stored preferences; deletes the parent account holder
        when it has no subscriptions left (unless calledFromParent is True).
        Returns (True, None); tracebacks are routed to the central handler.
        """
        try:
            success, installabeFontsCommand = self.protocol.installableFontsCommand()
            # Delete all fonts
            for foundry in installabeFontsCommand.foundries:
                for family in foundry.families:
                    for font in family.fonts:
                        self.removeFonts([font.uniqueID])
            # Key: best-effort deletion of the secret key
            try:
                self.protocol.deleteSecretKey()
            except Exception:
                pass
            # ZMQ
            self.parent.parent.unregisterZMQCallback(self.zmqTopic())
            # Resources
            self.parent.parent.deleteResources(self.get("resources") or [])
            self.parent.parent.remove("subscription(%s)" % self.protocol.unsecretURL())
            # Subscriptions
            subscriptions = self.parent.get("subscriptions") or []
            subscriptions.remove(self.protocol.unsecretURL())
            self.parent.set("subscriptions", subscriptions)
            self.parent._subscriptions = {}
            # # currentSubscription
            # if self.parent.get('currentSubscription') == self.protocol.unsecretURL():
            #     if len(subscriptions) >= 1:
            #         self.parent.set('currentSubscription', subscriptions[0])
            self.parent._subscriptions = {}
            # Cascade: remove the parent when this was its last subscription
            if len(subscriptions) == 0 and calledFromParent is False:
                self.parent.delete()
            self.parent.parent.delegate._subscriptionHasBeenDeleted(self)
            self.parent.parent.manageMessageQueueConnection()
            if updateSubscriptionsOnServer:
                self.parent.parent.uploadSubscriptions()
            return True, None
        except Exception as e:  # nocoverage
            self.parent.parent.handleTraceback(  # nocoverage
                sourceMethod=getattr(self, sys._getframe().f_code.co_name), e=e
            )
| 34.589978 | 88 | 0.511885 |
7f9c007d8b41999c071f85851d6db4759ed3a51f | 20,583 | py | Python | salt/modules/reg.py | herlo/salt | 10ffb8315559c0cfbc10b4adc26cd62ebc462851 | [
"Apache-2.0"
] | null | null | null | salt/modules/reg.py | herlo/salt | 10ffb8315559c0cfbc10b4adc26cd62ebc462851 | [
"Apache-2.0"
] | null | null | null | salt/modules/reg.py | herlo/salt | 10ffb8315559c0cfbc10b4adc26cd62ebc462851 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
===========================
Manage the Windows registry
===========================
The read_key and set_key functions will be updated in Boron to reflect proper
registry usage. The registry has three main components. Hives, Keys, and Values.
-----
Hives
-----
Hives are the main sections of the registry and all begin with the word HKEY.
- HKEY_LOCAL_MACHINE
- HKEY_CURRENT_USER
- HKEY_USER
----
Keys
----
Keys are the folders in the registry. Keys can have many nested subkeys. Keys
can have a value assigned to them under the (Default)
-----------------
Values or Entries
-----------------
Values/Entries are name/data pairs. There can be many values in a key. The
(Default) value corresponds to the Key, the rest are their own value pairs.
:depends: - winreg Python module
'''
# TODO: Figure out the exceptions _winreg can raise and properly catch them
# Import python libs
from __future__ import absolute_import
import logging
# Import third party libs
try:
from salt.ext.six.moves import winreg as _winreg # pylint: disable=import-error,no-name-in-module
HAS_WINDOWS_MODULES = True
except ImportError:
HAS_WINDOWS_MODULES = False
# Import salt libs
import salt.utils
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'reg'
class Registry(object):
    '''
    Delay '_winreg' usage until this module is used.

    Holds lookup tables translating user-facing names (hive names, value-type
    names) into the numeric constants the ``_winreg`` API expects, plus the
    access masks selecting the 32bit vs. 64bit registry view.
    '''
    def __init__(self):
        # Friendly hive names (full and abbreviated) -> winreg hive handles
        self.hkeys = {
            "HKEY_CURRENT_USER": _winreg.HKEY_CURRENT_USER,
            "HKEY_LOCAL_MACHINE": _winreg.HKEY_LOCAL_MACHINE,
            "HKEY_USERS": _winreg.HKEY_USERS,
            "HKCU": _winreg.HKEY_CURRENT_USER,
            "HKLM": _winreg.HKEY_LOCAL_MACHINE,
            "HKU": _winreg.HKEY_USERS,
        }
        # Access masks keyed by use_32bit_registry: True adds KEY_WOW64_32KEY
        # to target the 32bit view on 64bit Windows
        self.registry_32 = {
            True: _winreg.KEY_ALL_ACCESS | _winreg.KEY_WOW64_32KEY,
            False: _winreg.KEY_ALL_ACCESS,
        }
        # Value-type name -> winreg constant ...
        self.vtype = {
            "REG_BINARY": _winreg.REG_BINARY,
            "REG_DWORD": _winreg.REG_DWORD,
            "REG_EXPAND_SZ": _winreg.REG_EXPAND_SZ,
            "REG_MULTI_SZ": _winreg.REG_MULTI_SZ,
            "REG_SZ": _winreg.REG_SZ
        }
        # ... and the reverse mapping, used when reporting read values
        self.vtype_reverse = {
            _winreg.REG_BINARY: "REG_BINARY",
            _winreg.REG_DWORD: "REG_DWORD",
            _winreg.REG_EXPAND_SZ: "REG_EXPAND_SZ",
            _winreg.REG_MULTI_SZ: "REG_MULTI_SZ",
            _winreg.REG_SZ: "REG_SZ"
        }
    def __getattr__(self, k):
        # Attribute-style hive access (e.g. Registry().HKLM); unknown names
        # raise a CommandExecutionError listing the valid hive names.
        try:
            return self.hkeys[k]
        except KeyError:
            msg = 'No hkey named \'{0}. Try one of {1}\''
            hkeys = ', '.join(self.hkeys)
            raise CommandExecutionError(msg.format(k, hkeys))
def __virtual__():
    '''
    Only works on Windows systems with the _winreg module available.
    '''
    if not (salt.utils.is_windows() and HAS_WINDOWS_MODULES):
        return (False, 'reg execution module failed to load: either the system is not Windows or the _winreg python library not available.')
    return __virtualname__
def _key_exists(hive, key, use_32bit_registry=False):
    '''
    Check that the key is found in the registry
    :param str hive: The hive to connect to.
    :param str key: The key to check
    :param bool use_32bit_registry: Look in the 32bit portion of the registry
    :return: Returns True if found, False if not found
    :rtype: bool
    '''
    registry = Registry()
    hkey = registry.hkeys[hive]
    access_mask = registry.registry_32[use_32bit_registry]
    try:
        handle = _winreg.OpenKey(hkey, key, 0, access_mask)
        _winreg.CloseKey(handle)
        return True
    # Fix: dropped the unused ``as exc`` binding; the error details are not
    # needed — any failure to open the key means it is absent/inaccessible.
    except WindowsError:  # pylint: disable=E0602
        return False
def read_key(hkey, path, key=None, use_32bit_registry=False):
    '''
    .. important::
        The name of this function is misleading and will be changed to reflect
        proper usage in the Boron release of Salt. The path option will be
        removed and the key will be the actual key. See
        https://github.com/saltstack/salt/issues/25618

    In order to not break existing state files this function will call the
    read_value function if a key is passed. Key will be passed as the value
    name. If key is not passed, this function will return the default value
    for the key.

    In the Boron release this function will be removed in favor of read_value.

    Read registry key value

    Returns the first unnamed value (Default) as a string.
    Returns none if first unnamed value is empty.
    Returns False if key not found.

    CLI Example:

    .. code-block:: bash

        salt '*' reg.read_key HKEY_LOCAL_MACHINE 'SOFTWARE\\Salt' 'version'
    '''
    # Fix: removed a dead ``ret`` dict that was built here but never used —
    # both branches delegate straight to read_value(), which constructs its
    # own result dict.
    if key:  # This if statement will be removed in Boron
        salt.utils.warn_until('Boron', 'Use reg.read_value to read a registry '
                                       'value. This functionality will be '
                                       'removed in Salt Boron')
        return read_value(hive=hkey,
                          key=path,
                          vname=key,
                          use_32bit_registry=use_32bit_registry)
    return read_value(hive=hkey,
                      key=path,
                      use_32bit_registry=use_32bit_registry)
def read_value(hive, key, vname=None, use_32bit_registry=False):
r'''
Reads a registry value entry or the default value for a key.
:param str hive: The name of the hive. Can be one of the following
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
:param str key: The key (looks like a path) to the value name.
:param str vname: The value name. These are the individual name/data pairs
under the key. If not passed, the key (Default) value will be returned
:param bool use_32bit_registry: Accesses the 32bit portion of the registry
on 64 bit installations. On 32bit machines this is ignored.
:return: A dictionary containing the passed settings as well as the
value_data if successful. If unsuccessful, sets success to False
If vname is not passed:
- Returns the first unnamed value (Default) as a string.
- Returns none if first unnamed value is empty.
- Returns False if key not found.
:rtype: dict
CLI Example:
.. code-block:: bash
salt '*' reg.read_value HKEY_LOCAL_MACHINE 'SOFTWARE\Salt' 'version'
'''
# Setup the return array
ret = {'hive': hive,
'key': key,
'vname': vname,
'vdata': None,
'success': True}
# If no name is passed, the default value of the key will be returned
# The value name is Default
if not vname:
ret['vname'] = '(Default)'
registry = Registry()
hkey = registry.hkeys[hive]
access_mask = registry.registry_32[use_32bit_registry]
try:
handle = _winreg.OpenKey(hkey, key, 0, access_mask)
try:
vdata, vtype = _winreg.QueryValueEx(handle, vname)
if vdata or vdata in [0, '']:
ret['vtype'] = registry.vtype_reverse[vtype]
ret['vdata'] = vdata
else:
ret['comment'] = 'Empty Value'
except WindowsError as exc: # pylint: disable=E0602
ret['vdata'] = ('(value not set)')
ret['vtype'] = 'REG_SZ'
except WindowsError as exc: # pylint: disable=E0602
log.debug(exc)
log.debug('Cannot find key: {0}\\{1}'.format(hive, key))
ret['comment'] = 'Cannot find key: {0}\\{1}'.format(hive, key)
ret['success'] = False
return ret
def set_key(hkey,
            path,
            value,
            key=None,
            vtype='REG_DWORD',
            reflection=True,
            use_32bit_registry=False):
    '''
    .. important::
        Misleadingly named legacy wrapper; will be replaced by set_value in
        the Boron release (see https://github.com/saltstack/salt/issues/25618).
        When ``key`` is passed it is treated as the value name; otherwise the
        key's (Default) value is set.

    Set a registry key.

    vtype: http://docs.python.org/2/library/_winreg.html#value-types

    CLI Example:

    .. code-block:: bash

        salt '*' reg.set_key HKEY_CURRENT_USER 'SOFTWARE\\Salt' 'version' '0.97' REG_DWORD
    '''
    if not key:
        # Modern call style: write the key's (Default) value
        return set_value(hive=hkey,
                         key=path,
                         vdata=value,
                         vtype=vtype,
                         use_32bit_registry=use_32bit_registry)
    # Legacy call style: ``key`` was used as the value name.
    # This branch will be removed in Boron.
    salt.utils.warn_until('Boron', 'Use reg.set_value to set a registry '
                                   'value. This functionality will be '
                                   'removed in Salt Boron')
    return set_value(hive=hkey,
                     key=path,
                     vname=key,
                     vdata=value,
                     vtype=vtype,
                     use_32bit_registry=use_32bit_registry)
def set_value(hive,
              key,
              vname=None,
              vdata=None,
              vtype='REG_SZ',
              reflection=True,
              use_32bit_registry=False):
    '''
    Sets a registry value entry or the default value for a key.
    :param str hive: The name of the hive. Can be one of the following
        - HKEY_LOCAL_MACHINE or HKLM
        - HKEY_CURRENT_USER or HKCU
        - HKEY_USER or HKU
    :param str key: The key (looks like a path) to the value name.
    :param str vname: The value name. These are the individual name/data pairs
        under the key. If not passed, the key (Default) value will be set.
    :param str vdata: The value data to be set.
    :param str vtype: The value type. Can be one of the following:
        - REG_BINARY
        - REG_DWORD
        - REG_EXPAND_SZ
        - REG_MULTI_SZ
        - REG_SZ
    :param bool reflection: A boolean value indicating that the value should
        also be set in the Wow6432Node portion of the registry. Only applies
        to 64 bit Windows. This setting is ignored for 32 bit Windows.
    .. deprecated:: 2015.8.2
        Use `use_32bit_registry` instead. The parameter seems to have no effect
        since Windows 7 / Windows 2008R2 removed support for reflection. The
        parameter will be removed in Boron.
    :return: Returns True if successful, False if not
    :rtype: bool
    CLI Example:
    .. code-block:: bash
        salt '*' reg.set_value HKEY_LOCAL_MACHINE 'SOFTWARE\\Salt' 'version' '2015.5.2'
    '''
    registry = Registry()
    hkey = registry.hkeys[hive]
    # NOTE: ``vtype`` is rebound from the type *name* to the winreg constant
    vtype = registry.vtype[vtype]
    access_mask = registry.registry_32[use_32bit_registry]
    try:
        # CreateKeyEx opens the key, creating it (and intermediates) if absent
        handle = _winreg.CreateKeyEx(hkey, key, 0, access_mask)
        # String-like types must receive str data, coerce defensively
        if vtype == registry.vtype['REG_SZ']\
                or vtype == registry.vtype['REG_BINARY']:
            vdata = str(vdata)
        _winreg.SetValueEx(handle, vname, 0, vtype, vdata)
        _winreg.CloseKey(handle)
        return True
    except (WindowsError, ValueError, TypeError) as exc:  # pylint: disable=E0602
        # Bad hive/type names or registry write failures: log and report False
        log.error(exc, exc_info=True)
        return False
def create_key(hkey,
               path,
               key=None,
               value=None,
               reflection=True,
               use_32bit_registry=False):
    '''
    .. important::
        Misleadingly named legacy wrapper; will be replaced by set_value in
        the Boron release (see https://github.com/saltstack/salt/issues/25618).
        When ``key`` is passed it is treated as the value name to set.

    Create a registry key.

    CLI Example:

    .. code-block:: bash

        salt '*' reg.create_key HKEY_CURRENT_USER 'SOFTWARE\\Salt' 'version' '0.97'
    '''
    if not key:
        # Just create the key; note that ``value`` is ignored in this branch
        return set_value(hive=hkey, key=path, use_32bit_registry=use_32bit_registry)
    # Legacy call style: ``key`` was used as the value name.
    # This branch will be removed in Boron.
    salt.utils.warn_until('Boron', 'Use reg.set_value to create a registry '
                                   'value. This functionality will be '
                                   'removed in Salt Boron')
    return set_value(hive=hkey,
                     key=path,
                     vname=key,
                     vdata=value,
                     use_32bit_registry=use_32bit_registry)
def delete_key(hkey,
               path,
               key=None,
               reflection=True,
               force=False,
               use_32bit_registry=False):
    '''
    .. important::
        The name of this function is misleading and will be changed to reflect
        proper usage in the Boron release of Salt. The path option will be
        removed and the key will be the actual key. See
        https://github.com/saltstack/salt/issues/25618

    In order to not break existing state files this function will call the
    delete_value function if a key is passed. Key will be passed as the value
    name. In the Boron release path will be removed and key will be the path.
    reflection will also be removed.

    Delete a registry key

    CLI Example:

    .. code-block:: bash

        salt '*' reg.delete_key HKEY_CURRENT_USER 'SOFTWARE\\Salt'

    :param str hkey: (will be changed to hive) The name of the hive. Can be one
        of the following
        - HKEY_LOCAL_MACHINE or HKLM
        - HKEY_CURRENT_USER or HKCU
        - HKEY_USER or HKU
    :param str path: (will be changed to key) The key (looks like a path) to
        remove.
    :param str key: (used incorrectly) Will be removed in Boron
    :param bool reflection: Only applies to delete value. If the key parameter
        is passed, this function calls delete_value instead. Will be changed
        in Boron.
    :param bool force: A boolean value indicating that all subkeys should be
        removed as well. If this is set to False (default) and there are
        subkeys, the delete_key function will fail.
    :return: Returns True if successful, False if not. If force=True, the
        results of delete_key_recursive are returned.
    :rtype: bool
    '''
    if key:  # This if statement will be removed in Boron
        salt.utils.warn_until('Boron',
                              'Variable names will be changed to match Windows '
                              'Registry terminology. These changes will be '
                              'made in Boron')
        return delete_value(hive=hkey,
                            key=path,
                            vname=key,
                            reflection=reflection,
                            use_32bit_registry=use_32bit_registry)
    if force:
        # Recursive delete handles subkeys; returns a Deleted/Failed report
        return delete_key_recursive(hkey,
                                    path,
                                    use_32bit_registry=use_32bit_registry)
    registry = Registry()
    hive = registry.hkeys[hkey]
    key = path
    access_mask = registry.registry_32[use_32bit_registry]
    try:
        # Can't use delete_value to delete a key
        # DeleteKey with '' deletes the opened key itself (must have no subkeys)
        key_handle = _winreg.OpenKey(hive, key, 0, access_mask)
        _winreg.DeleteKey(key_handle, '')
        _winreg.CloseKey(key_handle)
        return True
    except WindowsError as exc:  # pylint: disable=E0602
        log.error(exc, exc_info=True)
        return False
def delete_key_recursive(hive, key, use_32bit_registry=False):
    '''
    .. versionadded:: 2015.5.4

    Delete a registry key to include all subkeys.

    :param hive: The name of the hive. Can be one of the following
        - HKEY_LOCAL_MACHINE or HKLM
        - HKEY_CURRENT_USER or HKCU
        - HKEY_USER or HKU
    :param key: The key to remove (looks like a path)
    :return: A dictionary listing the keys that deleted successfully as well as
        those that failed to delete.
    :rtype: dict

    CLI Example:

    .. code-block:: bash

        salt '*' reg.delete_key_recursive HKLM SOFTWARE\\salt
    '''
    # Instantiate the registry object
    registry = Registry()
    hkey = registry.hkeys[hive]
    key_path = key
    access_mask = registry.registry_32[use_32bit_registry]
    if not _key_exists(hive, key, use_32bit_registry):
        return False
    # Functions for traversing the registry tree
    def subkeys(key):
        # Yield subkey names until EnumKey runs off the end (WindowsError)
        i = 0
        while True:
            try:
                subkey = _winreg.EnumKey(key, i)
                yield subkey
                i += 1
            except WindowsError:  # pylint: disable=E0602
                break
    def traverse_registry_tree(hkey, keypath, ret, access_mask):
        # Depth-first: children are appended before their parent so the
        # resulting list can be deleted front-to-back
        key = _winreg.OpenKey(hkey, keypath, 0, access_mask)
        for subkeyname in subkeys(key):
            subkeypath = r'{0}\{1}'.format(keypath, subkeyname)
            ret = traverse_registry_tree(hkey, subkeypath, ret, access_mask)
            ret.append('{0}'.format(subkeypath))
        return ret
    # Get a reverse list of registry keys to be deleted
    key_list = []
    key_list = traverse_registry_tree(hkey, key_path, key_list, access_mask)
    # Add the top level key last, all subkeys must be deleted first
    key_list.append(r'{0}'.format(key_path))
    ret = {'Deleted': [],
           'Failed': []}
    # Delete all sub_keys
    for sub_key_path in key_list:
        try:
            key_handle = _winreg.OpenKey(hkey, sub_key_path, 0, access_mask)
            _winreg.DeleteKey(key_handle, '')
            ret['Deleted'].append(r'{0}\{1}'.format(hive, sub_key_path))
        except WindowsError as exc:  # pylint: disable=E0602
            # Record the failure and keep going; caller inspects the report
            log.error(exc, exc_info=True)
            ret['Failed'].append(r'{0}\{1} {2}'.format(hive, sub_key_path, exc))
    return ret
def delete_value(hive, key, vname=None, reflection=True, use_32bit_registry=False):
    '''
    Delete a registry value entry or the default value for a key.

    :param str hive: The name of the hive. Can be one of the following

        - HKEY_LOCAL_MACHINE or HKLM
        - HKEY_CURRENT_USER or HKCU
        - HKEY_USER or HKU

    :param str key: The key (looks like a path) to the value name.

    :param str vname: The value name. These are the individual name/data pairs
        under the key. If not passed, the key (Default) value will be deleted.

    :param bool reflection: A boolean value indicating that the value should
        also be set in the Wow6432Node portion of the registry. Only applies
        to 64 bit Windows. This setting is ignored for 32 bit Windows.

        .. deprecated:: 2015.8.2
            Use ``use_32bit_registry`` instead. The parameter seems to have no
            effect since Windows 7 / Windows 2008R2 removed support for
            reflection. The parameter will be removed in Boron.

    :param bool use_32bit_registry: Operate on the 32 bit portion of the
        registry on 64 bit installations.

    :return: Returns True if successful, False if not
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' reg.delete_value HKEY_CURRENT_USER 'SOFTWARE\\Salt' 'version'
    '''
    # NOTE: ``reflection`` is intentionally kept (deprecated) but unused.
    registry = Registry()
    hkey = registry.hkeys[hive]
    access_mask = registry.registry_32[use_32bit_registry]

    try:
        key_handle = _winreg.OpenKey(hkey, key, 0, access_mask)
        _winreg.DeleteValue(key_handle, vname)
        _winreg.CloseKey(key_handle)
        return True
    except WindowsError as exc:  # pylint: disable=E0602
        log.error(exc, exc_info=True)
        return False
| 32.880192 | 136 | 0.624399 |
872002756a9a0862e74b1130f0b4c4766bb9e758 | 1,496 | py | Python | SoloLearnPython3/Part_2/sketch_5a.py | mahmoudmheisen91/Python_EDU | 3ca08f65bb219335502159a6d13617b9a73c3b7e | [
"MIT"
] | null | null | null | SoloLearnPython3/Part_2/sketch_5a.py | mahmoudmheisen91/Python_EDU | 3ca08f65bb219335502159a6d13617b9a73c3b7e | [
"MIT"
] | null | null | null | SoloLearnPython3/Part_2/sketch_5a.py | mahmoudmheisen91/Python_EDU | 3ca08f65bb219335502159a6d13617b9a73c3b7e | [
"MIT"
] | null | null | null | # Dicitionary:
dic = {
"one": 1,
"two": 2,
"three": 3,
}
print(dic["one"])
dic["four"] = 4
dic[5] = 6
print(dic)
print(6 in dic)
print(5 in dic)
print(dic.get("one"))
print(dic.get(7, "do not exists"))
print(8*"-")
# Tubles:
tup = (1, 2, 3, 4)
print(tup[1])
tup = 1, 2, 3, 4, 4
print(tup)
tup = (1,)
print(tup)
nums = (55, 44, 33, 22)
print(max(min(nums[0:2]), abs(-42)))
print(8*"-")
# Lists:
s = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
print(s[1:3])
print(s[:4])
print(s[5:]) # 9 will print
print(s[:4:-1])
print(s[5::-1])
print(s[-13:-1])
print(8*"-")
a = [i**2 for i in range(5)]
print(a)
b = [i**2 for i in range(6) if i % 2 == 0]
print(b)
print(8*"-")
print(max([1, 4, 9, 2, 5, 6, 8]))
print(sum([1, 2, 3, 4, 5]))
print(8*"-")
nums = [55, 44, 33, 22, 11]
if all([i > 5 for i in nums]):
print("All larger than 5")
if any([i % 2 == 0 for i in nums]):
print("At least one is even")
a = [i for i in range(10)]
a_bool = [i % 2 == 0 for i in a]
print(a_bool)
print(any(a_bool))
for v in enumerate(nums):
print(v)
print(8*"-")
# String:
a = [1, 2]
msg1 = "nums: {0}, {1}".format(a[0], a[1])
msg2 = "numx: {x}, {y}".format(y=6, x=33)
print(msg1)
print(msg2)
print(", ".join(["spam", "eggs", "ham"]))
print("Hello ME".replace("ME", "world"))
print("This is a sentence.".startswith("This"))
print("This is a sentence.".endswith("sentence."))
print("this is a sentence.".upper())
print("AN ALL CAPS SENTENCE".lower())
print("spam, eggs, ham".split(", "))
print(8*"-")
| 17.395349 | 50 | 0.548128 |
b184b63d63736cf55cafdd419ceb5d41022b5b17 | 16,059 | py | Python | _scripts/graveyard.py | Erotemic/hotspotter | 3cfa4015798e21385455b937f9083405c4b3cf53 | [
"Apache-2.0"
] | 2 | 2015-07-19T02:55:06.000Z | 2021-07-07T02:38:26.000Z | _scripts/graveyard.py | Erotemic/hotspotter | 3cfa4015798e21385455b937f9083405c4b3cf53 | [
"Apache-2.0"
] | 5 | 2017-03-11T16:30:26.000Z | 2021-04-10T16:42:10.000Z | _scripts/graveyard.py | Erotemic/hotspotter | 3cfa4015798e21385455b937f9083405c4b3cf53 | [
"Apache-2.0"
] | 10 | 2015-07-19T03:05:42.000Z | 2021-08-24T14:48:59.000Z |
print('warping img.shape=%r to a new shape=%r' % (img.shape, dsize))
# 1vM Agg info graveyard
#cx2_max_ax = np.cumsum(cx2_nFeats)
#_ax2_cid = [[cid_]*nFeats for (cid_, nFeats) in iter(zip(cx2_cid, cx2_nFeats))]
#ax2_cid = np.array(list(chain.from_iterable(_ax2_cid)))
# dont need no cid stuff here
#qfx2_cx = np.array([cx2_cid.index(cid) for cid in qfx2_cid.flat])
#qfx2_cx.shape = qfx2_cid.shape
#print('Baseline SIFT matching')
#print('len(qdesc) = %d' % len(qdesc))
#print('len(desc2) = %d' % len(desc2))
#print('len(matches) = %d' % len(matches))
#delta = 2000
#im_12 = warp.panorama(H_12,rchip1,rchip2,delta,delta)
#cx2_score_1vM = [np.sum(fs) for fs in cx2_fs_1vM]
#ax2_cx, ax2_fx, ax2_desc, flann_1vM = aggregate_1vM(cx2_cid, cx2_desc)
#matches_scores = assign_feat_matches_1vM(qcx, cx2_cid, cx2_desc, ax2_cx, ax2_fx, flann_1vM)
#cx2_num_fm_1v1 = [len(_) for _ in cx2_fs_1vM]
#rchip_path = cx2_rchip_path[0]
#sift_path = cx2_sift_path[0]
#sift = fc2.load_features(sift_path)
#kpts, desc = sift
#qcx = 1
#def FLANN_Searcher(object):
#def __init__(self, qdesc):
#self.flann = FLANN()
#self.flann.build_index(qdesc, **__FLANN_PARAMS__)
#def neareset(desc2, K=1):
#(idx21, dists21) = flann.nn_index(desc2, K, **__FLANN_PARAMS__)
#idx21.shape = (desc2.shape[0], K)
#dists21.shape = (desc2.shape[0], K)
#flann.delete_index()
#return idx21.T, dists21.T
##flann.save_index(path)
##flann.load_index(path, qdesc)
# NOTE(review): dead experimental code ("graveyard" file). Indentation was lost
# in this copy, and the body references ``qdesc``/``desc2``, which are not the
# parameters ``qcx``/``cx2`` — as written this cannot run. Kept verbatim.
def desc_matcher(qcx, cx2):
''' BruteForce, BruteForce-L1, BruteForce-Hamming,
BruteForceHamming(2), FlannBased '''
# The 'BruteForce' matcher is created and immediately discarded; only the
# Hamming matcher is actually used.
matcher = cv2.DescriptorMatcher_create('BruteForce')
matcher = cv2.DescriptorMatcher_create('BruteForce-Hamming')
matches = matcher.match(qdesc, desc2)
return matches
#####
# DIRECTION 2 of __test_homog():
####
#with Timer(msg=testname+' SV21'):
#H21, inliers21 = func_homog(kpts2_m, kpts1_m, xy_thresh12_sqrd)
#print(' * num inliers21 = %d' % inliers21.sum())
#fm1_SV2 = fm12[inliers21,:]
#df2.show_matches(qcx, cx, hs_cpaths, cx2_kpts, fm1_SV2, fignum=fignum+1, title=testname+' SV2')
#df2.imshow(rchip1_H2, fignum=fignum+2, title=testname+' warped querychip1')
#df2.imshow(rchip2_H2, fignum=fignum+3, title=testname+' warped reschip2')
#print H2
#rchip1_H2 = cv2.warpPerspective(rchip1, inv(H2), rchip2.shape[0:2][::-1])
#rchip2_H2 = cv2.warpPerspective(rchip2, H2, rchip1.shape[0:2][::-1])
# NOTE(review): dead experimental code — indentation lost in this copy.
# ``flann_1v1``, ``__FLANN_PARAMS__`` and ``__1v1_RAT_THRESH__`` are not
# defined in this block; the Hamming match result ``m`` is computed but
# never used. Kept verbatim for reference only.
def FREAK_assign_feat_matches_1v1(qcx, cx2_cid, cx2_freak):
print('Assigning 1v1 feature matches from cx=%d to %d chips' % (qcx, len(cx2_cid)))
qfreak = cx2_freak[qcx]
matcher = cv2.DescriptorMatcher_create('BruteForce-Hamming')
# One feature-match list and one feature-score list per chip index.
cx2_fm = [[] for _ in xrange(len(cx2_cid))]
cx2_fs = [[] for _ in xrange(len(cx2_cid))]
for cx, freak in enumerate(cx2_freak):
sys.stdout.write('.')
sys.stdout.flush()
m = matcher.match(freak, qfreak)
if cx == qcx: continue
(fx2_qfx, fx2_dist) = flann_1v1.nn_index(freak, 2, **__FLANN_PARAMS__)
# Lowe's ratio test
fx2_ratio = np.divide(fx2_dist[:,1]+1, fx2_dist[:,0]+1)
fx, = np.where(fx2_ratio > __1v1_RAT_THRESH__)
qfx = fx2_qfx[fx,0]
cx2_fm[cx] = np.array(zip(qfx, fx))
cx2_fs[cx] = fx2_ratio[fx]
sys.stdout.write('DONE')
flann_1v1.delete_index()
return cx2_fm, cx2_fs
def unpack_freak(cx2_desc):
    """Expand packed FREAK byte descriptors into boolean bit arrays.

    Each descriptor is a sequence of bytes; every byte is unpacked into its
    8 bits (most significant bit first), yielding one numpy bool array per
    descriptor.

    :param cx2_desc: iterable (one entry per chip) of iterables of
        byte-valued descriptors.
    :return: list (per chip) of lists of ``np.ndarray`` bool bit vectors.

    FIX: the original built ``cx2_unpacked_freak`` but never returned it,
    so the function always returned ``None``.
    """
    cx2_unpacked_freak = []
    for descs in cx2_desc:
        unpacked_desc = []
        for d in descs:
            # '{0:#010b}' renders e.g. 255 as '0b11111111'; strip the '0b'.
            bitstr = ''.join([('{0:#010b}'.format(byte))[2:] for byte in d])
            d_bool = np.array([int(bit) for bit in bitstr], dtype=bool)
            unpacked_desc.append(d_bool)
        cx2_unpacked_freak.append(unpacked_desc)
    return cx2_unpacked_freak
# NOTE(review): loose dead snippet — ``kpts1_m``/``kpts2_m`` and
# ``__xy_thresh_percent__`` are not defined here. The threshold is a fraction
# of each image's keypoint-extent diagonal, squared. Kept verbatim.
# SPATIAL VERIFICATION PARAMS SETUP
img1_extent = (kpts1_m[0:2,:].max(1) - kpts1_m[0:2,:].min(1))[0:2]
img2_extent = (kpts2_m[0:2,:].max(1) - kpts2_m[0:2,:].min(1))[0:2]
xy_thresh12_sqrd = np.sum(img1_extent**2) * (__xy_thresh_percent__**2)
xy_thresh21_sqrd = np.sum(img2_extent**2) * (__xy_thresh_percent__**2)
__PRINT_THRESH_INFO__ = False
if __PRINT_THRESH_INFO__:
print('---------------------------------------')
print(' * Threshold is %.1f%% of diagonal length' % (__xy_thresh_percent__*100))
print('Computing the xy_threshold:')
print(' * img1_extent = %r ' % img1_extent)
print(' * img2_extent = %r ' % img2_extent)
print(' * img1_diag_len = %.2f ' % np.sqrt(np.sum(img1_extent**2)))
print(' * img2_diag_len = %.2f ' % np.sqrt(np.sum(img2_extent**2)))
print(' * xy_thresh12_sqrd=%.2f' % np.sqrt(xy_thresh12_sqrd))
print(' * xy_thresh21_sqrd=%.2f' % np.sqrt(xy_thresh21_sqrd))
print('---------------------------------------')
def gen_subset_split(full_set, M, K):
    """Yield up to K distinct ``(complement, subset)`` splits of *full_set*.

    Each subset has M elements drawn by shuffling *full_set* in place
    (NOTE: the caller's array IS mutated). The RNG is re-seeded for
    repeatability, so results are deterministic across calls.

    :param full_set: 1-d array-like of elements to split (shuffled in place).
    :param M: size of each yielded subset.
    :param K: number of splits to attempt; fewer may be yielded if 100
        consecutive draws all duplicate previously seen subsets.
    :yields: ``(complement, subset)`` tuples, where ``complement`` is
        ``np.setdiff1d(full_set, subset)`` as a tuple.

    FIX: ``xrange`` replaced with ``range`` (works on both Python 2 and 3);
    unused ``split_list`` local removed.
    """
    np.random.seed(0)  # repeatability
    seen = set([])
    for kx in range(K):
        np.random.shuffle(full_set)
        failsafe = 0
        while True:
            np.random.shuffle(full_set)
            # Ordered M-prefix of the shuffled set is the candidate subset.
            subset = tuple(full_set[0:M])
            if subset not in seen:
                seen.add(subset)
                compliment = tuple(np.setdiff1d(full_set, subset))
                yield (compliment, subset)
                break
            failsafe += 1
            if failsafe > 100:
                # Give up on this split rather than loop forever.
                break
# NOTE(review): dead plotting/debug code — indentation lost in this copy.
# Compares histogram vs Gaussian-KDE probability estimates of descriptor
# entropy across several bandwidths; ``df2``, ``plt``, ``_hist_prob_x`` and
# ``_gkde_prob_x`` are not defined in this block. Kept verbatim.
def test_entropy_internals(desc):
fig = df2.figure(1, doclf=True)
max_bw = 5
for ix in range(max_bw):
bw_factor = (ix + 1)**2
print('bw=%d' % bw_factor)
prob_x1 = _hist_prob_x(desc, bw_factor)
prob_x2 = _gkde_prob_x(desc, bw_factor)
# Shannon entropy (bits) of each per-descriptor probability vector.
entropy1 = [-(px * np.log2(px)).sum() for px in prob_x1]
entropy2 = [-(px * np.log2(px)).sum() for px in prob_x2]
x = sorted(entropy1)
y = sorted(entropy2)
fig = df2.figure(1, plotnum=(max_bw, 2, ix*2+1), title='sorted bw=%d' % bw_factor)
plt.plot(x, y)
fig = df2.figure(1, plotnum=(max_bw, 2, ix*2+2), title='scatter bw=%d' % bw_factor)
plt.plot(entropy1, entropy2, 'go')
fig.tight_layout()
df2.update()
# Renormalize descriptor to have an l2 norm of 1
desc1 = np.array(desc1, dtype=float)
l2norm1 = np.sqrt((desc1**2).sum(1))
desc1 /= l2norm1[:, np.newaxis]
desc2 = np.array(desc2, dtype=float)
l2norm2 = np.sqrt((desc2**2).sum(1))
desc2 /= l2norm2[:, np.newaxis]
desc_hist = np.histogram(desc1[0], bins=32, density=True)[0]
# NOTE(review): dead debug helper — prints per-row L2 norm statistics of a
# descriptor matrix (rows are descriptors). Indentation lost in this copy.
def check(desc):
norm = np.sqrt((desc**2).sum(1))
print('norm: %r ' % norm)
print('shape: %r ' % norm.shape)
print('mean: %r ' % np.mean(norm))
print('std: %r ' % np.std(norm))
check(desc1)
check(desc2)
print('DESC1: %r ' % np.sqrt((desc1**2).sum(1)))
print('DESC2: %r ' % np.sqrt((desc2**2).sum(1)))
print('DESC1: %r ' % np.sqrt((desc1**2).sum(0)))
print('DESC2: %r ' % np.sqrt((desc2**2).sum(0)))
print rank
orgres.qcxs
orgres.cxs
def get_sort_and_x(scores):
    """Return *scores* sorted descending plus the indices producing that order.

    :param scores: array-like of comparable values.
    :return: ``(sorted_values, sort_indices)`` — both numpy arrays, where
        ``sorted_values[i] == scores[sort_indices[i]]``.
    """
    arr = np.array(scores)
    order = np.argsort(arr)[::-1]  # argsort ascending, then reverse
    return arr[order], order
tt_sort, tt_sortx = get_sort_and_x(allres.top_true.scores)
tf_sort, tf_sortx = get_sort_and_x(allres.top_false.scores)
#orgres = allres.top_true
#qcx, cx, score, rank = orgres.iter().next()
#res = qcx2_res[qcx]
#fm = res.cx2_fm_V[cx]
## Get matching descriptors
#desc1 = cx2_desc[qcx][fm[:,0]]
#desc2 = cx2_desc[cx ][fm[:,1]]
# NOTE(review): dead experimental driver — indentation was lost in this copy,
# so this block is not runnable as written; kept verbatim with review notes.
# It relies on names not defined here (run_experiment, ld2, helpers,
# far_appart_splits, np) and on Python 2 idioms (xrange, list-returning map).
def leave_out(expt_func=None, **kwargs):
'''
do with TF-IDF on the zebra data set.
Let M be the total number of *animals* (not images and not chips) in an experimental data set.
Do a series of leave-M-out (M >= 1) experiments on the TF-IDF scoring,
where the "left out" M are M different zebras,
so that there are no images of these zebras in the images used to form the vocabulary.
The vocabulary is formed from the remaining N-M animals.
Test how well TF-IDF recognition does with these M animals.
Repeat for different subsets of M animals.
import experiments as expt
from experiments import *
'''
# ---
# Testing should have animals I have seen and animals I haven't seen.
# Make sure num descriptors -per- word is about the same as Oxford
# ---
# Notes from Monday:
# 1) Larger training set (see how animals in training do vs animals out of training)
# 2) More detailed analysis of failures
# 3) Aggregate scores across different pictures of the same animal
# NOTE(review): the vars() check is odd — ``expt_func`` is a parameter and is
# always in scope; the ``is None`` test alone would suffice. Verify intent.
if not 'expt_func' in vars() or expt_func is None:
expt_func = run_experiment
# Load tables
hs = ld2.HotSpotter(ld2.DEFAULT, load_basic=True)
# Grab names
nx2_name = hs.tables.nx2_name
cx2_nx = hs.tables.cx2_nx
nx2_cxs = np.array(hs.get_nx2_cxs())
nx2_nChips = np.array(map(len, nx2_cxs))
num_uniden = nx2_nChips[0] + nx2_nChips[1]
nx2_nChips[0:3] = 0 # remove uniden names
# Separate singleton / multitons
multiton_nxs, = np.where(nx2_nChips > 1)
singleton_nxs, = np.where(nx2_nChips == 1)
all_nxs = np.hstack([multiton_nxs, singleton_nxs])
print('[expt] There are %d names' % len(all_nxs))
print('[expt] There are %d multiton names' % len(multiton_nxs))
print('[expt] There are %d singleton names' % len(singleton_nxs))
print('[expt] There are %d unidentified animals' % num_uniden)
#
multiton_cxs = nx2_cxs[multiton_nxs]
singleton_cxs = nx2_cxs[singleton_nxs]
multiton_nChips = map(len, multiton_cxs)
print('[expt] multion #cxs stats: %r' % helpers.printable_mystats(multiton_nChips))
# Find test/train splits
num_names = len(multiton_cxs)
# How to generate samples/splits for names
num_nsplits = 3
nsplit_size = (num_names//num_nsplits)
# How to generate samples/splits for chips
csplit_size = 1 # number of indexed chips per Jth experiment
# Generate name splits
kx2_name_split = far_appart_splits(multiton_nxs, nsplit_size, num_nsplits)
result_map = {}
kx = 0
# run K experiments
all_cxs = nx2_cxs[list(all_nxs)]
for kx in xrange(num_nsplits):
print('***************')
print('[expt] Leave M=%r names out iteration: %r/%r' % (nsplit_size, kx+1, num_nsplits))
print('***************')
# Get name splits
(test_nxs, train_nxs) = kx2_name_split[kx]
# Lock in training set
# train_nxs
train_cxs_list = nx2_cxs[list(train_nxs)]
train_samp = np.hstack(train_cxs_list)
#
# Choose test / index smarter
#test_samp = np.hstack(test_cxs_list) # Test on half
#indx_samp = np.hstack([test_samp, train_samp]) # Search on all
#
# Generate chip splits
test_cxs_list = nx2_cxs[list(test_nxs)]
test_nChip = map(len, test_cxs_list)
print('[expt] testnames #cxs stats: %r' % helpers.printable_mystats(test_nChip))
test_cx_splits = []
for ix in xrange(len(test_cxs_list)):
cxs = test_cxs_list[ix]
num_csplits = len(cxs)//csplit_size
cxs_splits = far_appart_splits(cxs, csplit_size, num_csplits)
test_cx_splits.append(cxs_splits)
max_num_csplits = max(map(len, test_cx_splits))
# Put them into experiment sets
jx2_test_cxs = [[] for _ in xrange(max_num_csplits)]
jx2_index_cxs = [[] for _ in xrange(max_num_csplits)]
for ix in xrange(len(test_cx_splits)):
cxs_splits = test_cx_splits[ix]
for jx in xrange(max_num_csplits):
if jx >= len(cxs_splits):
break
# NOTE(review): the tuple order below was deliberately flipped from the
# commented-out line above — index set first, then test set.
#ix_test_cxs, ix_index_cxs = cxs_splits[jx]
ix_index_cxs, ix_test_cxs = cxs_splits[jx]
jx2_test_cxs[jx].append(ix_test_cxs)
jx2_index_cxs[jx].append(ix_index_cxs)
jx = 0
for jx in xrange(max_num_csplits): # run K*J experiments
# Lock in test and index set
#all_cxs # np.hstack(jx2_test_cxs[jx])
indx_samp = np.hstack(jx2_index_cxs[jx]+[train_samp])
# Run all the queries (which have indexed ground truth)
test_samp = hs.get_cxs_in_sample(indx_samp)
# Set samples
hs.set_samples(test_samp, train_samp, indx_samp)
mj_label = '[LNO:%r/%r;%r/%r]' % (kx+1, num_nsplits, jx+1, max_num_csplits)
# Run experiment
print('[expt] <<<<<<<<')
print('[expt] Run expt_func()')
print('[expt] M=%r, J=%r' % (nsplit_size,csplit_size))
print(mj_label)
#rss = helpers.RedirectStdout('[expt %d/%d]' % (kx, K)); rss.start()
expt_locals = expt_func(hs, pprefix=mj_label, **kwargs)
print('[expt] Finished expt_func()')
print('[expt] mth iteration: %r/%r' % (kx+1, num_nsplits))
print('[expt] jth iteration: %r/%r' % (jx+1, max_num_csplits))
print('[expt] >>>>>>>>')
# Only the last jx result per kx survives — presumably intentional? verify.
result_map[kx] = expt_locals['allres']
#rss.stop(); rss.dump()
return locals()
'''
this is interesting
0 - 1 = -1
0 - 0 - 1 = -1? idk, why?
(x - y) = (z)
-1*(x - y) = -1*(z)
-(x + y) = -(z)
-x + y = -z
let x=0
let y=1
let z=-1
(0 - 1) = (-1)
-1*(0 - 1) = -1*(-1)
-(0 + 1) = -(-1)
-0 + 1 = --1
-0 + 1 = 1
1 = 1 + 0
1 = 1
let x=0
let a=0
let y=1
let z=-1
(a - x - y) = (z)
-1*(a - x - y) = -1*(z)
-(a - x + y) = -(z)
-a - x + y = -z
(0 - 0 - 1) = (-1)
-1*(0 - 0 - 1) = -1*(-1)
-(0 - 0 + 1) = -(-1)
-0 - 0 + 1 = --1
'''
#print('lets think about z')
#print('z = %r' % z)
#print('radius = %r' % radius)
#print('ideal z1:z2 = %r:%r' % (z1, z2))
#print('truncated iz1:iz2 = %r:%r' % (iz1, iz2))
print('x=%r, y=%r' % (x,y))
print('radius = %r' % radius)
print('rchip.shape = %r' % (rchip.shape,))
print('ix_range = [%3d:%3d], x_radius = %r' % (ix1, ix2, xm))
print('iy_range = [%3d:%3d], y_radius = %r' % (iy1, iy2, ym))
print('------')
df2.imshow(warp1, plotnum=(2,4,1), fignum=2)
df2.imshow(warp2, plotnum=(2,4,2), fignum=2)
#df2.imshow(warp3, plotnum=(2,4,3), fignum=2)
#df2.imshow(warp4, plotnum=(2,4,4), fignum=2)
#df2.imshow(warp5, plotnum=(2,4,5), fignum=2)
#df2.imshow(warp6, plotnum=(2,4,6), fignum=2)
#df2.imshow(warp6, plotnum=(2,4,7), fignum=2)
#df2.imshow(warp6, plotnum=(2,4,8), fignum=2)
warp1 = warp_image(subchip, H)
warp2 = warp_image(subchip, H2)
#warp3 = warp_image(subchip, H3)
#warp4 = warp_image(subchip, H, True)
#warp5 = warp_image(subchip, H2, True)
#warp6 = warp_image(subchip, H3, True)
#H = np.eye(3)
#print('rchip.shape = %r', (rchip.shape,))
#print('subchip.shape = %r', (subchip.shape,))
#print('warp_subchip.shape = %r', (warp_subchip.shape,))
# WARP
M = H1
img = subchip
# NOTE(review): dead debug helper — indentation lost in this copy.
# Maps the image border coordinates through homography M (homogeneous
# transform + perspective divide) and converts them to an xywh box.
# ``border_coordinates``, ``homogonize`` and ``border2_xywh`` are not
# defined in this block. Kept verbatim.
def warp_border(img, M):
#print('------------')
#print('M = \n%r ' % (M,))
#print('img.shape=%r ' % (img.shape,))
#print('img.max()=%r, img.min()=%r ' % (img.max(), img.min()))
#print('img.dtype=%r ' % (img.dtype,))
coord_list = np.array(border_coordinates(img))
coord_homog = homogonize(coord_list)
Mcoord_homog = M.dot(coord_homog)
# Perspective divide: drop back from homogeneous to (x, y) pairs.
Mcoord_list = np.vstack((Mcoord_homog[0] / Mcoord_homog[-1],
Mcoord_homog[1] / Mcoord_homog[-1])).T
#print('coord_list: ')
#print(coord_list.T)
#print('Mcoord_list: ')
#print(Mcoord_list.T)
Mxywh = border2_xywh(Mcoord_list)
#print('Mxywh')
#print(Mxywh)
#print('------------')
#print('Rchip warp test')
#print('subchip warp test')
#warp_border(subchip, A)
#print target_scale_factor(rchip1, H)
#print target_scale_factor(subchip, A)
print('-----')
print('rchip.shape = %r' % (rchip.shape,))
print('(y,x) = (%.2f, %.2f)' % (y,x))
print('scale = %.4f' % scale)
print('radius = %.4f' % radius)
print('-----')
print('subchip = rchip[%d:%d, %d:%d]' % (iy1, iy2, ix1, ix2))
print('subchip.shape = %r' % (map(int,subchip.shape),))
print('-----')
| 36.168919 | 100 | 0.606389 |
5895763c362cd081e47c5e50f6798fb8e73ef3ce | 2,211 | py | Python | contrib/runners/noop_runner/tests/unit/test_nooprunner.py | machao19902/st2 | 6768a529af1b3c12109cbfeae19d3cf7fdb71bb7 | [
"Apache-2.0"
] | 1 | 2020-11-09T21:05:33.000Z | 2020-11-09T21:05:33.000Z | contrib/runners/noop_runner/tests/unit/test_nooprunner.py | machao19902/st2 | 6768a529af1b3c12109cbfeae19d3cf7fdb71bb7 | [
"Apache-2.0"
] | 3 | 2021-03-25T23:57:10.000Z | 2021-03-26T00:01:05.000Z | contrib/runners/noop_runner/tests/unit/test_nooprunner.py | machao19902/st2 | 6768a529af1b3c12109cbfeae19d3cf7fdb71bb7 | [
"Apache-2.0"
] | null | null | null | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import uuid
import mock
import st2tests.config as tests_config
tests_config.parse_args()
from unittest2 import TestCase
from st2common.constants import action as action_constants
from st2tests.fixturesloader import FixturesLoader
from noop_runner import noop_runner
class TestNoopRunner(TestCase):
    """Unit tests for the ``noop`` action runner."""

    fixtures_loader = FixturesLoader()

    def test_noop_command_executes(self):
        """A noop action should succeed with a zero-exit, not-failed result."""
        models = TestNoopRunner.fixtures_loader.load_models(
            fixtures_pack='generic', fixtures_dict={'actions': ['noop.yaml']})
        action_db = models['actions']['noop.yaml']
        runner = TestNoopRunner._get_runner(action_db)

        status, result, _ = runner.run({})

        # FIX: use assertEqual — assertEquals is a deprecated unittest alias.
        self.assertEqual(status, action_constants.LIVEACTION_STATUS_SUCCEEDED)
        self.assertEqual(result['failed'], False)
        self.assertEqual(result['succeeded'], True)
        self.assertEqual(result['return_code'], 0)

    @staticmethod
    def _get_runner(action_db):
        """Build a NoopRunner wired with minimal fake context for *action_db*."""
        runner = noop_runner.NoopRunner(uuid.uuid4().hex)
        runner.action = action_db
        runner.action_name = action_db.name
        runner.liveaction_id = uuid.uuid4().hex
        runner.entry_point = None
        runner.context = dict()
        runner.callback = dict()
        runner.libs_dir_path = None
        # Auth token is never inspected beyond its .token attribute.
        runner.auth_token = mock.Mock()
        runner.auth_token.token = 'mock-token'
        return runner
| 36.85 | 79 | 0.729986 |
df67fcbfa9e9debdbcedf11f7e1325833c136189 | 6,190 | py | Python | venv/lib/python3.6/site-packages/django_celery_beat/admin.py | slarkjm0803/autobets | f92a5d999acaf5d7c83ca2768a260c2282eabbee | [
"MIT"
] | 1 | 2020-03-06T14:29:12.000Z | 2020-03-06T14:29:12.000Z | venv/lib/python3.6/site-packages/django_celery_beat/admin.py | slarkjm0803/autobets | f92a5d999acaf5d7c83ca2768a260c2282eabbee | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/django_celery_beat/admin.py | slarkjm0803/autobets | f92a5d999acaf5d7c83ca2768a260c2282eabbee | [
"MIT"
] | null | null | null | """Periodic Task Admin interface."""
from __future__ import absolute_import, unicode_literals
from django import forms
from django.conf import settings
from django.contrib import admin
from django.forms.widgets import Select
from django.template.defaultfilters import pluralize
from django.utils.translation import ugettext_lazy as _
from celery import current_app
from celery.utils import cached_property
from kombu.utils.json import loads
from .models import (
PeriodicTask, PeriodicTasks,
IntervalSchedule, CrontabSchedule,
SolarSchedule
)
from .utils import is_database_scheduler
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text # noqa
class TaskSelectWidget(Select):
    """Select widget offering the registered Celery task names as choices."""

    celery_app = current_app
    _choices = None

    def tasks_as_choices(self):
        # Touch ``_modules`` so the default task modules are imported first.
        _ = self._modules  # noqa
        names = [name for name in self.celery_app.tasks
                 if not name.startswith('celery.')]
        names.sort()
        # A leading blank pair lets the form render "no selection".
        return (('', ''), ) + tuple(zip(names, names))

    @property
    def choices(self):
        # Build the task list lazily, once.
        if self._choices is None:
            self._choices = self.tasks_as_choices()
        return self._choices

    @choices.setter
    def choices(self, _):
        # ``ChoiceField.__init__`` assigns ``self.choices = choices``;
        # swallow that so the lazily-built task list wins.
        pass

    @cached_property
    def _modules(self):
        self.celery_app.loader.import_default_modules()
class TaskChoiceField(forms.ChoiceField):
    """ChoiceField whose options come from the registered task names."""

    widget = TaskSelectWidget

    def valid_value(self, value):
        # Accept any value: the widget's choice list is advisory only,
        # since tasks may be registered on workers this process cannot see.
        return True
class PeriodicTaskForm(forms.ModelForm):
    """Form that lets you create and modify periodic tasks."""

    regtask = TaskChoiceField(
        label=_('Task (registered)'),
        required=False,
    )
    task = forms.CharField(
        label=_('Task (custom)'),
        required=False,
        max_length=200,
    )

    class Meta:
        """Form metadata."""

        model = PeriodicTask
        exclude = ()

    def clean(self):
        """Prefer the registered-task dropdown; require *some* task name."""
        data = super(PeriodicTaskForm, self).clean()
        registered = data.get('regtask')
        if registered:
            data['task'] = registered
        if not data['task']:
            exc = forms.ValidationError(_('Need name of task'))
            self._errors['task'] = self.error_class(exc.messages)
            raise exc
        return data

    def _clean_json(self, field):
        """Validate that *field* contains parseable JSON; return it unchanged."""
        value = self.cleaned_data[field]
        try:
            loads(value)
        except ValueError as exc:
            raise forms.ValidationError(
                _('Unable to parse JSON: %s') % exc,
            )
        return value

    def clean_args(self):
        return self._clean_json('args')

    def clean_kwargs(self):
        return self._clean_json('kwargs')
class PeriodicTaskAdmin(admin.ModelAdmin):
    """Admin-interface for periodic tasks."""

    form = PeriodicTaskForm
    model = PeriodicTask
    celery_app = current_app
    list_display = ('__str__', 'enabled')
    actions = ('enable_tasks', 'disable_tasks', 'run_tasks')
    fieldsets = (
        (None, {
            'fields': ('name', 'regtask', 'task', 'enabled'),
            'classes': ('extrapretty', 'wide'),
        }),
        ('Schedule', {
            'fields': ('interval', 'crontab', 'solar'),
            'classes': ('extrapretty', 'wide', ),
        }),
        ('Arguments', {
            'fields': ('args', 'kwargs'),
            'classes': ('extrapretty', 'wide', 'collapse', 'in'),
        }),
        ('Execution Options', {
            'fields': ('expires', 'queue', 'exchange', 'routing_key'),
            'classes': ('extrapretty', 'wide', 'collapse', 'in'),
        }),
    )

    def changelist_view(self, request, extra_context=None):
        # Warn in the template when beat is not using the database scheduler,
        # since edits made here would otherwise have no effect.
        extra_context = extra_context or {}
        scheduler = getattr(settings, 'CELERY_BEAT_SCHEDULER', None)
        extra_context['wrong_scheduler'] = not is_database_scheduler(scheduler)
        return super(PeriodicTaskAdmin, self).changelist_view(
            request, extra_context)

    def get_queryset(self, request):
        base = super(PeriodicTaskAdmin, self).get_queryset(request)
        return base.select_related('interval', 'crontab', 'solar')

    def _message_rows(self, request, count, template):
        """Flash *template* filled with *count* and its pluralized forms."""
        self.message_user(
            request,
            template.format(
                count,
                pluralize(count),
                pluralize(count, _('was,were')),
            ),
        )

    def _set_enabled(self, request, queryset, enabled, template):
        """Bulk-toggle ``enabled`` on *queryset* and notify the scheduler."""
        rows_updated = queryset.update(enabled=enabled)
        PeriodicTasks.update_changed()
        self._message_rows(request, rows_updated, template)

    def enable_tasks(self, request, queryset):
        self._set_enabled(request, queryset, True,
                          _('{0} task{1} {2} successfully enabled'))
    enable_tasks.short_description = _('Enable selected tasks')

    def disable_tasks(self, request, queryset):
        self._set_enabled(request, queryset, False,
                          _('{0} task{1} {2} successfully disabled'))
    disable_tasks.short_description = _('Disable selected tasks')

    def run_tasks(self, request, queryset):
        """Queue every selected task for immediate execution."""
        self.celery_app.loader.import_default_modules()
        # First resolve and parse everything, then dispatch — so a bad JSON
        # payload fails before any task has been queued.
        calls = []
        for periodic_task in queryset:
            calls.append((self.celery_app.tasks.get(periodic_task.task),
                          loads(periodic_task.args),
                          loads(periodic_task.kwargs)))
        task_ids = [task.delay(*args, **kwargs)
                    for (task, args, kwargs) in calls]
        self._message_rows(request, len(task_ids),
                           _('{0} task{1} {2} successfully run'))
    run_tasks.short_description = _('Run selected tasks')
# Expose the schedule models in the Django admin with the default ModelAdmin.
admin.site.register(IntervalSchedule)
admin.site.register(CrontabSchedule)
admin.site.register(SolarSchedule)
# PeriodicTask gets the customized admin with run/enable/disable actions.
admin.site.register(PeriodicTask, PeriodicTaskAdmin)
35b7bfe3d156fa75af95a311843a98ffe77d6d90 | 686 | py | Python | src/pgassets/common/pgCheckbox.py | Blackdevil132/machineLearning | de048bb1473994052f8ed1afb11a15b7833b506d | [
"MIT"
] | 1 | 2019-05-04T07:28:19.000Z | 2019-05-04T07:28:19.000Z | src/pgassets/common/pgCheckbox.py | Blackdevil132/machineLearning | de048bb1473994052f8ed1afb11a15b7833b506d | [
"MIT"
] | 3 | 2019-04-29T09:20:11.000Z | 2019-04-29T09:23:22.000Z | src/pgassets/common/pgCheckbox.py | Blackdevil132/machineLearning | de048bb1473994052f8ed1afb11a15b7833b506d | [
"MIT"
] | null | null | null | import pygame
class pgCheckbox:
    """A simple pygame checkbox: a rectangle that toggles an X mark."""

    def __init__(self, pos, size, color):
        """Create the checkbox.

        :param pos: (x, y) top-left corner in screen pixels.
        :param size: (width, height) in pixels.
        :param color: pygame-compatible color used for the X mark.
        """
        self.rect = pygame.Rect(*pos, *size)
        self.color = color
        self.checked = False

    def collidepoint(self, pos):
        """Return True if *pos* (x, y) lies inside the checkbox rectangle."""
        return self.rect.collidepoint(*pos)

    def update_status(self):
        """Toggle the checked state."""
        # FIX: plain boolean toggle instead of (checked + 1) % 2 arithmetic,
        # which silently turned the bool attribute into 0/1 ints.
        self.checked = not self.checked

    def get_status(self):
        """Return the current checked state (truthy when checked)."""
        return self.checked

    def draw(self, screen):
        """Draw the outline on *screen*, plus an X across the box when checked."""
        if self.checked:
            pygame.draw.line(screen, self.color, self.rect.topleft, self.rect.bottomright, 5)
            pygame.draw.line(screen, self.color, self.rect.bottomleft, self.rect.topright, 5)
        pygame.draw.rect(screen, (0, 0, 0), self.rect, 2)
7515d921fc2d701110bec0448beac610dedb611d | 8,883 | py | Python | conans/assets/templates/new_v2_cmake.py | andriyor/conan | 0f98c1e915ebeb25fafd3bd4ad247cef3efdc16f | [
"MIT"
] | null | null | null | conans/assets/templates/new_v2_cmake.py | andriyor/conan | 0f98c1e915ebeb25fafd3bd4ad247cef3efdc16f | [
"MIT"
] | null | null | null | conans/assets/templates/new_v2_cmake.py | andriyor/conan | 0f98c1e915ebeb25fafd3bd4ad247cef3efdc16f | [
"MIT"
] | null | null | null | conanfile_sources_v2 = """from conans import ConanFile
from conan.tools.cmake import CMakeToolchain, CMake, cmake_layout
class {package_name}Conan(ConanFile):
name = "{name}"
version = "{version}"
# Optional metadata
license = "<Put the package license here>"
author = "<Put your name here> <And your email here>"
url = "<Package recipe repository url here, for issues about the package>"
description = "<Description of {package_name} here>"
topics = ("<Put some tag here>", "<here>", "<and here>")
# Binary configuration
settings = "os", "compiler", "build_type", "arch"
options = {{"shared": [True, False], "fPIC": [True, False]}}
default_options = {{"shared": False, "fPIC": True}}
# Sources are located in the same place as this recipe, copy them to the recipe
exports_sources = "CMakeLists.txt", "src/*", "include/*"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def layout(self):
cmake_layout(self)
def generate(self):
tc = CMakeToolchain(self)
tc.generate()
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
cmake = CMake(self)
cmake.install()
def package_info(self):
self.cpp_info.libs = ["{name}"]
"""
test_conanfile_v2 = """import os
from conans import ConanFile, tools
from conan.tools.cmake import CMake, cmake_layout
class {package_name}TestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
# VirtualBuildEnv and VirtualRunEnv can be avoided if "tools.env.virtualenv:auto_use" is defined
# (it will be defined in Conan 2.0)
generators = "CMakeDeps", "CMakeToolchain", "VirtualBuildEnv", "VirtualRunEnv"
apply_env = False
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def layout(self):
cmake_layout(self)
def test(self):
if not tools.cross_building(self):
cmd = os.path.join(self.cpp.build.bindirs[0], "example")
self.run(cmd, env="conanrun")
"""
test_cmake_v2 = """cmake_minimum_required(VERSION 3.15)
project(PackageTest CXX)
find_package({name} CONFIG REQUIRED)
add_executable(example src/example.cpp)
target_link_libraries(example {name}::{name})
"""
cmake_v2 = """cmake_minimum_required(VERSION 3.15)
project({name} CXX)
add_library({name} src/{name}.cpp)
target_include_directories({name} PUBLIC include)
set_target_properties({name} PROPERTIES PUBLIC_HEADER "include/{name}.h")
install(TARGETS {name} DESTINATION "."
PUBLIC_HEADER DESTINATION include
RUNTIME DESTINATION bin
ARCHIVE DESTINATION lib
LIBRARY DESTINATION lib
)
"""
source_h = """#pragma once
#ifdef _WIN32
#define {name}_EXPORT __declspec(dllexport)
#else
#define {name}_EXPORT
#endif
{name}_EXPORT void {name}();
"""
source_cpp = r"""#include <iostream>
#include "{name}.h"
void {name}(){{
#ifdef NDEBUG
std::cout << "{name}/{version}: Hello World Release!\n";
#else
std::cout << "{name}/{version}: Hello World Debug!\n";
#endif
// ARCHITECTURES
#ifdef _M_X64
std::cout << " {name}/{version}: _M_X64 defined\n";
#endif
#ifdef _M_IX86
std::cout << " {name}/{version}: _M_IX86 defined\n";
#endif
#if __i386__
std::cout << " {name}/{version}: __i386__ defined\n";
#endif
#if __x86_64__
std::cout << " {name}/{version}: __x86_64__ defined\n";
#endif
// Libstdc++
#if defined _GLIBCXX_USE_CXX11_ABI
std::cout << " {name}/{version}: _GLIBCXX_USE_CXX11_ABI "<< _GLIBCXX_USE_CXX11_ABI << "\n";
#endif
// COMPILER VERSIONS
#if _MSC_VER
std::cout << " {name}/{version}: _MSC_VER" << _MSC_VER<< "\n";
#endif
#if _MSVC_LANG
std::cout << " {name}/{version}: _MSVC_LANG" << _MSVC_LANG<< "\n";
#endif
#if __cplusplus
std::cout << " {name}/{version}: __cplusplus" << __cplusplus<< "\n";
#endif
#if __INTEL_COMPILER
std::cout << " {name}/{version}: __INTEL_COMPILER" << __INTEL_COMPILER<< "\n";
#endif
#if __GNUC__
std::cout << " {name}/{version}: __GNUC__" << __GNUC__<< "\n";
#endif
#if __GNUC_MINOR__
std::cout << " {name}/{version}: __GNUC_MINOR__" << __GNUC_MINOR__<< "\n";
#endif
#if __clang_major__
std::cout << " {name}/{version}: __clang_major__" << __clang_major__<< "\n";
#endif
#if __clang_minor__
std::cout << " {name}/{version}: __clang_minor__" << __clang_minor__<< "\n";
#endif
#if __apple_build_version__
std::cout << " {name}/{version}: __apple_build_version__" << __apple_build_version__<< "\n";
#endif
// SUBSYSTEMS
#if __MSYS__
std::cout << " {name}/{version}: __MSYS__" << __MSYS__<< "\n";
#endif
#if __MINGW32__
std::cout << " {name}/{version}: __MINGW32__" << __MINGW32__<< "\n";
#endif
#if __MINGW64__
std::cout << " {name}/{version}: __MINGW64__" << __MINGW64__<< "\n";
#endif
#if __CYGWIN__
std::cout << " {name}/{version}: __CYGWIN__" << __CYGWIN__<< "\n";
#endif
}}
"""
test_main = """#include "{name}.h"
int main() {{
{name}();
}}
"""
def get_cmake_lib_files(name, version, package_name="Pkg"):
    """Return the file map for a CMake library package fixture.

    Every template is rendered with *name*/*version* (the recipes also get
    *package_name*); the result maps destination paths to rendered contents.
    """
    render = dict(name=name, version=version)
    files = {}
    files["conanfile.py"] = conanfile_sources_v2.format(package_name=package_name, **render)
    files["src/{}.cpp".format(name)] = source_cpp.format(**render)
    files["include/{}.h".format(name)] = source_h.format(**render)
    files["CMakeLists.txt"] = cmake_v2.format(**render)
    files["test_package/conanfile.py"] = test_conanfile_v2.format(package_name=package_name, **render)
    files["test_package/src/example.cpp"] = test_main.format(name=name)
    files["test_package/CMakeLists.txt"] = test_cmake_v2.format(name=name)
    return files
conanfile_exe = """from conans import ConanFile
from conan.tools.cmake import CMakeToolchain, CMake, cmake_layout
class {package_name}Conan(ConanFile):
name = "{name}"
version = "{version}"
# Optional metadata
license = "<Put the package license here>"
author = "<Put your name here> <And your email here>"
url = "<Package recipe repository url here, for issues about the package>"
description = "<Description of {package_name} here>"
topics = ("<Put some tag here>", "<here>", "<and here>")
# Binary configuration
settings = "os", "compiler", "build_type", "arch"
# Sources are located in the same place as this recipe, copy them to the recipe
exports_sources = "CMakeLists.txt", "src/*", "include/*"
def layout(self):
cmake_layout(self)
def generate(self):
tc = CMakeToolchain(self)
tc.generate()
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
cmake = CMake(self)
cmake.install()
"""
cmake_exe_v2 = """cmake_minimum_required(VERSION 3.15)
project({name} CXX)
add_executable({name} src/{name}.cpp src/main.cpp)
target_include_directories({name} PUBLIC include)
install(TARGETS {name} DESTINATION "."
RUNTIME DESTINATION bin
ARCHIVE DESTINATION lib
LIBRARY DESTINATION lib
)
"""
test_conanfile_exe_v2 = """import os
from conans import ConanFile, tools
class {package_name}TestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
# VirtualRunEnv can be avoided if "tools.env.virtualenv:auto_use" is defined
# (it will be defined in Conan 2.0)
generators = "VirtualRunEnv"
apply_env = False
def test(self):
if not tools.cross_building(self):
self.run("{name}", env="conanrun")
"""
def get_cmake_exe_files(name, version, package_name="Pkg"):
    """Return the file map for a CMake executable package fixture.

    Mirrors get_cmake_lib_files() but renders the executable recipe,
    a main.cpp entry point and the run-only test_package recipe.
    """
    render = dict(name=name, version=version)
    files = {}
    files["conanfile.py"] = conanfile_exe.format(package_name=package_name, **render)
    files["src/{}.cpp".format(name)] = source_cpp.format(**render)
    files["include/{}.h".format(name)] = source_h.format(**render)
    files["src/main.cpp"] = test_main.format(name=name)
    files["CMakeLists.txt"] = cmake_exe_v2.format(**render)
    files["test_package/conanfile.py"] = test_conanfile_exe_v2.format(package_name=package_name,
                                                                      **render)
    return files
| 28.934853 | 100 | 0.61083 |
be0028f22dd600c72d7c18a32550a58f15d4c511 | 3,062 | py | Python | sage/datasets.py | senehasubramaniyan/SAGE_ | c9966f8c0e2a4d137bed6fdd7bf7ff49167e0872 | [
"MIT"
] | null | null | null | sage/datasets.py | senehasubramaniyan/SAGE_ | c9966f8c0e2a4d137bed6fdd7bf7ff49167e0872 | [
"MIT"
] | null | null | null | sage/datasets.py | senehasubramaniyan/SAGE_ | c9966f8c0e2a4d137bed6fdd7bf7ff49167e0872 | [
"MIT"
] | null | null | null | import os
import pandas as pd
github_data_url = 'https://github.com/senehasubramaniyan/SAGE_/tree/main/data/'
def airbnb():
'''
Airbnb listing data from Kaggle.
Located at: https://www.kaggle.com/dgomonov/new-york-city-airbnb-open-data
'''
path = os.path.join(github_data_url, 'AB_NYC_2019.csv')
df = pd.read_table(path, sep=',', header=0, index_col=None)
# Type conversions
df['name'] = df['name'].astype(str)
df['host_name'] = df['host_name'].astype(str)
df['last_review'] = pd.to_datetime(df['last_review'])
return df
def bank():
    """Load the UCI bank-marketing dataset as a DataFrame.

    Source: https://archive.ics.uci.edu/ml/datasets/bank+marketing
    The 'Success' target column is converted from 'yes'/'no' to bool.
    """
    columns = [
        'Age', 'Job', 'Marital', 'Education', 'Default', 'Balance', 'Housing',
        'Loan', 'Contact', 'Day', 'Month', 'Duration', 'Campaign', 'Prev Days',
        'Prev Contacts', 'Prev Outcome', 'Success']
    frame = pd.read_table(
        os.path.join(github_data_url, 'bank-full.csv'),
        sep=';', header=None, index_col=None, skiprows=1, names=columns)
    # Binarise the label.
    frame['Success'] = frame['Success'].eq('yes')
    return frame
def Malware():
    """Load the bundled malware feature table.

    Returns:
        pandas.DataFrame with 16 feature columns named c1..c16.
    """
    columns = ['c1', 'c2', 'c3', 'c4', 'c5', 'c6', 'c7', 'c8',
               'c9', 'c10', 'c11', 'c12', 'c13', 'c14', 'c15', 'c16']
    path = os.path.join(github_data_url, 'Malware.csv')
    # BUG FIX: the original nested the reader inside itself —
    # pd.read_table(df=pd.read_table(...)) — which raises a TypeError
    # (read_table has no 'df' argument and no positional path was given).
    df = pd.read_table(path, sep=',', header=0, index_col=None, names=columns)
    return df
def bike():
    """Load the Kaggle bike-sharing demand dataset as a DataFrame.

    Source: https://www.kaggle.com/c/bike-sharing-demand
    The 'datetime' column is expanded into Year/Month/Day/Hour (placed
    first) and all column names are title-cased.
    """
    frame = pd.read_table(os.path.join(github_data_url, 'bike.csv'),
                          sep=',', header=0, index_col=None)
    original_columns = frame.columns.tolist()
    # Expand the timestamp into its calendar components, then drop it.
    stamps = pd.to_datetime(frame['datetime'])
    frame['year'] = stamps.dt.year
    frame['month'] = stamps.dt.month
    frame['day'] = stamps.dt.day
    frame['hour'] = stamps.dt.hour
    frame = frame.drop('datetime', axis=1)
    # Reorder (calendar columns first) and title-case the names.
    frame = frame[['year', 'month', 'day', 'hour'] + original_columns[1:]]
    frame.columns = [column.title() for column in frame.columns]
    return frame
def credit():
    """Load the South German credit-quality dataset as a DataFrame.

    Source: https://archive.ics.uci.edu/ml/datasets/South+German+Credit+%28UPDATE%29
    The .asc file is whitespace-separated with one header row to skip.
    """
    columns = [
        'Checking Status', 'Duration', 'Credit History', 'Purpose',
        'Credit Amount', 'Savings Account/Bonds', 'Employment Since',
        'Installment Rate', 'Personal Status', 'Debtors/Guarantors',
        'Residence Duration', 'Property Type', 'Age',
        'Other Installment Plans', 'Housing Ownership',
        'Number Existing Credits', 'Job', 'Number Liable', 'Telephone',
        'Foreign Worker', 'Good Customer',
    ]
    data_file = os.path.join(github_data_url, 'SouthGermanCredit.asc')
    return pd.read_table(data_file, sep=' ', header=None, index_col=None,
                         names=columns, skiprows=1)
| 34.022222 | 102 | 0.619203 |
4ca1690cbbb87284e415aed21f10bc53fee81bb7 | 19,279 | py | Python | pywps/app/Service.py | ldesousa/PyWPS | d2fcde33d52e2da7d0ddaf76931f1275ae444e6d | [
"MIT"
] | null | null | null | pywps/app/Service.py | ldesousa/PyWPS | d2fcde33d52e2da7d0ddaf76931f1275ae444e6d | [
"MIT"
] | null | null | null | pywps/app/Service.py | ldesousa/PyWPS | d2fcde33d52e2da7d0ddaf76931f1275ae444e6d | [
"MIT"
] | null | null | null | ##################################################################
# Copyright 2016 OSGeo Foundation, #
# represented by PyWPS Project Steering Committee, #
# licensed under MIT, Please consult LICENSE.txt for details #
##################################################################
import logging
import tempfile
from werkzeug.exceptions import HTTPException
from werkzeug.wrappers import Request, Response
from pywps import WPS, OWS
from pywps._compat import PY2
from pywps._compat import urlopen
from pywps._compat import urlparse
from pywps.app.basic import xml_response
from pywps.app.WPSRequest import WPSRequest
import pywps.configuration as config
from pywps.exceptions import MissingParameterValue, NoApplicableCode, InvalidParameterValue, FileSizeExceeded, \
StorageNotSupported, FileURLNotSupported
from pywps.inout.inputs import ComplexInput, LiteralInput, BoundingBoxInput
from pywps.dblog import log_request, update_response
from pywps import response
from collections import deque, OrderedDict
import os
import sys
import uuid
import copy
import requests
import shutil
# Shared module-level logger; handlers are attached in Service.__init__.
LOGGER = logging.getLogger("PYWPS")
class Service(object):
    """ The top-level object that represents a WPS service. It's a WSGI
    application.

    :param processes: A list of :class:`~Process` objects that are
                      provided by this service.
    :param cfgfiles: A list of configuration files
    """

    def __init__(self, processes=[], cfgfiles=None):
        # NOTE(review): mutable default argument; harmless here because the
        # list is only iterated, never mutated.
        # ordered dict of processes
        self.processes = OrderedDict((p.identifier, p) for p in processes)

        if cfgfiles:
            config.load_configuration(cfgfiles)

        # Attach a file handler only when both the log file and level are
        # configured; otherwise install a no-op handler.
        if config.get_config_value('logging', 'file') and config.get_config_value('logging', 'level'):
            LOGGER.setLevel(getattr(logging, config.get_config_value('logging', 'level')))
            fh = logging.FileHandler(config.get_config_value('logging', 'file'))
            fh.setFormatter(logging.Formatter(config.get_config_value('logging', 'format')))
            LOGGER.addHandler(fh)
        else:  # NullHandler | StreamHandler
            LOGGER.addHandler(logging.NullHandler())

    def get_capabilities(self, wps_request, uuid):
        """Build the GetCapabilities response for this service."""
        response_cls = response.get_response("capabilities")
        return response_cls(wps_request, uuid, processes=self.processes)

    def describe(self, wps_request, uuid, identifiers):
        """Build the DescribeProcess response for the given identifiers."""
        response_cls = response.get_response("describe")
        return response_cls(wps_request, uuid, processes=self.processes,
                            identifiers=identifiers)

    def execute(self, identifier, wps_request, uuid):
        """Parse and perform Execute WPS request call

        :param identifier: process identifier string
        :param wps_request: pywps.WPSRequest structure with parsed inputs, still in memory
        :param uuid: string identifier of the request
        """
        self._set_grass()
        try:
            process = self.processes[identifier]
            # make deep copy of the process instance
            # so that processes are not overriding each other
            # just for execute
            process = copy.deepcopy(process)
            # each execution gets its own temporary working directory
            workdir = os.path.abspath(config.get_config_value('server', 'workdir'))
            tempdir = tempfile.mkdtemp(prefix='pywps_process_', dir=workdir)
            process.set_workdir(tempdir)
        except KeyError:
            raise InvalidParameterValue("Unknown process '%r'" % identifier, 'Identifier')
        return self._parse_and_execute(process, wps_request, uuid)

    def _parse_and_execute(self, process, wps_request, uuid):
        """Parse and execute request

        Converts the raw request inputs into typed input objects, verifies
        that all mandatory inputs are present, applies the asReference
        output flags, then runs the process.
        """
        LOGGER.debug('Checking if all mandatory inputs have been passed')
        data_inputs = {}
        for inpt in process.inputs:
            # Replace the dicts with the dict of Literal/Complex inputs
            # set the input to the type defined in the process.
            request_inputs = None
            if inpt.identifier in wps_request.inputs:
                request_inputs = wps_request.inputs[inpt.identifier]

            if not request_inputs:
                # fall back to the process-declared default, when one exists
                if inpt.data_set:
                    data_inputs[inpt.identifier] = [inpt.clone()]
            else:
                if isinstance(inpt, ComplexInput):
                    data_inputs[inpt.identifier] = self.create_complex_inputs(
                        inpt, request_inputs)
                elif isinstance(inpt, LiteralInput):
                    data_inputs[inpt.identifier] = self.create_literal_inputs(
                        inpt, request_inputs)
                elif isinstance(inpt, BoundingBoxInput):
                    data_inputs[inpt.identifier] = self.create_bbox_inputs(
                        inpt, request_inputs)

        # any required input that is still missing is a client error
        for inpt in process.inputs:
            if inpt.identifier not in data_inputs:
                if inpt.min_occurs > 0:
                    LOGGER.error('Missing parameter value: %s', inpt.identifier)
                    raise MissingParameterValue(
                        inpt.identifier, inpt.identifier)

        wps_request.inputs = data_inputs

        # set as_reference to True for all the outputs specified as reference
        # if the output is not required to be raw
        if not wps_request.raw:
            for wps_outpt in wps_request.outputs:
                is_reference = wps_request.outputs[
                    wps_outpt].get('asReference', 'false')
                if is_reference.lower() == 'true':
                    # check if store is supported
                    # NOTE(review): store_supported is compared as the string
                    # 'false', not the boolean False — presumably set that way
                    # by Process; confirm.
                    if process.store_supported == 'false':
                        raise StorageNotSupported(
                            'The storage of data is not supported for this process.')
                    is_reference = True
                else:
                    is_reference = False

                for outpt in process.outputs:
                    if outpt.identifier == wps_outpt:
                        outpt.as_reference = is_reference

        # catch error generated by process code
        try:
            wps_response = process.execute(wps_request, uuid)
        except Exception as e:
            e_follow = e
            if not isinstance(e, NoApplicableCode):
                e_follow = NoApplicableCode('Service error: %s' % e)
            if wps_request.raw:
                # raw mode cannot return an ExceptionReport document, so the
                # exception body is sent directly as the HTTP response
                resp = Response(e_follow.get_body(), mimetype='application/xml')
                resp.call_on_close(process.clean)
                return resp
            else:
                raise e_follow

        # get the specified output as raw
        if wps_request.raw:
            for outpt in wps_request.outputs:
                for proc_outpt in process.outputs:
                    if outpt == proc_outpt.identifier:
                        return Response(proc_outpt.data)

            # if the specified identifier was not found raise error
            raise InvalidParameterValue('')

        return wps_response

    def _get_complex_input_handler(self, href):
        """Return function for parsing and storing complexdata

        :param href: href object yes or not
        """
        def href_handler(complexinput, datain):
            """<wps:Reference /> handler"""
            # save the reference input in workdir
            tmp_file = _build_input_file_name(
                href=datain.get('href'),
                workdir=complexinput.workdir,
                extension=_extension(complexinput))

            try:
                reference_file = _openurl(datain)
                data_size = reference_file.headers.get('Content-Length', 0)
            except Exception as e:
                raise NoApplicableCode('File reference error: %s' % e)

            # if the response did not return a 'Content-Length' header then
            # calculate the size
            if data_size == 0:
                LOGGER.debug('no Content-Length, calculating size')

            # check if input file size was not exceeded
            complexinput.calculate_max_input_size()
            max_byte_size = complexinput.max_size * 1024 * 1024
            if int(data_size) > int(max_byte_size):
                raise FileSizeExceeded('File size for input exceeded.'
                                       ' Maximum allowed: %i megabytes' %
                                       complexinput.max_size, complexinput.identifier)

            try:
                with open(tmp_file, 'wb') as f:
                    data_size = 0
                    # stream in chunks, re-checking the limit as bytes arrive
                    for chunk in reference_file.iter_content(chunk_size=1024):
                        data_size += len(chunk)
                        if int(data_size) > int(max_byte_size):
                            raise FileSizeExceeded('File size for input exceeded.'
                                                   ' Maximum allowed: %i megabytes' %
                                                   complexinput.max_size, complexinput.identifier)
                        f.write(chunk)
            except Exception as e:
                raise NoApplicableCode(e)

            complexinput.file = tmp_file
            complexinput.url = datain.get('href')
            complexinput.as_reference = True

        def file_handler(complexinput, datain):
            """<wps:Reference /> handler.
            Used when href is a file url."""
            # check if file url is allowed
            _validate_file_input(href=datain.get('href'))
            # save the file reference input in workdir
            tmp_file = _build_input_file_name(
                href=datain.get('href'),
                workdir=complexinput.workdir,
                extension=_extension(complexinput))

            try:
                inpt_file = urlparse(datain.get('href')).path
                inpt_file = os.path.abspath(inpt_file)
                os.symlink(inpt_file, tmp_file)
                LOGGER.debug("Linked input file %s to %s.", inpt_file, tmp_file)
            except Exception as e:
                # TODO: handle os.symlink on windows
                # raise NoApplicableCode("Could not link file reference: %s" % e)
                LOGGER.warn("Could not link file reference")
                # fall back to copying when symlinking is unavailable
                shutil.copy2(inpt_file, tmp_file)

            complexinput.file = tmp_file
            complexinput.url = datain.get('href')
            complexinput.as_reference = True

        def data_handler(complexinput, datain):
            """<wps:Data> ... </wps:Data> handler"""
            complexinput.data = datain.get('data')

        # dispatch: inline data vs remote reference vs local file reference
        if href:
            if urlparse(href).scheme == 'file':
                return file_handler
            else:
                return href_handler
        else:
            return data_handler

    def create_complex_inputs(self, source, inputs):
        """Create new ComplexInput as clone of original ComplexInput
        because of inputs can be more then one, take it just as Prototype

        :return collections.deque:
        """
        outinputs = deque(maxlen=source.max_occurs)

        for inpt in inputs:
            data_input = source.clone()
            frmt = data_input.supported_formats[0]
            if 'mimeType' in inpt:
                if inpt['mimeType']:
                    frmt = data_input.get_format(inpt['mimeType'])
                else:
                    frmt = data_input.data_format

            if frmt:
                data_input.data_format = frmt
            else:
                raise InvalidParameterValue(
                    'Invalid mimeType value %s for input %s' %
                    (inpt.get('mimeType'), source.identifier),
                    'mimeType')

            data_input.method = inpt.get('method', 'GET')

            # get the referenced input otherwise get the value of the field
            href = inpt.get('href', None)
            complex_data_handler = self._get_complex_input_handler(href)
            complex_data_handler(data_input, inpt)

            outinputs.append(data_input)
        if len(outinputs) < source.min_occurs:
            raise MissingParameterValue(description="Given data input is missing", locator=source.identifier)
        return outinputs

    def create_literal_inputs(self, source, inputs):
        """ Takes the http_request and parses the input to objects

        :return collections.deque:
        """

        outinputs = deque(maxlen=source.max_occurs)

        for inpt in inputs:
            newinpt = source.clone()
            # set the input to the type defined in the process
            newinpt.uom = inpt.get('uom')
            data_type = inpt.get('datatype')
            if data_type:
                newinpt.data_type = data_type

            # get the value of the field
            newinpt.data = inpt.get('data')

            outinputs.append(newinpt)

        if len(outinputs) < source.min_occurs:
            raise MissingParameterValue(locator=source.identifier)

        return outinputs

    def _set_grass(self):
        """Set environment variables needed for GRASS GIS support
        """
        if not PY2:
            LOGGER.debug('Python3 is not supported by GRASS')
            return

        gisbase = config.get_config_value('grass', 'gisbase')
        if gisbase and os.path.isdir(gisbase):
            LOGGER.debug('GRASS GISBASE set to %s' % gisbase)

            # export the GRASS layout to the environment so grass modules
            # spawned by processes can find their libraries and scripts
            os.environ['GISBASE'] = gisbase

            os.environ['LD_LIBRARY_PATH'] = '{}:{}'.format(
                os.environ.get('LD_LIBRARY_PATH'),
                os.path.join(gisbase, 'lib'))
            os.putenv('LD_LIBRARY_PATH', os.environ.get('LD_LIBRARY_PATH'))

            os.environ['PATH'] = '{}:{}:{}'.format(
                os.environ.get('PATH'),
                os.path.join(gisbase, 'bin'),
                os.path.join(gisbase, 'scripts'))
            os.putenv('PATH', os.environ.get('PATH'))

            python_path = os.path.join(gisbase, 'etc', 'python')
            os.environ['PYTHONPATH'] = '{}:{}'.format(os.environ.get('PYTHONPATH'),
                                                      python_path)
            os.putenv('PYTHONPATH', os.environ.get('PYTHONPATH'))
            sys.path.insert(0, python_path)

    def create_bbox_inputs(self, source, inputs):
        """ Takes the http_request and parses the input to objects

        :return collections.deque:
        """

        outinputs = deque(maxlen=source.max_occurs)

        for datainput in inputs:
            newinpt = source.clone()
            newinpt.data = [datainput.minx, datainput.miny,
                            datainput.maxx, datainput.maxy]
            outinputs.append(newinpt)

        if len(outinputs) < source.min_occurs:
            raise MissingParameterValue(
                description='Number of inputs is lower than minium required number of inputs',
                locator=source.identifier)

        return outinputs

    @Request.application
    def __call__(self, http_request):
        # WSGI entry point: dispatch the parsed WPS operation and record
        # the request/response in the database log.
        request_uuid = uuid.uuid1()

        environ_cfg = http_request.environ.get('PYWPS_CFG')
        if 'PYWPS_CFG' not in os.environ and environ_cfg:
            LOGGER.debug('Setting PYWPS_CFG to %s', environ_cfg)
            os.environ['PYWPS_CFG'] = environ_cfg

        try:
            wps_request = WPSRequest(http_request)
            LOGGER.info('Request: %s', wps_request.operation)
            if wps_request.operation in ['getcapabilities',
                                         'describeprocess',
                                         'execute']:
                log_request(request_uuid, wps_request)
                response = None
                if wps_request.operation == 'getcapabilities':
                    response = self.get_capabilities(wps_request, request_uuid)
                elif wps_request.operation == 'describeprocess':
                    response = self.describe(wps_request, request_uuid, wps_request.identifiers)
                elif wps_request.operation == 'execute':
                    response = self.execute(
                        wps_request.identifier,
                        wps_request,
                        request_uuid
                    )
                update_response(request_uuid, response, close=True)
                return response
            else:
                # NOTE(review): 'response' is not assigned on this path, so
                # update_response() raises NameError before the RuntimeError
                # is reached — looks like a latent bug; confirm.
                update_response(request_uuid, response, close=True)
                raise RuntimeError("Unknown operation %r"
                                   % wps_request.operation)
        except HTTPException as e:
            # transform HTTPException to OWS NoApplicableCode exception
            if not isinstance(e, NoApplicableCode):
                e = NoApplicableCode(e.description, code=e.code)

            class FakeResponse:
                # minimal stand-in so the failed request can still be logged
                message = e.locator
                status = e.code
                status_percentage = 100
            try:
                update_response(request_uuid, FakeResponse, close=True)
            except NoApplicableCode as e:
                return e
            return e
        except Exception as e:
            # NOTE(review): the original exception is discarded here, which
            # hides its message from the client; only the error log has it.
            e = NoApplicableCode("No applicable error code, please check error log", code=500)
            return e
def _openurl(inpt):
    """Fetch the reference described by *inpt* using :mod:`requests`.

    When inpt['method'] is 'POST', the request body comes from 'body' or
    from the document behind 'bodyreference'; anything else is a plain GET.
    The response is opened in streaming mode.
    """
    target = inpt.get('href')
    LOGGER.debug('Fetching URL %s', target)
    if inpt.get('method') == 'POST':
        payload = None
        if 'body' in inpt:
            payload = inpt.get('body')
        elif 'bodyreference' in inpt:
            # the body itself lives behind another URL
            payload = requests.get(url=inpt.get('bodyreference')).text
        return requests.post(url=target, data=payload, stream=True)
    return requests.get(url=target, stream=True)
def _build_input_file_name(href, workdir, extension=None):
href = href or ''
url_path = urlparse(href).path or ''
file_name = os.path.basename(url_path).strip() or 'input'
(prefix, suffix) = os.path.splitext(file_name)
suffix = suffix or extension or ''
if prefix and suffix:
file_name = prefix + suffix
input_file_name = os.path.join(workdir, file_name)
# build tempfile in case of duplicates
if os.path.exists(input_file_name):
input_file_name = tempfile.mkstemp(
suffix=suffix, prefix=prefix + '_',
dir=workdir)[1]
return input_file_name
def _validate_file_input(href):
    """Accept a file:// *href* only when it points below an allowed path.

    Allowed locations come from the colon-separated 'allowedinputpaths'
    server setting.  Raises FileURLNotSupported for any other URL.
    """
    parsed = urlparse(href or '')
    if parsed.scheme != 'file':
        raise FileURLNotSupported('Invalid URL scheme')
    if not parsed.path:
        raise FileURLNotSupported('Invalid URL path')
    requested = os.path.abspath(parsed.path)
    # build allowed paths list
    configured = config.get_config_value('server', 'allowedinputpaths')
    allowed_paths = [os.path.abspath(entry.strip())
                     for entry in configured.split(':') if entry.strip()]
    for allowed in allowed_paths:
        if requested.startswith(allowed):
            LOGGER.debug("Accepted file url as input.")
            return
    raise FileURLNotSupported()
def _extension(complexinput):
extension = None
if complexinput.data_format:
extension = complexinput.data_format.extension
return extension
| 38.251984 | 112 | 0.584885 |
19815471371a51f60197f974ce1c5f836081c09d | 449 | py | Python | util.py | nonk123/music_downloader | 4aed5b2ffe3ec2f270a90b7ef30d24ce12ad4724 | [
"MIT"
] | null | null | null | util.py | nonk123/music_downloader | 4aed5b2ffe3ec2f270a90b7ef30d24ce12ad4724 | [
"MIT"
] | null | null | null | util.py | nonk123/music_downloader | 4aed5b2ffe3ec2f270a90b7ef30d24ce12ad4724 | [
"MIT"
] | null | null | null | import os.path
def ensure_dir_exists(path):
    """Create the directory for *path* if needed and return *path*.

    When *path* is itself an existing directory it is used directly;
    otherwise its parent directory is created (mode 0o775, recursively).
    """
    target = path if os.path.isdir(path) else os.path.dirname(path)
    if not os.path.exists(target):
        os.makedirs(target, 0o775)
    return path
def split_path(path):
    """Split *path* into a list of its components.

    Examples:
        split_path('a/b/c')  -> ['a', 'b', 'c']
        split_path('/a/b')   -> ['a', 'b']
        split_path('')       -> []

    BUG FIX: the original returned [] whenever os.path.split() produced an
    empty head OR tail, which silently dropped the first component of every
    relative path (split_path('a/b') gave ['b'] and split_path('a') gave []).
    The empty-string cases are now handled explicitly.
    """
    head, tail = os.path.split(path)
    if head == "" and tail == "":
        # empty input
        return []
    if head in ("", "/"):
        # single component, optionally rooted; a bare '/' yields []
        return [tail] if tail else []
    if tail == "":
        # trailing separator: keep splitting the head; guard against
        # irreducible heads such as '//' which os.path.split maps to itself
        return split_path(head) if head != path else []
    return split_path(head) + [tail]
| 20.409091 | 63 | 0.561247 |
6f500b87efaec7708e4bbfaf869654a496a40fcb | 11,262 | py | Python | src/isna/cli.py | frostidaho/python-isna | 4f65ee626550caf55f3ad5ce23fac1e49d363178 | [
"BSD-2-Clause"
] | null | null | null | src/isna/cli.py | frostidaho/python-isna | 4f65ee626550caf55f3ad5ce23fac1e49d363178 | [
"BSD-2-Clause"
] | null | null | null | src/isna/cli.py | frostidaho/python-isna | 4f65ee626550caf55f3ad5ce23fac1e49d363178 | [
"BSD-2-Clause"
] | null | null | null | """isna.cli -- The main cli script for isna.
cli.main() is called by cli2.main()
cli2.py is just a small script which loads cli.py
cli2.py just parses the sys.argv using docopt and sends them to cli.main()
They are split like this so that printing out the program's
usage & help statements occur quickly.
"""
import os
from schema import Schema, And, Or, Use, SchemaError, Regex
from collections import namedtuple, ChainMap
from isna.config import cfg
DEBUG = False  # flipped to True by main(debug=True); gates dprint/dpprint output
def dprint(*pargs, **kwargs):
    """Print a tab-indented debug line to stderr when DEBUG is set."""
    if not DEBUG:
        return
    from sys import stderr
    body = (str(arg).replace('\n', '\n\t') for arg in pargs)
    print('ISNA:\t', *body, flush=True, file=stderr, **kwargs)
def dpprint(*pargs, **kwargs):
    """Pretty-print tab-indented debug output to stderr when DEBUG is set."""
    if not DEBUG:
        return
    from sys import stderr
    from pprint import pformat
    joined = '\n'.join(pformat(arg) for arg in pargs)
    print('ISNA:\t', joined.replace('\n', '\n\t'), flush=True,
          file=stderr, **kwargs)
def uniq(iterable):
    """Yield the unique elements of *iterable*, preserving first-seen order."""
    seen = set()
    for item in iterable:
        if item in seen:
            continue
        seen.add(item)
        yield item
def get_templ_dirs(templ_dirs, default=cfg['templ_dirs']):
    """Return template directories, including any default directories.

    Duplicates are removed while preserving order.  Each directory is
    either a string path '/some/template/dir' or a tuple like
    ('python.module', 'templatefolder').
    """
    combined = list(templ_dirs) if templ_dirs is not None else []
    combined.extend(default)
    return list(uniq(combined))
class Validate:
    """Validate arguments/options given by docopt

    TODO: Replace Transform class by moving all of its transformations
    here.
    """
    # template for the ValueError raised on schema failure
    err_msg = 'Validation failed for {key!r} with data {data!r}'

    def __init__(self, d_args):
        """Validate arguments/options given by docopt

        d_args is the dictionary of options & arguments made by docopt
        It also does some transformation of the data.
        """
        self.data = d_args
        # validate (and coerce, via schema.Use) each known key in place
        for k, v in self.schema.items():
            try:
                self.data[k] = v.validate(self.data[k])
            except SchemaError as e:
                msg = self.err_msg.format(key=k, data=self.data[k])
                raise ValueError(msg) from e

    @property
    def schema(self):
        # lazily build and cache the per-option Schema objects
        try:
            return self._schema
        except AttributeError:
            d = {
                '--ssh': self._schema_ssh(),
                '--dir': self._schema_dir(),
                'TEMPLATE': self._schema_template(),
                '--vars': self._schema_vars(),
            }
            self._schema = {k: Schema(v) for k, v in d.items()}
            return self._schema

    def _schema_ssh(self):
        # accepted shapes: user@host, user@host:port, host, host:port, or None
        cuser = r'[a-zA-Z_]'
        chost = r'[a-zA-Z0-9_\.]'
        cport = r'[0-9]{1,5}'
        re_pats = (
            r'^{user}+@{host}+$'.format(user=cuser, host=chost),
            r'^{user}+@{host}+:{port}$'.format(user=cuser, host=chost, port=cport),
            r'^{host}+$'.format(host=chost),
            r'^{host}+:{port}$'.format(host=chost, port=cport),
        )
        regexes = [Regex(v) for v in re_pats]
        return Or(type(None), *regexes)

    def _schema_dir(self):
        # every --dir entry must be an existing directory
        return [os.path.isdir]

    def _schema_vars(self):
        # --vars is either absent or a parseable key=value string
        from isna.util import dict_from_str
        return Or(None, And(Use(dict_from_str), dict))

    def _schema_template(self):
        # a TEMPLATE is either a real file path or a name known to the
        # jinja2 environment built from the --dir directories
        td = get_templ_dirs(self.data['--dir'])
        from isna.playbook import get_env
        env = get_env(*td)

        def is_template(name):
            if name in env.list_templates(cfg['templ_ext']):
                return True
            return False
        return [Or(os.path.isfile, is_template)]
# Parsed pieces of the --ssh option ("user@host:port"); any field may be None.
_tr_ssh = namedtuple('_tr_ssh', 'user host port')
# A template reference: its name and (optional) containing directory.
_tr_templs = namedtuple('_tr_templs', 'name dir')
class Transform:
    "Rename option & arg keys, and transform their values"

    # docopt key -> internal name used by main()/Runner
    names = {
        '--ssh': 'ssh',
        '--sudo': 'sudo',
        '--domain': 'domain',
        '--dir': 'templ_dirs',
        'TEMPLATE': 'templs',
        '--vars': 'exvars',
        'vars': 'ls_vars',
        'hosts': 'ls_hosts',
        'temp': 'ls_temp',
    }

    def __init__(self, d_args):
        # 'ls' is just the subcommand marker; the specific ls_* flag matters
        d_args.pop('ls', None)
        dat = {}
        self.data = dat
        # rename the docopt keys to their internal names
        for k, v in d_args.items():
            if k in self.names:
                dat[self.names[k]] = v
            else:
                dat[k] = v
        # keep the untransformed values around for the properties below
        self.raw_dat = self.data.copy()
        # replace each value with its transformed form when a matching
        # property exists on this class (ssh, templs, templ_dirs)
        for k, v in dat.items():
            try:
                dat[k] = getattr(self, k)
            except AttributeError:
                pass

    @property
    def ssh(self):
        return self._tr_ssh(self.raw_dat['ssh'])

    @staticmethod
    def _tr_ssh(ssharg):
        # parse "user@host:port" (user and port optional) into _tr_ssh
        if not ssharg:
            return _tr_ssh(None, None, None)
        import re
        pat_user = r'(?:^(?P<user>[a-zA-Z_]\w+)@)?'
        pat_host = r'(?P<host>[a-zA-Z0-9_\.]+)'
        pat_port = r'(?:\:(?P<port>[0-9]{1,5}$))?'
        pat = pat_user + pat_host + pat_port
        x = re.search(pat, ssharg).groupdict()
        port = x.get('port', None)
        if port is not None:
            x['port'] = int(port)
        return _tr_ssh(**x)

    @property
    def templs(self):
        return self._tr_templs(self.raw_dat['templs'])

    @staticmethod
    def _tr_templs(templs):
        # file paths are split into (basename, dirname); bare names keep
        # dir=None and are resolved through the template directories
        total = []
        for template in templs:
            if os.path.isfile(template):
                tpath = os.path.realpath(template)
                total.append(_tr_templs(os.path.basename(tpath), os.path.dirname(tpath)))
            else:
                total.append(_tr_templs(template, None))
        return total

    @property
    def templ_dirs(self):
        # directories of explicit template files take precedence over --dir
        dirs = [x.dir for x in self.templs if x.dir]
        dirs.extend(self.raw_dat['templ_dirs'])
        return self._tr_templ_dirs(dirs)

    @staticmethod
    def _tr_templ_dirs(templ_dirs, default=cfg['templ_dirs']):
        return get_templ_dirs(templ_dirs, default=default)
def main(debug=False, **kwargs):
    """Entry point called by cli2.main() with the docopt argument dict.

    Returns 0 for ls subcommands, otherwise the playbook runner's exit code.
    """
    if debug:
        global DEBUG
        DEBUG = True
    dprint('docopt produced the args:\n', kwargs)
    val = Validate(kwargs)
    tr = Transform(val.data)
    dat = tr.data
    dpprint('validated & transformed args are:', dat)
    # ls subcommand: dispatch to the matching ls_* function by name
    # (docopt guarantees at most one ls_* flag is truthy per invocation)
    ls = [k for k, v in dat.items() if k.startswith('ls_') and v]
    for func in ls:
        print(*globals()[func](**dat), sep='\n', flush=True)
        return 0
    runner = Runner(**dat)
    return runner.run()
class Runner:
    """Render the requested playbook templates and run them with ansible.

    Prompts interactively (via InputQuery) for any template variable or
    password that was not supplied through --vars.
    """

    def __init__(self, **kwargs):
        from isna.query import InputQuery
        self.kwargs = kwargs
        self.dirs = kwargs['templ_dirs']
        # --vars may be None; normalise to an empty dict
        exvars = kwargs['exvars']
        exvars = exvars if exvars else {}
        self.exvars = exvars
        # collects interactively-entered values in .data
        self.inpq = InputQuery()

    @property
    def templates(self):
        # template names in the order given on the command line
        return [x.name for x in self.kwargs['templs']]

    def run(self):
        """Render and execute every requested playbook.

        Returns the return code of the last playbook run.
        NOTE(review): raises NameError if the template list is empty,
        since 'out' is only bound inside the loop — confirm intended.
        """
        tdirs = [x for x in self.kwargs['templ_dirs']]
        from isna.playbook import PBMaker, AnsiblePlaybook
        pbm = PBMaker(*tdirs)
        pbm.update(self.template_vars)
        avars = self.get_ansible_vars()
        for name in self.templates:
            dprint('Running playbook', name)
            txt = pbm.render(name)
            dprint(txt)
            with AnsiblePlaybook(txt, self.host_list, **avars) as apb:
                out = apb.run()
        return out.returncode

    def get_ansible_vars(self):
        """Build the --extra-vars dict passed to ansible.

        Merges entered/external vars (minus template vars), the connection
        arguments derived from --ssh/--sudo, and any passwords that the
        NeedsPass probes show are required.
        """
        sudo = self.kwargs['sudo']
        ssh = self.kwargs['ssh']
        ansivars = ChainMap(self.inpq.data, self.exvars)
        # template variables are rendered into the playbook, not passed on
        ansivars = {k: v for k, v in ansivars.items() if k not in self.template_vars}
        from isna.playbook import AnsibleArgs
        ansivars.update(AnsibleArgs.from_ssh(**ssh._asdict()))
        ansivars.update(AnsibleArgs.from_sudo(sudo))

        from isna.util import NeedsPass
        if ssh.host is not None:
            # remote run: probe whether ssh and/or sudo need a password
            dprint('Testing ssh connection without password')
            res = NeedsPass.ssh(
                user=ansivars['ansible_user'],
                hostname=ssh.host,
                port=ansivars['ansible_port'],
                sudo=sudo,
            )
            dprint('SSH test results:\n', res)
            if res.ssh_needs_pw and ('ansible_ssh_pass' not in ansivars):
                passtupl = self.inpq('ansible_ssh_pass', hide=True)
                ansivars[passtupl.var] = passtupl.result
            if res.sudo_needs_pw and ('ansible_become_pass' not in ansivars):
                passtupl = self.inpq('ansible_become_pass', hide=True)
                ansivars[passtupl.var] = passtupl.result
        elif sudo and ('ansible_become_pass' not in ansivars):
            # local run with sudo: only the become password can be needed
            dprint('Testing sudo without password')
            res = NeedsPass.sudo(user=sudo)
            dprint('Sudo test results:\n', res)
            if res.sudo_needs_pw:
                passtupl = self.inpq('ansible_become_pass', hide=True)
                ansivars[passtupl.var] = passtupl.result
        dprint('Ansible --extra-vars:\n{!r}'.format(ansivars))
        return ansivars

    @property
    def template_vars(self):
        # lazily prompt for all template variables not covered by --vars;
        # names containing a pass_substrs fragment are asked with hidden,
        # repeated input
        try:
            return self._template_vars
        except AttributeError:
            remaining = set(self.all_templ_vars) - set(self.exvars)
            remaining = remaining - set(self.inpq.data)
            for var in remaining:
                if any(x in var for x in cfg['pass_substrs']):
                    self.inpq(var, hide=True, repeat=True)
                else:
                    self.inpq(var)
            tvs = ChainMap(self.inpq.data, self.exvars)
            from isna.util import maybe_bool
            # coerce 'true'/'false'-like strings into real booleans
            tvs = {k: maybe_bool(tvs[k]) for k in self.all_templ_vars}
            self._template_vars = tvs
            dprint('Undefined template vars:\n', self._template_vars)
            return self._template_vars

    @property
    def host_list(self):
        # inventory: the --ssh host, or the configured local default
        ssh = self.kwargs['ssh']
        return [ssh.host] if ssh.host else [cfg['default_host']]

    @property
    def all_templ_vars(self):
        # cached union of undefined variables across all templates
        try:
            return self._all_templ_vars
        except AttributeError:
            self._all_templ_vars = ls_vars(**self.kwargs)
            dprint('All template vars:\n', self._all_templ_vars)
            return self._all_templ_vars
def ls_hosts(**kwargs):
    """Return 'localhost' plus non-empty hosts found on the avahi domain."""
    msg = 'Searching on the avahi domain {!r} for other hosts'
    dprint(msg.format(kwargs['domain']))
    from isna.util import get_hosts
    found = ['localhost']
    found.extend(get_hosts(domain=kwargs['domain']))
    return [host for host in found if host]
def ls_temp(**kwargs):
    """Return the playbook template names found in the template dirs."""
    ext = cfg['templ_ext']
    dirs = kwargs['templ_dirs']
    msg = 'Listing playbook templates ending w/ {!r} in {!r}'
    dprint(msg.format(ext, dirs))
    from isna.playbook import PBMaker
    return PBMaker(*dirs).list_templates(ext)
def ls_vars(**kwargs):
    """Return the sorted, de-duplicated union of all template variables."""
    from isna.playbook import PBMaker
    maker = PBMaker(*kwargs['templ_dirs'])
    collected = []
    for templ in kwargs['templs']:
        collected.extend(maker.all_vars(templ.name))
    return sorted(uniq(collected))
| 32.08547 | 89 | 0.581424 |
ea7ab7d1a9035e5d55d29f93c2f299054da1733e | 3,334 | py | Python | perfect_match/models/benchmarks/jobs_benchmark.py | nec-db-ml/perfect_match | 3f6c5642c02420ae676c5e7b94bb4d716ed3b5d9 | [
"MIT"
] | 99 | 2018-10-31T22:41:11.000Z | 2022-03-31T03:07:12.000Z | perfect_match/models/benchmarks/jobs_benchmark.py | nec-db-ml/perfect_match | 3f6c5642c02420ae676c5e7b94bb4d716ed3b5d9 | [
"MIT"
] | 8 | 2019-05-23T06:46:12.000Z | 2022-02-10T00:00:51.000Z | perfect_match/models/benchmarks/jobs_benchmark.py | nec-db-ml/perfect_match | 3f6c5642c02420ae676c5e7b94bb4d716ed3b5d9 | [
"MIT"
] | 21 | 2018-12-06T06:02:46.000Z | 2022-03-10T03:08:52.000Z | """
Copyright (C) 2018 Patrick Schwab, ETH Zurich
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import math
import numpy as np
from perfect_match.data_access.jobs.data_access import DataAccess
class JobsBenchmark(object):
    """Benchmark wrapper exposing the Jobs dataset for treatment-effect models.

    Provides data access, per-record treatment-assignment lookup, and
    input/output shape metadata for a binary-treatment setting.
    """
    def __init__(self, data_dir,
                 **kwargs):
        # NOTE(review): the positional `data_dir` argument is immediately
        # shadowed by kwargs["output_directory"]; kept as-is for caller
        # compatibility -- confirm whether `data_dir` was ever meant to be used.
        data_dir = kwargs["output_directory"]
        self.data_access = DataAccess(data_dir, kwargs["seed"], kwargs["experiment_index"])
        # Cache of id -> (treatment, outcomes), so assignment is stable per id.
        self.assignment_cache = {}
        self.assign_counterfactuals = False
        self.num_treatments = 2
        self.seed = kwargs["seed"]
        self.random_generator = None

    @staticmethod
    def get_db_file_name():
        """Return the file name of the backing database."""
        return DataAccess.DB_FILE_NAME

    def filter(self, patients):
        """Identity filter: every record is eligible in this benchmark."""
        return patients

    def set_assign_counterfactuals(self, value):
        """Toggle whether get_assignment returns both potential outcomes."""
        self.assign_counterfactuals = value

    def get_num_treatments(self):
        """Number of treatment arms (binary: control / treated)."""
        return self.num_treatments

    def get_data_access(self):
        """Return the underlying DataAccess object."""
        return self.data_access

    def get_input_shapes(self, args):
        """Shape of one covariate vector (17 features)."""
        return (17,)

    def get_output_shapes(self, args):
        """Shape of one outcome (scalar)."""
        return (1,)

    def initialise(self, args):
        """Re-create the data access layer and RNG from `args`."""
        data_dir = args["output_directory"]
        self.data_access = DataAccess(data_dir, args["seed"], args["experiment_index"])
        self.random_generator = np.random.RandomState(self.seed)

    def fit(self, generator, steps, batch_size):
        """No-op: this benchmark has no trainable assignment model."""
        pass

    def get_assignment(self, id, x):
        """Return (treatment, outcome) for record `id`, memoised per id.

        When `assign_counterfactuals` is set, the full outcome list is
        returned instead of only the factual outcome.
        """
        if id not in self.assignment_cache:
            self.assignment_cache[id] = self._assign(id)
        assigned_treatment, assigned_y = self.assignment_cache[id]
        if self.assign_counterfactuals:
            return assigned_treatment, assigned_y
        else:
            return assigned_treatment, assigned_y[assigned_treatment]

    @staticmethod
    def sigmoid(x):
        """Numerically stable logistic function 1 / (1 + exp(-x)).

        Fix: the naive form `1 / (1 + math.exp(-x))` overflows math.exp for
        large negative x (e.g. x = -1000 raised OverflowError). Branching on
        the sign keeps the exponent passed to exp non-positive.
        """
        if x >= 0:
            return 1 / (1 + math.exp(-x))
        z = math.exp(x)
        return z / (1 + z)

    def _assign(self, id):
        """Look up the factual treatment and outcome for `id` from the database."""
        treatment_chosen = self.data_access.get_row(DataAccess.TABLE_JOBS, id, columns="t")[0]
        y = np.array(self.data_access.get_row(DataAccess.TABLE_JOBS, id, columns="y0,y1"))
        # We do not have counterfactual outcomes in this experiment.
        if treatment_chosen == 0:
            y = [y[0], None]
        else:
            y = [None, y[1]]
        return treatment_chosen, y
| 37.044444 | 111 | 0.70006 |
1b5c023d6142eb9ab132be886f1c00f2115ad4c9 | 11,624 | py | Python | intersight/model/virtualization_vmware_distributed_switch_list.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 5 | 2021-12-16T15:13:32.000Z | 2022-03-29T16:09:54.000Z | intersight/model/virtualization_vmware_distributed_switch_list.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 4 | 2022-01-25T19:05:51.000Z | 2022-03-29T20:18:37.000Z | intersight/model/virtualization_vmware_distributed_switch_list.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 2 | 2020-07-07T15:01:08.000Z | 2022-01-31T04:27:35.000Z | """
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    """Deferred imports: publish model classes into module globals on demand,
    avoiding circular-import problems at module load time."""
    from intersight.model.mo_base_response import MoBaseResponse
    from intersight.model.virtualization_vmware_distributed_switch import VirtualizationVmwareDistributedSwitch
    from intersight.model.virtualization_vmware_distributed_switch_list_all_of import VirtualizationVmwareDistributedSwitchListAllOf
    globals().update(
        MoBaseResponse=MoBaseResponse,
        VirtualizationVmwareDistributedSwitch=VirtualizationVmwareDistributedSwitch,
        VirtualizationVmwareDistributedSwitchListAllOf=VirtualizationVmwareDistributedSwitchListAllOf,
    )
class VirtualizationVmwareDistributedSwitchList(ModelComposed):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-constrained properties on this generated model.
    allowed_values = {
    }

    # No value validations (lengths, ranges, regex) on this generated model.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'object_type': (str,),  # noqa: E501
            'count': (int,),  # noqa: E501
            'results': ([VirtualizationVmwareDistributedSwitch], none_type,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # No discriminator mapping for this model: always resolves to None.
        val = {
        }
        if not val:
            return None
        return {'object_type': val}

    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'object_type': 'ObjectType',  # noqa: E501
        'count': 'Count',  # noqa: E501
        'results': 'Results',  # noqa: E501
    }

    # Internal bookkeeping attribute names, excluded from the model's data store.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
        '_composed_instances',
        '_var_name_to_model_instances',
        '_additional_properties_model_instances',
    ])

    @convert_js_args_to_python_args
    def __init__(self, object_type, *args, **kwargs):  # noqa: E501
        """VirtualizationVmwareDistributedSwitchList - a model defined in OpenAPI

        Args:
            object_type (str): A discriminator value to disambiguate the schema of a HTTP GET response body.

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            count (int): The total number of 'virtualization.VmwareDistributedSwitch' resources matching the request, accross all pages. The 'Count' attribute is included when the HTTP GET request includes the '$inlinecount' parameter.. [optional]  # noqa: E501
            results ([VirtualizationVmwareDistributedSwitch], none_type): The array of 'virtualization.VmwareDistributedSwitch' resources matching the request.. [optional]  # noqa: E501
        """

        # Pop framework-internal keyword arguments before model validation.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # Generated models accept keyword arguments only (beyond object_type).
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        required_args = {
            'object_type': object_type,
        }
        model_args = {}
        model_args.update(required_args)
        model_args.update(kwargs)
        # Validate this composed (allOf) model against its component schemas.
        composed_info = validate_get_composed_info(
            constant_args, model_args, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        unused_args = composed_info[3]

        for var_name, var_value in required_args.items():
            setattr(self, var_name, var_value)
        for var_name, var_value in kwargs.items():
            if var_name in unused_args and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        not self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)

    @cached_property
    def _composed_schemas():
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error beause the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        lazy_import()
        return {
          'anyOf': [
          ],
          'allOf': [
              MoBaseResponse,
              VirtualizationVmwareDistributedSwitchListAllOf,
          ],
          'oneOf': [
          ],
        }
| 48.635983 | 1,678 | 0.646507 |
488724e80ce3c4e46dc48263dc17c1bd91b5887a | 10,804 | py | Python | lesson7.4/tensorflow/contrib/distributions/python/ops/vector_sinh_arcsinh_diag.py | magnusmel/Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda | cc226deb7b46852407900f9fec0caf62638defe2 | [
"MIT"
] | 21 | 2018-12-11T20:07:47.000Z | 2021-11-08T13:12:32.000Z | lesson7.4/tensorflow/contrib/distributions/python/ops/vector_sinh_arcsinh_diag.py | magnusmel/Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda | cc226deb7b46852407900f9fec0caf62638defe2 | [
"MIT"
] | 3 | 2020-09-26T01:09:47.000Z | 2022-02-10T02:12:08.000Z | lesson7.4/tensorflow/contrib/distributions/python/ops/vector_sinh_arcsinh_diag.py | magnusmel/Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda | cc226deb7b46852407900f9fec0caf62638defe2 | [
"MIT"
] | 15 | 2018-12-12T02:32:28.000Z | 2021-11-05T20:40:10.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SinhArcsinh transformation of a distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops.distributions import normal
from tensorflow.python.ops.distributions import transformed_distribution
__all__ = [
"VectorSinhArcsinhDiag",
]
class VectorSinhArcsinhDiag(transformed_distribution.TransformedDistribution):
  """The (diagonal) SinhArcsinh transformation of a distribution on `R^k`.

  This distribution models a random vector `Y = (Y1,...,Yk)`, making use of
  a `SinhArcsinh` transformation (which has adjustable tailweight and skew),
  a rescaling, and a shift.

  The `SinhArcsinh` transformation of the Normal is described in great depth in
  [Sinh-arcsinh distributions](https://www.jstor.org/stable/27798865).
  Here we use a slightly different parameterization, in terms of `tailweight`
  and `skewness`. Additionally we allow for distributions other than Normal,
  and control over `scale` as well as a "shift" parameter `loc`.

  #### Mathematical Details

  Given iid random vector `Z = (Z1,...,Zk)`, we define the VectorSinhArcsinhDiag
  transformation of `Z`, `Y`, parameterized by
  `(loc, scale, skewness, tailweight)`, via the relation (with `@` denoting
  matrix multiplication):

  ```
  Y := loc + scale @ F(Z) * (2 / F(2))
  F(Z) := Sinh( (Arcsinh(Z) + skewness) * tailweight )
  ```

  This distribution is similar to the location-scale transformation
  `L(Z) := loc + scale @ Z` in the following ways:

  * If `skewness = 0` and `tailweight = 1` (the defaults), `F(Z) = Z`, and then
    `Y = L(Z)` exactly.
  * `loc` is used in both to shift the result by a constant factor.
  * Our definition of `C` ensures that
    `P[Y - loc <= 2 * scale] = P[L(Z) - loc <= 2 * scale]`.
    Thus it can be said that the weights in the tails of `Y` and `L(Z)` beyond
    `loc + 2 * scale` are the same.

  This distribution is different than `loc + scale @ Z` due to the
  reshaping done by `F`:

  * Positive (negative) `skewness` leads to positive (negative) skew.
    * positive skew means, the mode of `F(Z)` is "tilted" to the right.
    * positive skew means positive values of `F(Z)` become more likely, and
      negative values become less likely.
  * Larger (smaller) `tailweight` leads to fatter (thinner) tails.
    * Fatter tails mean larger values of `|F(Z)|` become more likely.
    * `tailweight < 1` leads to a distribution that is "flat" around `Y = loc`,
      and a very steep drop-off in the tails.
    * `tailweight > 1` leads to a distribution more peaked at the mode with
      heavier tails.

  To see the argument about the tails, note that for `|Z| >> 1` and
  `|Z| >> (|skewness| * tailweight)**tailweight`, we have
  `Y approx 0.5 Z**tailweight e**(sign(Z) skewness * tailweight)`.

  To see the argument about `C` and quantiles, note that

  ```
  P[(Y - loc) / scale <= 2] = P[F(Z) <= 2 * scale / C]
                            = P[Z <= F^{-1}(2 * scale / C)]
                            = P[Z <= 2].
  ```
  """

  def __init__(self,
               loc=None,
               scale_diag=None,
               scale_identity_multiplier=None,
               skewness=None,
               tailweight=None,
               distribution=None,
               validate_args=False,
               allow_nan_stats=True,
               # NOTE(review): default op-name looks copy-pasted from the MVN
               # class -- confirm "VectorSinhArcsinhDiag" was not intended.
               name="MultivariateNormalLinearOperator"):
    """Construct VectorSinhArcsinhDiag distribution on `R^k`.

    The arguments `scale_diag` and `scale_identity_multiplier` combine to
    define the diagonal `scale` referred to in this class docstring:

    ```none
    scale = diag(scale_diag + scale_identity_multiplier * ones(k))
    ```

    The `batch_shape` is the broadcast shape between `loc` and `scale`
    arguments.

    The `event_shape` is given by last dimension of the matrix implied by
    `scale`. The last dimension of `loc` (if provided) must broadcast with this

    Additional leading dimensions (if any) will index batches.

    Args:
      loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
        implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
        `b >= 0` and `k` is the event size.
      scale_diag: Non-zero, floating-point `Tensor` representing a diagonal
        matrix added to `scale`. May have shape `[B1, ..., Bb, k]`, `b >= 0`,
        and characterizes `b`-batches of `k x k` diagonal matrices added to
        `scale`. When both `scale_identity_multiplier` and `scale_diag` are
        `None` then `scale` is the `Identity`.
      scale_identity_multiplier: Non-zero, floating-point `Tensor` representing
        a scale-identity-matrix added to `scale`. May have shape
        `[B1, ..., Bb]`, `b >= 0`, and characterizes `b`-batches of scale
        `k x k` identity matrices added to `scale`. When both
        `scale_identity_multiplier` and `scale_diag` are `None` then `scale`
        is the `Identity`.
      skewness: Skewness parameter. floating-point `Tensor` with shape
        broadcastable with `event_shape`.
      tailweight: Tailweight parameter. floating-point `Tensor` with shape
        broadcastable with `event_shape`.
      distribution: `tf.Distribution`-like instance. Distribution from which `k`
        iid samples are used as input to transformation `F`. Default is
        `ds.Normal(0., 1.)`.
        Must be a scalar-batch, scalar-event distribution. Typically
        `distribution.reparameterization_type = FULLY_REPARAMETERIZED` or it is
        a function of non-trainable parameters. WARNING: If you backprop through
        a VectorSinhArcsinhDiag sample and `distribution` is not
        `FULLY_REPARAMETERIZED` yet is a function of trainable variables, then
        the gradient will be incorrect!
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value "`NaN`" to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      ValueError: if at most `scale_identity_multiplier` is specified.
    """
    parameters = locals()

    with ops.name_scope(
        name,
        values=[
            loc, scale_diag, scale_identity_multiplier, skewness, tailweight
        ]):
      loc = ops.convert_to_tensor(loc, name="loc") if loc is not None else loc
      # Defaults reduce F to the identity, so Y = loc + scale @ Z.
      tailweight = 1. if tailweight is None else tailweight
      skewness = 0. if skewness is None else skewness

      # Recall, with Z a random variable,
      #   Y := loc + C * F(Z),
      #   F(Z) := Sinh( (Arcsinh(Z) + skewness) * tailweight )
      #   C := 2 * scale / F(2)

      # Construct shapes and 'scale' out of the scale_* and loc kwargs.
      # scale_linop is only an intermediary to:
      #  1. get shapes from looking at loc and the two scale args.
      #  2. combine scale_diag with scale_identity_multiplier, which gives us
      #     'scale', which in turn gives us 'C'.
      scale_linop = distribution_util.make_diag_scale(
          loc=loc,
          scale_diag=scale_diag,
          scale_identity_multiplier=scale_identity_multiplier,
          validate_args=False,
          assert_positive=False)
      batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
          loc, scale_linop)

      # scale_linop.diag_part() is efficient since it is a diag type linop.
      scale_diag_part = scale_linop.diag_part()
      dtype = scale_diag_part.dtype

      if distribution is None:
        # Default base distribution: standard Normal with matching dtype.
        distribution = normal.Normal(
            loc=array_ops.zeros([], dtype=dtype),
            scale=array_ops.ones([], dtype=dtype),
            allow_nan_stats=allow_nan_stats)
      else:
        # User-supplied base: verify it is scalar-batch/scalar-event; attach
        # the resulting assertions as control dependencies when requested.
        asserts = distribution_util.maybe_check_scalar_distribution(
            distribution, dtype, validate_args)
        if asserts:
          scale_diag_part = control_flow_ops.with_dependencies(
              asserts, scale_diag_part)

      # Make the SAS bijector, 'F'.
      skewness = ops.convert_to_tensor(skewness, dtype=dtype, name="skewness")
      tailweight = ops.convert_to_tensor(
          tailweight, dtype=dtype, name="tailweight")
      f = bijectors.SinhArcsinh(
          skewness=skewness, tailweight=tailweight, event_ndims=1)

      # Make the Affine bijector, Z --> loc + C * Z.
      c = 2 * scale_diag_part / f.forward(ops.convert_to_tensor(2, dtype=dtype))
      affine = bijectors.Affine(
          shift=loc, scale_diag=c, validate_args=validate_args, event_ndims=1)

      # Y = affine(F(Z)); Chain applies right-to-left.
      bijector = bijectors.Chain([affine, f])

      super(VectorSinhArcsinhDiag, self).__init__(
          distribution=distribution,
          bijector=bijector,
          batch_shape=batch_shape,
          event_shape=event_shape,
          validate_args=validate_args,
          name=name)
    self._parameters = parameters
    self._loc = loc
    self._scale = scale_linop
    self._tailweight = tailweight
    self._skewness = skewness

  @property
  def loc(self):
    """The `loc` in `Y := loc + scale @ F(Z) * (2 / F(2))."""
    return self._loc

  @property
  def scale(self):
    """The `LinearOperator` `scale` in `Y := loc + scale @ F(Z) * (2 / F(2))."""
    return self._scale

  @property
  def tailweight(self):
    """Controls the tail decay. `tailweight > 1` means faster than Normal."""
    return self._tailweight

  @property
  def skewness(self):
    """Controls the skewness. `Skewness > 0` means right skew."""
    return self._skewness
| 42.203125 | 80 | 0.663643 |
a1ab90f0e706f07fc8bd37579c6529bc411f1a14 | 1,395 | py | Python | libtb/tbsyslog/__init__.py | MelonSmasher/turkey-bite | b21bf82076e7c00c3ae74fc9d36761f0960cbbaf | [
"MIT"
] | 4 | 2020-08-20T02:22:07.000Z | 2020-08-20T14:27:52.000Z | libtb/tbsyslog/__init__.py | MelonSmasher/turkey-bite | b21bf82076e7c00c3ae74fc9d36761f0960cbbaf | [
"MIT"
] | null | null | null | libtb/tbsyslog/__init__.py | MelonSmasher/turkey-bite | b21bf82076e7c00c3ae74fc9d36761f0960cbbaf | [
"MIT"
] | 1 | 2020-08-20T08:41:50.000Z | 2020-08-20T08:41:50.000Z | import socket
class Facility:
    """Syslog facility codes (RFC 3164 numbering)."""
    # Standard facilities 0-11.
    KERN = 0
    USER = 1
    MAIL = 2
    DAEMON = 3
    AUTH = 4
    SYSLOG = 5
    LPR = 6
    NEWS = 7
    UUCP = 8
    CRON = 9
    AUTHPRIV = 10
    FTP = 11
    # Locally-defined facilities 16-23.
    LOCAL0 = 16
    LOCAL1 = 17
    LOCAL2 = 18
    LOCAL3 = 19
    LOCAL4 = 20
    LOCAL5 = 21
    LOCAL6 = 22
    LOCAL7 = 23
class Level:
    """Syslog severity levels (RFC 3164 numbering)."""
    EMERG = 0
    ALERT = 1
    CRIT = 2
    ERR = 3
    WARNING = 4
    NOTICE = 5
    INFO = 6
    DEBUG = 7
class Syslog:
    """A syslog client that logs to a remote server.

    Example:
    >>> log = Syslog(host="foobar.example")
    >>> log.send("hello", Level.WARNING)
    """

    def __init__(self,
                 host="localhost",
                 port=514,
                 facility=Facility.DAEMON):
        self.host = host
        self.port = port
        self.facility = facility
        # One UDP socket reused for all messages.
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    def send(self, message, level):
        """Send a syslog message to remote host using UDP.

        The priority value is `facility * 8 + severity` per RFC 3164.
        """
        data = "<%d>%s" % (level + self.facility * 8, message)
        self.socket.sendto(data.encode(), (self.host, self.port))

    def warn(self, message):
        """Send a syslog warning message."""
        self.send(message, Level.WARNING)

    def notice(self, message):
        """Send a syslog notice message."""
        self.send(message, Level.NOTICE)

    def error(self, message):
        """Send a syslog error message."""
        self.send(message, Level.ERR)

    def close(self):
        """Release the underlying UDP socket.

        Fix: the socket was previously never closed (resource leak).
        """
        self.socket.close()

    # Context-manager support so the socket is released deterministically:
    #   with Syslog(host=...) as log: log.warn("...")
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        self.close()
f91c113384ffc0788112309708614567e8b91c7e | 10,691 | py | Python | yolov3/train_bottleneck.py | lyk19940625/WorkControl | 7b59f98fa5d9824262b391aa41240bd88841d682 | [
"Apache-2.0"
] | 70 | 2019-07-22T10:45:26.000Z | 2022-03-24T12:57:59.000Z | yolov3/train_bottleneck.py | lyk19940625/WorkControl | 7b59f98fa5d9824262b391aa41240bd88841d682 | [
"Apache-2.0"
] | 3 | 2020-02-04T08:14:32.000Z | 2021-04-19T10:45:03.000Z | yolov3/train_bottleneck.py | lyk19940625/WorkControl | 7b59f98fa5d9824262b391aa41240bd88841d682 | [
"Apache-2.0"
] | 27 | 2019-08-25T09:23:55.000Z | 2021-08-24T03:14:45.000Z | """
Retrain the YOLO model for your own dataset.
"""
import os
import numpy as np
import keras.backend as K
from keras.layers import Input, Lambda
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss
from yolo3.utils import get_random_data
def _main():
    """Train a YOLOv3 model in three stages.

    1. Cache "bottleneck" features (output of the frozen body) and fit only
       the final layers on them (fast, no augmentation).
    2. Fit the final layers end-to-end with the body still frozen, using
       randomly augmented data.
    3. Unfreeze everything and fine-tune the whole network.

    Paths below are relative to the working directory; weights and logs are
    written under `log_dir`.
    """
    annotation_path = '2007_train.txt'
    log_dir = 'logs/000/'
    classes_path = 'model_data/coco_classes.txt'
    anchors_path = 'model_data/yolo_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    input_shape = (416,416) # multiple of 32, hw

    model, bottleneck_model, last_layer_model = create_model(input_shape, anchors, num_classes,
            freeze_body=2, weights_path='model_data/yolo_weights.h5') # make sure you know what you freeze

    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)

    # Fixed seed gives a reproducible train/val split, then reseed from OS.
    val_split = 0.1
    with open(annotation_path) as f:
        lines = f.readlines()
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines)*val_split)
    num_train = len(lines) - num_val

    # Train with frozen layers first, to get a stable loss.
    # Adjust num epochs to your dataset. This step is enough to obtain a not bad model.
    if True:
        # perform bottleneck training
        # Bottlenecks are cached to disk; delete bottlenecks.npz to recompute.
        if not os.path.isfile("bottlenecks.npz"):
            print("calculating bottlenecks")
            batch_size=8
            bottlenecks=bottleneck_model.predict_generator(data_generator_wrapper(lines, batch_size, input_shape, anchors, num_classes, random=False, verbose=True),
             steps=(len(lines)//batch_size)+1, max_queue_size=1)
            np.savez("bottlenecks.npz", bot0=bottlenecks[0], bot1=bottlenecks[1], bot2=bottlenecks[2])

        # load bottleneck features from file
        dict_bot=np.load("bottlenecks.npz")
        bottlenecks_train=[dict_bot["bot0"][:num_train], dict_bot["bot1"][:num_train], dict_bot["bot2"][:num_train]]
        bottlenecks_val=[dict_bot["bot0"][num_train:], dict_bot["bot1"][num_train:], dict_bot["bot2"][num_train:]]

        # train last layers with fixed bottleneck features
        batch_size=8
        print("Training last layers with bottleneck features")
        print('with {} samples, val on {} samples and batch size {}.'.format(num_train, num_val, batch_size))
        last_layer_model.compile(optimizer='adam', loss={'yolo_loss': lambda y_true, y_pred: y_pred})
        last_layer_model.fit_generator(bottleneck_generator(lines[:num_train], batch_size, input_shape, anchors, num_classes, bottlenecks_train),
                steps_per_epoch=max(1, num_train//batch_size),
                validation_data=bottleneck_generator(lines[num_train:], batch_size, input_shape, anchors, num_classes, bottlenecks_val),
                validation_steps=max(1, num_val//batch_size),
                epochs=30,
                initial_epoch=0, max_queue_size=1)
        model.save_weights(log_dir + 'trained_weights_stage_0.h5')

        # train last layers with random augmented data
        # The model outputs the loss directly, so the Keras "loss" just
        # forwards y_pred (the custom yolo_loss Lambda layer's output).
        model.compile(optimizer=Adam(lr=1e-3), loss={
            # use custom yolo_loss Lambda layer.
            'yolo_loss': lambda y_true, y_pred: y_pred})
        batch_size = 16
        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
                steps_per_epoch=max(1, num_train//batch_size),
                validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
                validation_steps=max(1, num_val//batch_size),
                epochs=50,
                initial_epoch=0,
                callbacks=[logging, checkpoint])
        model.save_weights(log_dir + 'trained_weights_stage_1.h5')

    # Unfreeze and continue training, to fine-tune.
    # Train longer if the result is not good.
    if True:
        for i in range(len(model.layers)):
            model.layers[i].trainable = True
        model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': lambda y_true, y_pred: y_pred}) # recompile to apply the change
        print('Unfreeze all of the layers.')

        batch_size = 4 # note that more GPU memory is required after unfreezing the body
        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        model.fit_generator(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
            steps_per_epoch=max(1, num_train//batch_size),
            validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
            validation_steps=max(1, num_val//batch_size),
            epochs=100,
            initial_epoch=50,
            callbacks=[logging, checkpoint, reduce_lr, early_stopping])
        model.save_weights(log_dir + 'trained_weights_final.h5')

    # Further training if needed.
def get_classes(classes_path):
    """Read class names from *classes_path*, one name per line."""
    with open(classes_path) as handle:
        return [line.strip() for line in handle]
def get_anchors(anchors_path):
    """Load comma-separated anchor dimensions (first line only) as an (N, 2) array."""
    with open(anchors_path) as handle:
        first_line = handle.readline()
    values = [float(token) for token in first_line.split(',')]
    return np.array(values).reshape(-1, 2)
def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
weights_path='model_data/yolo_weights.h5'):
'''create the training model'''
K.clear_session() # get a new session
image_input = Input(shape=(None, None, 3))
h, w = input_shape
num_anchors = len(anchors)
y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \
num_anchors//3, num_classes+5)) for l in range(3)]
model_body = yolo_body(image_input, num_anchors//3, num_classes)
print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
if load_pretrained:
model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
print('Load weights {}.'.format(weights_path))
if freeze_body in [1, 2]:
# Freeze darknet53 body or freeze all but 3 output layers.
num = (185, len(model_body.layers)-3)[freeze_body-1]
for i in range(num): model_body.layers[i].trainable = False
print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
# get output of second last layers and create bottleneck model of it
out1=model_body.layers[246].output
out2=model_body.layers[247].output
out3=model_body.layers[248].output
bottleneck_model = Model([model_body.input, *y_true], [out1, out2, out3])
# create last layer model of last layers from yolo model
in0 = Input(shape=bottleneck_model.output[0].shape[1:].as_list())
in1 = Input(shape=bottleneck_model.output[1].shape[1:].as_list())
in2 = Input(shape=bottleneck_model.output[2].shape[1:].as_list())
last_out0=model_body.layers[249](in0)
last_out1=model_body.layers[250](in1)
last_out2=model_body.layers[251](in2)
model_last=Model(inputs=[in0, in1, in2], outputs=[last_out0, last_out1, last_out2])
model_loss_last =Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
[*model_last.output, *y_true])
last_layer_model = Model([in0,in1,in2, *y_true], model_loss_last)
model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
[*model_body.output, *y_true])
model = Model([model_body.input, *y_true], model_loss)
return model, bottleneck_model, last_layer_model
def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes, random=True, verbose=False):
'''data generator for fit_generator'''
n = len(annotation_lines)
i = 0
while True:
image_data = []
box_data = []
for b in range(batch_size):
if i==0 and random:
np.random.shuffle(annotation_lines)
image, box = get_random_data(annotation_lines[i], input_shape, random=random)
image_data.append(image)
box_data.append(box)
i = (i+1) % n
image_data = np.array(image_data)
if verbose:
print("Progress: ",i,"/",n)
box_data = np.array(box_data)
y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
yield [image_data, *y_true], np.zeros(batch_size)
def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes, random=True, verbose=False):
n = len(annotation_lines)
if n==0 or batch_size<=0: return None
return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes, random, verbose)
def bottleneck_generator(annotation_lines, batch_size, input_shape, anchors, num_classes, bottlenecks):
n = len(annotation_lines)
i = 0
while True:
box_data = []
b0=np.zeros((batch_size,bottlenecks[0].shape[1],bottlenecks[0].shape[2],bottlenecks[0].shape[3]))
b1=np.zeros((batch_size,bottlenecks[1].shape[1],bottlenecks[1].shape[2],bottlenecks[1].shape[3]))
b2=np.zeros((batch_size,bottlenecks[2].shape[1],bottlenecks[2].shape[2],bottlenecks[2].shape[3]))
for b in range(batch_size):
_, box = get_random_data(annotation_lines[i], input_shape, random=False, proc_img=False)
box_data.append(box)
b0[b]=bottlenecks[0][i]
b1[b]=bottlenecks[1][i]
b2[b]=bottlenecks[2][i]
i = (i+1) % n
box_data = np.array(box_data)
y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
yield [b0, b1, b2, *y_true], np.zeros(batch_size)
if __name__ == '__main__':
_main()
| 47.941704 | 164 | 0.677953 |
6abb5a4fb69de661e34d4b02fe88cb593be9ad4c | 454 | py | Python | RecoBTag/SecondaryVertex/python/negativeCombinedSecondaryVertexV2Computer_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | RecoBTag/SecondaryVertex/python/negativeCombinedSecondaryVertexV2Computer_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | RecoBTag/SecondaryVertex/python/negativeCombinedSecondaryVertexV2Computer_cfi.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
from RecoBTag.SecondaryVertex.combinedSecondaryVertexV2Computer_cfi import *
negativeCombinedSecondaryVertexV2Computer = combinedSecondaryVertexV2Computer.clone(
vertexFlip = True,
trackFlip = True,
trackSelection = dict(sip3dSigMax = 0),
trackPseudoSelection = dict(sip3dSigMax = 0,
sip2dSigMin = -99999.9,
sip2dSigMax = -2.0)
)
| 34.923077 | 84 | 0.676211 |
f19ef97b8a5e83958a75f464314ea06cb647cc25 | 4,240 | py | Python | mars/tensor/random/rayleigh.py | sighingnow/mars | c7897fbd144d230fff5edabc1494fb3ff44aa0d2 | [
"Apache-2.0"
] | null | null | null | mars/tensor/random/rayleigh.py | sighingnow/mars | c7897fbd144d230fff5edabc1494fb3ff44aa0d2 | [
"Apache-2.0"
] | null | null | null | mars/tensor/random/rayleigh.py | sighingnow/mars | c7897fbd144d230fff5edabc1494fb3ff44aa0d2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes as OperandDef
from ...serialize import AnyField
from .core import TensorRandomOperandMixin, handle_array, TensorDistribution
class TensorRayleigh(TensorDistribution, TensorRandomOperandMixin):
__slots__ = '_scale', '_size'
_input_fields_ = ['_scale']
_op_type_ = OperandDef.RAND_RAYLEIGH
_scale = AnyField('scale')
_func_name = 'rayleigh'
def __init__(self, size=None, state=None, dtype=None, gpu=None, **kw):
dtype = np.dtype(dtype) if dtype is not None else dtype
super(TensorRayleigh, self).__init__(_size=size, _state=state, _dtype=dtype,
_gpu=gpu, **kw)
@property
def scale(self):
return self._scale
def __call__(self, scale, chunk_size=None):
return self.new_tensor([scale], None, raw_chunk_size=chunk_size)
def rayleigh(random_state, scale=1.0, size=None, chunk_size=None, gpu=None, dtype=None):
r"""
Draw samples from a Rayleigh distribution.
The :math:`\chi` and Weibull distributions are generalizations of the
Rayleigh.
Parameters
----------
scale : float or array_like of floats, optional
Scale, also equals the mode. Should be >= 0. Default is 1.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
a single value is returned if ``scale`` is a scalar. Otherwise,
``mt.array(scale).size`` samples are drawn.
chunk_size : int or tuple of int or tuple of ints, optional
Desired chunk size on each dimension
gpu : bool, optional
Allocate the tensor on GPU if True, False as default
dtype : data-type, optional
Data-type of the returned tensor.
Returns
-------
out : Tensor or scalar
Drawn samples from the parameterized Rayleigh distribution.
Notes
-----
The probability density function for the Rayleigh distribution is
.. math:: P(x;scale) = \frac{x}{scale^2}e^{\frac{-x^2}{2 \cdotp scale^2}}
The Rayleigh distribution would arise, for example, if the East
and North components of the wind velocity had identical zero-mean
Gaussian distributions. Then the wind speed would have a Rayleigh
distribution.
References
----------
.. [1] Brighton Webs Ltd., "Rayleigh Distribution,"
http://www.brighton-webs.co.uk/distributions/rayleigh.asp
.. [2] Wikipedia, "Rayleigh distribution"
http://en.wikipedia.org/wiki/Rayleigh_distribution
Examples
--------
Draw values from the distribution and plot the histogram
>>> import matplotlib.pyplot as plt
>>> import mars.tensor as mt
>>> values = plt.hist(mt.random.rayleigh(3, 100000).execute(), bins=200, normed=True)
Wave heights tend to follow a Rayleigh distribution. If the mean wave
height is 1 meter, what fraction of waves are likely to be larger than 3
meters?
>>> meanvalue = 1
>>> modevalue = mt.sqrt(2 / mt.pi) * meanvalue
>>> s = mt.random.rayleigh(modevalue, 1000000)
The percentage of waves larger than 3 meters is:
>>> (100.*mt.sum(s>3)/1000000.).execute()
0.087300000000000003
"""
if dtype is None:
dtype = np.random.RandomState().rayleigh(
handle_array(scale), size=(0,)).dtype
size = random_state._handle_size(size)
op = TensorRayleigh(size=size, state=random_state.to_numpy(), gpu=gpu, dtype=dtype)
return op(scale, chunk_size=chunk_size)
| 35.630252 | 89 | 0.672877 |
6da9a592fac23acb9676d929fe1a165d32fab4a4 | 950 | py | Python | setup.py | shaheen-syed/pygooglenewsscraper | 734f6b9772bb02ca235ff1181756deda4ee6aaa0 | [
"MIT"
] | null | null | null | setup.py | shaheen-syed/pygooglenewsscraper | 734f6b9772bb02ca235ff1181756deda4ee6aaa0 | [
"MIT"
] | null | null | null | setup.py | shaheen-syed/pygooglenewsscraper | 734f6b9772bb02ca235ff1181756deda4ee6aaa0 | [
"MIT"
] | null | null | null | from distutils.core import setup
setup(
name = 'pygooglenewsscraper',
packages = ['pygooglenewsscraper'],
version = '0.1.2',
license = 'MIT',
description = 'Scrape news content from the Google News website',
author = 'Shaheen Syed',
author_email = 'shaheensyed15@gmail.com',
url = 'https://github.com/shaheen-syed',
download_url = 'https://github.com/shaheen-syed/pygooglenewsscraper/archive/refs/tags/v0.1.1.tar.gz',
keywords = ['web scraper', 'google news', 'parser', 'python', 'crawler'],
install_requires=[
'requests',
'trafilatura',
'beautifulsoup4',
],
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
) | 32.758621 | 103 | 0.671579 |
6a0130e515d5971a695b8f4abc8053e8574e36aa | 70,252 | py | Python | pandas/io/tests/test_sql.py | adamgreenhall/pandas | 12a39bb05973573e5fcfcba28c3d25a76737239d | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | pandas/io/tests/test_sql.py | adamgreenhall/pandas | 12a39bb05973573e5fcfcba28c3d25a76737239d | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | pandas/io/tests/test_sql.py | adamgreenhall/pandas | 12a39bb05973573e5fcfcba28c3d25a76737239d | [
"PSF-2.0",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | """SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TesySQLLegacyApi`: test the public API with DBAPI connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, the different tested flavors (sqlite3, MySQL, PostgreSQL)
derive from the base class
- Tests for the legacy mode (`TestSQLiteLegacy` and `TestMySQLLegacy`)
"""
from __future__ import print_function
import unittest
import sqlite3
import csv
import os
import sys
import nose
import warnings
import numpy as np
from datetime import datetime
from pandas import DataFrame, Series, Index, MultiIndex, isnull
from pandas import date_range, to_datetime, to_timedelta
import pandas.compat as compat
from pandas.compat import StringIO, range, lrange, string_types
from pandas.core.datetools import format as date_format
import pandas.io.sql as sql
import pandas.util.testing as tm
try:
import sqlalchemy
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
'create_iris': {
'sqlite': """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
'mysql': """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
'postgresql': """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)"""
},
'insert_iris': {
'sqlite': """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
'mysql': """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
'postgresql': """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);"""
},
'create_test_types': {
'sqlite': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
'mysql': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
'postgresql': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"IntDateCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)"""
},
'insert_test_types': {
'sqlite': """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?)
""",
'mysql': """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s)
""",
'postgresql': """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s)
"""
},
'read_parameters': {
'sqlite': "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
'mysql': 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
'postgresql': 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s'
},
'read_named_parameters': {
'sqlite': """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
'mysql': """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
'postgresql': """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
"""
}
}
class PandasSQLTest(unittest.TestCase):
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def drop_table(self, table_name):
self._get_exec().execute("DROP TABLE IF EXISTS %s" % table_name)
def _get_exec(self):
if hasattr(self.conn, 'execute'):
return self.conn
else:
return self.conn.cursor()
def _load_iris_data(self):
import io
iris_csv_file = os.path.join(tm.get_data_path(), 'iris.csv')
self.drop_table('iris')
self._get_exec().execute(SQL_STRINGS['create_iris'][self.flavor])
with io.open(iris_csv_file, mode='r', newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS['insert_iris'][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
self.assertTrue(
issubclass(pytype, np.floating), 'Loaded frame has incorrect type')
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _load_test1_data(self):
columns = ['index', 'A', 'B', 'C', 'D']
data = [(
'2000-01-03 00:00:00', 0.980268513777, 3.68573087906, -0.364216805298, -1.15973806169),
('2000-01-04 00:00:00', 1.04791624281, -
0.0412318367011, -0.16181208307, 0.212549316967),
('2000-01-05 00:00:00', 0.498580885705,
0.731167677815, -0.537677223318, 1.34627041952),
('2000-01-06 00:00:00', 1.12020151869, 1.56762092543, 0.00364077397681, 0.67525259227)]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(dict(A=[4, 1, 3, 6],
B=['asd', 'gsq', 'ylt', 'jkl'],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=['1990-11-22', '1991-10-26', '1993-11-26', '1995-12-12']))
df['E'] = to_datetime(df['E'])
self.test_frame3 = df
def _load_test3_data(self):
columns = ['index', 'A', 'B']
data = [(
'2000-01-03 00:00:00', 2 ** 31 - 1, -1.987670),
('2000-01-04 00:00:00', -29, -0.0412318367011),
('2000-01-05 00:00:00', 20000, 0.731167677815),
('2000-01-06 00:00:00', -290867, 1.56762092543)]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table('types_test_data')
self._get_exec().execute(SQL_STRINGS['create_test_types'][self.flavor])
ins = SQL_STRINGS['insert_test_types'][self.flavor]
data = [(
'first', '2000-01-03 00:00:00', 535852800, 10.10, 1, False, 1, False),
('first', '2000-01-04 00:00:00', 1356998400, 10.10, 1, False, None, None)]
for d in data:
self._get_exec().execute(ins, d)
def _count_rows(self, table_name):
result = self._get_exec().execute(
"SELECT count(*) AS count_1 FROM %s" % table_name).fetchone()
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_sql("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS['read_parameters'][self.flavor]
params = ['Iris-setosa', 5.1]
iris_frame = self.pandasSQL.read_sql(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS['read_named_parameters'][self.flavor]
params = {'name': 'Iris-setosa', 'length': 5.1}
iris_frame = self.pandasSQL.read_sql(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(self.test_frame1, 'test_frame1')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
# Nuke table
self.drop_table('test_frame1')
def _to_sql_fail(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
self.assertRaises(ValueError, self.pandasSQL.to_sql,
self.test_frame1, 'test_frame1', if_exists='fail')
self.drop_table('test_frame1')
def _to_sql_replace(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='replace')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = len(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.drop_table('test_frame1')
def _to_sql_append(self):
# Nuke table just in case
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='append')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.drop_table('test_frame1')
def _roundtrip(self):
self.drop_table('test_frame_roundtrip')
self.pandasSQL.to_sql(self.test_frame1, 'test_frame_roundtrip')
result = self.pandasSQL.read_sql('SELECT * FROM test_frame_roundtrip')
result.set_index('level_0', inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
#------------------------------------------------------------------------------
#--- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
From this two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the legacy mode (`TestSQLLegacyApi`).
These tests are run with sqlite3. Specific tests for the different
sql flavours are included in `_TestSQLAlchemy`.
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use drop_table because that isn't part of the public api
"""
flavor = 'sqlite'
def setUp(self):
self.conn = self.connect()
self._load_iris_data()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query(
"SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_legacy_read_frame(self):
with tm.assert_produces_warning(FutureWarning):
iris_frame = sql.read_frame(
"SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, 'test_frame1', self.conn, flavor='sqlite')
self.assertTrue(
sql.has_table('test_frame1', self.conn, flavor='sqlite'), 'Table not written to DB')
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, 'test_frame2',
self.conn, flavor='sqlite', if_exists='fail')
self.assertTrue(
sql.has_table('test_frame2', self.conn, flavor='sqlite'), 'Table not written to DB')
self.assertRaises(ValueError, sql.to_sql, self.test_frame1,
'test_frame2', self.conn, flavor='sqlite', if_exists='fail')
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, flavor='sqlite', if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, flavor='sqlite', if_exists='replace')
self.assertTrue(
sql.has_table('test_frame3', self.conn, flavor='sqlite'),
'Table not written to DB')
num_entries = len(self.test_frame1)
num_rows = self._count_rows('test_frame3')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, flavor='sqlite', if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, flavor='sqlite', if_exists='append')
self.assertTrue(
sql.has_table('test_frame4', self.conn, flavor='sqlite'),
'Table not written to DB')
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows('test_frame4')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, 'test_frame5',
self.conn, flavor='sqlite', index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype='int64'), name='series')
sql.to_sql(s, "test_series", self.conn, flavor='sqlite', index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_to_sql_panel(self):
panel = tm.makePanel()
self.assertRaises(NotImplementedError, sql.to_sql, panel,
'test_panel', self.conn, flavor='sqlite')
def test_legacy_write_frame(self):
# Assume that functionality is already tested above so just do
# quick check that it basically works
with tm.assert_produces_warning(FutureWarning):
sql.write_frame(self.test_frame1, 'test_frame_legacy', self.conn,
flavor='sqlite')
self.assertTrue(
sql.has_table('test_frame_legacy', self.conn, flavor='sqlite'),
'Table not written to DB')
def test_roundtrip(self):
sql.to_sql(self.test_frame1, 'test_frame_roundtrip',
con=self.conn, flavor='sqlite')
result = sql.read_sql_query(
'SELECT * FROM test_frame_roundtrip',
con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index('level_0', inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def test_date_parsing(self):
# Test date parsing in read_sq
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
self.assertFalse(
issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates=['DateCol'])
self.assertTrue(
issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
self.assertTrue(
issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates=['IntDateCol'])
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates={'IntDateCol': 's'})
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
index_col='DateCol',
parse_dates=['DateCol', 'IntDateCol'])
self.assertTrue(issubclass(df.index.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_timedelta(self):
# see #6921
df = to_timedelta(Series(['00:00:01', '00:00:03'], name='foo')).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql('test_timedelta', self.conn)
result = sql.read_sql_query('SELECT * FROM test_timedelta', self.conn)
tm.assert_series_equal(result['foo'], df['foo'].astype('int64'))
def test_to_sql_index_label(self):
temp_frame = DataFrame({'col1': range(4)})
# no index name, defaults to 'index'
sql.to_sql(temp_frame, 'test_index_label', self.conn)
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'index')
# specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label='other_label')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'other_label',
"Specified index_label not written to database")
# using the index name
temp_frame.index.name = 'index_name'
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'index_name',
"Index name not written to database")
# has index name, but specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label='other_label')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'other_label',
"Specified index_label not written to database")
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame({'col1': range(4)},
index=MultiIndex.from_product([('A0', 'A1'), ('B0', 'B1')]))
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, 'test_index_label', self.conn)
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'level_0')
self.assertEqual(frame.columns[1], 'level_1')
# specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label=['A', 'B'])
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[:2].tolist(), ['A', 'B'],
"Specified index_labels not written to database")
# using the index name
temp_frame.index.names = ['A', 'B']
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[:2].tolist(), ['A', 'B'],
"Index names not written to database")
# has index name, but specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label=['C', 'D'])
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[:2].tolist(), ['C', 'D'],
"Specified index_labels not written to database")
# wrong length of index_label
self.assertRaises(ValueError, sql.to_sql, temp_frame,
'test_index_label', self.conn, if_exists='replace',
index_label='C')
def test_multiindex_roundtrip(self):
df = DataFrame.from_records([(1,2.1,'line1'), (2,1.5,'line2')],
columns=['A','B','C'], index=['A','B'])
df.to_sql('test_multiindex_roundtrip', self.conn)
result = sql.read_sql_query('SELECT * FROM test_multiindex_roundtrip',
self.conn, index_col=['A','B'])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn,
if_exists='replace')
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, 'test', 'sqlite',
con=self.conn)
self.assertTrue('CREATE' in create_sql)
class TestSQLApi(_TestSQLApi):
"""
Test the public API as it would be used directly
Tests for `read_sql_table` are included here, as this is specific for the
sqlalchemy mode.
"""
flavor = 'sqlite'
def connect(self):
if SQLALCHEMY_INSTALLED:
return sqlalchemy.create_engine('sqlite:///:memory:')
else:
raise nose.SkipTest('SQLAlchemy not installed')
def test_read_table_columns(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, 'test_frame', self.conn)
cols = ['A', 'B']
result = sql.read_sql_table('test_frame', self.conn, columns=cols)
self.assertEqual(result.columns.tolist(), cols,
"Columns not correctly selected")
def test_read_table_index_col(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, 'test_frame', self.conn)
result = sql.read_sql_table('test_frame', self.conn, index_col="index")
self.assertEqual(result.index.names, ["index"],
"index_col not correctly set")
result = sql.read_sql_table('test_frame', self.conn, index_col=["A", "B"])
self.assertEqual(result.index.names, ["A", "B"],
"index_col not correctly set")
result = sql.read_sql_table('test_frame', self.conn, index_col=["A", "B"],
columns=["C", "D"])
self.assertEqual(result.index.names, ["A", "B"],
"index_col not correctly set")
self.assertEqual(result.columns.tolist(), ["C", "D"],
"columns not set correctly whith index_col")
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query(
"SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql(
"SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
iris_frame1 = sql.read_sql_table('iris', self.conn)
iris_frame2 = sql.read_sql('iris', self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
def test_not_reflect_all_tables(self):
# create invalid table
qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);"""
self.conn.execute(qry)
qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);"""
self.conn.execute(qry)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
sql.read_sql_table('other_table', self.conn)
sql.read_sql_query('SELECT * FROM other_table', self.conn)
# Verify some things
self.assertEqual(len(w), 0, "Warning triggered for other table")
class TestSQLLegacyApi(_TestSQLApi):
    """
    Test the public legacy API (plain DBAPI connection, no SQLAlchemy).
    """
    flavor = 'sqlite'

    def connect(self, database=":memory:"):
        # plain sqlite3 DBAPI connection instead of an SQLAlchemy engine
        return sqlite3.connect(database)

    def test_sql_open_close(self):
        # Test if the IO in the database still work if the connection closed
        # between the writing and reading (as in many real situations).
        with tm.ensure_clean() as name:
            conn = self.connect(name)
            sql.to_sql(self.test_frame3, "test_frame3_legacy", conn,
                       flavor="sqlite", index=False)
            conn.close()
            conn = self.connect(name)
            result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;",
                                        conn)
            conn.close()
            tm.assert_frame_equal(self.test_frame3, result)

    def test_read_sql_delegate(self):
        # with a DBAPI connection read_sql can only delegate to
        # read_sql_query; passing a bare table name must raise
        iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
        iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
        tm.assert_frame_equal(iris_frame1, iris_frame2)
        self.assertRaises(sql.DatabaseError, sql.read_sql, 'iris', self.conn)

    def test_safe_names_warning(self):
        # GH 6798
        df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b '])  # has a space
        # warns on create table with spaces in names
        with tm.assert_produces_warning():
            sql.to_sql(df, "test_frame3_legacy", self.conn,
                       flavor="sqlite", index=False)

    def test_get_schema2(self):
        # without providing a connection object (available for backwards comp)
        create_sql = sql.get_schema(self.test_frame1, 'test', 'sqlite')
        self.assertTrue('CREATE' in create_sql)

    def test_tquery(self):
        # tquery is deprecated (FutureWarning) but must still return rows
        with tm.assert_produces_warning(FutureWarning):
            iris_results = sql.tquery("SELECT * FROM iris", con=self.conn)
        row = iris_results[0]
        tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])

    def test_uquery(self):
        # uquery is deprecated (FutureWarning); -1 is the value the legacy
        # API returns for a plain SELECT here
        with tm.assert_produces_warning(FutureWarning):
            rows = sql.uquery("SELECT * FROM iris LIMIT 1", con=self.conn)
        self.assertEqual(rows, -1)
#------------------------------------------------------------------------------
#--- Database flavor specific tests
class _TestSQLAlchemy(PandasSQLTest):
    """
    Base class for testing the sqlalchemy backend.

    Subclasses for specific database types are created below. Tests that
    deviate for each flavor are overwritten there.
    """
    flavor = None  # set by the flavor-specific subclasses

    def setUp(self):
        self.setup_import()
        self.setup_driver()
        self.setup_connect()
        self._load_iris_data()
        self._load_raw_sql()
        self._load_test1_data()

    def setup_import(self):
        # Skip this test if SQLAlchemy not available
        if not SQLALCHEMY_INSTALLED:
            raise nose.SkipTest('SQLAlchemy not installed')

    def setup_driver(self):
        # implemented by flavor-specific subclasses
        raise NotImplementedError()

    def connect(self):
        # implemented by flavor-specific subclasses
        raise NotImplementedError()

    def setup_connect(self):
        try:
            self.conn = self.connect()
            self.pandasSQL = sql.PandasSQLAlchemy(self.conn)
            # to test if connection can be made:
            self.conn.connect()
        except sqlalchemy.exc.OperationalError:
            raise nose.SkipTest("Can't connect to {0} server".format(self.flavor))

    def tearDown(self):
        raise NotImplementedError()

    def test_aread_sql(self):
        # NOTE(review): the leading 'a' in the name presumably makes this run
        # first alphabetically -- confirm before renaming
        self._read_sql_iris()

    def test_read_sql_parameter(self):
        self._read_sql_iris_parameter()

    def test_read_sql_named_parameter(self):
        self._read_sql_iris_named_parameter()

    def test_to_sql(self):
        self._to_sql()

    def test_to_sql_fail(self):
        self._to_sql_fail()

    def test_to_sql_replace(self):
        self._to_sql_replace()

    def test_to_sql_append(self):
        self._to_sql_append()

    def test_create_table(self):
        temp_conn = self.connect()
        temp_frame = DataFrame(
            {'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
        pandasSQL = sql.PandasSQLAlchemy(temp_conn)
        pandasSQL.to_sql(temp_frame, 'temp_frame')
        self.assertTrue(
            temp_conn.has_table('temp_frame'), 'Table not written to DB')

    def test_drop_table(self):
        temp_conn = self.connect()
        temp_frame = DataFrame(
            {'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
        pandasSQL = sql.PandasSQLAlchemy(temp_conn)
        pandasSQL.to_sql(temp_frame, 'temp_frame')
        self.assertTrue(
            temp_conn.has_table('temp_frame'), 'Table not written to DB')
        pandasSQL.drop_table('temp_frame')
        self.assertFalse(
            temp_conn.has_table('temp_frame'), 'Table not deleted from DB')

    def test_roundtrip(self):
        self._roundtrip()

    def test_execute_sql(self):
        self._execute_sql()

    def test_read_table(self):
        iris_frame = sql.read_sql_table("iris", con=self.conn)
        self._check_iris_loaded_frame(iris_frame)

    def test_read_table_columns(self):
        # NOTE(review): both entries are 'SepalLength' -- possibly one was
        # meant to be 'SepalWidth'; verify against the iris fixture
        iris_frame = sql.read_sql_table(
            "iris", con=self.conn, columns=['SepalLength', 'SepalLength'])
        tm.equalContents(
            iris_frame.columns.values, ['SepalLength', 'SepalLength'])

    def test_read_table_absent(self):
        # a missing table must raise ValueError, not fail obscurely
        self.assertRaises(
            ValueError, sql.read_sql_table, "this_doesnt_exist", con=self.conn)

    def test_default_type_conversion(self):
        df = sql.read_sql_table("types_test_data", self.conn)
        self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),
                        "FloatCol loaded with incorrect type")
        self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer),
                        "IntCol loaded with incorrect type")
        self.assertTrue(issubclass(df.BoolCol.dtype.type, np.bool_),
                        "BoolCol loaded with incorrect type")
        # Int column with NA values stays as float
        self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating),
                        "IntColWithNull loaded with incorrect type")
        # Bool column with NA values becomes object
        self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.object),
                        "BoolColWithNull loaded with incorrect type")

    def test_bigint(self):
        # int64 should be converted to BigInteger, GH7433
        df = DataFrame(data={'i64': [2**62]})
        df.to_sql('test_bigint', self.conn, index=False)
        result = sql.read_sql_table('test_bigint', self.conn)
        tm.assert_frame_equal(df, result)

    def test_default_date_load(self):
        df = sql.read_sql_table("types_test_data", self.conn)
        # IMPORTANT - sqlite has no native date type, so shouldn't parse, but
        # MySQL SHOULD be converted.
        self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
                        "DateCol loaded with incorrect type")

    def test_date_parsing(self):
        # No Parsing (result intentionally unused -- overwritten below)
        df = sql.read_sql_table("types_test_data", self.conn)
        # parse_dates as a list of column names
        df = sql.read_sql_table("types_test_data", self.conn,
                                parse_dates=['DateCol'])
        self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
                        "DateCol loaded with incorrect type")
        # parse_dates as {col: format string}
        df = sql.read_sql_table("types_test_data", self.conn,
                                parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
        self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
                        "DateCol loaded with incorrect type")
        # parse_dates as {col: {kwargs dict}}
        df = sql.read_sql_table("types_test_data", self.conn, parse_dates={
            'DateCol': {'format': '%Y-%m-%d %H:%M:%S'}})
        self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
                        "IntDateCol loaded with incorrect type")
        # integer epoch columns with unit variants
        df = sql.read_sql_table(
            "types_test_data", self.conn, parse_dates=['IntDateCol'])
        self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
                        "IntDateCol loaded with incorrect type")
        df = sql.read_sql_table(
            "types_test_data", self.conn, parse_dates={'IntDateCol': 's'})
        self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
                        "IntDateCol loaded with incorrect type")
        df = sql.read_sql_table(
            "types_test_data", self.conn, parse_dates={'IntDateCol': {'unit': 's'}})
        self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
                        "IntDateCol loaded with incorrect type")

    def test_datetime(self):
        if self.driver == 'pymysql':
            raise nose.SkipTest('writing datetime not working with pymysql')
        df = DataFrame({'A': date_range('2013-01-01 09:00:00', periods=3),
                        'B': np.arange(3.0)})
        df.to_sql('test_datetime', self.conn)
        # with read_table -> type information from schema used
        result = sql.read_sql_table('test_datetime', self.conn)
        result = result.drop('index', axis=1)
        tm.assert_frame_equal(result, df)
        # with read_sql -> no type information -> sqlite has no native
        result = sql.read_sql_query('SELECT * FROM test_datetime', self.conn)
        result = result.drop('index', axis=1)
        if self.flavor == 'sqlite':
            self.assertTrue(isinstance(result.loc[0, 'A'], string_types))
            result['A'] = to_datetime(result['A'])
            tm.assert_frame_equal(result, df)
        else:
            tm.assert_frame_equal(result, df)

    def test_datetime_NaT(self):
        # status:
        # - postgresql: gives error on inserting "0001-255-255T00:00:00"
        # - sqlite3: works, but reading it with query returns '-001--1--1 -1:-1:-1.-00001'
        if self.driver == 'pymysql':
            raise nose.SkipTest('writing datetime not working with pymysql')
        if self.driver == 'psycopg2':
            raise nose.SkipTest('writing datetime NaT not working with psycopg2')
        if self.flavor == 'sqlite':
            raise nose.SkipTest('reading datetime NaT not working with sqlite')
        df = DataFrame({'A': date_range('2013-01-01 09:00:00', periods=3),
                        'B': np.arange(3.0)})
        df.loc[1, 'A'] = np.nan
        df.to_sql('test_datetime', self.conn, index=False)
        # with read_table -> type information from schema used
        result = sql.read_sql_table('test_datetime', self.conn)
        tm.assert_frame_equal(result, df)
        # with read_sql -> no type information -> sqlite has no native
        result = sql.read_sql_query('SELECT * FROM test_datetime', self.conn)
        if self.flavor == 'sqlite':
            self.assertTrue(isinstance(result.loc[0, 'A'], string_types))
            result['A'] = to_datetime(result['A'], coerce=True)
            tm.assert_frame_equal(result, df)
        else:
            tm.assert_frame_equal(result, df)

    def test_mixed_dtype_insert(self):
        # see GH6509
        s1 = Series(2**25 + 1, dtype=np.int32)
        s2 = Series(0.0, dtype=np.float32)
        df = DataFrame({'s1': s1, 's2': s2})
        # write and read again
        df.to_sql("test_read_write", self.conn, index=False)
        df2 = sql.read_sql_table("test_read_write", self.conn)
        tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True)

    def test_nan_numeric(self):
        if self.driver == 'pymysql':
            raise nose.SkipTest('writing NaNs not working with pymysql')
        # NaNs in numeric float column
        df = DataFrame({'A': [0, 1, 2], 'B': [0.2, np.nan, 5.6]})
        df.to_sql('test_nan', self.conn, index=False)
        # with read_table
        result = sql.read_sql_table('test_nan', self.conn)
        tm.assert_frame_equal(result, df)
        # with read_sql
        result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
        tm.assert_frame_equal(result, df)

    def test_nan_fullcolumn(self):
        if self.driver == 'pymysql':
            raise nose.SkipTest('writing NaNs not working with pymysql')
        # full NaN column (numeric float column)
        df = DataFrame({'A': [0, 1, 2], 'B': [np.nan, np.nan, np.nan]})
        df.to_sql('test_nan', self.conn, index=False)
        if self.flavor == 'sqlite':
            # sqlite round-trips an all-NaN column as None/object
            df['B'] = df['B'].astype('object')
            df['B'] = None
        # with read_table
        result = sql.read_sql_table('test_nan', self.conn)
        tm.assert_frame_equal(result, df)
        # with read_sql
        result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
        tm.assert_frame_equal(result, df)

    def test_nan_string(self):
        if self.driver == 'pymysql':
            raise nose.SkipTest('writing NaNs not working with pymysql')
        # NaNs in string column
        df = DataFrame({'A': [0, 1, 2], 'B': ['a', 'b', np.nan]})
        df.to_sql('test_nan', self.conn, index=False)
        if self.flavor == 'sqlite':
            df.loc[2, 'B'] = None
        elif self.flavor == 'postgresql':
            df = df.fillna('NaN')
        # with read_table
        result = sql.read_sql_table('test_nan', self.conn)
        tm.assert_frame_equal(result, df)
        # with read_sql
        result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
        tm.assert_frame_equal(result, df)
class TestSQLiteAlchemy(_TestSQLAlchemy):
    """
    Test the sqlalchemy backend against an in-memory sqlite database.
    """
    flavor = 'sqlite'

    def connect(self):
        return sqlalchemy.create_engine('sqlite:///:memory:')

    def setup_driver(self):
        # sqlite3 is built-in
        self.driver = None

    def tearDown(self):
        # in memory so tables should not be removed explicitly
        pass

    def test_default_type_conversion(self):
        # overridden: sqlite has weaker typing than the base expectations
        df = sql.read_sql_table("types_test_data", self.conn)
        self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),
                        "FloatCol loaded with incorrect type")
        self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer),
                        "IntCol loaded with incorrect type")
        # sqlite has no boolean type, so integer type is returned
        self.assertTrue(issubclass(df.BoolCol.dtype.type, np.integer),
                        "BoolCol loaded with incorrect type")
        # Int column with NA values stays as float
        self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating),
                        "IntColWithNull loaded with incorrect type")
        # Non-native Bool column with NA values stays as float
        self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.floating),
                        "BoolColWithNull loaded with incorrect type")

    def test_default_date_load(self):
        df = sql.read_sql_table("types_test_data", self.conn)
        # IMPORTANT - sqlite has no native date type, so shouldn't parse, but
        self.assertFalse(issubclass(df.DateCol.dtype.type, np.datetime64),
                         "DateCol loaded with incorrect type")

    def test_bigint_warning(self):
        # test no warning for BIGINT (to support int64) is raised (GH7433)
        df = DataFrame({'a': [1, 2]}, dtype='int64')
        df.to_sql('test_bigintwarning', self.conn, index=False)
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            sql.read_sql_table('test_bigintwarning', self.conn)
            self.assertEqual(len(w), 0, "Warning triggered for other table")
class TestMySQLAlchemy(_TestSQLAlchemy):
    """
    Test the sqlalchemy backend against an MySQL database.
    """
    flavor = 'mysql'

    def connect(self):
        return sqlalchemy.create_engine(
            'mysql+{driver}://root@localhost/pandas_nosetest'.format(driver=self.driver))

    def setup_driver(self):
        try:
            import pymysql
            self.driver = 'pymysql'
        except ImportError:
            raise nose.SkipTest('pymysql not installed')

    def tearDown(self):
        # drop every table created during the test
        c = self.conn.execute('SHOW TABLES')
        for table in c.fetchall():
            self.conn.execute('DROP TABLE %s' % table[0])

    def test_default_type_conversion(self):
        # overridden for MySQL-specific type mapping
        df = sql.read_sql_table("types_test_data", self.conn)
        self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),
                        "FloatCol loaded with incorrect type")
        self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer),
                        "IntCol loaded with incorrect type")
        # MySQL has no real BOOL type (it's an alias for TINYINT)
        self.assertTrue(issubclass(df.BoolCol.dtype.type, np.integer),
                        "BoolCol loaded with incorrect type")
        # Int column with NA values stays as float
        self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating),
                        "IntColWithNull loaded with incorrect type")
        # Bool column with NA = int column with NA values => becomes float
        self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.floating),
                        "BoolColWithNull loaded with incorrect type")

    def test_read_procedure(self):
        # see GH7324. Although it is more an api test, it is added to the
        # mysql tests as sqlite does not have stored procedures
        df = DataFrame({'a': [1, 2, 3], 'b': [0.1, 0.2, 0.3]})
        df.to_sql('test_procedure', self.conn, index=False)
        proc = """DROP PROCEDURE IF EXISTS get_testdb;
        CREATE PROCEDURE get_testdb ()
        BEGIN
            SELECT * FROM test_procedure;
        END"""
        connection = self.conn.connect()
        trans = connection.begin()
        try:
            r1 = connection.execute(proc)
            trans.commit()
        except:
            # bare except is deliberate here: roll back on *any* failure,
            # then re-raise the original exception unchanged
            trans.rollback()
            raise
        res1 = sql.read_sql_query("CALL get_testdb();", self.conn)
        tm.assert_frame_equal(df, res1)
        # test delegation to read_sql_query
        res2 = sql.read_sql("CALL get_testdb();", self.conn)
        tm.assert_frame_equal(df, res2)
class TestPostgreSQLAlchemy(_TestSQLAlchemy):
    """
    Test the sqlalchemy backend against an PostgreSQL database.
    """
    flavor = 'postgresql'

    def connect(self):
        return sqlalchemy.create_engine(
            'postgresql+{driver}://postgres@localhost/pandas_nosetest'.format(driver=self.driver))

    def setup_driver(self):
        try:
            import psycopg2
            self.driver = 'psycopg2'
        except ImportError:
            raise nose.SkipTest('psycopg2 not installed')

    def tearDown(self):
        # drop every table created in the public schema during the test
        c = self.conn.execute(
            "SELECT table_name FROM information_schema.tables"
            " WHERE table_schema = 'public'")
        for table in c.fetchall():
            self.conn.execute("DROP TABLE %s" % table[0])
#------------------------------------------------------------------------------
#--- Test Sqlite / MySQL fallback
class TestSQLiteLegacy(PandasSQLTest):
    """
    Test the legacy mode against an in-memory sqlite database.
    """
    flavor = 'sqlite'

    def connect(self):
        return sqlite3.connect(':memory:')

    def drop_table(self, table_name):
        cur = self.conn.cursor()
        cur.execute("DROP TABLE IF EXISTS %s" % table_name)
        self.conn.commit()

    def setUp(self):
        self.conn = self.connect()
        self.pandasSQL = sql.PandasSQLLegacy(self.conn, 'sqlite')
        self._load_iris_data()
        self._load_test1_data()

    def test_invalid_flavor(self):
        # only sqlite/mysql are supported by the legacy layer
        self.assertRaises(
            NotImplementedError, sql.PandasSQLLegacy, self.conn, 'oracle')

    def test_read_sql(self):
        self._read_sql_iris()

    def test_read_sql_parameter(self):
        self._read_sql_iris_parameter()

    def test_read_sql_named_parameter(self):
        self._read_sql_iris_named_parameter()

    def test_to_sql(self):
        self._to_sql()

    def test_to_sql_fail(self):
        self._to_sql_fail()

    def test_to_sql_replace(self):
        self._to_sql_replace()

    def test_to_sql_append(self):
        self._to_sql_append()

    def test_create_and_drop_table(self):
        temp_frame = DataFrame(
            {'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
        self.pandasSQL.to_sql(temp_frame, 'drop_test_frame')
        self.assertTrue(self.pandasSQL.has_table('drop_test_frame'),
                        'Table not written to DB')
        self.pandasSQL.drop_table('drop_test_frame')
        self.assertFalse(self.pandasSQL.has_table('drop_test_frame'),
                        'Table not deleted from DB')

    def test_roundtrip(self):
        self._roundtrip()

    def test_execute_sql(self):
        self._execute_sql()
class TestMySQLLegacy(TestSQLiteLegacy):
    """
    Test the legacy mode against a MySQL database.
    """
    flavor = 'mysql'

    def drop_table(self, table_name):
        # NOTE: same body as the sqlite parent; kept for explicitness
        cur = self.conn.cursor()
        cur.execute("DROP TABLE IF EXISTS %s" % table_name)
        self.conn.commit()

    def _count_rows(self, table_name):
        cur = self._get_exec()
        cur.execute(
            "SELECT count(*) AS count_1 FROM %s" % table_name)
        rows = cur.fetchall()
        return rows[0][0]

    def connect(self):
        return self.driver.connect(host='127.0.0.1', user='root', passwd='', db='pandas_nosetest')

    def setUp(self):
        try:
            import pymysql
            self.driver = pymysql
        except ImportError:
            raise nose.SkipTest('pymysql not installed')
        try:
            self.conn = self.connect()
        except self.driver.err.OperationalError:
            raise nose.SkipTest("Can't connect to MySQL server")
        self.pandasSQL = sql.PandasSQLLegacy(self.conn, 'mysql')
        self._load_iris_data()
        self._load_test1_data()

    def tearDown(self):
        # drop all tables and close the connection
        c = self.conn.cursor()
        c.execute('SHOW TABLES')
        for table in c.fetchall():
            c.execute('DROP TABLE %s' % table[0])
        self.conn.commit()
        self.conn.close()

    def test_a_deprecation(self):
        # legacy flavor-based to_sql is deprecated (FutureWarning)
        with tm.assert_produces_warning(FutureWarning):
            sql.to_sql(self.test_frame1, 'test_frame1', self.conn,
                       flavor='mysql')
        self.assertTrue(
            sql.has_table('test_frame1', self.conn, flavor='mysql'),
            'Table not written to DB')
#------------------------------------------------------------------------------
#--- Old tests from 0.13.1 (before refactor using sqlalchemy)
# Map from Python value type to a callable rendering that value as a SQL
# literal string; used by format_query below (old pre-refactor test helpers).
_formatters = {
    datetime: lambda dt: "'%s'" % date_format(dt),
    str: lambda x: "'%s'" % x,
    np.str_: lambda x: "'%s'" % x,
    compat.text_type: lambda x: "'%s'" % x,
    compat.binary_type: lambda x: "'%s'" % x,
    float: lambda x: "%.8f" % x,
    int: lambda x: "%s" % x,
    type(None): lambda x: "NULL",
    np.float64: lambda x: "%.10f" % x,
    bool: lambda x: "'%s'" % x,
}
def format_query(sql, *args):
    """Interpolate ``args`` into the ``%s`` placeholders of ``sql``,
    rendering each value as a SQL literal via the module-level
    ``_formatters`` table.  Float NaN values are rendered as NULL.
    """
    rendered = []
    for value in args:
        if isinstance(value, float) and isnull(value):
            # NaN has no SQL literal; route it to the NULL formatter
            value = None
        rendered.append(_formatters[type(value)](value))
    return sql % tuple(rendered)
def _skip_if_no_pymysql():
    """Skip the calling test when the pymysql driver cannot be imported."""
    try:
        __import__('pymysql')
    except ImportError:
        raise nose.SkipTest('pymysql not installed, skipping')
class TestXSQLite(tm.TestCase):
    """Old tests from 0.13.1 exercising the legacy write_frame/read_frame
    API against an in-memory sqlite database."""

    def setUp(self):
        self.db = sqlite3.connect(':memory:')

    def test_basic(self):
        frame = tm.makeTimeDataFrame()
        self._check_roundtrip(frame)

    def test_write_row_by_row(self):
        frame = tm.makeTimeDataFrame()
        frame.ix[0, 0] = np.nan  # .ix is the pre-.loc indexer of this era
        create_sql = sql.get_schema(frame, 'test', 'sqlite')
        cur = self.db.cursor()
        cur.execute(create_sql)
        cur = self.db.cursor()
        ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
        for idx, row in frame.iterrows():
            fmt_sql = format_query(ins, *row)
            sql.tquery(fmt_sql, cur=cur)
        self.db.commit()
        result = sql.read_frame("select * from test", con=self.db)
        result.index = frame.index
        tm.assert_frame_equal(result, frame)

    def test_execute(self):
        frame = tm.makeTimeDataFrame()
        create_sql = sql.get_schema(frame, 'test', 'sqlite')
        cur = self.db.cursor()
        cur.execute(create_sql)
        ins = "INSERT INTO test VALUES (?, ?, ?, ?)"
        row = frame.ix[0]
        sql.execute(ins, self.db, params=tuple(row))
        self.db.commit()
        result = sql.read_frame("select * from test", self.db)
        result.index = frame.index[:1]
        tm.assert_frame_equal(result, frame[:1])

    def test_schema(self):
        frame = tm.makeTimeDataFrame()
        create_sql = sql.get_schema(frame, 'test', 'sqlite')
        lines = create_sql.splitlines()
        for l in lines:
            tokens = l.split(' ')
            if len(tokens) == 2 and tokens[0] == 'A':
                self.assertTrue(tokens[1] == 'DATETIME')
        frame = tm.makeTimeDataFrame()
        create_sql = sql.get_schema(frame, 'test', 'sqlite', keys=['A', 'B'],)
        lines = create_sql.splitlines()  # NOTE(review): unused; assert below uses create_sql
        self.assertTrue('PRIMARY KEY (A,B)' in create_sql)
        cur = self.db.cursor()
        cur.execute(create_sql)

    def test_execute_fail(self):
        create_sql = """
        CREATE TABLE test
        (
        a TEXT,
        b TEXT,
        c REAL,
        PRIMARY KEY (a, b)
        );
        """
        cur = self.db.cursor()
        cur.execute(create_sql)
        sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.db)
        sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.db)
        # duplicate primary key must raise; stdout is silenced because the
        # legacy layer prints the failing statement
        try:
            sys.stdout = StringIO()
            self.assertRaises(Exception, sql.execute,
                              'INSERT INTO test VALUES("foo", "bar", 7)',
                              self.db)
        finally:
            sys.stdout = sys.__stdout__

    def test_execute_closed_connection(self):
        create_sql = """
        CREATE TABLE test
        (
        a TEXT,
        b TEXT,
        c REAL,
        PRIMARY KEY (a, b)
        );
        """
        cur = self.db.cursor()
        cur.execute(create_sql)
        sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.db)
        self.db.close()
        # querying a closed connection must raise
        try:
            sys.stdout = StringIO()
            self.assertRaises(Exception, sql.tquery, "select * from test",
                              con=self.db)
        finally:
            sys.stdout = sys.__stdout__

    def test_na_roundtrip(self):
        pass

    def _check_roundtrip(self, frame):
        sql.write_frame(frame, name='test_table', con=self.db)
        result = sql.read_frame("select * from test_table", self.db)
        # HACK! Change this once indexes are handled properly.
        result.index = frame.index
        expected = frame
        tm.assert_frame_equal(result, expected)
        frame['txt'] = ['a'] * len(frame)
        frame2 = frame.copy()
        frame2['Idx'] = Index(lrange(len(frame2))) + 10
        sql.write_frame(frame2, name='test_table2', con=self.db)
        result = sql.read_frame("select * from test_table2", self.db,
                                index_col='Idx')
        expected = frame.copy()
        expected.index = Index(lrange(len(frame2))) + 10
        expected.index.name = 'Idx'
        tm.assert_frame_equal(expected, result)

    def test_tquery(self):
        frame = tm.makeTimeDataFrame()
        sql.write_frame(frame, name='test_table', con=self.db)
        result = sql.tquery("select A from test_table", self.db)
        expected = frame.A
        result = Series(result, frame.index)
        tm.assert_series_equal(result, expected)
        # bad table name must raise DatabaseError, with or without retry
        try:
            sys.stdout = StringIO()
            self.assertRaises(sql.DatabaseError, sql.tquery,
                              'select * from blah', con=self.db)
            self.assertRaises(sql.DatabaseError, sql.tquery,
                              'select * from blah', con=self.db, retry=True)
        finally:
            sys.stdout = sys.__stdout__

    def test_uquery(self):
        frame = tm.makeTimeDataFrame()
        sql.write_frame(frame, name='test_table', con=self.db)
        stmt = 'INSERT INTO test_table VALUES(2.314, -123.1, 1.234, 2.3)'
        self.assertEqual(sql.uquery(stmt, con=self.db), 1)
        try:
            sys.stdout = StringIO()
            self.assertRaises(sql.DatabaseError, sql.tquery,
                              'insert into blah values (1)', con=self.db)
            self.assertRaises(sql.DatabaseError, sql.tquery,
                              'insert into blah values (1)', con=self.db,
                              retry=True)
        finally:
            sys.stdout = sys.__stdout__

    def test_keyword_as_column_names(self):
        '''
        A SQL keyword ("From") used as a column name must round-trip.
        '''
        df = DataFrame({'From': np.ones(5)})
        sql.write_frame(df, con=self.db, name='testkeywords')

    def test_onecolumn_of_integer(self):
        # GH 3628
        # a column_of_integers dataframe should transfer well to sql
        mono_df = DataFrame([1, 2], columns=['c0'])
        sql.write_frame(mono_df, con=self.db, name='mono_df')
        # computing the sum via sql
        con_x = self.db
        the_sum = sum([my_c0[0] for my_c0 in con_x.execute("select * from mono_df")])
        # it should not fail, and gives 3 ( Issue #3628 )
        self.assertEqual(the_sum, 3)
        result = sql.read_frame("select * from mono_df", con_x)
        tm.assert_frame_equal(result, mono_df)

    def test_if_exists(self):
        df_if_exists_1 = DataFrame({'col1': [1, 2], 'col2': ['A', 'B']})
        df_if_exists_2 = DataFrame({'col1': [3, 4, 5], 'col2': ['C', 'D', 'E']})
        table_name = 'table_if_exists'
        sql_select = "SELECT * FROM %s" % table_name

        def clean_up(test_table_to_drop):
            """
            Drops tables created from individual tests
            so no dependencies arise from sequential tests
            """
            if sql.table_exists(test_table_to_drop, self.db, flavor='sqlite'):
                cur = self.db.cursor()
                cur.execute("DROP TABLE %s" % test_table_to_drop)
                cur.close()

        # test if invalid value for if_exists raises appropriate error
        self.assertRaises(ValueError,
                          sql.write_frame,
                          frame=df_if_exists_1,
                          con=self.db,
                          name=table_name,
                          flavor='sqlite',
                          if_exists='notvalidvalue')
        clean_up(table_name)
        # test if_exists='fail'
        sql.write_frame(frame=df_if_exists_1, con=self.db, name=table_name,
                        flavor='sqlite', if_exists='fail')
        self.assertRaises(ValueError,
                          sql.write_frame,
                          frame=df_if_exists_1,
                          con=self.db,
                          name=table_name,
                          flavor='sqlite',
                          if_exists='fail')
        # test if_exists='replace'
        sql.write_frame(frame=df_if_exists_1, con=self.db, name=table_name,
                        flavor='sqlite', if_exists='replace')
        self.assertEqual(sql.tquery(sql_select, con=self.db),
                         [(1, 'A'), (2, 'B')])
        sql.write_frame(frame=df_if_exists_2, con=self.db, name=table_name,
                        flavor='sqlite', if_exists='replace')
        self.assertEqual(sql.tquery(sql_select, con=self.db),
                         [(3, 'C'), (4, 'D'), (5, 'E')])
        clean_up(table_name)
        # test if_exists='append' (first write uses 'fail' on a clean table
        # just to create it)
        sql.write_frame(frame=df_if_exists_1, con=self.db, name=table_name,
                        flavor='sqlite', if_exists='fail')
        self.assertEqual(sql.tquery(sql_select, con=self.db),
                         [(1, 'A'), (2, 'B')])
        sql.write_frame(frame=df_if_exists_2, con=self.db, name=table_name,
                        flavor='sqlite', if_exists='append')
        self.assertEqual(sql.tquery(sql_select, con=self.db),
                         [(1, 'A'), (2, 'B'), (3, 'C'), (4, 'D'), (5, 'E')])
        clean_up(table_name)
class TestXMySQL(tm.TestCase):
    def setUp(self):
        """Connect to the test MySQL server, trying Travis defaults first,
        then a [pandas] group in the user's mysql defaults file."""
        _skip_if_no_pymysql()
        import pymysql
        try:
            # Try Travis defaults.
            # No real user should allow root access with a blank password.
            self.db = pymysql.connect(host='localhost', user='root', passwd='',
                                      db='pandas_nosetest')
        except:
            # fall through to the defaults-file attempt below
            pass
        else:
            return
        try:
            self.db = pymysql.connect(read_default_group='pandas')
        except pymysql.ProgrammingError as e:
            raise nose.SkipTest(
                "Create a group of connection parameters under the heading "
                "[pandas] in your system's mysql default file, "
                "typically located at ~/.my.cnf or /etc/.my.cnf. ")
        except pymysql.Error as e:
            raise nose.SkipTest(
                "Cannot connect to database. "
                "Create a group of connection parameters under the heading "
                "[pandas] in your system's mysql default file, "
                "typically located at ~/.my.cnf or /etc/.my.cnf. ")
    def test_basic(self):
        # simple write/read round-trip through the shared helper
        _skip_if_no_pymysql()
        frame = tm.makeTimeDataFrame()
        self._check_roundtrip(frame)
    def test_write_row_by_row(self):
        # insert each row individually via tquery, including a NaN cell
        _skip_if_no_pymysql()
        frame = tm.makeTimeDataFrame()
        frame.ix[0, 0] = np.nan
        drop_sql = "DROP TABLE IF EXISTS test"
        create_sql = sql.get_schema(frame, 'test', 'mysql')
        cur = self.db.cursor()
        cur.execute(drop_sql)
        cur.execute(create_sql)
        ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
        for idx, row in frame.iterrows():
            fmt_sql = format_query(ins, *row)
            sql.tquery(fmt_sql, cur=cur)
        self.db.commit()
        result = sql.read_frame("select * from test", con=self.db)
        result.index = frame.index
        tm.assert_frame_equal(result, frame)
    def test_execute(self):
        # parameterized insert of a single row through sql.execute
        _skip_if_no_pymysql()
        frame = tm.makeTimeDataFrame()
        drop_sql = "DROP TABLE IF EXISTS test"
        create_sql = sql.get_schema(frame, 'test', 'mysql')
        cur = self.db.cursor()
        with warnings.catch_warnings():
            # MySQL warns when dropping a non-existent table
            warnings.filterwarnings("ignore", "Unknown table.*")
            cur.execute(drop_sql)
        cur.execute(create_sql)
        ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
        row = frame.ix[0].values.tolist()
        sql.execute(ins, self.db, params=tuple(row))
        self.db.commit()
        result = sql.read_frame("select * from test", self.db)
        result.index = frame.index[:1]
        tm.assert_frame_equal(result, frame[:1])
    def test_schema(self):
        # generated MySQL schema: datetime index column and PRIMARY KEY clause
        _skip_if_no_pymysql()
        frame = tm.makeTimeDataFrame()
        create_sql = sql.get_schema(frame, 'test', 'mysql')
        lines = create_sql.splitlines()
        for l in lines:
            tokens = l.split(' ')
            if len(tokens) == 2 and tokens[0] == 'A':
                self.assertTrue(tokens[1] == 'DATETIME')
        frame = tm.makeTimeDataFrame()
        drop_sql = "DROP TABLE IF EXISTS test"
        create_sql = sql.get_schema(frame, 'test', 'mysql', keys=['A', 'B'],)
        lines = create_sql.splitlines()  # NOTE(review): unused; assert below uses create_sql
        self.assertTrue('PRIMARY KEY (A,B)' in create_sql)
        cur = self.db.cursor()
        cur.execute(drop_sql)
        cur.execute(create_sql)
    def test_execute_fail(self):
        # duplicate primary key must raise; stdout silenced because the
        # legacy layer prints the failing statement
        _skip_if_no_pymysql()
        drop_sql = "DROP TABLE IF EXISTS test"
        create_sql = """
        CREATE TABLE test
        (
        a TEXT,
        b TEXT,
        c REAL,
        PRIMARY KEY (a(5), b(5))
        );
        """
        cur = self.db.cursor()
        cur.execute(drop_sql)
        cur.execute(create_sql)
        sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.db)
        sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.db)
        try:
            sys.stdout = StringIO()
            self.assertRaises(Exception, sql.execute,
                              'INSERT INTO test VALUES("foo", "bar", 7)',
                              self.db)
        finally:
            sys.stdout = sys.__stdout__
    def test_execute_closed_connection(self):
        # querying a closed connection must raise
        _skip_if_no_pymysql()
        drop_sql = "DROP TABLE IF EXISTS test"
        create_sql = """
        CREATE TABLE test
        (
        a TEXT,
        b TEXT,
        c REAL,
        PRIMARY KEY (a(5), b(5))
        );
        """
        cur = self.db.cursor()
        cur.execute(drop_sql)
        cur.execute(create_sql)
        sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.db)
        self.db.close()
        try:
            sys.stdout = StringIO()
            self.assertRaises(Exception, sql.tquery, "select * from test",
                              con=self.db)
        finally:
            sys.stdout = sys.__stdout__
    def test_na_roundtrip(self):
        # placeholder: NA round-trip not implemented for this backend
        _skip_if_no_pymysql()
        pass
    def _check_roundtrip(self, frame):
        """Write *frame* with write_frame, read it back with read_frame, and
        compare; also round-trips a copy with an explicit 'Idx' column used
        as index_col on read."""
        _skip_if_no_pymysql()
        drop_sql = "DROP TABLE IF EXISTS test_table"
        cur = self.db.cursor()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", "Unknown table.*")
            cur.execute(drop_sql)
        sql.write_frame(frame, name='test_table', con=self.db, flavor='mysql')
        result = sql.read_frame("select * from test_table", self.db)
        # HACK! Change this once indexes are handled properly.
        result.index = frame.index
        result.index.name = frame.index.name
        expected = frame
        tm.assert_frame_equal(result, expected)
        frame['txt'] = ['a'] * len(frame)
        frame2 = frame.copy()
        index = Index(lrange(len(frame2))) + 10
        frame2['Idx'] = index
        drop_sql = "DROP TABLE IF EXISTS test_table2"
        cur = self.db.cursor()
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", "Unknown table.*")
            cur.execute(drop_sql)
        sql.write_frame(frame2, name='test_table2', con=self.db, flavor='mysql')
        result = sql.read_frame("select * from test_table2", self.db,
                                index_col='Idx')
        expected = frame.copy()
        # HACK! Change this once indexes are handled properly.
        expected.index = index
        expected.index.names = result.index.names
        tm.assert_frame_equal(expected, result)
    def test_tquery(self):
        # tquery returns rows; querying a missing table raises DatabaseError
        try:
            import pymysql
        except ImportError:
            raise nose.SkipTest("no pymysql")
        frame = tm.makeTimeDataFrame()
        drop_sql = "DROP TABLE IF EXISTS test_table"
        cur = self.db.cursor()
        cur.execute(drop_sql)
        sql.write_frame(frame, name='test_table', con=self.db, flavor='mysql')
        result = sql.tquery("select A from test_table", self.db)
        expected = frame.A
        result = Series(result, frame.index)
        tm.assert_series_equal(result, expected)
        try:
            sys.stdout = StringIO()
            self.assertRaises(sql.DatabaseError, sql.tquery,
                              'select * from blah', con=self.db)
            self.assertRaises(sql.DatabaseError, sql.tquery,
                              'select * from blah', con=self.db, retry=True)
        finally:
            sys.stdout = sys.__stdout__
    def test_uquery(self):
        # uquery returns the affected row count for an INSERT
        try:
            import pymysql
        except ImportError:
            raise nose.SkipTest("no pymysql")
        frame = tm.makeTimeDataFrame()
        drop_sql = "DROP TABLE IF EXISTS test_table"
        cur = self.db.cursor()
        cur.execute(drop_sql)
        sql.write_frame(frame, name='test_table', con=self.db, flavor='mysql')
        stmt = 'INSERT INTO test_table VALUES(2.314, -123.1, 1.234, 2.3)'
        self.assertEqual(sql.uquery(stmt, con=self.db), 1)
        try:
            sys.stdout = StringIO()
            self.assertRaises(sql.DatabaseError, sql.tquery,
                              'insert into blah values (1)', con=self.db)
            self.assertRaises(sql.DatabaseError, sql.tquery,
                              'insert into blah values (1)', con=self.db,
                              retry=True)
        finally:
            sys.stdout = sys.__stdout__
def test_keyword_as_column_names(self):
'''
'''
_skip_if_no_pymysql()
df = DataFrame({'From':np.ones(5)})
sql.write_frame(df, con = self.db, name = 'testkeywords',
if_exists='replace', flavor='mysql')
    def test_if_exists(self):
        """Exercise write_frame's if_exists modes: an invalid value, 'fail',
        'replace' and 'append', verifying table contents after each write."""
        _skip_if_no_pymysql()
        df_if_exists_1 = DataFrame({'col1': [1, 2], 'col2': ['A', 'B']})
        df_if_exists_2 = DataFrame({'col1': [3, 4, 5], 'col2': ['C', 'D', 'E']})
        table_name = 'table_if_exists'
        sql_select = "SELECT * FROM %s" % table_name
        def clean_up(test_table_to_drop):
            """
            Drops tables created from individual tests
            so no dependencies arise from sequential tests
            """
            if sql.table_exists(test_table_to_drop, self.db, flavor='mysql'):
                cur = self.db.cursor()
                cur.execute("DROP TABLE %s" % test_table_to_drop)
                cur.close()
        # test if invalid value for if_exists raises appropriate error
        self.assertRaises(ValueError,
                          sql.write_frame,
                          frame=df_if_exists_1,
                          con=self.db,
                          name=table_name,
                          flavor='mysql',
                          if_exists='notvalidvalue')
        clean_up(table_name)
        # test if_exists='fail'
        sql.write_frame(frame=df_if_exists_1, con=self.db, name=table_name,
                        flavor='mysql', if_exists='fail')
        # a second write to the now-existing table must raise
        self.assertRaises(ValueError,
                          sql.write_frame,
                          frame=df_if_exists_1,
                          con=self.db,
                          name=table_name,
                          flavor='mysql',
                          if_exists='fail')
        # test if_exists='replace'
        sql.write_frame(frame=df_if_exists_1, con=self.db, name=table_name,
                        flavor='mysql', if_exists='replace')
        self.assertEqual(sql.tquery(sql_select, con=self.db),
                         [(1, 'A'), (2, 'B')])
        sql.write_frame(frame=df_if_exists_2, con=self.db, name=table_name,
                        flavor='mysql', if_exists='replace')
        self.assertEqual(sql.tquery(sql_select, con=self.db),
                         [(3, 'C'), (4, 'D'), (5, 'E')])
        clean_up(table_name)
        # test if_exists='append'
        # (the table was just dropped, so the first write uses 'fail' simply
        # to create it fresh before appending)
        sql.write_frame(frame=df_if_exists_1, con=self.db, name=table_name,
                        flavor='mysql', if_exists='fail')
        self.assertEqual(sql.tquery(sql_select, con=self.db),
                         [(1, 'A'), (2, 'B')])
        sql.write_frame(frame=df_if_exists_2, con=self.db, name=table_name,
                        flavor='mysql', if_exists='append')
        self.assertEqual(sql.tquery(sql_select, con=self.db),
                         [(1, 'A'), (2, 'B'), (3, 'C'), (4, 'D'), (5, 'E')])
        clean_up(table_name)
# Allow running this test module directly through nose (verbose, stop on
# first failure, drop into pdb on errors/failures).
if __name__ == '__main__':
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| 36.704284 | 99 | 0.581535 |
f86d3eb8a890ca570b6e9594ea9185dc5ef2c5cb | 8,403 | py | Python | sdk/python/pulumi_azure_nextgen/compute/v20191201/proximity_placement_group.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/compute/v20191201/proximity_placement_group.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/compute/v20191201/proximity_placement_group.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ProximityPlacementGroup']
class ProximityPlacementGroup(pulumi.CustomResource):
    """Specifies information about the proximity placement group.

    Auto-generated Pulumi resource wrapper: inputs are registered with the
    engine in __init__ and resource state is surfaced through the read-only
    properties below.
    """
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 colocation_status: Optional[pulumi.Input[pulumi.InputType['InstanceViewStatusArgs']]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 proximity_placement_group_name: Optional[pulumi.Input[str]] = None,
                 proximity_placement_group_type: Optional[pulumi.Input[Union[str, 'ProximityPlacementGroupType']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Specifies information about the proximity placement group.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['InstanceViewStatusArgs']] colocation_status: Describes colocation status of the Proximity Placement Group.
        :param pulumi.Input[str] location: Resource location
        :param pulumi.Input[str] proximity_placement_group_name: The name of the proximity placement group.
        :param pulumi.Input[Union[str, 'ProximityPlacementGroupType']] proximity_placement_group_type: Specifies the type of the proximity placement group. <br><br> Possible values are: <br><br> **Standard** : Co-locate resources within an Azure region or Availability Zone. <br><br> **Ultra** : For future use.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags
        """
        # Legacy keyword aliases: __name__/__opts__ predate resource_name/opts.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means an existing resource is being adopted via get();
        # __props__ is reserved for that path only.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['colocation_status'] = colocation_status
            __props__['location'] = location
            __props__['proximity_placement_group_name'] = proximity_placement_group_name
            __props__['proximity_placement_group_type'] = proximity_placement_group_type
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['tags'] = tags
            # Output-only properties start as None and are populated by the engine.
            __props__['availability_sets'] = None
            __props__['name'] = None
            __props__['type'] = None
            __props__['virtual_machine_scale_sets'] = None
            __props__['virtual_machines'] = None
        # Type tokens from other API versions that should resolve to this resource.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:compute:ProximityPlacementGroup"), pulumi.Alias(type_="azure-nextgen:compute/latest:ProximityPlacementGroup"), pulumi.Alias(type_="azure-nextgen:compute/v20180401:ProximityPlacementGroup"), pulumi.Alias(type_="azure-nextgen:compute/v20180601:ProximityPlacementGroup"), pulumi.Alias(type_="azure-nextgen:compute/v20181001:ProximityPlacementGroup"), pulumi.Alias(type_="azure-nextgen:compute/v20190301:ProximityPlacementGroup"), pulumi.Alias(type_="azure-nextgen:compute/v20190701:ProximityPlacementGroup"), pulumi.Alias(type_="azure-nextgen:compute/v20200601:ProximityPlacementGroup"), pulumi.Alias(type_="azure-nextgen:compute/v20201201:ProximityPlacementGroup")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(ProximityPlacementGroup, __self__).__init__(
            'azure-nextgen:compute/v20191201:ProximityPlacementGroup',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'ProximityPlacementGroup':
        """
        Get an existing ProximityPlacementGroup resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # Empty props: all state is looked up from the engine by id.
        __props__ = dict()
        return ProximityPlacementGroup(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="availabilitySets")
    def availability_sets(self) -> pulumi.Output[Sequence['outputs.SubResourceWithColocationStatusResponse']]:
        """
        A list of references to all availability sets in the proximity placement group.
        """
        return pulumi.get(self, "availability_sets")
    @property
    @pulumi.getter(name="colocationStatus")
    def colocation_status(self) -> pulumi.Output[Optional['outputs.InstanceViewStatusResponse']]:
        """
        Describes colocation status of the Proximity Placement Group.
        """
        return pulumi.get(self, "colocation_status")
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        Resource location
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Resource name
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="proximityPlacementGroupType")
    def proximity_placement_group_type(self) -> pulumi.Output[Optional[str]]:
        """
        Specifies the type of the proximity placement group. <br><br> Possible values are: <br><br> **Standard** : Co-locate resources within an Azure region or Availability Zone. <br><br> **Ultra** : For future use.
        """
        return pulumi.get(self, "proximity_placement_group_type")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Resource tags
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Resource type
        """
        return pulumi.get(self, "type")
    @property
    @pulumi.getter(name="virtualMachineScaleSets")
    def virtual_machine_scale_sets(self) -> pulumi.Output[Sequence['outputs.SubResourceWithColocationStatusResponse']]:
        """
        A list of references to all virtual machine scale sets in the proximity placement group.
        """
        return pulumi.get(self, "virtual_machine_scale_sets")
    @property
    @pulumi.getter(name="virtualMachines")
    def virtual_machines(self) -> pulumi.Output[Sequence['outputs.SubResourceWithColocationStatusResponse']]:
        """
        A list of references to all virtual machines in the proximity placement group.
        """
        return pulumi.get(self, "virtual_machines")
    # Pulumi property-name translation hooks (camelCase <-> snake_case).
    def translate_output_property(self, prop):
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 47.744318 | 751 | 0.6794 |
4aa3e524310061339e9ceb7b845eeaedbbdbfb40 | 2,232 | py | Python | backend/apps/contracts_uis/models.py | grinay/smartz | 44ca50ebcac26d5776b90b4fc96c63edfe804dfe | [
"Apache-2.0"
] | null | null | null | backend/apps/contracts_uis/models.py | grinay/smartz | 44ca50ebcac26d5776b90b4fc96c63edfe804dfe | [
"Apache-2.0"
] | null | null | null | backend/apps/contracts_uis/models.py | grinay/smartz | 44ca50ebcac26d5776b90b4fc96c63edfe804dfe | [
"Apache-2.0"
] | 1 | 2018-10-23T15:04:52.000Z | 2018-10-23T15:04:52.000Z | import random
import string
from datetime import datetime
import pytz
from django.conf import settings
from django.contrib.postgres.fields import JSONField
from django.db import models
from apps.common.constants import BLOCKCHAINS, BLOCKCHAIN_ETHEREUM
from apps.contracts_uis.validators import validate_functions, validate_abi, \
validate_dashboard_functions, validate_functions_specs
from apps.users.models import User
from constructor_engine.services import ContractsProcessorsManager
class ContractUI(models.Model):
    """User-defined UI description for interacting with a deployed contract."""
    name = models.CharField(max_length=200)
    # Random 24-char identifier generated in create(); unique, URL-friendly.
    slug = models.CharField(max_length=24, unique=True)
    blockchain = models.CharField(choices=BLOCKCHAINS, max_length=50, default=BLOCKCHAIN_ETHEREUM)
    network_id = models.CharField(max_length=200, blank=True, default='')
    address = models.CharField(max_length=42, blank=True, default='')
    description = models.TextField()
    # Callable defaults (list/dict) replace the original mutable literals
    # ([]/{}): a literal default object is shared across all instances and can
    # be mutated in place (Django warns about this, fields.W161/W162).
    # NOTE: this change requires a (no-op at the DB level) migration.
    abi = JSONField(validators=[validate_abi], default=list, blank=True)
    functions = JSONField(validators=[validate_functions], blank=True, default=dict)
    function_specs = JSONField(validators=[validate_functions_specs], blank=True, default=dict)
    dashboard_functions = JSONField(validators=[validate_dashboard_functions], blank=True, default=list)
    user = models.ForeignKey(User, on_delete=models.PROTECT, null=True)
    sorting_order = models.IntegerField(default=0)
    created_at = models.DateTimeField()
    updated_at = models.DateTimeField()
    image = models.CharField(max_length=200, default='', blank=True)

    @classmethod
    def create(cls, **kwargs):
        """Build an unsaved instance with a freshly generated random slug."""
        ui = cls(**kwargs)
        # SystemRandom draws from the OS CSPRNG, so slugs are unguessable.
        ui.slug = ''.join(
            random.SystemRandom().choice('abcdef' + string.digits) for _ in range(24)
        )
        return ui

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        """Maintain created_at/updated_at and refresh derived function specs."""
        if not self.id:
            self.created_at = datetime.now(pytz.timezone(settings.TIME_ZONE))
        self.updated_at = datetime.now(pytz.timezone(settings.TIME_ZONE))
        # function_specs is recomputed from abi + functions on every save.
        self.function_specs = ContractsProcessorsManager().require_contract_processor(self.blockchain)\
            .process_functions_specs(self.abi, self.functions)
        return super().save(*args, **kwargs)
8ddc99f32782bf53e856dc638920ca9ed11cc661 | 2,138 | py | Python | azure-eventgrid/azure/eventgrid/models/iot_hub_device_created_event_data_py3.py | Christina-Kang/azure-sdk-for-python | bbf982eb06aab04b8151f69f1d230b7f5fb96ebf | [
"MIT"
] | null | null | null | azure-eventgrid/azure/eventgrid/models/iot_hub_device_created_event_data_py3.py | Christina-Kang/azure-sdk-for-python | bbf982eb06aab04b8151f69f1d230b7f5fb96ebf | [
"MIT"
] | null | null | null | azure-eventgrid/azure/eventgrid/models/iot_hub_device_created_event_data_py3.py | Christina-Kang/azure-sdk-for-python | bbf982eb06aab04b8151f69f1d230b7f5fb96ebf | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .device_life_cycle_event_properties import DeviceLifeCycleEventProperties
class IotHubDeviceCreatedEventData(DeviceLifeCycleEventProperties):
    """Event data for Microsoft.Devices.DeviceCreated event.
    :param device_id: The unique identifier of the device. This case-sensitive
     string can be up to 128 characters long, and supports ASCII 7-bit
     alphanumeric characters plus the following special characters: - : . + % _
     # * ? ! ( ) , = @ ; $ '.
    :type device_id: str
    :param hub_name: Name of the IoT Hub where the device was created or
     deleted.
    :type hub_name: str
    :param op_type: The event type specified for this operation by the IoT
     Hub.
    :type op_type: str
    :param operation_timestamp: The ISO8601 timestamp of the operation.
    :type operation_timestamp: str
    :param twin: Information about the device twin, which is the cloud
     represenation of application device metadata.
    :type twin: ~azure.eventgrid.models.DeviceTwinInfo
    """
    # Maps constructor keyword arguments to their wire-format (JSON) keys and
    # msrest serialization types; consumed by the msrest (de)serializer.
    _attribute_map = {
        'device_id': {'key': 'deviceId', 'type': 'str'},
        'hub_name': {'key': 'hubName', 'type': 'str'},
        'op_type': {'key': 'opType', 'type': 'str'},
        'operation_timestamp': {'key': 'operationTimestamp', 'type': 'str'},
        'twin': {'key': 'twin', 'type': 'DeviceTwinInfo'},
    }
    def __init__(self, *, device_id: str=None, hub_name: str=None, op_type: str=None, operation_timestamp: str=None, twin=None, **kwargs) -> None:
        # All fields are stored by the shared base class; this subclass adds
        # no state of its own beyond the event-type-specific attribute map.
        super(IotHubDeviceCreatedEventData, self).__init__(device_id=device_id, hub_name=hub_name, op_type=op_type, operation_timestamp=operation_timestamp, twin=twin, **kwargs)
cb779cc66f314d2d2731877166e373742311a2fe | 428 | py | Python | editor/importme.py | Amazeryogo/surf-exel | 0d6a43a7ba2b059f61405db846e546308a035733 | [
"MIT"
] | 3 | 2020-08-12T05:59:47.000Z | 2020-11-08T00:01:04.000Z | editor/importme.py | Amazeryogo/surf-exel | 0d6a43a7ba2b059f61405db846e546308a035733 | [
"MIT"
] | 8 | 2020-08-19T06:24:06.000Z | 2020-10-27T04:37:46.000Z | editor/importme.py | Amazeryogo/surf-exel | 0d6a43a7ba2b059f61405db846e546308a035733 | [
"MIT"
] | 1 | 2020-10-25T13:35:17.000Z | 2020-10-25T13:35:17.000Z | from Tkinter import *
from Tkinter import filedialog, simpledialog
from Tkinter import messagebox
from editor.settings import backgroundcolor as bc
from editor.settings import forgroundcolor as fc
from editor.settings import back as b
from editor.settings import fore as f
from editor.settings import size
from editor.settings import font as fontx
from gtts import gTTS
import playsound
import os
# Imported everything here
| 23.777778 | 49 | 0.831776 |
594e8406b5dad0ef381a9dd9d2ec9fbb75e0efd7 | 275 | py | Python | SipMask-VIS/mmdet/core/bbox/assigners/__init__.py | anirudh-chakravarthy/SipMask | fc82b12c13abb091e271eb4f1b6734da18234443 | [
"MIT"
] | 859 | 2019-09-29T05:36:03.000Z | 2022-03-15T08:33:03.000Z | SipMask-VIS/mmdet/core/bbox/assigners/__init__.py | anirudh-chakravarthy/SipMask | fc82b12c13abb091e271eb4f1b6734da18234443 | [
"MIT"
] | 69 | 2019-10-14T11:07:51.000Z | 2022-03-10T14:39:00.000Z | SipMask-VIS/mmdet/core/bbox/assigners/__init__.py | anirudh-chakravarthy/SipMask | fc82b12c13abb091e271eb4f1b6734da18234443 | [
"MIT"
] | 165 | 2019-10-05T02:59:29.000Z | 2022-03-28T02:30:11.000Z | from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
from .base_assigner import BaseAssigner
from .max_iou_assigner import MaxIoUAssigner
__all__ = [
'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult'
]
| 30.555556 | 76 | 0.829091 |
883ffd392d03a30d85b527af5e5ef3bd2e8bed05 | 591 | py | Python | lab4/lab4Ex1/TextStats.py | RustyRipper/ScriptingLanguages- | b0944821ddfcd6ca2d57cf00614029bf43786719 | [
"MIT"
] | null | null | null | lab4/lab4Ex1/TextStats.py | RustyRipper/ScriptingLanguages- | b0944821ddfcd6ca2d57cf00614029bf43786719 | [
"MIT"
] | null | null | null | lab4/lab4Ex1/TextStats.py | RustyRipper/ScriptingLanguages- | b0944821ddfcd6ca2d57cf00614029bf43786719 | [
"MIT"
] | null | null | null | import re
class TextStats:
def __init__(self, text):
self.number_of_lines = 0
self.number_of_words = 0
self.number_of_nonalpha = 0
self.__compute(text)
def __compute(self, text):
self.number_of_lines = text.count('\n') + 1
self.number_of_words = len(re.findall(r"[\w']+", text))
s = ''.join(ch for ch in text if ch.isalpha())
self.number_of_nonalpha = len(text) - len(s) - 1
def __str__(self):
return str(self.number_of_lines) + " " + (str(self.number_of_words)) + " " + (str(self.number_of_nonalpha))
| 29.55 | 115 | 0.609137 |
a0c5fd843385895799545447dc4fa7246597b45a | 702 | py | Python | saltlint/rules/JinjaVariableHasSpacesRule.py | roaldnefs/salt-lint | 3b732c81d9864706769a26bf3231d531fe8e4075 | [
"MIT"
] | 24 | 2019-04-19T06:19:10.000Z | 2020-08-14T11:29:03.000Z | saltlint/rules/JinjaVariableHasSpacesRule.py | roaldnefs/salt-lint | 3b732c81d9864706769a26bf3231d531fe8e4075 | [
"MIT"
] | 35 | 2019-10-04T06:33:20.000Z | 2019-10-18T19:08:17.000Z | saltlint/rules/JinjaVariableHasSpacesRule.py | roaldnefs/salt-lint | 3b732c81d9864706769a26bf3231d531fe8e4075 | [
"MIT"
] | 10 | 2019-10-04T13:08:23.000Z | 2019-10-14T07:36:34.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2016 Will Thames and contributors
# Copyright (c) 2018 Ansible Project
# Modified work Copyright (c) 2019 Warpnet B.V.
import re
from saltlint.linter import SaltLintRule
class JinjaVariableHasSpacesRule(SaltLintRule):
    """Flag Jinja variables missing the spaces in ``{{ var_name }}``."""
    id = '206'
    shortdesc = 'Jinja variables should have spaces before and after: {{ var_name }}'
    description = 'Jinja variables should have spaces before and after: ``{{ var_name }}``'
    severity = 'LOW'
    tags = ['formatting', 'jinja']
    version_added = 'v0.0.1'
    # Four alternatives, any of which marks a violation:
    #   '{{' not followed by a space, '-' or '+';
    #   '{{-' or '{{+' (whitespace-control markers) not followed by a space;
    #   '}}' not preceded by a space, '-' or '+';
    #   '-}}' or '+}}' not preceded by a space.
    bracket_regex = re.compile(r"{{[^ \-\+]|{{[-\+][^ ]|[^ \-\+]}}|[^ ][-\+]}}")
    def match(self, file, line):
        # A truthy match object reports the line as a violation.
        return self.bracket_regex.search(line)
| 31.909091 | 91 | 0.64245 |
7a02fdd9fdcdce6ad1305373401fd1ab4d5ecfd7 | 5,772 | py | Python | boxFilter.py | alfonsoirai/Image-Border-Analyzer | 910bed1258ec109ad9f3c0ec66d0dffb917b772d | [
"MIT"
] | null | null | null | boxFilter.py | alfonsoirai/Image-Border-Analyzer | 910bed1258ec109ad9f3c0ec66d0dffb917b772d | [
"MIT"
] | null | null | null | boxFilter.py | alfonsoirai/Image-Border-Analyzer | 910bed1258ec109ad9f3c0ec66d0dffb917b772d | [
"MIT"
] | null | null | null | from Tkinter import *
from PIL import Image, ImageTk
from scipy.ndimage import imread
import numpy as np
import sys
import math
import io
class Sobel:
    """Tk viewer that displays an image and applies a box (mean) filter.

    NOTE(review): despite the class name, this implements a box filter, not a
    Sobel edge operator.
    """

    def __init__(self, path, root):
        """Load the image at `path`, build the Tk widgets inside `root`."""
        self.imagePath = path
        self.image = self.rgb_conversion()
        self.w, self.h = self.image.size
        self.actualImage = self.image
        self.root = root
        self.display = ImageTk.PhotoImage(self.image)
        self.canvas = Canvas(self.root, width=self.w, height=self.h)
        self.label = Label(self.canvas, image=self.display)
        # Keep a reference so Tk does not garbage-collect the PhotoImage.
        self.label.image = self.display
        self.label.pack()
        self.panelBotones = Canvas(self.root, width=150, height=self.h)
        self.boton = Button(self.panelBotones, text='Box Filter', fg='black', command=self.box_filter)
        self.panel = self.panelBotones.create_window(5, 0, anchor='nw', window=self.boton)
        self.panelBotones.pack(side=LEFT)
        self.canvas.pack()

    def update_image(self):
        """Refresh the displayed label with self.actualImage."""
        display = ImageTk.PhotoImage(self.actualImage)
        self.label.config(image=display)
        self.label.image = display

    def rgb_conversion(self):
        """Open self.imagePath and return it converted to RGB."""
        image = Image.open(self.imagePath)
        image = image.convert('RGB')
        return image

    def box_filter(self, radio=9):
        """Apply a radio x radio mean filter to 'prueba.jpg' and display it.

        radio -- side length of the averaging window; replaces the original
        hard-coded "modify the radius from 2 to 10" constant while keeping 9
        as the default, so the Button callback still works unchanged.

        The original if/elif ladder duplicated the same loop once per radius
        and, for radio in (8, 9, 10), summed fewer pixels than the divisor
        assumed (e.g. an 8x8 window divided by 81).  This version always sums
        exactly radio*radio pixels and divides by radio*radio.
        """
        # NOTE(review): assumes the file decodes to a 2-D (grayscale) array so
        # image.item(i, j) with two indices is valid -- confirm input data.
        image = imread('prueba.jpg')
        height = image.shape[0]
        width = image.shape[1]
        boxImage = np.zeros((height - 2, width - 2))
        # Window offsets: centered for odd radio (-radio//2 .. radio//2),
        # biased one pixel up/left for even radio -- matching the original
        # hand-written ranges for radio 2..7.
        offsets = np.arange(-(radio // 2), radio - radio // 2)
        area = float(radio * radio)
        for i in np.arange(3, height - 3):
            for j in np.arange(3, width - 3):
                total = 0
                for k in offsets:
                    for l in offsets:
                        total += image.item(i + k, j + l)
                boxImage.itemset((i, j), int(total / area))
        self.actualImage = Image.fromarray(boxImage)
        self.update_image()
def main():
    # Entry point: build the Tk window, attach the filter UI, run the loop.
    try:
        imagePath = 'prueba.jpg'
    except:
        # NOTE(review): a plain constant assignment cannot raise, so this
        # handler is unreachable dead code; kept as-is to preserve behavior.
        print "Selecciona una imagen"
        return
    root = Tk()
    conversion = Sobel(imagePath, root)
    root.title("Box Filter")
    root.mainloop()
# Run the GUI only when executed as a script, not on import.
if __name__ == "__main__":
    main()
245d9ebd44048fc660cbb39abd42598c4435da71 | 6,975 | py | Python | naslib/predictors/lcsvr.py | shenyann/NASLib | 6fad875f21e41bb9c91647bbd0620aa6e6dc8c7f | [
"Apache-2.0"
] | 14 | 2021-12-08T17:56:01.000Z | 2022-01-15T05:06:59.000Z | naslib/predictors/lcsvr.py | shenyann/NASLib | 6fad875f21e41bb9c91647bbd0620aa6e6dc8c7f | [
"Apache-2.0"
] | 4 | 2022-01-10T09:04:38.000Z | 2022-01-23T03:35:09.000Z | naslib/predictors/lcsvr.py | shenyann/NASLib | 6fad875f21e41bb9c91647bbd0620aa6e6dc8c7f | [
"Apache-2.0"
] | 1 | 2021-12-08T17:56:06.000Z | 2021-12-08T17:56:06.000Z | # Author: Robin Ru @ University of Oxford
# This is an implementation of learning curve extrapolation method based on:
# B. Baker et al. 2017, “Accelerating neural architecture search using performance prediction,” arXiv preprint arXiv:1705.10823.
from sklearn.svm import NuSVR
from sklearn.linear_model import BayesianRidge
from sklearn.ensemble import RandomForestRegressor
import time
from sklearn.model_selection import cross_val_score, train_test_split
import numpy as np
from naslib.predictors.predictor import Predictor
from scipy import stats
import numpy as np
from naslib.search_spaces.core.query_metrics import Metric
def loguniform(low=0, high=1, size=None):
return np.exp(np.random.uniform(np.log(low), np.log(high), size))
class SVR_Estimator(Predictor):
def __init__(self, metric=Metric.VAL_ACCURACY, all_curve=True, model_name='svr',best_hyper=None, n_hypers=1000):
self.n_hypers = n_hypers
self.all_curve = all_curve
self.model_name = model_name
self.best_hyper = best_hyper
self.name = 'LcSVR'
self.metric=metric
def fit(self, xtrain, ytrain, info, learn_hyper=True):
# prepare training data
xtrain_data = self.prepare_data(info)
y_train = np.array(ytrain)
# learn hyperparameters of the extrapolator by cross validation
if self.best_hyper is None or learn_hyper:
# specify model hyper-parameters
if self.model_name == 'svr':
C = loguniform(1e-5, 10, self.n_hypers)
nu = np.random.uniform(0, 1, self.n_hypers)
gamma = loguniform(1e-5, 10, self.n_hypers)
hyper = np.vstack([C, nu, gamma]).T
elif self.model_name == 'blr':
alpha_1 = np.random.uniform(1e-7, 1e-5, self.n_hypers)
alpha_2 = np.random.uniform(1e-7, 1e-5, self.n_hypers)
lambda_1 = np.random.uniform(1e-7, 1e-5, self.n_hypers)
lambda_2 = np.random.uniform(1e-7, 1e-5, self.n_hypers)
hyper = np.vstack([alpha_1, alpha_2, lambda_1, lambda_2]).T
elif self.model_name == 'rf':
n_trees = np.random.randint(10, 800, self.n_hypers)
frac_feature = np.random.uniform(0.1, 0.5, self.n_hypers)
hyper = np.vstack([n_trees, frac_feature]).T
print(f'start CV on {self.model_name}')
mean_score_list = []
t_start = time.time()
for i in range(self.n_hypers):
# define model
if self.model_name == 'svr':
model = NuSVR(C=hyper[i, 0], nu=hyper[i, 1], gamma=hyper[i, 2], kernel='rbf')
# model = SVR(C=hyper[i, 0], nu=hyper[i, 1], gamma= ,kernel='linear')
elif self.model_name == 'blr':
model = BayesianRidge(alpha_1=hyper[i, 0], alpha_2=hyper[i, 1],
lambda_1=hyper[i, 2], lambda_2=hyper[i, 3])
elif self.model_name == 'rf':
model = RandomForestRegressor(n_estimators=int(hyper[i, 0]), max_features=hyper[i, 1])
# perform cross validation to learn the best hyper value
scores = cross_val_score(model, xtrain_data, y_train, cv=3)
mean_scores = np.mean(scores)
mean_score_list.append(mean_scores)
# print(f'hper={hyper[i]}, score={mean_scores}')
t_end = time.time()
best_hyper_idx = np.argmax(mean_score_list)
best_hyper = hyper[best_hyper_idx]
max_score = np.max(mean_score_list)
time_taken = t_end - t_start
print(f'{self.model_name}'
f'best_hyper={best_hyper}, score={max_score}, time={time_taken}')
self.best_hyper = best_hyper
# fit the extrapolator with the best hyperparameters to the training data
if self.model_name == 'svr':
best_model = NuSVR(C=self.best_hyper[0], nu=self.best_hyper[1], gamma=self.best_hyper[2], kernel='rbf')
# model = SVR(C=hyper[i, 0], nu=hyper[i, 1], gamma= ,kernel='linear')
elif self.model_name == 'blr':
best_model = BayesianRidge(alpha_1=self.best_hyper[0], alpha_2=self.best_hyper[1],
lambda_1=self.best_hyper[2], lambda_2=self.best_hyper[3])
elif self.model_name == 'rf':
best_model = RandomForestRegressor(n_estimators=int(self.best_hyper[0]), max_features=self.best_hyper[1])
best_model.fit(xtrain_data, y_train)
self.best_model = best_model
def collate_inputs(self, VC_all_archs_list, AP_all_archs_list):
"""
Args:
VC_all_archs_list: a list of validation accuracy curves for all archs
AP_all_archs_list: a list of architecture features for all archs
Returns:
X: an collated array of all input information used for extrapolation model
"""
VC = np.vstack(VC_all_archs_list) # dimension: n_archs x n_epochs
DVC = np.diff(VC, n=1, axis=1)
DDVC = np.diff(DVC, n=1, axis=1)
mVC = np.mean(VC, axis=1)[:, None]
stdVC = np.std(VC, axis=1)[:, None]
mDVC = np.mean(DVC, axis=1)[:, None]
stdDVC = np.std(DVC, axis=1)[:, None]
mDDVC = np.mean(DDVC, axis=1)[:, None]
stdDDVC = np.std(DDVC, axis=1)[:, None]
if self.all_curve:
TS_list = [VC, DVC, DDVC, mVC, stdVC]
else:
TS_list = [mVC, stdVC, mDVC, stdDVC, mDDVC, stdDDVC]
if self.metric == Metric.TRAIN_LOSS:
sumVC = np.sum(VC, axis=1)[:, None]
TS_list += [sumVC]
TS = np.hstack(TS_list)
if len(AP_all_archs_list) != 0:
AP = np.vstack(AP_all_archs_list)
X = np.hstack([AP, TS])
else:
X = TS
return X
def query(self, xtest, info):
data = self.prepare_data(info)
pred_on_test_set = self.best_model.predict(data)
return pred_on_test_set
def get_data_reqs(self):
"""
Returns a dictionary with info about whether the predictor needs
extra info to train/query.
"""
reqs = {'requires_partial_lc':True,
'metric':self.metric,
'requires_hyperparameters':True,
'hyperparams':['flops', 'latency', 'params']
}
return reqs
def prepare_data(self, info):
# todo: this can be added at the top of collate_inputs
val_acc_curve = []
arch_params = []
for i in range(len(info)):
acc_metric = info[i]['lc']
arch_hp = [info[i][hp] for hp in ['flops', 'latency', 'params']]
val_acc_curve.append(acc_metric)
arch_params.append(arch_hp)
return self.collate_inputs(val_acc_curve, arch_params)
| 42.018072 | 128 | 0.591971 |
51af1ed6d4e8569e8634b7d2778abbe3dd5ffd44 | 1,736 | py | Python | setup.py | jsoref/xmldiff | 33b1b41d439f2a38894e0b9cb52ec64804398888 | [
"MIT"
] | null | null | null | setup.py | jsoref/xmldiff | 33b1b41d439f2a38894e0b9cb52ec64804398888 | [
"MIT"
] | null | null | null | setup.py | jsoref/xmldiff | 33b1b41d439f2a38894e0b9cb52ec64804398888 | [
"MIT"
] | null | null | null | from io import open
from setuptools import setup, find_packages
version = '2.4.dev0'
with open('README.rst', 'rt', encoding='utf8') as readme:
description = readme.read()
with open('CHANGES.rst', 'rt', encoding='utf8') as changes:
history = changes.read()
setup(name='xmldiff',
version=version,
description="Creates diffs of XML files",
long_description=description + '\n' + history,
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=['Development Status :: 5 - Production/Stable',
'Topic :: Text Processing :: Markup :: XML',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'License :: OSI Approved :: MIT License',
],
keywords='xml html diff',
author='Lennart Regebro',
author_email='lregebro@shoobx.com',
url='https://github.com/Shoobx/xmldiff',
license='MIT',
packages=find_packages(exclude=['doc', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'lxml>=3.1.0',
'six',
],
test_suite='tests',
entry_points={
'console_scripts': [
'xmldiff = xmldiff.main:diff_command',
'xmlpatch = xmldiff.main:patch_command',
],
},
)
| 34.72 | 79 | 0.548963 |
a7e2147e95722cbba7806929be7b043cdbd8cb8b | 50 | py | Python | test/login.py | FantasyWorm/testpython | eaedd41b43679987a91d352fc3f8a7c3b1dcac4d | [
"MIT"
] | null | null | null | test/login.py | FantasyWorm/testpython | eaedd41b43679987a91d352fc3f8a7c3b1dcac4d | [
"MIT"
] | null | null | null | test/login.py | FantasyWorm/testpython | eaedd41b43679987a91d352fc3f8a7c3b1dcac4d | [
"MIT"
] | null | null | null | num = 11
num2 = 2222222
num3 = 444444444444444
| 7.142857 | 22 | 0.7 |
e0218e88ac15afd3c89b87daf6d60b9c5b5a1f11 | 14,306 | py | Python | src/archefilter/fc_archefilter.py | dfarrow0/flu-contest | 8356cf48910a76d2643d105651342288076a9377 | [
"MIT"
] | null | null | null | src/archefilter/fc_archefilter.py | dfarrow0/flu-contest | 8356cf48910a76d2643d105651342288076a9377 | [
"MIT"
] | 5 | 2020-02-24T19:06:24.000Z | 2020-04-30T16:40:55.000Z | src/archefilter/fc_archefilter.py | dfarrow0/flu-contest | 8356cf48910a76d2643d105651342288076a9377 | [
"MIT"
] | 3 | 2020-03-22T16:30:38.000Z | 2020-04-27T17:39:12.000Z | """
==================
=== Deprecated ===
==================
As of 2017-11-09, this file is no longer used. The ArcheFilter forecasting
system was only used for the 2015--2016 flu season.
===============
=== Purpose ===
===============
Assimilates digital surveillance signals and a flu model to produce nowcasts
(and, secondarily, forecasts) of flu.
=================
=== Changelog ===
=================
2016-12-08
+ use secrets
2015-12-30
+ enforce minimum number of bins when sampling
* quick hack for 2015w50: min_shift from -10 to 0
2015-12-17
* penalizing HHS6 curve height by 15%
2015-12-14
+ AF_Utils.signal* (replace `data_io` version)
* replace `data_io` with Epidata API call to `signals`
* prefixed output with [AF]
- don't penalize tall curves
- AF_Utils.check (duplicate of AF_Utils._get)
2015-12-07
+ penalize ridiculously tall curves (i.e. hhs6 on 2015w45)
2015-11-09
* near total rewrite of process model and filtering
2015-10-26
+ first version
"""
# built-in
# external
from filterpy.kalman import MerweScaledSigmaPoints as SigmaPoints
from filterpy.kalman import UnscentedKalmanFilter as UKF
import numpy as np
import scipy.stats as stats
# local
from archetype import Archetype
from delphi_epidata import Epidata
import epiweek as flu
from fc_abstract import Forecaster
from neldermead import NelderMead
import secrets
class FluProcess:
  """The flu process model, based on the Archetype idea.

  Maintains a per-region "target" trajectory that blends observed (unstable)
  wILI with the archetype's historical mean/variance, and provides scoring,
  best-fit search, and posterior sampling over the archetype's (shift, scale)
  parameters. Also supplies the state-transition (`forecast`) and measurement
  (`measure`) functions for the Unscented Kalman Filter.
  """

  def __init__(self, archetype):
    # `archetype` maps region name -> Archetype instance
    self.archetype = archetype
    # per-region target trajectories, populated by inform()
    self.target_mean = {}
    self.target_var = {}
    self.target_std = {}

  def score(self, region, curve):
    """Return half the summed squared normalized error of `curve` against the
    region's target (i.e. the exponent of a diagonal multivariate normal PDF).

    Lower is better; 0 means the (weighted) curve matches the target exactly.
    """
    if region == 'hhs6':
      # penalize HHS6 curve height by 15% (see module changelog, 2015-12-17)
      curve = curve * 1.15
    z_scores = self.weights * (curve - self.target_mean[region]) / self.target_std[region]
    return np.dot(z_scores, z_scores) / 2

  def scan_grid(self, region, min_shift, max_shift, n_shift, min_scale, max_scale, n_scale):
    """Score an (n_shift x n_scale) grid of archetype parameters.

    Returns (grid, bins, best, d_shift, d_scale) where `grid` is a normalized
    PMF over bins, `bins` holds the (shift, scale) center of each bin, `best`
    is the index of the most likely bin, and d_* are the bin widths.
    """
    # calculate parameter bins
    shifts = np.linspace(min_shift, max_shift, n_shift)
    scales = np.linspace(min_scale, max_scale, n_scale)
    d_shift, d_scale = shifts[1] - shifts[0], scales[1] - scales[0]
    bins = [[(t, s) for s in scales] for t in shifts]
    # get score of curve in center of each bin
    grid = np.zeros((n_shift, n_scale))
    for (t, shift) in enumerate(shifts):
      for (s, scale) in enumerate(scales):
        grid[t][s] = self.score(region, self.archetype[region].instance(scale, shift, False))
    # convert scores to PMF (scores are negative log-likelihoods)
    grid = np.exp(-grid)
    grid /= np.sum(grid)
    # find best bin index
    best = np.unravel_index(np.argmax(grid), grid.shape)
    return grid, bins, best, d_shift, d_scale

  def get_best_fit(self, region, output=None):
    """Find the best-fit archetype curve for `region`.

    Does a coarse grid sweep to seed a derivative-free (Nelder-Mead)
    refinement. If `output` is a 2-element list, the optimal [shift, scale]
    is written into it. Returns the best-fit curve (no holiday effect).
    """
    # coarse sweep over global parameter space
    grid, bins, best, d_shift, d_scale = self.scan_grid(region, 0, +10, 32, 1 / 3, 3, 32)
    guess = bins[best[0]][best[1]]
    # initialize derivative-free optimizer to find best parameters
    def objective(params):
      return self.score(region, self.archetype[region].instance(params[1], params[0], False))
    solver = NelderMead(objective, limit_iterations=100, silent=True)
    simplex = solver.get_simplex(len(guess), guess, min(d_shift, d_scale))
    # do the optimization
    shift, scale = solver.run(simplex)._location
    if output is not None:
      output[0] = shift
      output[1] = scale
    # return the best-fit curve
    return self.archetype[region].instance(scale, shift, False)

  def get_sample_fits(self, region, num_samples, add_holiday):
    """Draw `num_samples` curves from the (shift, scale) posterior.

    Sweeps a fine grid, keeps the bins covering 99% of the probability mass,
    then samples bins by weight and jitters uniformly within each bin.
    Returns (curves, grid).
    """
    # fine sweep over parameter space
    t1, t2 = 0, +10
    s1, s2 = 1 / 3, 3
    grid, bins, best, d_shift, d_scale = self.scan_grid(region, t1, t2, 128, s1, s2, 128)
    # sort by decreasing bin likelihood
    data = []
    for (t, row) in enumerate(bins):
      for (s, (shift, scale)) in enumerate(row):
        data.append((grid[t][s], shift, scale))
    data = np.array(sorted(data, key=lambda d: -d[0]))
    # limit to the bins containing 99% of the probability
    # (max(1, ...) enforces a minimum number of bins; see changelog 2015-12-30)
    limit = max(1, np.searchsorted(np.cumsum(data[:, 0]), 0.99))
    probs, shifts, scales = data[:limit, 0], data[:limit, 1], data[:limit, 2]
    cprob = np.cumsum(probs / sum(probs))
    # get sample curves
    curves = []
    for i in range(num_samples):
      # randomly select a weighted bin
      index = np.searchsorted(cprob, np.random.random())
      # randomly select a point within the bin
      try:
        shift = shifts[index] + np.random.uniform(-d_shift, +d_shift) / 2
        scale = scales[index] + np.random.uniform(-d_scale, +d_scale) / 2
      except IndexError as ex:
        # BUG FIX: this used to read `except ex:`, which evaluates the (then
        # undefined) name `ex` as an exception class and raises NameError,
        # masking the real out-of-bounds error and the debug output below.
        print('shift/scale index out of bounds!')
        print(len(shifts), index, d_shift)
        print(len(scales), index, d_scale)
        raise
      # build the archetype curve with the selected parameters
      curves.append(self.archetype[region].instance(scale, shift, add_holiday))
    return curves, grid

  def inform(self, region, mean, var):
    """Set the region's target trajectory.

    `mean`/`var` cover observed weeks so far; the remainder of the season is
    filled in from the archetype's historical mean/variance.
    """
    # combine observations and archetype
    self.week = len(mean)
    m1 = mean
    v1 = var
    m2 = self.archetype[region].unaligned_unsmoothed_mean[self.week:]
    v2 = self.archetype[region].unaligned_unsmoothed_var[self.week:]
    self.target_mean[region] = np.hstack((m1, m2))
    self.target_var[region] = np.hstack((v1, v2))
    self.target_std[region] = self.target_var[region] ** 0.5
    # build weight vector: the 5 most recent observed weeks count fully,
    # everything else (older observations and the archetype tail) at 0.2
    self.weights = np.ones(len(self.target_mean[region])) * 0.2
    self.weights[max(0, self.week - 5):self.week] = 1

  def forecast(self, state):
    """UKF state-transition function.

    Given per-region wILI values in `state` (ordered as AF_Utils.regions),
    returns next week's wILI per region from each region's best-fit curve.
    """
    output = []
    for (x, region) in zip(state, AF_Utils.regions):
      self.target_mean[region][self.week - 1] = x
      # TODO: variance here?
      self.target_var[region][self.week - 1] = 1e-3
      curve = self.get_best_fit(region)
      output.append(curve[self.week])
    return np.array(output)

  def measure(self, state):
    """UKF measurement function.

    Maps the state to the expected digital-surveillance signal vector,
    concatenated as: twitter (11: nat + 10 regions, no holiday effect),
    wiki (1: nat), uili (11: nat + 10 regions, with holiday effect).
    """
    twitter = []
    wiki = []
    uili = []
    for (x, region) in zip(state, AF_Utils.regions):
      ili_nh = x
      ili_h = self.archetype[region].add_holiday_week(ili_nh, self.week)
      twitter.append(ili_nh)
      uili.append(ili_h)
    nat_nh = [AF_Utils.get_national(twitter)]
    nat_h = [AF_Utils.get_national(uili)]
    twitter = nat_nh + twitter
    wiki = nat_nh
    uili = nat_h + uili
    return np.array(twitter + wiki + uili)
class AF_Utils:
  """ helper for loading (and generating) data """
  # the ten HHS regions, in order; national ('nat') is handled separately
  regions = ['hhs%d' % i for i in range(1, 11)]
  @staticmethod
  def _get(res):
    # unwrap an Epidata API response, raising if the call was unsuccessful
    if res['result'] != 1:
      raise Exception('API result=%d (%s)' % (res['result'], res['message']))
    return res['epidata']
  @staticmethod
  def get_season(season, location):
    # return the 52 weekly wILI values of `season` for `location`,
    # starting at epiweek 30 (ILINet via the Epidata API)
    #end = (season + 1) * 100 + 29
    #epiweeks = Epidata.range(flu.add_epiweeks(end, -51), end)
    begin = season * 100 + 30
    epiweeks = Epidata.range(begin, flu.add_epiweeks(begin, 51))
    rows = AF_Utils._get(Epidata.ilinet(location, epiweeks))
    return [row['wili'] for row in rows]
  @staticmethod
  def initialize_filter(x, P, Q, R, process):
    # build an Unscented Kalman Filter whose dynamics/measurement come from
    # the given FluProcess; x/P are initial state/covariance, Q/R are
    # process/measurement noise covariances
    # Update system state
    fx = lambda x, dt: process.forecast(x)
    # Expected measurement, given system state
    hx = lambda x: process.measure(x)
    # Get the sigma points for the unscented transformation
    # https://github.com/rlabbe/filterpy/blob/master/filterpy/kalman/sigma_points.py
    alpha, beta, kappa = 1e-3, 2, 0
    points = SigmaPoints(n=len(x), alpha=alpha, beta=beta, kappa=kappa)
    # Instantiate an Unscented Kalman Filter
    ukf = UKF(dim_x=len(x), dim_z=len(R[0]), dt=1, hx=hx, fx=fx, points=points)
    ukf.x, ukf.P, ukf.Q, ukf.R = x, P, Q, R
    # Return filter
    return ukf
  @staticmethod
  def get_unstable_wILI(region, ew1, ew2):
    # return preliminary ("unstable") wILI for [ew1, ew2] as reported on
    # issue ew2; raises if any week in the range is missing
    weeks = Epidata.range(ew1, ew2)
    epidata = AF_Utils._get(Epidata.fluview(region, weeks, issues=ew2))
    data = [row['wili'] for row in epidata]
    if len(data) != flu.delta_epiweeks(ew1, ew2) + 1:
      raise Exception('missing data')
    return data
  @staticmethod
  def get_national(regional):
    # weighted average of the 10 regional values (hhs1..hhs10);
    # presumably population-based weights — TODO confirm source
    weights = [0.045286439944771467, 0.10177386656841922, 0.095681349146225586, 0.19610707945020625, 0.16310640558744591, 0.12488754783066998, 0.043916824425230531, 0.034124204104827027, 0.15298339758467921, 0.041244820532846248]
    return np.dot(weights, regional)
  @staticmethod
  def _signal(name, region, epiweek):
    # fetch a single digital-surveillance signal value via the Epidata API
    rows = AF_Utils._get(Epidata.signals(secrets.api.signals, name, region, epiweek))
    if len(rows) != 1:
      raise Exception('expected one signal row')
    return rows[0]['value']
  @staticmethod
  def signal_twitter(region, epiweek):
    return AF_Utils._signal('twitter', region, epiweek)
  @staticmethod
  def signal_wiki(epiweek):
    # wiki signal is national-only
    return AF_Utils._signal('wiki', 'nat', epiweek)
  @staticmethod
  def signal_uili(region, epiweek):
    return AF_Utils._signal('uili', region, epiweek)
class Archefilter(Forecaster):
  # TODO: calculate backfill at runtime
  # Per-region backfill variance by lag (index 0 = largest lag); reversed and
  # padded/truncated in run() so the last element aligns with the most recent
  # week. Units presumably wILI variance — TODO confirm how these were fit.
  BF = {
    'nat': [0.133, 0.104, 0.071, 0.064, 0.057, 0.048, 0.041, 0.031, 0.028, 0.023],
    'hhs1': [0.173, 0.098, 0.083, 0.074, 0.066, 0.052, 0.044, 0.041, 0.036, 0.030],
    'hhs2': [0.384, 0.247, 0.179, 0.143, 0.117, 0.086, 0.064, 0.053, 0.049, 0.044],
    'hhs3': [0.268, 0.142, 0.106, 0.083, 0.072, 0.067, 0.062, 0.056, 0.052, 0.044],
    'hhs4': [0.160, 0.076, 0.051, 0.044, 0.039, 0.031, 0.030, 0.029, 0.024, 0.023],
    'hhs5': [0.159, 0.087, 0.071, 0.066, 0.061, 0.056, 0.051, 0.044, 0.037, 0.036],
    'hhs6': [0.239, 0.217, 0.096, 0.086, 0.065, 0.054, 0.053, 0.045, 0.041, 0.036],
    'hhs7': [0.255, 0.190, 0.124, 0.098, 0.072, 0.050, 0.037, 0.024, 0.023, 0.021],
    'hhs8': [0.160, 0.140, 0.130, 0.122, 0.121, 0.114, 0.110, 0.103, 0.098, 0.093],
    'hhs9': [0.679, 0.573, 0.446, 0.409, 0.378, 0.320, 0.267, 0.195, 0.170, 0.132],
    'hhs10': [0.371, 0.299, 0.250, 0.227, 0.210, 0.201, 0.188, 0.189, 0.186, 0.184],
  }
  def __init__(self, test_season, locations, num_samples):
    """Create an ArcheFilter forecaster for `test_season`.
    `num_samples` is the number of sample curves drawn per forecast.
    """
    super().__init__('fc-archefilter', test_season, locations)
    self.archetypes = {}
    self.num_samples = num_samples
  def run(self, epiweek):
    """Run one filtering step as of `epiweek`.
    Builds a FluProcess from observed (unstable) wILI, runs one UKF
    predict/update cycle against next week's digital signals, and stores
    the informed process in self.process for _forecast() to sample from.
    """
    process = FluProcess(self.archetypes)
    # timing
    ew0 = flu.join_epiweek(self.test_season, 30)
    ew1 = flu.add_epiweeks(ew0, 52)  # NOTE(review): ew1 is currently unused
    num_weeks = flu.delta_epiweeks(ew0, epiweek) + 1
    # setup each region
    _x, _P = [], []
    _Q = [0.5 ** 2] * 10
    # measurement noise: 11 twitter (nat + regions), 1 wiki (nat), 11 uili
    _R = [0.7 ** 2] * 11 + [0.5 ** 2] + [0.5 ** 2] * 11
    for region in AF_Utils.regions:
      # get unstable ili up until now
      wili = AF_Utils.get_unstable_wILI(region, ew0, epiweek)
      if len(wili) != num_weeks:
        raise Exception('missing data')
      # remove holiday effect
      wili = np.array(wili) * self.archetypes[region].holiday[:len(wili)]
      # TODO: use an actual backfill model
      bf_var = Archefilter.BF[region][::-1]
      while len(bf_var) < len(wili):
        bf_var = [bf_var[0]] + bf_var
      while len(bf_var) > len(wili):
        bf_var = bf_var[1:]
      bf_var = np.array(bf_var)
      # setup the flu process
      process.inform(region, wili, bf_var)
      # UKF data
      _x.append(wili[-1])
      _P.append(bf_var[-1])
    # set up the UKF
    x = np.array(_x)
    P = np.diag(_P)
    Q = np.diag(_Q)
    R = np.diag(_R)
    ukf = AF_Utils.initialize_filter(x, P, Q, R, process)
    # make it happen
    print(' [AF] state:', ukf.x)
    # predict next week's wILI
    ukf.predict()
    print(' [AF] state:', ukf.x)
    # measure digitial surveillance signals
    ew = flu.add_epiweeks(epiweek, 1)
    twitter, wiki, uili = [], [], []
    for region in ['nat'] + AF_Utils.regions:
      twitter.append(AF_Utils.signal_twitter(region, ew))
      if region == 'nat':
        wiki.append(AF_Utils.signal_wiki(ew))
      uili.append(AF_Utils.signal_uili(region, ew))
    measurement = np.array(twitter + wiki + uili)
    print(' [AF] measurement:', measurement)
    ukf.update(measurement)
    print(' [AF] state:', ukf.x)
    # update the process with the latest estimate
    # ('nat' is appended last so regional indices i line up with ukf.x)
    for (i, region) in enumerate(AF_Utils.regions + ['nat']):
      # get unstable ili up until now
      wili = AF_Utils.get_unstable_wILI(region, ew0, epiweek)
      if len(wili) != num_weeks:
        raise Exception('missing data')
      # remove holiday effect
      wili = np.array(wili) * self.archetypes[region].holiday[:len(wili)]
      # TODO: use an actual backfill model
      bf_var = Archefilter.BF[region][::-1]
      while len(bf_var) < len(wili):
        bf_var = [bf_var[0]] + bf_var
      while len(bf_var) > len(wili):
        bf_var = bf_var[1:]
      bf_var = np.array(bf_var)
      # add in the filter state
      if region == 'nat':
        national = AF_Utils.get_national(ukf.x)
        # TODO: what is national variance?
        x = np.mean(np.diag(ukf.P))
        est_mean = np.hstack((wili, np.array([national])))
        est_var = np.hstack((bf_var, np.array([x])))
      else:
        est_mean = np.hstack((wili, np.array([ukf.x[i]])))
        est_var = np.hstack((bf_var, np.array([ukf.P[i][i]])))
      process.inform(region, est_mean, est_var)
    self.process = process
  def _train(self, region):
    """Build the region's Archetype from past seasons (excluding the
    2008/2009 pandemic seasons)."""
    # get the data and build the archetype
    train_seasons = [season for season in range(2004, self.test_season) if season not in (2008, 2009)]
    curves = [AF_Utils.get_season(season, region) for season in train_seasons]
    self.archetypes[region] = Archetype(curves, baseline=0)
  def _forecast(self, region, epiweek):
    """Return sample forecast curves (weeks 10..42 of the season) for
    `region`. The filter itself runs once, on the 'nat' call."""
    if region == 'nat':
      self.run(epiweek)
    # use the process for each region to get sample curves
    curves, grid = self.process.get_sample_fits(region, self.num_samples, True)
    #if region == 'nat':
    #  import pylab as plt
    #  for c in curves[:25]:
    #    plt.plot(c, color='#888888', linewidth=1)
    #  ew0 = flu.join_epiweek(self.test_season, 30)
    #  wili = AF_Utils.get_unstable_wILI(region, ew0, epiweek)
    #  plt.plot(wili, color='#000000', linewidth=2)
    #  plt.show()
    #raise Exception()
    return [curve[10:43] for curve in curves]
| 36.966408 | 229 | 0.641619 |
0cca76af0f4b51b99c81d46eb6f4a88340fd661e | 25,234 | py | Python | cinder/brick/local_dev/lvm.py | Thingee/cinder | 721e657073d73d639619f839d935a463d32b59b5 | [
"Apache-2.0"
] | null | null | null | cinder/brick/local_dev/lvm.py | Thingee/cinder | 721e657073d73d639619f839d935a463d32b59b5 | [
"Apache-2.0"
] | null | null | null | cinder/brick/local_dev/lvm.py | Thingee/cinder | 721e657073d73d639619f839d935a463d32b59b5 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
LVM class for performing LVM operations.
"""
import math
import re
import itertools
from cinder.brick import exception
from cinder.brick import executor
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils as putils
LOG = logging.getLogger(__name__)
class LVM(executor.Executor):
    """LVM object to enable various LVM related operations."""
    def __init__(self, vg_name, root_helper, create_vg=False,
                 physical_volumes=None, lvm_type='default',
                 executor=putils.execute):
        """Initialize the LVM object.
        The LVM object is based on an LVM VolumeGroup, one instantiation
        for each VolumeGroup you have/use.
        :param vg_name: Name of existing VG or VG to create
        :param root_helper: Execution root_helper method to use
        :param create_vg: Indicates the VG doesn't exist
                          and we want to create it
        :param physical_volumes: List of PVs to build VG on
        :param lvm_type: VG and Volume type (default, or thin)
        :param executor: Execute method to use, None uses common/processutils
        :raises: VolumeGroupCreationFailed if vgcreate fails,
                 VolumeGroupNotFound if the VG cannot be located
        """
        super(LVM, self).__init__(execute=executor, root_helper=root_helper)
        self.vg_name = vg_name
        self.pv_list = []
        self.lv_list = []
        self.vg_size = 0.0
        self.vg_free_space = 0.0
        self.vg_lv_count = 0
        self.vg_uuid = None
        self.vg_thin_pool = None
        self.vg_thin_pool_size = 0.0
        self.vg_thin_pool_free_space = 0.0
        # capability flags, lazily computed by the properties below
        self._supports_snapshot_lv_activation = None
        self._supports_lvchange_ignoreskipactivation = None
        if create_vg and physical_volumes is not None:
            self.pv_list = physical_volumes
            try:
                self._create_vg(physical_volumes)
            except putils.ProcessExecutionError as err:
                LOG.exception(_('Error creating Volume Group'))
                LOG.error(_('Cmd :%s') % err.cmd)
                LOG.error(_('StdOut :%s') % err.stdout)
                LOG.error(_('StdErr :%s') % err.stderr)
                raise exception.VolumeGroupCreationFailed(vg_name=self.vg_name)
        if self._vg_exists() is False:
            LOG.error(_('Unable to locate Volume Group %s') % vg_name)
            raise exception.VolumeGroupNotFound(vg_name=vg_name)
        # NOTE: we assume that the VG has been activated outside of Cinder
        if lvm_type == 'thin':
            pool_name = "%s-pool" % self.vg_name
            if self.get_volume(pool_name) is None:
                self.create_thin_pool(pool_name)
            else:
                self.vg_thin_pool = pool_name
            self.activate_lv(self.vg_thin_pool)
        self.pv_list = self.get_all_physical_volumes(root_helper, vg_name)
    def _vg_exists(self):
        """Simple check to see if VG exists.
        :returns: True if vg specified in object exists, else False
        """
        exists = False
        (out, err) = self._execute(
            'env', 'LC_ALL=C', 'vgs', '--noheadings', '-o', 'name',
            self.vg_name, root_helper=self._root_helper, run_as_root=True)
        if out is not None:
            volume_groups = out.split()
            if self.vg_name in volume_groups:
                exists = True
        return exists
    def _create_vg(self, pv_list):
        # create the VG over the given physical volumes
        cmd = ['vgcreate', self.vg_name, ','.join(pv_list)]
        self._execute(*cmd, root_helper=self._root_helper, run_as_root=True)
    def _get_vg_uuid(self):
        # query the VG uuid; returns [] when vgs produced no output
        (out, err) = self._execute('env', 'LC_ALL=C', 'vgs', '--noheadings',
                                   '-o uuid', self.vg_name)
        if out is not None:
            return out.split()
        else:
            return []
    def _get_thin_pool_free_space(self, vg_name, thin_pool_name):
        """Returns available thin pool free space.
        :param vg_name: the vg where the pool is placed
        :param thin_pool_name: the thin pool to gather info for
        :returns: Free space in GB (float), calculated using data_percent
        """
        cmd = ['env', 'LC_ALL=C', 'lvs', '--noheadings', '--unit=g',
               '-o', 'size,data_percent', '--separator', ':', '--nosuffix']
        # NOTE(gfidente): data_percent only applies to some types of LV so we
        # make sure to append the actual thin pool name
        cmd.append("/dev/%s/%s" % (vg_name, thin_pool_name))
        free_space = 0.0
        try:
            (out, err) = self._execute(*cmd,
                                       root_helper=self._root_helper,
                                       run_as_root=True)
            if out is not None:
                out = out.strip()
                data = out.split(':')
                pool_size = float(data[0])
                data_percent = float(data[1])
                consumed_space = pool_size / 100 * data_percent
                free_space = pool_size - consumed_space
                free_space = round(free_space, 2)
        except putils.ProcessExecutionError as err:
            # best-effort: log and report 0.0 free rather than failing
            LOG.exception(_('Error querying thin pool about data_percent'))
            LOG.error(_('Cmd :%s') % err.cmd)
            LOG.error(_('StdOut :%s') % err.stdout)
            LOG.error(_('StdErr :%s') % err.stderr)
        return free_space
    @staticmethod
    def get_lvm_version(root_helper):
        """Static method to get LVM version from system.
        :param root_helper: root_helper to use for execute
        :returns: version 3-tuple (returns None implicitly if no
                  'LVM version' line is found in the output)
        """
        cmd = ['env', 'LC_ALL=C', 'vgs', '--version']
        (out, err) = putils.execute(*cmd,
                                    root_helper=root_helper,
                                    run_as_root=True)
        lines = out.split('\n')
        for line in lines:
            if 'LVM version' in line:
                version_list = line.split()
                # NOTE(gfidente): version is formatted as follows:
                # major.minor.patchlevel(library API version)[-customisation]
                version = version_list[2]
                version_filter = r"(\d+)\.(\d+)\.(\d+).*"
                r = re.search(version_filter, version)
                version_tuple = tuple(map(int, r.group(1, 2, 3)))
                return version_tuple
    @staticmethod
    def supports_thin_provisioning(root_helper):
        """Static method to check for thin LVM support on a system.
        :param root_helper: root_helper to use for execute
        :returns: True if supported, False otherwise
        """
        return LVM.get_lvm_version(root_helper) >= (2, 2, 95)
    @property
    def supports_snapshot_lv_activation(self):
        """Property indicating whether snap activation changes are supported.
        Check for LVM version >= 2.02.91.
        (LVM2 git: e8a40f6 Allow to activate snapshot)
        :returns: True/False indicating support
        """
        # cached after the first check
        if self._supports_snapshot_lv_activation is not None:
            return self._supports_snapshot_lv_activation
        self._supports_snapshot_lv_activation = (
            self.get_lvm_version(self._root_helper) >= (2, 2, 91))
        return self._supports_snapshot_lv_activation
    @property
    def supports_lvchange_ignoreskipactivation(self):
        """Property indicating whether lvchange can ignore skip activation.
        Check for LVM version >= 2.02.99.
        (LVM2 git: ab789c1bc add --ignoreactivationskip to lvchange)
        """
        # cached after the first check
        if self._supports_lvchange_ignoreskipactivation is not None:
            return self._supports_lvchange_ignoreskipactivation
        self._supports_lvchange_ignoreskipactivation = (
            self.get_lvm_version(self._root_helper) >= (2, 2, 99))
        return self._supports_lvchange_ignoreskipactivation
    @staticmethod
    def get_all_volumes(root_helper, vg_name=None):
        """Static method to get all LV's on a system.
        :param root_helper: root_helper to use for execute
        :param vg_name: optional, gathers info for only the specified VG
        :returns: List of Dictionaries with LV info
        """
        cmd = ['env', 'LC_ALL=C', 'lvs', '--noheadings', '--unit=g',
               '-o', 'vg_name,name,size', '--nosuffix']
        if vg_name is not None:
            cmd.append(vg_name)
        (out, err) = putils.execute(*cmd,
                                    root_helper=root_helper,
                                    run_as_root=True)
        lv_list = []
        if out is not None:
            volumes = out.split()
            # NOTE(review): itertools.izip is Python 2 only; this module
            # predates Python 3 support. The iter trick groups the flat
            # whitespace-split output into (vg, name, size) triples.
            for vg, name, size in itertools.izip(*[iter(volumes)] * 3):
                lv_list.append({"vg": vg, "name": name, "size": size})
        return lv_list
    def get_volumes(self):
        """Get all LV's associated with this instantiation (VG).
        :returns: List of Dictionaries with LV info
        """
        self.lv_list = self.get_all_volumes(self._root_helper, self.vg_name)
        return self.lv_list
    def get_volume(self, name):
        """Get reference object of volume specified by name.
        :returns: dict representation of Logical Volume if exists,
                  otherwise None (implicitly)
        """
        ref_list = self.get_volumes()
        for r in ref_list:
            if r['name'] == name:
                return r
    @staticmethod
    def get_all_physical_volumes(root_helper, vg_name=None):
        """Static method to get all PVs on a system.
        :param root_helper: root_helper to use for execute
        :param vg_name: optional, gathers info for only the specified VG
        :returns: List of Dictionaries with PV info
        """
        cmd = ['env', 'LC_ALL=C', 'pvs', '--noheadings',
               '--unit=g',
               '-o', 'vg_name,name,size,free',
               '--separator', ':',
               '--nosuffix']
        (out, err) = putils.execute(*cmd,
                                    root_helper=root_helper,
                                    run_as_root=True)
        pvs = out.split()
        if vg_name is not None:
            # filter by VG name (first ':'-separated field)
            pvs = [pv for pv in pvs if vg_name == pv.split(':')[0]]
        pv_list = []
        for pv in pvs:
            fields = pv.split(':')
            pv_list.append({'vg': fields[0],
                            'name': fields[1],
                            'size': float(fields[2]),
                            'available': float(fields[3])})
        return pv_list
    def get_physical_volumes(self):
        """Get all PVs associated with this instantiation (VG).
        :returns: List of Dictionaries with PV info
        """
        self.pv_list = self.get_all_physical_volumes(self._root_helper,
                                                     self.vg_name)
        return self.pv_list
    @staticmethod
    def get_all_volume_groups(root_helper, vg_name=None):
        """Static method to get all VGs on a system.
        :param root_helper: root_helper to use for execute
        :param vg_name: optional, gathers info for only the specified VG
        :returns: List of Dictionaries with VG info
        """
        cmd = ['env', 'LC_ALL=C', 'vgs', '--noheadings', '--unit=g',
               '-o', 'name,size,free,lv_count,uuid', '--separator', ':',
               '--nosuffix']
        if vg_name is not None:
            cmd.append(vg_name)
        (out, err) = putils.execute(*cmd,
                                    root_helper=root_helper,
                                    run_as_root=True)
        vg_list = []
        if out is not None:
            vgs = out.split()
            for vg in vgs:
                fields = vg.split(':')
                vg_list.append({'name': fields[0],
                                'size': float(fields[1]),
                                'available': float(fields[2]),
                                'lv_count': int(fields[3]),
                                'uuid': fields[4]})
        return vg_list
    def update_volume_group_info(self):
        """Update VG info for this instantiation.
        Used to update member fields of object and
        provide a dict of info for caller.
        :returns: Dictionaries of VG info
        :raises: VolumeGroupNotFound if exactly one VG is not found
        """
        vg_list = self.get_all_volume_groups(self._root_helper, self.vg_name)
        if len(vg_list) != 1:
            LOG.error(_('Unable to find VG: %s') % self.vg_name)
            raise exception.VolumeGroupNotFound(vg_name=self.vg_name)
        self.vg_size = float(vg_list[0]['size'])
        self.vg_free_space = float(vg_list[0]['available'])
        self.vg_lv_count = int(vg_list[0]['lv_count'])
        self.vg_uuid = vg_list[0]['uuid']
        if self.vg_thin_pool is not None:
            # also refresh thin-pool size and free space
            for lv in self.get_all_volumes(self._root_helper, self.vg_name):
                if lv['name'] == self.vg_thin_pool:
                    self.vg_thin_pool_size = lv['size']
                    tpfs = self._get_thin_pool_free_space(self.vg_name,
                                                          self.vg_thin_pool)
                    self.vg_thin_pool_free_space = tpfs
    def _calculate_thin_pool_size(self):
        """Calculates the correct size for a thin pool.
        Ideally we would use 100% of the containing volume group and be done.
        But the 100%VG notation to lvcreate is not implemented and thus cannot
        be used. See https://bugzilla.redhat.com/show_bug.cgi?id=998347
        Further, some amount of free space must remain in the volume group for
        metadata for the contained logical volumes. The exact amount depends
        on how much volume sharing you expect.
        :returns: An lvcreate-ready string for the number of calculated bytes.
        """
        # make sure volume group information is current
        self.update_volume_group_info()
        # leave 5% free for metadata
        return "%sg" % (self.vg_free_space * 0.95)
    def create_thin_pool(self, name=None, size_str=None):
        """Creates a thin provisioning pool for this VG.
        The syntax here is slightly different than the default
        lvcreate -T, so we'll just write a custom cmd here
        and do it.
        :param name: Name to use for pool, default is "<vg-name>-pool"
        :param size_str: Size to allocate for pool, default is entire VG
        :returns: The size string passed to the lvcreate command,
                  or None if thin provisioning is unsupported
        """
        if not self.supports_thin_provisioning(self._root_helper):
            LOG.error(_('Requested to setup thin provisioning, '
                        'however current LVM version does not '
                        'support it.'))
            return None
        if name is None:
            name = '%s-pool' % self.vg_name
        vg_pool_name = '%s/%s' % (self.vg_name, name)
        if not size_str:
            size_str = self._calculate_thin_pool_size()
        cmd = ['lvcreate', '-T', '-L', size_str, vg_pool_name]
        LOG.debug(_('Created thin pool \'%(pool)s\' with size %(size)s of '
                    'total %(free)sg') % {'pool': vg_pool_name,
                                          'size': size_str,
                                          'free': self.vg_free_space})
        self._execute(*cmd,
                      root_helper=self._root_helper,
                      run_as_root=True)
        self.vg_thin_pool = name
        return size_str
    def create_volume(self, name, size_str, lv_type='default', mirror_count=0):
        """Creates a logical volume on the object's VG.
        :param name: Name to use when creating Logical Volume
        :param size_str: Size to use when creating Logical Volume
        :param lv_type: Type of Volume (default or thin)
        :param mirror_count: Use LVM mirroring with specified count
        """
        if lv_type == 'thin':
            pool_path = '%s/%s' % (self.vg_name, self.vg_thin_pool)
            cmd = ['lvcreate', '-T', '-V', size_str, '-n', name, pool_path]
        else:
            cmd = ['lvcreate', '-n', name, self.vg_name, '-L', size_str]
        if mirror_count > 0:
            cmd.extend(['-m', mirror_count, '--nosync',
                        '--mirrorlog', 'mirrored'])
            terras = int(size_str[:-1]) / 1024.0
            if terras >= 1.5:
                rsize = int(2 ** math.ceil(math.log(terras) / math.log(2)))
                # NOTE(vish): Next power of two for region size. See:
                # http://red.ht/U2BPOD
                cmd.extend(['-R', str(rsize)])
        try:
            self._execute(*cmd,
                          root_helper=self._root_helper,
                          run_as_root=True)
        except putils.ProcessExecutionError as err:
            LOG.exception(_('Error creating Volume'))
            LOG.error(_('Cmd :%s') % err.cmd)
            LOG.error(_('StdOut :%s') % err.stdout)
            LOG.error(_('StdErr :%s') % err.stderr)
            raise
    def create_lv_snapshot(self, name, source_lv_name, lv_type='default'):
        """Creates a snapshot of a logical volume.
        :param name: Name to assign to new snapshot
        :param source_lv_name: Name of Logical Volume to snapshot
        :param lv_type: Type of LV (default or thin)
        :raises: VolumeDeviceNotFound if the source LV does not exist
        """
        source_lvref = self.get_volume(source_lv_name)
        if source_lvref is None:
            LOG.error(_("Trying to create snapshot by non-existent LV: %s")
                      % source_lv_name)
            raise exception.VolumeDeviceNotFound(device=source_lv_name)
        cmd = ['lvcreate', '--name', name,
               '--snapshot', '%s/%s' % (self.vg_name, source_lv_name)]
        if lv_type != 'thin':
            # non-thin snapshots need an explicit size; use the source's size
            size = source_lvref['size']
            cmd.extend(['-L', '%sg' % (size)])
        try:
            self._execute(*cmd,
                          root_helper=self._root_helper,
                          run_as_root=True)
        except putils.ProcessExecutionError as err:
            LOG.exception(_('Error creating snapshot'))
            LOG.error(_('Cmd :%s') % err.cmd)
            LOG.error(_('StdOut :%s') % err.stdout)
            LOG.error(_('StdErr :%s') % err.stderr)
            raise
    def _mangle_lv_name(self, name):
        # Linux LVM reserves name that starts with snapshot, so that
        # such volume name can't be created. Mangle it.
        if not name.startswith('snapshot'):
            return name
        return '_' + name
    def activate_lv(self, name, is_snapshot=False):
        """Ensure that logical volume/snapshot logical volume is activated.
        :param name: Name of LV to activate
        :param is_snapshot: whether the LV is a snapshot
        :raises: putils.ProcessExecutionError
        """
        # This is a no-op if requested for a snapshot on a version
        # of LVM that doesn't support snapshot activation.
        # (Assume snapshot LV is always active.)
        if is_snapshot and not self.supports_snapshot_lv_activation:
            return
        lv_path = self.vg_name + '/' + self._mangle_lv_name(name)
        # Must pass --yes to activate both the snap LV and its origin LV.
        # Otherwise lvchange asks if you would like to do this interactively,
        # and fails.
        cmd = ['lvchange', '-a', 'y', '--yes']
        if self.supports_lvchange_ignoreskipactivation:
            cmd.append('-K')
        cmd.append(lv_path)
        try:
            self._execute(*cmd,
                          root_helper=self._root_helper,
                          run_as_root=True)
        except putils.ProcessExecutionError as err:
            LOG.exception(_('Error activating LV'))
            LOG.error(_('Cmd :%s') % err.cmd)
            LOG.error(_('StdOut :%s') % err.stdout)
            LOG.error(_('StdErr :%s') % err.stderr)
            raise
    def delete(self, name):
        """Delete logical volume or snapshot.
        :param name: Name of LV to delete
        """
        def run_udevadm_settle():
            # wait for udev to finish processing queued events
            self._execute('udevadm', 'settle',
                          root_helper=self._root_helper, run_as_root=True,
                          check_exit_code=False)
        try:
            need_force_remove = False
            # LV removal seems to be a race with udev in
            # some cases (see LP #1270192), so we do it in several steps:
            # - Deactivate the LV/Snapshot, which triggers udev events
            # - Wait for udev to finish its job with udevadmn settle
            # - Remove the LV
            try:
                self._execute('lvchange', '-y', '-an',
                              '%s/%s' % (self.vg_name, name),
                              root_helper=self._root_helper, run_as_root=True)
            except putils.ProcessExecutionError as err:
                mesg = (_('Error during lvchange -an: CMD: %(command)s, '
                          'RESPONSE: %(response)s') %
                        {'command': err.cmd, 'response': err.stderr})
                LOG.debug(mesg)
                need_force_remove = True
            run_udevadm_settle()
            cmd = ['lvremove', ]
            # if deactivation failed, use the --force, lvm!
            if need_force_remove:
                cmd.append('-f')
            cmd.append('%s/%s' % (self.vg_name, name))
            self._execute(*cmd,
                          root_helper=self._root_helper, run_as_root=True)
        except putils.ProcessExecutionError as err:
            mesg = (_('Error reported running lvremove: CMD: %(command)s, '
                      'RESPONSE: %(response)s') %
                    {'command': err.cmd, 'response': err.stderr})
            LOG.debug(mesg)
            LOG.debug(_('Attempting udev settle and retry of lvremove...'))
            run_udevadm_settle()
            self._execute('lvremove',
                          '-f',
                          '%s/%s' % (self.vg_name, name),
                          root_helper=self._root_helper, run_as_root=True)
    def revert(self, snapshot_name):
        """Revert an LV from snapshot.
        :param snapshot_name: Name of snapshot to revert
        """
        self._execute('lvconvert', '--merge',
                      snapshot_name, root_helper=self._root_helper,
                      run_as_root=True)
    def lv_has_snapshot(self, name):
        # An Attr string starting with 'o'/'O' marks the LV as the origin of
        # a snapshot (per the lvs/lvdisplay attribute field) — so True means
        # at least one snapshot of this LV exists.
        out, err = self._execute(
            'env', 'LC_ALL=C', 'lvdisplay', '--noheading',
            '-C', '-o', 'Attr', '%s/%s' % (self.vg_name, name),
            root_helper=self._root_helper, run_as_root=True)
        if out:
            out = out.strip()
            if (out[0] == 'o') or (out[0] == 'O'):
                return True
        return False
    def extend_volume(self, lv_name, new_size):
        """Extend the size of an existing volume."""
        try:
            self._execute('lvextend', '-L', new_size,
                          '%s/%s' % (self.vg_name, lv_name),
                          root_helper=self._root_helper,
                          run_as_root=True)
        except putils.ProcessExecutionError as err:
            LOG.exception(_('Error extending Volume'))
            LOG.error(_('Cmd :%s') % err.cmd)
            LOG.error(_('StdOut :%s') % err.stdout)
            LOG.error(_('StdErr :%s') % err.stderr)
            raise
    def vg_mirror_free_space(self, mirror_count):
        """Return the capacity (in GB) usable for mirrored LVs, computed by
        greedily matching extents of the smallest PV against the largest
        `mirror_count` PVs until too few disks remain."""
        free_capacity = 0.0
        disks = []
        for pv in self.pv_list:
            disks.append(float(pv['available']))
        while True:
            disks = sorted([a for a in disks if a > 0.0], reverse=True)
            if len(disks) <= mirror_count:
                break
            # consume the smallest disk
            disk = disks[-1]
            disks = disks[:-1]
            # match extents for each mirror on the largest disks
            for index in list(range(mirror_count)):
                disks[index] -= disk
            free_capacity += disk
        return free_capacity
    def vg_mirror_size(self, mirror_count):
        # each mirror plus the original consumes one copy of the data
        return (self.vg_free_space / (mirror_count + 1))
    def rename_volume(self, lv_name, new_name):
        """Change the name of an existing volume."""
        try:
            self._execute('lvrename', self.vg_name, lv_name, new_name,
                          root_helper=self._root_helper,
                          run_as_root=True)
        except putils.ProcessExecutionError as err:
            LOG.exception(_('Error renaming logical volume'))
            LOG.error(_('Cmd :%s') % err.cmd)
            LOG.error(_('StdOut :%s') % err.stdout)
            LOG.error(_('StdErr :%s') % err.stderr)
            raise
d67ba5f5b2036a87965a6eba28ac0cae75e397b2 | 6,643 | py | Python | lhotse/dataset/source_separation.py | stachu86/lhotse | d5e78154db2d4d52f15aaadc8882f76eb5b77640 | [
"Apache-2.0"
] | 353 | 2020-10-31T10:38:51.000Z | 2022-03-30T05:22:52.000Z | lhotse/dataset/source_separation.py | stachu86/lhotse | d5e78154db2d4d52f15aaadc8882f76eb5b77640 | [
"Apache-2.0"
] | 353 | 2020-10-27T23:25:12.000Z | 2022-03-31T22:16:05.000Z | lhotse/dataset/source_separation.py | stachu86/lhotse | d5e78154db2d4d52f15aaadc8882f76eb5b77640 | [
"Apache-2.0"
] | 66 | 2020-11-01T06:08:08.000Z | 2022-03-29T02:03:07.000Z | import warnings
from typing import Dict, List, Optional, Tuple
import torch
from torch.utils.data import Dataset
from lhotse import validate
from lhotse.cut import Cut, CutSet, MonoCut
from lhotse.utils import EPSILON
class SourceSeparationDataset(Dataset):
    """
    .. warning: Speech separation datasets are not yet updated to use the new Lhotse's sampling mechanism.
    An abstract base class, implementing PyTorch Dataset for the source separation task.
    It's created from two CutSets - one provides the audio cuts for the sources, and the other one the audio cuts for
    the signal mix. When queried for data samples, it returns a dict of:
    .. code-block::
        {
            'sources': (N x T x F) tensor,
            'mixture': (T x F) tensor,
            'real_mask': (N x T x F) tensor,
            'binary_mask': (T x F) tensor
        }
    """

    def __init__(
        self,
        sources_set: CutSet,
        mixtures_set: CutSet,
    ):
        super().__init__()
        warnings.warn(
            "Speech separation datasets are not yet updated to use the new Lhotse's sampling mechanism."
        )
        self.sources_set = sources_set
        self.mixtures_set = mixtures_set
        # The mixture cut IDs define the indexing order of the dataset.
        self.cut_ids = list(self.mixtures_set.ids)

    def _obtain_mixture(self, cut_id: str) -> Tuple[Cut, List[MonoCut]]:
        # Subclasses resolve a mixture cut ID into (mixture_cut, source_cuts).
        raise NotImplementedError(
            "You are using SpeechSeparationDataset, which is an abstract base class; instead, "
            "use one of its derived classes that specify whether the mix is pre-computed or "
            "done dynamically (on-the-fly)."
        )

    def validate(self):
        validate(self.sources_set)
        validate(self.mixtures_set)
        # Make sure it's possible to iterate through the whole dataset and resolve the sources for each mixture
        for cut in self.mixtures_set.mixed_cuts.values():
            _, source_cuts = self._obtain_mixture(cut.id)
            assert len(source_cuts) > 1

    def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:
        cut_id = self.cut_ids[idx]
        mixture_cut, source_cuts = self._obtain_mixture(cut_id=cut_id)
        mixture = torch.from_numpy(mixture_cut.load_features())
        # Stack the per-source feature matrices into an (N x T x F) tensor.
        sources = torch.stack(
            [
                torch.from_numpy(source_cut.load_features())
                for source_cut in source_cuts
            ],
            dim=0,
        )
        # Compute the masks given the source features
        # NOTE(review): the exp() suggests the features are log-domain; the
        # soft mask is each source's share of the summed linear-domain
        # energy (EPSILON guards against division by zero) -- confirm the
        # feature type against the extractor used upstream.
        sources_exp = sources.exp()
        real_mask = sources_exp / (sources_exp.sum(0, keepdim=True) + EPSILON)
        # Get the src idx having the maximum energy
        binary_mask = real_mask.argmax(0)
        return {
            "sources": sources,
            "mixture": mixture,
            "real_mask": real_mask,
            "binary_mask": binary_mask,
        }

    def __len__(self):
        return len(self.cut_ids)
class DynamicallyMixedSourceSeparationDataset(SourceSeparationDataset):
    """
    A source-separation Dataset whose mixtures are assembled on the fly in
    the feature domain.

    Built from:
    - ``sources_set``: cuts for the separation targets,
    - ``mixtures_set``: MixedCuts describing which cuts get mixed together,
    - ``nonsources_set``: *(optional)* cuts present in the mix that are not
      separation targets (useful for adding noise).

    Samples are dicts with 'sources' (N x T x F), 'mixture' (T x F),
    'real_mask' (N x T x F) and 'binary_mask' (T x F) tensors, as produced
    by :class:`SourceSeparationDataset`.
    """
    def __init__(
        self,
        sources_set: CutSet,
        mixtures_set: CutSet,
        nonsources_set: Optional[CutSet] = None,
    ):
        super().__init__(sources_set=sources_set, mixtures_set=mixtures_set)
        self.nonsources_set = nonsources_set

    def validate(self):
        super().validate()
        validate(self.nonsources_set)

    def _obtain_mixture(self, cut_id: str) -> Tuple[Cut, List[MonoCut]]:
        mixture_cut = self.mixtures_set.mixed_cuts[cut_id]
        # Tracks whose cuts are absent from the sources set are noise, not
        # separation targets, so they are filtered out here.
        source_cuts = []
        for track in mixture_cut.tracks:
            if track.cut.id in self.sources_set:
                source_cuts.append(track.cut)
        return mixture_cut, source_cuts
class PreMixedSourceSeparationDataset(SourceSeparationDataset):
    """
    A source-separation Dataset whose mixtures were pre-mixed in the time
    domain: both CutSets return regular Cuts, and no on-the-fly
    feature-domain mixing is performed (contrast with
    DynamicallyMixedSourceSeparationDataset).

    Samples are dicts with 'sources' (N x T x F), 'mixture' (T x F),
    'real_mask' (N x T x F) and 'binary_mask' (T x F) tensors.
    """
    def __init__(
        self,
        sources_set: CutSet,
        mixtures_set: CutSet,
    ):
        # This assumes every cut spans a whole recording (one recording ==
        # one utterance), so recording_id is a reliable mixture <=> sources
        # key. Supporting cuts that are fragments of recordings (e.g. one
        # utterance inside a 15-minute conversation) would require an
        # externally supplied mapping instead.
        self.mixture_to_source = {}
        for mixture_cut in mixtures_set:
            # Mixture and source cuts are expected to share recording_ids.
            matching_ids = [
                source_cut.id
                for source_cut in sources_set
                if source_cut.recording_id == mixture_cut.recording_id
            ]
            self.mixture_to_source[mixture_cut.id] = matching_ids
        super().__init__(sources_set=sources_set, mixtures_set=mixtures_set)

    def _obtain_mixture(self, cut_id: str) -> Tuple[Cut, List[MonoCut]]:
        mixture_cut = self.mixtures_set.cuts[cut_id]
        source_ids = self.mixture_to_source[mixture_cut.id]
        source_cuts = [self.sources_set.cuts[source_id] for source_id in source_ids]
        return mixture_cut, source_cuts
| 37.111732 | 117 | 0.643384 |
460d2e52b2fb51da4b0162652003424975714004 | 66 | py | Python | what_is_the_name_main_in_python/demo2.py | NightmareQAQ/python-notes | 4e766be06073a495ff9654f0dd8c0bb03310c559 | [
"MIT"
] | 106 | 2017-05-02T10:25:50.000Z | 2022-03-23T14:57:28.000Z | what_is_the_name_main_in_python/demo2.py | NightmareQAQ/python-notes | 4e766be06073a495ff9654f0dd8c0bb03310c559 | [
"MIT"
] | 2 | 2021-01-14T15:07:15.000Z | 2021-12-21T07:18:05.000Z | what_is_the_name_main_in_python/demo2.py | NightmareQAQ/python-notes | 4e766be06073a495ff9654f0dd8c0bb03310c559 | [
"MIT"
] | 42 | 2017-07-31T07:07:38.000Z | 2021-12-26T09:36:55.000Z | from demo1 import a1_func
# Runs when demo2 is imported or executed; the `from demo1 import a1_func`
# above has already executed demo1's module-level code once.
print('demo2.py is called')
a1_func()
| 11 | 27 | 0.742424 |
34d8707282f0434b5fad3873976dc09a092489d2 | 5,230 | py | Python | tests/dolfinx/test_aaa.py | hadivafaii/vedo | 15f9adbd36d25c0212cbd4eb0c15af54c19f3819 | [
"CC0-1.0"
] | 1 | 2021-04-25T06:28:01.000Z | 2021-04-25T06:28:01.000Z | tests/dolfinx/test_aaa.py | hadivafaii/vedo | 15f9adbd36d25c0212cbd4eb0c15af54c19f3819 | [
"CC0-1.0"
] | null | null | null | tests/dolfinx/test_aaa.py | hadivafaii/vedo | 15f9adbd36d25c0212cbd4eb0c15af54c19f3819 | [
"CC0-1.0"
] | null | null | null | # Copyright (C) 2014 Garth N. Wells
#
# This file is part of DOLFIN (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
# This demo solves the equations of static linear elasticity for a
# pulley subjected to centripetal accelerations. The solver uses
# smoothed aggregation algebraic multigrid.
from contextlib import ExitStack
import numpy as np
from petsc4py import PETSc
import dolfinx
from dolfinx import (MPI, UnitCubeMesh,
DirichletBC, Function, VectorFunctionSpace, cpp)
from dolfinx.cpp.mesh import CellType
from dolfinx.fem import apply_lifting, assemble_matrix, assemble_vector, set_bc
#from dolfin.io import XDMFFile
from dolfinx.la import VectorSpaceBasis
from ufl import (Identity, SpatialCoordinate, TestFunction, TrialFunction,
as_vector, dx, grad, inner, sym, tr)
def build_nullspace(V):
    """Function to build null space for 3D elasticity.

    Constructs the six rigid-body modes (3 translations + 3 rotations) of
    the vector function space *V* and returns them as an orthonormalized
    PETSc NullSpace, for use as the near-nullspace of an AMG preconditioner.
    """
    # Create list of vectors for null space
    index_map = V.dofmap.index_map
    nullspace_basis = [cpp.la.create_vector(index_map) for i in range(6)]
    # ExitStack keeps all six local forms open while the basis is filled.
    with ExitStack() as stack:
        vec_local = [stack.enter_context(x.localForm()) for x in nullspace_basis]
        basis = [np.asarray(x) for x in vec_local]
        # Build translational null space basis
        V.sub(0).dofmap.set(basis[0], 1.0)
        V.sub(1).dofmap.set(basis[1], 1.0)
        V.sub(2).dofmap.set(basis[2], 1.0)
        # Build rotational null space basis
        V.sub(0).set_x(basis[3], -1.0, 1)
        V.sub(1).set_x(basis[3], 1.0, 0)
        V.sub(0).set_x(basis[4], 1.0, 2)
        V.sub(2).set_x(basis[4], -1.0, 0)
        V.sub(2).set_x(basis[5], 1.0, 1)
        V.sub(1).set_x(basis[5], -1.0, 2)
    # Create vector space basis and orthogonalize
    basis = VectorSpaceBasis(nullspace_basis)
    basis.orthonormalize()
    _x = [basis[i] for i in range(6)]
    nsp = PETSc.NullSpace()
    nsp.create(_x)
    return nsp
# Load mesh from file
# mesh = Mesh(MPI.comm_world)
# XDMFFile(MPI.comm_world, "../pulley.xdmf").read(mesh)
# A small unit cube stands in for the pulley mesh in this test.
mesh = UnitCubeMesh(MPI.comm_world, 3, 3, 3)
#mesh = BoxMesh(
#    MPI.comm_world, [np.array([0.0, 0.0, 0.0]),
#                     np.array([2.0, 1.0, 1.0])], [12, 12, 12],
#    CellType.tetrahedron, dolfin.cpp.mesh.GhostMode.none)
# Attach a coordinate map so geometry can be interpolated on the mesh.
cmap = dolfinx.fem.create_coordinate_map(mesh.ufl_domain())
mesh.geometry.coord_mapping = cmap
def boundary(x):
    """Mark points lying on the x=0 or x=1 faces (within float tolerance).

    *x* is a coordinate array whose first row holds the x-components;
    returns a boolean array with True for points on either face.
    """
    tol = 10.0 * np.finfo(float).eps
    on_left_face = x[0] < tol
    on_right_face = x[0] > 1.0 - tol
    return np.logical_or(on_left_face, on_right_face)
# Rotation rate and mass density
omega = 300.0
rho = 10.0
# Loading due to centripetal acceleration (rho*omega^2*x_i)
x = SpatialCoordinate(mesh)
f = as_vector((rho * omega**2 * x[0], rho * omega**2 * x[1], 0.0))
# Elasticity parameters
E = 1.0e9
nu = 0.0  # NOTE(review): zero Poisson ratio makes lmbda == 0 below
mu = E / (2.0 * (1.0 + nu))
lmbda = E * nu / ((1.0 + nu) * (1.0 - 2.0 * nu))
def sigma(v):
    """Isotropic linear-elastic stress tensor for displacement field *v*."""
    # Hoist the symmetric gradient (strain) so it is written only once;
    # the UFL expression is symbolically identical to the inline form.
    strain = sym(grad(v))
    return 2.0 * mu * strain + lmbda * tr(strain) * Identity(len(v))
# Create function space
V = VectorFunctionSpace(mesh, ("Lagrange", 1))
# Define variational problem
u = TrialFunction(V)
v = TestFunction(V)
a = inner(sigma(u), grad(v)) * dx
L = inner(f, v) * dx
# Zero-valued function used as the Dirichlet boundary value.
u0 = Function(V)
with u0.vector.localForm() as bc_local:
    bc_local.set(0.0)
# Set up boundary condition on inner surface
bc = DirichletBC(V, u0, boundary)
# Assemble system, applying boundary conditions and preserving symmetry
A = assemble_matrix(a, [bc])
A.assemble()
b = assemble_vector(L)
# Lift the BC contribution into the RHS, sync ghost values across ranks,
# then set the constrained entries.
apply_lifting(b, [a], [[bc]])
b.ghostUpdate(addv=PETSc.InsertMode.ADD, mode=PETSc.ScatterMode.REVERSE)
set_bc(b, [bc])
# Create solution function
u = Function(V)
# Create near null space basis (required for smoothed aggregation AMG).
null_space = build_nullspace(V)
# Attach near nullspace to matrix
A.setNearNullSpace(null_space)
# Set solver options
opts = PETSc.Options()
opts["ksp_type"] = "cg"
opts["ksp_rtol"] = 1.0e-12
opts["pc_type"] = "gamg"
# Use Chebyshev smoothing for multigrid
opts["mg_levels_ksp_type"] = "chebyshev"
opts["mg_levels_pc_type"] = "jacobi"
# Improve estimate of eigenvalues for Chebyshev smoothing
opts["mg_levels_esteig_ksp_type"] = "cg"
opts["mg_levels_ksp_chebyshev_esteig_steps"] = 20
# Create CG Krylov solver and turn convergence monitoring on
solver = PETSc.KSP().create(MPI.comm_world)
solver.setFromOptions()
# Set matrix operator
solver.setOperators(A)
# Compute solution
solver.setMonitor(lambda ksp, its, rnorm: print("Iteration: {}, rel. residual: {}".format(its, rnorm)))
solver.solve(b, u.vector)
#solver.view()
############################### Plot solution
from vedo.dolfin import plot
# Render the displaced mesh off-screen (no interactive window opens).
plot(u, mode="displaced mesh",
     scalarbar=False,
     axes=1,
     bg='white',
     viewup='z',
     offscreen=1)
#################################################################################
from vedo import settings, screenshot
# Grab the actor vedo created for the plot above and dump its scalar field.
actor = settings.plotter_instance.actors[0]
solution = actor.scalars(0)
screenshot('elasticbeam.png')
print('ArrayNames', actor.getArrayNames())
print('min', 'mean', 'max, N:')
print(np.min(solution), np.mean(solution), np.max(solution), len(solution))
# Plot solution
# import matplotlib.pyplot as plt
# import dolfin.plotting
# dolfin.plotting.plot(u)
# plt.show() | 28.27027 | 103 | 0.668069 |
5f10f314220cca69e42e99b0d05e93a709b4c5f4 | 8,238 | py | Python | reports/configs/try_all_logs_dgin3_1/other_config.py | hengwei-chan/graph_network_demo | 542f2a59b1b9708abdc718d77db7111f3ba2df96 | [
"MIT"
] | 1 | 2021-10-18T03:44:53.000Z | 2021-10-18T03:44:53.000Z | reports/configs/try_all_logs_dgin3_1/other_config.py | hengwei-chan/graph_network_demo | 542f2a59b1b9708abdc718d77db7111f3ba2df96 | [
"MIT"
] | null | null | null | reports/configs/try_all_logs_dgin3_1/other_config.py | hengwei-chan/graph_network_demo | 542f2a59b1b9708abdc718d77db7111f3ba2df96 | [
"MIT"
] | 1 | 2022-02-22T08:32:01.000Z | 2022-02-22T08:32:01.000Z | from dataclasses import dataclass, field
from typing import List
import tensorflow as tf
from graph_networks.utilities import *
import logging
import os
# Feature dimensionalities for the DGIN3 featurization; the DGIN3_*
# constants come from the star import of graph_networks.utilities above.
ATOM_FEATURE_DIM = DGIN3_ATOM_FEATURE_DIM
EDGE_FEATURE_DIM = DGIN3_EDGE_FEATURE_DIM
@dataclass
class BasicModelConfig:
    """
    Config for model1/2/3 run file.
    General model parameters: run naming, data locations, train/test
    switches and all report/log output directories.
    """
    model_name: str = 'try_all_logs_dgin3_1' # without h_w in DGIN gin part - added h_v_0 instead
    # whole train/eval split - no more double split within train data set
    # random train/test split in get_data_sd - only change overall_seed
    # CHANGES dgin3 10.02.2021:
    # *added new bondFeaturesDGIN2 and atomFeaturesDGIN2; DGIN2_ATOM_FEATURE_DIM; DGIN2_EDGE_FEATURE_DIM
    # *from project_path+'data/processed/lipo/pickled/train_frags3/' to project_path+'data/processed/lipo/pickled/test_frags3/'
    # CHANGES dgin3 16.02.2021:
    # *added new bondFeaturesDGIN3 and atomFeaturesDGIN3; DGIN3_ATOM_FEATURE_DIM; DGIN3_EDGE_FEATURE_DIM
    # *from project_path+'data/processed/lipo/pickled/train_frags_dgin3/' to project_path+'data/processed/lipo/pickled/test_frags_dgin3/'
    # CHANGES dgin4 16.02.2021:
    # *added add_species bool in model1 config - previously not there; for dgin2 featurization adds the species type after the dgin
    # encoding before logD prediction
    # test_frags_dgin4 was added for species inclusion in model2 call()
    batch_size: int =15
    override_if_exists: bool = True
    overall_seed: int = 2
    # path to the project folder
    project_path:str = "./"
    retrain_model: bool = False
    retrain_model_name: str = ''
    retrain_model_epoch: str = ''
    retrain_model_weights_dir: str = project_path+'reports/model_weights/'+retrain_model_name+'/epoch_'+retrain_model_epoch+'/checkp_'+retrain_model_epoch
    train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin3_logd/'
    test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin3_logd/'
    combined_dataset: bool = True
    add_train_data_dir: str = project_path+'data/processed/lipo/pickled/train_dgin3_logs/'
    add_test_data_dir: str = project_path+'data/processed/lipo/pickled/test_dgin3_logs/'
    test_model: bool = False
    test_model_epoch: str = '1520'
    # define the number or test runs for the CI.
    # the mean and std of the RMSE and r^2 of the combined runs are taken as the output.
    test_n_times: int = 2
    # do you want to test the model with consensus mode?
    # if yes, a defined ML model will be included in the consensus predictions during the testing.
    consensus: bool = True
    # include dropout during testing?
    include_dropout: bool = False
    test_model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/epoch_'+test_model_epoch+'/checkp_'+test_model_epoch
    encode_hidden: bool = False
    log_dir: str = project_path+'reports/logs/'+model_name+'.log'
    # Plain class attribute (not a dataclass field): logging verbosity.
    verbosity_level = logging.INFO
    plot_dir: str = project_path+'reports/figures/'+model_name+'/'
    tensorboard_log_dir: str = project_path+'reports/tensorboard/'+model_name+'/'
    config_log_dir: str = project_path+'reports/configs/'+model_name+'/'
    model_weights_dir: str = project_path+'reports/model_weights/'+model_name+'/'
    stats_log_dir: str = project_path+'reports/stats/'+model_name+'/'
@dataclass
class DGINConfig:
    """
    Config for the directed-MPNN (D-GIN) class: dropout/layernorm switches
    for the aggregate and passing steps, message-iteration counts, and the
    derived input sizes.
    """
    dropout_aggregate_dmpnn: bool = False
    layernorm_aggregate_dmpnn: bool = True
    dropout_passing_dmpnn: bool = False
    layernorm_passing_dmpnn: bool = True
    dropout_aggregate_gin: bool = False
    layernorm_aggregate_gin: bool = True
    dropout_passing_gin: bool = False
    layernorm_passing_gin: bool = True
    gin_aggregate_bias: bool = False
    dmpnn_passing_bias: bool = False
    init_bias: bool = False
    # NOTE(review): "massge" is a typo for "message", but the field name is
    # read elsewhere in the project, so it is kept for compatibility.
    massge_iteration_dmpnn: int = 4
    message_iterations_gin: int = 4
    dropout_rate: float = 0.15
    input_size: int = (ATOM_FEATURE_DIM+EDGE_FEATURE_DIM) # combination of node feature len (33) and edge feature len (12)
    passing_hidden_size: int = 56 # this can be changed
    input_size_gin: int = (ATOM_FEATURE_DIM+passing_hidden_size)
    return_hv: bool = True # model3 parameter
@dataclass
class Model1Config:
    """
    Config model1 class - no subclass configs are defined here.
    Covers optimizer/loss settings, readout-layer sizes, which endpoints
    (logD/logS/logP/...) are trained, and the model-saving thresholds.
    """
    validation_split: float = 0.90
    learning_rate: float = 0.004
    clip_rate: float = 0.6
    # Plain class attributes (not dataclass fields): TF objects shared by
    # all instances.
    optimizer = tf.keras.optimizers.Adam(learning_rate)
    lipo_loss_mse = tf.keras.losses.mse
    lipo_loss_mae = tf.keras.losses.mae
    logP_loss_mse = tf.keras.losses.mse
    logS_loss_mse = tf.keras.losses.mse
    mw_loss_mse = tf.keras.losses.mse
    metric = tf.keras.losses.mae
    epochs: int = 1600
    safe_after_batch: int = 3
    dropout_rate: float = 0.15 # the overall dropout rate of the readout functions
    train_data_seed: int = 0
    hidden_readout_1: int = 32
    hidden_readout_2: int = 14
    activation_func_readout = tf.nn.relu
    include_logD: bool = True
    include_logS: bool = True
    include_logP: bool = True
    include_mw: bool = False
    include_rot_bond: bool = False
    include_HBA: bool = False
    include_HBD: bool = False
    best_evaluation_threshold: float = 2.45 #was introduced on the 25.03.2021/
    # define the individual thresholds. If one model is better, the corresponding
    # model weights are being saved.
    best_evaluation_threshold_logd: float = 1.85
    best_evaluation_threshold_logp: float = 1.65
    best_evaluation_threshold_logs: float = 2.15
    # 2.45 for all_logs
    # 0.70 logP
    # 0.75 logD
    # 1.00 logS
    # 1.75 logSD
    # 1.70 logSP
    # 1.45 logDP
    include_fragment_conv: bool = False # was introduced on the 4.12.2020
    use_rmse: bool = True # uses RMSE instead of MSE for only lipo_loss
    shuffle_inside: bool = True # reshuffles the train/valid test seach in each epoch (generalizes)
    add_species: bool = False # 16.02 introduction; previously not there; for dgin3 adds the species type after the dgin encoding before logD prediction
@dataclass
class FrACConfig:
    """
    Config fragment aggregation class - no subclass configs are defined here.
    """
    input_size_gin: int = 28
    layernorm_aggregate: bool = True
    reduce_mean: bool = True # when false -> reduce_sum
@dataclass
class MLConfig:
    """
    Configs for the ML algorithm used in consensus predictions.
    """
    # which algorithm do you want to use for the consensus?
    # possibilities are: "SVM", "RF", "KNN" or "LR" - all are regression models!
    # SVM: Support Vector Machine; RF: Random Forest, KNN: K-Nearest Neigbors; LR: Linear Regression;
    algorithm: str = "SVM"
    # which fingerprint to use - possibilities are: "ECFP" or "MACCS"
    fp_types: str = "ECFP"
    # If 'ECFP' fingerprint is used, define the number of bits - maximum is 2048!
    n_bits: int = 2048
    # If "ECFP" fingerprint is used, define the radius
    radius: int = 4
    # define if descriptors should be included into the non-GNN molecular representation
    include_descriptors: bool = True
    # define if the descriptors should be standardized by scaling and centering (Sklearn)
    standardize: bool = True
@dataclass
class Config():
    """
    Overall config class for model2 and run file.
    Includes all submodels config; the five sub-configs have no defaults
    and must be supplied by the caller.
    """
    basic_model_config: BasicModelConfig
    model1_config: Model1Config
    d_gin_config: DGINConfig
    frag_acc_config: FrACConfig
    ml_config: MLConfig
    model: str = 'model10'
53dba552ae334e64fb5a7ccb6989011420d8c439 | 171 | py | Python | config/views.py | gigincg/care | 07be6a7982b5c46a854e3435a52662f32800c8ae | [
"MIT"
] | 189 | 2020-03-17T17:18:58.000Z | 2022-02-22T09:49:45.000Z | config/views.py | gigincg/care | 07be6a7982b5c46a854e3435a52662f32800c8ae | [
"MIT"
] | 598 | 2020-03-19T21:22:09.000Z | 2022-03-30T05:08:37.000Z | config/views.py | gigincg/care | 07be6a7982b5c46a854e3435a52662f32800c8ae | [
"MIT"
] | 159 | 2020-03-19T18:45:56.000Z | 2022-03-17T13:23:12.000Z | import logging
from django.views.generic import TemplateView
from django.shortcuts import render
def home_view(request):
    """Render the static home page template."""
    template_name = "pages/home.html"
    return render(request, template_name)
| 17.1 | 45 | 0.789474 |
7b36a42b0045cf42ecbcaa0941f578cee0e73111 | 1,614 | py | Python | faasmcli/faasmcli/tasks/compile.py | Galaxy-sz/faasm | f5fa8d9699151704a6f032f1bbd30252e9050bd2 | [
"Apache-2.0"
] | 1 | 2021-03-03T09:54:21.000Z | 2021-03-03T09:54:21.000Z | faasmcli/faasmcli/tasks/compile.py | Galaxy-sz/faasm | f5fa8d9699151704a6f032f1bbd30252e9050bd2 | [
"Apache-2.0"
] | null | null | null | faasmcli/faasmcli/tasks/compile.py | Galaxy-sz/faasm | f5fa8d9699151704a6f032f1bbd30252e9050bd2 | [
"Apache-2.0"
] | null | null | null | from os import listdir
from os.path import join, splitext
from invoke import task
from faasmcli.util.env import FUNC_DIR, PROJ_ROOT
from faasmcli.util.compile import wasm_cmake, wasm_copy_upload
FUNC_BUILD_DIR = join(PROJ_ROOT, "build", "func")
def _copy_built_function(user, func):
    """Copy a freshly built .wasm function into the upload directory."""
    built_wasm = join(FUNC_BUILD_DIR, user, "{}.wasm".format(func))
    wasm_copy_upload(user, func, built_wasm)
@task(default=True, name="compile")
def compile(ctx, user, func, clean=False, debug=False):
"""
Compile a function
"""
# Build the function (gets written to the build dir)
# Will fail if compilation fails
target = func
wasm_cmake(FUNC_DIR, FUNC_BUILD_DIR, target, clean, debug)
_copy_built_function(user, func)
@task
def user(ctx, user, clean=False, debug=False):
    """
    Compile all functions belonging to the given user
    """
    # Build every function for the user via one aggregate cmake target
    # (raises if any build fails).
    wasm_cmake(FUNC_DIR, FUNC_BUILD_DIR, "{}_all_funcs".format(user), clean, debug)
    # Copy each produced .wasm file into the upload tree.
    user_build_dir = join(FUNC_BUILD_DIR, user)
    for entry in listdir(user_build_dir):
        stem, extension = splitext(entry)
        if extension == ".wasm":
            _copy_built_function(user, stem)
@task
def local(ctx, clean=False, debug=False):
    """
    Compile all functions used in the tests
    """
    # Same order as before: demo, errors, mpi, omp, python.
    for test_user in ("demo", "errors", "mpi", "omp", "python"):
        user(ctx, test_user, clean, debug)
| 26.9 | 77 | 0.675341 |
d9be63a9d8efefd4dbc041183526ceb0e3eb8b57 | 5,475 | py | Python | tests/hwsim/test_wpas_config.py | fuyajun1983cn/wpa_supplicant | cb3034cad5375dbc382aa14d3ff34d1e201cf877 | [
"Unlicense"
] | null | null | null | tests/hwsim/test_wpas_config.py | fuyajun1983cn/wpa_supplicant | cb3034cad5375dbc382aa14d3ff34d1e201cf877 | [
"Unlicense"
] | null | null | null | tests/hwsim/test_wpas_config.py | fuyajun1983cn/wpa_supplicant | cb3034cad5375dbc382aa14d3ff34d1e201cf877 | [
"Unlicense"
] | null | null | null | # wpa_supplicant config file
# Copyright (c) 2014, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
logger = logging.getLogger()
import os
from wpasupplicant import WpaSupplicant
def check_config(config):
    """Verify that the wpa_supplicant config file at *config* contains all
    expected entries and return its full contents.

    Raises Exception (with the same messages as before) on the first
    missing entry, in the same check order.
    """
    with open(config, "r") as f:
        data = f.read()
    required_entries = [
        ("update_config=1\n", "Missing update_config"),
        ("device_name=name\n", "Missing device_name"),
        ("eapol_version=2\n", "Missing eapol_version"),
        ("ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=",
         "Missing ctrl_interface"),
        ("blob-base64-foo={", "Missing blob"),
        ("cred={", "Missing cred"),
        ("network={", "Missing network"),
        ("wps_priority=5\n", "Missing wps_priority"),
    ]
    for snippet, error_msg in required_entries:
        if snippet not in data:
            raise Exception(error_msg)
    return data
def test_wpas_config_file(dev):
    """wpa_supplicant config file parsing/writing"""
    config = "/tmp/test_wpas_config_file.conf"
    if os.path.exists(config):
        os.remove(config)
    wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
    # Adding an interface with a missing config file must fail.
    try:
        wpas.interface_add("wlan5", config=config)
        initialized = True
    except:
        initialized = False
    if initialized:
        raise Exception("Missing config file did not result in an error")
    try:
        # Seed a minimal config file, including whitespace/comment corner
        # cases the parser must tolerate.
        with open(config, "w") as f:
            f.write("update_config=1 \t\r\n")
            f.write("# foo\n")
            f.write("\n")
            f.write(" \t\reapol_version=2")
            for i in range(0, 100):
                f.write(" ")
            f.write("foo\n")
            f.write("device_name=name#foo\n")
        wpas.interface_add("wlan5", config=config)
        wpas.request("SET wps_priority 5")
        # Populate one network block and one credential block so that
        # saving exercises all config sections.
        id = wpas.add_network()
        wpas.set_network_quoted(id, "ssid", "foo")
        wpas.set_network_quoted(id, "psk", "12345678")
        wpas.set_network(id, "bssid", "00:11:22:33:44:55")
        wpas.set_network(id, "proto", "RSN")
        wpas.set_network(id, "key_mgmt", "WPA-PSK-SHA256")
        wpas.set_network(id, "pairwise", "CCMP")
        wpas.set_network(id, "group", "CCMP")
        wpas.set_network(id, "auth_alg", "OPEN")
        id = wpas.add_cred()
        wpas.set_cred(id, "priority", "3")
        wpas.set_cred(id, "sp_priority", "6")
        wpas.set_cred(id, "update_identifier", "4")
        wpas.set_cred(id, "ocsp", "1")
        wpas.set_cred(id, "eap", "TTLS")
        wpas.set_cred(id, "req_conn_capab", "6:1234")
        wpas.set_cred_quoted(id, "realm", "example.com")
        wpas.set_cred_quoted(id, "provisioning_sp", "example.com")
        wpas.set_cred_quoted(id, "domain", "example.com")
        wpas.set_cred_quoted(id, "domain_suffix_match", "example.com")
        wpas.set_cred(id, "roaming_consortium", "112233")
        wpas.set_cred(id, "required_roaming_consortium", "112233")
        wpas.set_cred_quoted(id, "roaming_partner",
                             "roaming.example.net,1,127,*")
        wpas.set_cred_quoted(id, "ca_cert", "/tmp/ca.pem")
        wpas.set_cred_quoted(id, "username", "user")
        wpas.set_cred_quoted(id, "password", "secret")
        ev = wpas.wait_event(["CRED-MODIFIED 0 password"])
        wpas.request("SET blob foo 12345678")
        if "OK" not in wpas.request("SAVE_CONFIG"):
            raise Exception("Failed to save configuration file")
        if "OK" not in wpas.global_request("SAVE_CONFIG"):
            raise Exception("Failed to save configuration file")
        wpas.interface_remove("wlan5")
        data1 = check_config(config)
        wpas.interface_add("wlan5", config=config)
        if len(wpas.list_networks()) != 1:
            raise Exception("Unexpected number of networks")
        if len(wpas.request("LIST_CREDS").splitlines()) != 2:
            raise Exception("Unexpected number of credentials")
        if "OK" not in wpas.request("SAVE_CONFIG"):
            raise Exception("Failed to save configuration file")
        data2 = check_config(config)
        # A parse/re-write round trip must not change the file contents.
        if data1 != data2:
            logger.debug(data1)
            logger.debug(data2)
            raise Exception("Unexpected configuration change")
        # With update_config=0, SAVE_CONFIG must be rejected.
        wpas.request("SET update_config 0")
        wpas.global_request("SET update_config 0")
        if "OK" in wpas.request("SAVE_CONFIG"):
            raise Exception("SAVE_CONFIG succeeded unexpectedly")
        if "OK" in wpas.global_request("SAVE_CONFIG"):
            raise Exception("SAVE_CONFIG (global) succeeded unexpectedly")
        # replace the config file with a directory to break writing/renaming
        os.remove(config)
        os.mkdir(config)
        wpas.request("SET update_config 1")
        wpas.global_request("SET update_config 1")
        if "OK" in wpas.request("SAVE_CONFIG"):
            raise Exception("SAVE_CONFIG succeeded unexpectedly")
        if "OK" in wpas.global_request("SAVE_CONFIG"):
            raise Exception("SAVE_CONFIG (global) succeeded unexpectedly")
    finally:
        # Best-effort cleanup: the config path may be a file, a leftover
        # temp file, or the directory created above.
        try:
            os.remove(config)
        except:
            pass
        try:
            os.remove(config + ".tmp")
        except:
            pass
        try:
            os.rmdir(config)
        except:
            pass
| 36.744966 | 76 | 0.610228 |
f5447101142df5ea7ca4a62d4473c688429fe653 | 2,808 | py | Python | packaging/setup/plugins/ovirt-engine-remove/ovirt-engine/system/engine.py | leongold/ovirt-engine | 8b915dab8ad8157849b36b60eb0ca159b1923faf | [
"Apache-2.0"
] | null | null | null | packaging/setup/plugins/ovirt-engine-remove/ovirt-engine/system/engine.py | leongold/ovirt-engine | 8b915dab8ad8157849b36b60eb0ca159b1923faf | [
"Apache-2.0"
] | null | null | null | packaging/setup/plugins/ovirt-engine-remove/ovirt-engine/system/engine.py | leongold/ovirt-engine | 8b915dab8ad8157849b36b60eb0ca159b1923faf | [
"Apache-2.0"
] | null | null | null | #
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Engine plugin."""
import gettext
import os
from otopi import plugin
from otopi import util
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup.engine import constants as oenginecons
def _(m):
    """Translate *m* via gettext using the ovirt-engine-setup domain."""
    return gettext.dgettext(domain='ovirt-engine-setup', message=m)
@util.export
class Plugin(plugin.PluginBase):
    """Engine plugin."""

    def __init__(self, context):
        super(Plugin, self).__init__(context=context)

    @plugin.event(
        stage=plugin.Stages.STAGE_SETUP,
    )
    def _setup(self):
        # A missing post-install config means setup of this version never
        # completed. If the engine CA cert exists anyway, an older-version
        # setup is likely present, so point the user at that version's
        # cleanup tool before failing.
        if not os.path.exists(
            osetupcons.FileLocations.OVIRT_SETUP_POST_INSTALL_CONFIG
        ):
            if os.path.exists(
                osetupcons.FileLocations.OVIRT_ENGINE_PKI_ENGINE_CA_CERT
            ):
                self.dialog.note(
                    text=_(
                        'If you want to cleanup after setup of a previous '
                        'version, you should use the setup package of that '
                        'version.'
                    )
                )
            raise RuntimeError(
                _('Could not detect product setup')
            )

    @plugin.event(
        stage=plugin.Stages.STAGE_CUSTOMIZATION,
        name=oenginecons.Stages.REMOVE_CUSTOMIZATION_ENGINE,
        after=(
            osetupcons.Stages.REMOVE_CUSTOMIZATION_COMMON,
        ),
        condition=lambda self: self.environment[oenginecons.CoreEnv.ENABLE],
    )
    def _customization(self):
        # "Remove everything" implies removing both the engine and its
        # database.
        if self.environment[osetupcons.RemoveEnv.REMOVE_ALL]:
            self.environment[oenginecons.RemoveEnv.REMOVE_ENGINE] = True
            self.environment[
                oenginecons.RemoveEnv.REMOVE_ENGINE_DATABASE
            ] = True

    @plugin.event(
        stage=plugin.Stages.STAGE_MISC,
        condition=lambda self: (
            self.environment[oenginecons.RemoveEnv.REMOVE_ENGINE] and
            not self.environment[osetupcons.CoreEnv.DEVELOPER_MODE]
        ),
    )
    def _misc(self):
        # Disable the engine service from starting at boot.
        self.services.startup(
            name=oenginecons.Const.ENGINE_SERVICE_NAME,
            state=False,
        )
# vim: expandtab tabstop=4 shiftwidth=4
| 29.87234 | 76 | 0.643162 |
ace3b8cd54ca53bc683002cbdaef4ad9b4b0fe1d | 533 | py | Python | day02/t02/models.py | SunShuoJia/pyproject | 71f3cada463fd90243b2cdac8c982fb622f9ef9c | [
"Apache-2.0"
] | null | null | null | day02/t02/models.py | SunShuoJia/pyproject | 71f3cada463fd90243b2cdac8c982fb622f9ef9c | [
"Apache-2.0"
] | null | null | null | day02/t02/models.py | SunShuoJia/pyproject | 71f3cada463fd90243b2cdac8c982fb622f9ef9c | [
"Apache-2.0"
] | null | null | null | from django.db import models
# Create your models here.
class People(models.Model):
    """Basic personal-information record (name, age, sex, birthday,
    marital status)."""
    name = models.CharField(
        max_length=30,
        verbose_name='名字'
    )
    age = models.IntegerField()
    sexy = models.CharField(
        max_length=10,
        verbose_name='性别'
    )
    birthday = models.DateField(
        verbose_name="出生日期"
    )
    is_married = models.BooleanField(
        default=False,
        verbose_name="是否已婚"
    )

    class Meta:
        # BUG FIX: the original line ended with a trailing comma
        # (verbose_name="个人信息",), which made Meta.verbose_name a 1-tuple
        # ('个人信息',) instead of the intended string.
        verbose_name = "个人信息"
        # NOTE(review): "imformation" is a typo, but renaming the table
        # would change the actual DB table and require a migration, so the
        # value is kept as-is.
        db_table = "person_imformation"
587457f2ca2ffe3a07446e52384e9340e90bb60b | 4,044 | py | Python | homeassistant/components/hassio/discovery.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/hassio/discovery.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | homeassistant/components/hassio/discovery.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Implement the services discovery feature from Hass.io for Add-ons."""
from __future__ import annotations
import asyncio
from dataclasses import dataclass
import logging
from typing import Any
from aiohttp import web
from aiohttp.web_exceptions import HTTPServiceUnavailable
from homeassistant import config_entries
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import ATTR_NAME, ATTR_SERVICE, EVENT_HOMEASSISTANT_START
from homeassistant.core import HomeAssistant, callback
from homeassistant.data_entry_flow import BaseServiceInfo
from .const import ATTR_ADDON, ATTR_CONFIG, ATTR_DISCOVERY, ATTR_UUID
from .handler import HassioAPIError
_LOGGER = logging.getLogger(__name__)
@dataclass
class HassioServiceInfo(BaseServiceInfo):
    """Prepared info from hassio entries."""
    # Raw discovery payload handed to the config flow; the add-on's
    # human-readable name is injected under ATTR_ADDON before flow start
    # (see HassIODiscovery.async_process_new).
    config: dict[str, Any]
@callback
def async_setup_discovery_view(hass: HomeAssistant, hassio):
    """Register the supervisor discovery push endpoint and replay
    any discovery messages that already exist at startup."""
    hassio_discovery = HassIODiscovery(hass, hassio)
    hass.http.register_view(hassio_discovery)

    # Handle existing discovery messages
    async def _async_discovery_start_handler(event):
        """Process all existing discovery messages on startup."""
        try:
            data = await hassio.retrieve_discovery_messages()
        except HassioAPIError as err:
            _LOGGER.error("Can't read discover info: %s", err)
            return

        jobs = [
            hassio_discovery.async_process_new(discovery)
            for discovery in data[ATTR_DISCOVERY]
        ]
        if jobs:
            # FIX: asyncio.wait() no longer accepts bare coroutines
            # (deprecated since Python 3.8, an error on 3.11+); gather()
            # accepts them and also surfaces exceptions instead of
            # silently collecting them.
            await asyncio.gather(*jobs)

    hass.bus.async_listen_once(
        EVENT_HOMEASSISTANT_START, _async_discovery_start_handler
    )
class HassIODiscovery(HomeAssistantView):
    """Hass.io view to handle base part."""
    # HTTP route the supervisor pushes discovery events to.
    name = "api:hassio_push:discovery"
    url = "/api/hassio_push/discovery/{uuid}"
    def __init__(self, hass: HomeAssistant, hassio):
        """Initialize WebView."""
        self.hass = hass
        self.hassio = hassio
    async def post(self, request, uuid):
        """Handle new discovery requests."""
        # Fetch discovery data and prevent injections: the payload is
        # re-read from the supervisor by UUID instead of trusting the
        # request body.
        try:
            data = await self.hassio.get_discovery_message(uuid)
        except HassioAPIError as err:
            _LOGGER.error("Can't read discovery data: %s", err)
            # from None: the supervisor error is already logged above.
            raise HTTPServiceUnavailable() from None
        await self.async_process_new(data)
        return web.Response()
    async def delete(self, request, uuid):
        """Handle remove discovery requests."""
        data = await request.json()
        await self.async_process_del(data)
        return web.Response()
    async def async_process_new(self, data):
        """Process add discovery entry."""
        service = data[ATTR_SERVICE]
        config_data = data[ATTR_CONFIG]
        # Read additional Add-on info
        try:
            addon_info = await self.hassio.get_addon_info(data[ATTR_ADDON])
        except HassioAPIError as err:
            _LOGGER.error("Can't read add-on info: %s", err)
            return
        # Expose the human-readable add-on name to the config flow.
        config_data[ATTR_ADDON] = addon_info[ATTR_NAME]
        # Use config flow
        await self.hass.config_entries.flow.async_init(
            service,
            context={"source": config_entries.SOURCE_HASSIO},
            data=HassioServiceInfo(config=config_data),
        )
    async def async_process_del(self, data):
        """Process remove discovery entry."""
        service = data[ATTR_SERVICE]
        uuid = data[ATTR_UUID]
        # Check the message was really deleted / prevent injections: if the
        # supervisor can still return it, this delete request is bogus.
        try:
            data = await self.hassio.get_discovery_message(uuid)
        except HassioAPIError:
            pass
        else:
            _LOGGER.warning("Retrieve wrong unload for %s", service)
            return
        # Use config flow
        for entry in self.hass.config_entries.async_entries(service):
            if entry.source != config_entries.SOURCE_HASSIO:
                continue
            await self.hass.config_entries.async_remove(entry)
| 31.84252 | 82 | 0.673096 |
6b92d6b621ce89de8e82919227a1d24570b2d47a | 288 | py | Python | is_number/__init__.py | dolun/toy-package | 6df5499a4d19e4471bd2b98a84548280c8fc3554 | [
"MIT"
] | null | null | null | is_number/__init__.py | dolun/toy-package | 6df5499a4d19e4471bd2b98a84548280c8fc3554 | [
"MIT"
] | null | null | null | is_number/__init__.py | dolun/toy-package | 6df5499a4d19e4471bd2b98a84548280c8fc3554 | [
"MIT"
] | null | null | null | """Utility functions to calculate if an object is a number and other things."""
from .core import is_it_number, is_float, add_numbers, get_array_shape
# from module1 import square_plus_one
# from ._version import get_versions
# __version__ = get_versions()["version"]
# del get_versions
| 36 | 79 | 0.788194 |
cd755ab6d0673e9f77efda5907ddb31408c0bac4 | 19,416 | py | Python | tensorflow2/tf2cv/models/sepreresnet.py | oliviaweng/imgclsmob | 80fffbb46f986614b162c725b21f3d208597ac77 | [
"MIT"
] | 2 | 2020-11-14T08:40:41.000Z | 2021-11-08T09:30:41.000Z | tensorflow2/tf2cv/models/sepreresnet.py | ibrahim85/Sandbox-for-training-convolutional-networks-for-computer-vision | a1f1f52eecbb841fa878bff4d3c311b79864835d | [
"MIT"
] | null | null | null | tensorflow2/tf2cv/models/sepreresnet.py | ibrahim85/Sandbox-for-training-convolutional-networks-for-computer-vision | a1f1f52eecbb841fa878bff4d3c311b79864835d | [
"MIT"
] | 2 | 2020-09-01T12:22:50.000Z | 2020-10-24T22:02:35.000Z | """
SE-PreResNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['SEPreResNet', 'sepreresnet10', 'sepreresnet12', 'sepreresnet14', 'sepreresnet16', 'sepreresnet18',
'sepreresnet26', 'sepreresnetbc26b', 'sepreresnet34', 'sepreresnetbc38b', 'sepreresnet50', 'sepreresnet50b',
'sepreresnet101', 'sepreresnet101b', 'sepreresnet152', 'sepreresnet152b', 'sepreresnet200',
'sepreresnet200b', 'SEPreResUnit']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import conv1x1, SEBlock, flatten
from .preresnet import PreResBlock, PreResBottleneck, PreResInitBlock, PreResActivation
class SEPreResUnit(nn.Layer):
    """
    SE-PreResNet unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer of the block.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bottleneck,
                 conv1_stride,
                 data_format="channels_last",
                 **kwargs):
        super(SEPreResUnit, self).__init__(**kwargs)
        # A 1x1 projection is needed whenever the shortcut cannot be an
        # identity (channel count or spatial resolution changes).
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        if bottleneck:
            self.body = PreResBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                conv1_stride=conv1_stride,
                data_format=data_format,
                name="body")
        else:
            self.body = PreResBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                data_format=data_format,
                name="body")
        # Squeeze-and-excitation channel recalibration on the body output.
        self.se = SEBlock(
            channels=out_channels,
            data_format=data_format,
            name="se")
        if self.resize_identity:
            self.identity_conv = conv1x1(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                data_format=data_format,
                name="identity_conv")
    def call(self, x, training=None):
        identity = x
        # The pre-activation body returns both its output and the
        # pre-activated input; the latter is the tensor projected for the
        # shortcut branch in pre-activation ResNets.
        x, x_pre_activ = self.body(x, training=training)
        x = self.se(x)
        if self.resize_identity:
            identity = self.identity_conv(x_pre_activ)
        x = x + identity
        return x
class SEPreResNet(tf.keras.Model):
    """
    SE-PreResNet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(SEPreResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
        # Backbone: init block, then one Sequential stage per entry of
        # `channels`, then the final pre-activation + global pooling.
        self.features = tf.keras.Sequential(name="features")
        self.features.add(PreResInitBlock(
            in_channels=in_channels,
            out_channels=init_block_channels,
            data_format=data_format,
            name="init_block"))
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(channels):
            stage = tf.keras.Sequential(name="stage{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except stage 1.
                strides = 2 if (j == 0) and (i != 0) else 1
                stage.add(SEPreResUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bottleneck=bottleneck,
                    conv1_stride=conv1_stride,
                    data_format=data_format,
                    name="unit{}".format(j + 1)))
                in_channels = out_channels
            self.features.add(stage)
        self.features.add(PreResActivation(
            in_channels=in_channels,
            data_format=data_format,
            name="final_block"))
        # 7x7 average pooling assumes a 224x224 input (7 = 224 / 32).
        self.features.add(nn.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="final_pool"))
        self.output1 = nn.Dense(
            units=classes,
            input_dim=in_channels,
            name="output1")
    def call(self, x, training=None):
        x = self.features(x, training=training)
        # Collapse the pooled feature map to (batch, channels) for the classifier.
        x = flatten(x, self.data_format)
        x = self.output1(x)
        return x
def get_sepreresnet(blocks,
                    bottleneck=None,
                    conv1_stride=True,
                    model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".tensorflow", "models"),
                    **kwargs):
    """Build an SE-PreResNet of the requested depth.

    Parameters:
    ----------
    blocks : int
        Total network depth (number of weighted layers), e.g. 18, 50, 101.
    bottleneck : bool, default None
        Use bottleneck units; when None it is inferred as ``blocks >= 50``.
    conv1_stride : bool, default True
        Put the stride on the first (rather than second) convolution of a unit.
    model_name : str or None, default None
        Registry name used to locate pretrained weights.
    pretrained : bool, default False
        Load pretrained weights after construction.
    root : str, default '~/.tensorflow/models'
        Directory where pretrained weights are cached.
    """
    if bottleneck is None:
        bottleneck = (blocks >= 50)

    # Depth -> per-stage unit counts for depths whose layout does not depend
    # on the block type.
    depth_layouts = {
        10: [1, 1, 1, 1],
        12: [2, 1, 1, 1],
        16: [2, 2, 2, 1],
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
        269: [3, 30, 48, 8],
    }
    # Depths 14/26/38 differ between the plain and bottleneck variants.
    if blocks == 14:
        layers = [1, 1, 1, 1] if bottleneck else [2, 2, 1, 1]
    elif blocks == 26:
        layers = [2, 2, 2, 2] if bottleneck else [3, 3, 3, 3]
    elif blocks == 38 and bottleneck:
        layers = [3, 3, 3, 3]
    elif blocks in depth_layouts:
        layers = depth_layouts[blocks]
    else:
        raise ValueError("Unsupported SE-PreResNet with number of blocks: {}".format(blocks))

    # Sanity check: the unit counts must reproduce the requested depth
    # (3 conv layers per bottleneck unit, 2 per plain unit, plus init/output).
    convs_per_unit = 3 if bottleneck else 2
    assert sum(layers) * convs_per_unit + 2 == blocks

    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        # Bottleneck units expand the stage width by a factor of 4.
        channels_per_layers = [ci * 4 for ci in channels_per_layers]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = SEPreResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        # Weights can only be loaded into a built model, so build with a
        # dummy batch shape matching the configured data format.
        in_channels = kwargs.get("in_channels", 3)
        if net.data_format == "channels_first":
            input_shape = (1, in_channels) + net.in_size
        else:
            input_shape = (1,) + net.in_size + (in_channels,)
        net.build(input_shape=input_shape)
        net.load_weights(
            filepath=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root))

    return net
def sepreresnet10(**kwargs):
    """SE-PreResNet-10 ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507). Forwards ``pretrained`` (bool, default
    False) and ``root`` (weights dir, default '~/.tensorflow/models') to
    :func:`get_sepreresnet`.
    """
    return get_sepreresnet(model_name="sepreresnet10", blocks=10, **kwargs)


def sepreresnet12(**kwargs):
    """SE-PreResNet-12 ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507). Forwards ``pretrained`` and ``root``
    to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(model_name="sepreresnet12", blocks=12, **kwargs)


def sepreresnet14(**kwargs):
    """SE-PreResNet-14 ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507). Forwards ``pretrained`` and ``root``
    to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(model_name="sepreresnet14", blocks=14, **kwargs)


def sepreresnet16(**kwargs):
    """SE-PreResNet-16 ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507). Forwards ``pretrained`` and ``root``
    to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(model_name="sepreresnet16", blocks=16, **kwargs)


def sepreresnet18(**kwargs):
    """SE-PreResNet-18 ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507). Forwards ``pretrained`` and ``root``
    to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(model_name="sepreresnet18", blocks=18, **kwargs)


def sepreresnet26(**kwargs):
    """SE-PreResNet-26 with plain (non-bottleneck) units
    ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).
    Forwards ``pretrained`` and ``root`` to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(model_name="sepreresnet26", blocks=26, bottleneck=False, **kwargs)


def sepreresnetbc26b(**kwargs):
    """SE-PreResNet-BC-26b: bottleneck units with the stride on the second
    convolution ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507). Forwards ``pretrained`` and ``root``
    to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(model_name="sepreresnetbc26b", blocks=26, bottleneck=True, conv1_stride=False, **kwargs)


def sepreresnet34(**kwargs):
    """SE-PreResNet-34 ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507). Forwards ``pretrained`` and ``root``
    to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(model_name="sepreresnet34", blocks=34, **kwargs)


def sepreresnetbc38b(**kwargs):
    """SE-PreResNet-BC-38b: bottleneck units with the stride on the second
    convolution ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507). Forwards ``pretrained`` and ``root``
    to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(model_name="sepreresnetbc38b", blocks=38, bottleneck=True, conv1_stride=False, **kwargs)


def sepreresnet50(**kwargs):
    """SE-PreResNet-50 ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507). Forwards ``pretrained`` and ``root``
    to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(model_name="sepreresnet50", blocks=50, **kwargs)


def sepreresnet50b(**kwargs):
    """SE-PreResNet-50 variant with the stride on the second convolution of
    the bottleneck block ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507). Forwards ``pretrained`` and ``root``
    to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(model_name="sepreresnet50b", blocks=50, conv1_stride=False, **kwargs)


def sepreresnet101(**kwargs):
    """SE-PreResNet-101 ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507). Forwards ``pretrained`` and ``root``
    to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(model_name="sepreresnet101", blocks=101, **kwargs)


def sepreresnet101b(**kwargs):
    """SE-PreResNet-101 variant with the stride on the second convolution of
    the bottleneck block ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507). Forwards ``pretrained`` and ``root``
    to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(model_name="sepreresnet101b", blocks=101, conv1_stride=False, **kwargs)


def sepreresnet152(**kwargs):
    """SE-PreResNet-152 ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507). Forwards ``pretrained`` and ``root``
    to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(model_name="sepreresnet152", blocks=152, **kwargs)


def sepreresnet152b(**kwargs):
    """SE-PreResNet-152 variant with the stride on the second convolution of
    the bottleneck block ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507). Forwards ``pretrained`` and ``root``
    to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(model_name="sepreresnet152b", blocks=152, conv1_stride=False, **kwargs)


def sepreresnet200(**kwargs):
    """SE-PreResNet-200, an experimental depth ('Squeeze-and-Excitation
    Networks,' https://arxiv.org/abs/1709.01507). Forwards ``pretrained``
    and ``root`` to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(model_name="sepreresnet200", blocks=200, **kwargs)


def sepreresnet200b(**kwargs):
    """SE-PreResNet-200 experimental variant with the stride on the second
    convolution of the bottleneck block ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507). Forwards ``pretrained`` and ``root``
    to :func:`get_sepreresnet`.
    """
    return get_sepreresnet(model_name="sepreresnet200b", blocks=200, conv1_stride=False, **kwargs)
def _test():
    """Smoke-test every model constructor: forward a dummy batch and check
    both the output shape and the trainable-parameter count."""
    import numpy as np
    import tensorflow.keras.backend as K

    pretrained = False

    # Constructor -> expected number of trainable parameters.
    expected_weights = {
        sepreresnet10: 5461668,
        sepreresnet12: 5536232,
        sepreresnet14: 5833840,
        sepreresnet16: 7022976,
        sepreresnet18: 11776928,
        sepreresnet26: 18092188,
        sepreresnetbc26b: 17388424,
        sepreresnet34: 21957204,
        sepreresnetbc38b: 24019064,
        sepreresnet50: 28080472,
        sepreresnet50b: 28080472,
        sepreresnet101: 49319320,
        sepreresnet101b: 49319320,
        sepreresnet152: 66814296,
        sepreresnet152b: 66814296,
        sepreresnet200: 71828312,
        sepreresnet200b: 71828312,
    }

    for model, expected_count in expected_weights.items():
        net = model(pretrained=pretrained)

        # Forward a dummy ImageNet-sized batch; the classifier must emit
        # 1000 logits per sample.
        batch_size = 14
        x = tf.random.normal((batch_size, 224, 224, 3))
        y = net(x)
        assert tuple(y.shape.as_list()) == (batch_size, 1000)

        # Parameter count must match the reference implementation.
        weight_count = sum(np.prod(K.get_value(w).shape) for w in net.trainable_weights)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected_count


if __name__ == "__main__":
    _test()
| 34.364602 | 119 | 0.6267 |
47b0fdf5186853ddc99731cae03b7adc2c211e06 | 8,720 | py | Python | teslakit/plotting/waves.py | teslakit/teslak | 3f3dda08c5c5998cb2a7debbf22f2be675a4ff8b | [
"MIT"
] | 12 | 2019-11-14T22:19:12.000Z | 2022-03-04T01:25:33.000Z | teslakit/plotting/waves.py | anderdyl/teslaCoSMoS | 1495bfa2364ddbacb802d145b456a35213abfb7c | [
"MIT"
] | 5 | 2020-03-24T18:21:41.000Z | 2021-08-23T20:39:43.000Z | teslakit/plotting/waves.py | anderdyl/teslaCoSMoS | 1495bfa2364ddbacb802d145b456a35213abfb7c | [
"MIT"
] | 2 | 2021-03-06T07:54:41.000Z | 2021-06-30T14:33:22.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
# pip
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import seaborn as sns
# teslakit
from ..util.operations import GetBestRowsCols
from .custom_colors import GetFamsColors
from .outputs import axplot_compare_histograms
# import constants
from .config import _faspect, _fsize, _fdpi
def axplot_distplot(ax, vars_values, vars_colors, n_bins, wt_num, xlims):
    'axes plot seaborn distplot variable at families'

    # One overlaid histogram/KDE per wave family, colored consistently.
    for values, color in zip(vars_values, vars_colors):
        sns.distplot(values, bins=n_bins, color=tuple(color), ax=ax)

    # Weather-type number label (upper-right corner, axes coordinates).
    ax.text(0.87, 0.85, wt_num, transform=ax.transAxes, fontweight='bold')

    # Shared x range across subplots; hide tick clutter.
    ax.set_xlim(xlims)
    ax.set_xticks([])
    ax.set_yticks([])
def axplot_polarhist(ax, vars_values, vars_colors, n_bins, wt_num):
    'axes plot polar hist dir at families'

    # Overlay one semi-transparent directional histogram per wave family,
    # binned over the full circle (directions arrive in degrees).
    full_circle = [0, np.deg2rad(360)]
    for dirs_deg, fam_color in zip(vars_values, vars_colors):
        plt.hist(
            np.deg2rad(dirs_deg),
            range=full_circle,
            bins=n_bins,
            color=fam_color,
            histtype='stepfilled',
            alpha=0.5,
        )

    # Weather-type number label (upper-right corner, axes coordinates).
    ax.text(0.87, 0.85, wt_num, transform=ax.transAxes, fontweight='bold')

    # Light background, no tick clutter.
    ax.set_facecolor('whitesmoke')
    ax.set_xticks([])
    ax.set_yticks([])
def Plot_Waves_DWTs(xds_wvs_fams_sel, bmus, n_clusters, show=True):
    '''
    Plot waves families by DWT: one Hs figure, one Tp figure (histograms)
    and one Dir figure (polar histograms), each with a subplot per cluster.
    wvs_fams (waves families):
        xarray.Dataset (time,), fam1_Hs, fam1_Tp, fam1_Dir, ...
        {any number of families}
    xds_DWTs - ESTELA predictor KMA
        xarray.Dataset (time,), bmus, ...
    Returns the list of created figures [Hs, Tp, Dir].
    '''
    # plot_parameters
    n_bins = 35
    # get families names and colors (family name is the "_Hs" variable prefix)
    n_fams = [vn.replace('_Hs','') for vn in xds_wvs_fams_sel.keys() if '_Hs' in vn]
    fams_colors = GetFamsColors(len(n_fams))
    # get number of rows and cols for gridplot
    n_rows, n_cols = GetBestRowsCols(n_clusters)
    # Hs and Tp
    l_figs = []
    for wv in ['Hs', 'Tp']:
        # get common xlims for histogram (shared across all cluster subplots)
        allvals = np.concatenate(
            [xds_wvs_fams_sel['{0}_{1}'.format(fn, wv)].values[:] for fn in n_fams]
        )
        av_min, av_max = np.nanmin(allvals), np.nanmax(allvals)
        xlims = [math.floor(av_min), av_max]
        # figure
        fig = plt.figure(figsize=(_faspect*_fsize, _fsize))
        gs = gridspec.GridSpec(n_rows, n_cols, wspace=0.0, hspace=0.0)
        gr, gc = 0, 0
        for ic in range(n_clusters):
            # select the times assigned to cluster ic
            pc = np.where(bmus==ic)[0][:]
            xds_wvs_c = xds_wvs_fams_sel.isel(time=pc)
            vrs = [xds_wvs_c['{0}_{1}'.format(fn, wv)].values[:] for fn in n_fams]
            # axes plot
            ax = plt.subplot(gs[gr, gc])
            axplot_distplot(
                ax, vrs,
                fams_colors, n_bins,
                wt_num = ic+1,
                xlims=xlims,
            )
            # fig legend (drawn once, anchored to the first subplot)
            if gc == 0 and gr == 0:
                plt.legend(
                    title = 'Families',
                    labels = n_fams,
                    bbox_to_anchor=(1, 1),
                    bbox_transform=fig.transFigure,
                )
            # advance grid position (row-major)
            gc += 1
            if gc >= n_cols:
                gc = 0
                gr += 1
        fig.suptitle(
            '{0} Distributions: {1}'.format(wv, ', '.join(n_fams)),
            fontsize=14, fontweight = 'bold')
        l_figs.append(fig)
    # show
    if show: plt.show()
    # Dir (polar histograms; clockwise with 0 deg at the top)
    fig = plt.figure(figsize=(_faspect*_fsize, _fsize))
    gs = gridspec.GridSpec(n_rows, n_cols, wspace=0.0, hspace=0.1)
    gr, gc = 0, 0
    for ic in range(n_clusters):
        # select the times assigned to cluster ic
        pc = np.where(bmus==ic)[0][:]
        xds_wvs_c = xds_wvs_fams_sel.isel(time=pc)
        vrs = [xds_wvs_c['{0}_Dir'.format(fn)].values[:] for fn in n_fams]
        # axes plot
        ax = plt.subplot(
            gs[gr, gc],
            projection='polar',
            theta_direction = -1, theta_offset = np.pi/2,
        )
        axplot_polarhist(
            ax, vrs,
            fams_colors, n_bins,
            wt_num = ic+1,
        )
        # fig legend
        if gc == n_cols-1 and gr==0:
            plt.legend(
                title = 'Families',
                labels = n_fams,
                bbox_to_anchor=(1, 1),
                bbox_transform=fig.transFigure,
            )
        # advance grid position (row-major)
        gc += 1
        if gc >= n_cols:
            gc = 0
            gr += 1
    fig.suptitle(
        '{0} Distributions: {1}'.format('Dir', ', '.join(n_fams)),
        fontsize=14, fontweight='bold')
    l_figs.append(fig)
    # show
    if show: plt.show()
    return l_figs
def Plot_Waves_Histogram_FitSim(wvs_fams_hist, wvs_fams_sim,
                                vns=['Hs', 'Tp','Dir'], show=True):
    '''
    Plot waves families histogram fitting - simulation comparison
    wvs_fams_hist, wvs_fams_sim (waves families):
        xarray.Dataset (time,), fam1_Hs, fam1_Tp, fam1_Dir, ...
    vns - variables to plot (grid rows; families are the columns)
    '''
    # plot_parameters
    n_bins = 40
    # get families names and colors
    # NOTE(review): names are derived by stripping '_Hs' but filtered on
    # vns[0]; this assumes vns[0] == 'Hs' — confirm before passing another
    # leading variable.
    n_fams = [vn.replace('_Hs','') for vn in wvs_fams_hist.keys() if
              '_{0}'.format(vns[0]) in vn]
    fams_colors = GetFamsColors(len(n_fams))
    # fig params: one row per variable, one column per family
    n_rows = len(vns)
    n_cols = len(n_fams)
    # figure
    fig = plt.figure(figsize=(_faspect*_fsize, _fsize))
    gs = gridspec.GridSpec(n_rows, n_cols) #, wspace=0.0, hspace=0.0)
    # iterate families
    for nf, fc in zip(n_fams, fams_colors):
        # iterate variables
        for nv in vns:
            # get variable fit and sim
            vf = wvs_fams_hist['{0}_{1}'.format(nf, nv)].values[:]
            vs = wvs_fams_sim['{0}_{1}'.format(nf, nv)].values[:]
            # remove nans (chromosomes off)
            vf = vf[~np.isnan(vf)]
            vs = vs[~np.isnan(vs)]
            # axes plot (grid position from the variable/family indices)
            gr = vns.index(nv)
            gc = n_fams.index(nf)
            ax = plt.subplot(gs[gr, gc])
            axplot_compare_histograms(ax, vf, vs, color_2=fc, n_bins=n_bins)
            # first row titles
            if gr == 0:
                ax.set_title(nf, fontweight='bold')
            # first column variables
            if gc == 0:
                ax.set_ylabel(nv, fontweight='bold')
    fig.suptitle(
        'Historical - Simulation Waves Families Comparison',
        fontsize=14, fontweight = 'bold'
    )
    # show and return figure
    if show: plt.show()
    return fig
def Plot_DWTs_FamiliesPopulation(xds_wvs_fams_sel, bmus, n_clusters, show=True):
    '''
    Plot waves families data count and availability for each WT
    (stacked bars: available samples vs NaNs, one bar per family).
    wvs_fams (waves families):
        xarray.Dataset (time,), fam1_Hs, fam1_Tp, fam1_Dir, ...
        {any number of families}
    xds_DWTs - ESTELA predictor KMA
        xarray.Dataset (time,), bmus, ...
    '''
    # get families names and colors
    n_fams = [vn.replace('_Hs','') for vn in xds_wvs_fams_sel.keys() if '_Hs' in vn]
    n_bins = len(n_fams)
    # get number of rows and cols for gridplot
    n_rows, n_cols = GetBestRowsCols(n_clusters)
    # use Hs to count data (availability assumed identical across variables)
    wv = 'Hs'
    # figure
    fig = plt.figure(figsize=(_faspect*_fsize, _fsize))
    gs = gridspec.GridSpec(n_rows, n_cols, wspace=0.15, hspace=0.33)
    gr, gc = 0, 0
    for ic in range(n_clusters):
        # count available (yd) and missing (nd) samples per family in cluster ic
        pc = np.where(bmus==ic)[0][:]
        yd = np.zeros(len(n_fams))*np.nan
        nd = np.zeros(len(n_fams))*np.nan
        for fc, fn in enumerate(n_fams):
            dd = xds_wvs_fams_sel['{0}_{1}'.format(fn, wv)]
            vv = dd.isel(time=pc).values[:]
            yd[fc] = sum(~np.isnan(vv))
            nd[fc] = sum(np.isnan(vv))
            # NOTE(review): td keeps only the LAST family's sample count and
            # raises NameError if n_fams is empty; all families share the
            # time dimension so the counts should agree — confirm.
            td = len(vv)
        # axes plot: stacked bars (available below, NaN on top)
        ax = plt.subplot(gs[gr, gc])
        ax.bar(np.arange(n_bins), yd, 0.35)
        ax.bar(np.arange(n_bins), nd, 0.35, bottom=yd)
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_title('WT {0} - {1}'.format(ic+1, td))
        # fig legend (drawn once)
        if gc == 0 and gr == 0:
            plt.legend(
                labels = ['available', 'NaN'],
                bbox_to_anchor=(1, 1),
                bbox_transform=fig.transFigure,
            )
        # advance grid position (row-major)
        gc += 1
        if gc >= n_cols:
            gc = 0
            gr += 1
    fig.suptitle(
        'Number of Samples and NaNs by WT and Family: {0}'.format(', '.join(n_fams)),
        fontsize=14, fontweight = 'bold')
    # show
    if show: plt.show()
    return fig
| 27.335423 | 85 | 0.548968 |
55de61648c5caf3eeb8657299b35d8c30e48540f | 3,467 | py | Python | test/IntegrationTests.py | mako-npm/dash-core-components | 0cbc3d8093c678e59b5b4dfa3aa2637d071a5b33 | [
"MIT"
] | null | null | null | test/IntegrationTests.py | mako-npm/dash-core-components | 0cbc3d8093c678e59b5b4dfa3aa2637d071a5b33 | [
"MIT"
] | null | null | null | test/IntegrationTests.py | mako-npm/dash-core-components | 0cbc3d8093c678e59b5b4dfa3aa2637d071a5b33 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import multiprocessing
import os
import platform
import threading
import time
import unittest
import percy
import flask
import requests
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
class IntegrationTests(unittest.TestCase):
    """Shared Selenium/Percy harness for integration tests.
    One Chrome driver and one Percy build are created per test class; each
    test starts its own Dash server on port 8050 and tears it down afterwards.
    """
    @classmethod
    def setUpClass(cls):
        super(IntegrationTests, cls).setUpClass()
        options = Options()
        capabilities = DesiredCapabilities.CHROME
        # Capture only severe browser-console entries for get_log().
        capabilities['loggingPrefs'] = {'browser': 'SEVERE'}
        if 'DASH_TEST_CHROMEPATH' in os.environ:
            options.binary_location = os.environ['DASH_TEST_CHROMEPATH']
        cls.driver = webdriver.Chrome(options=options, desired_capabilities=capabilities)
        loader = percy.ResourceLoader(
            webdriver=cls.driver,
            base_url='/assets',
            root_dir='test/assets'
        )
        cls.percy_runner = percy.Runner(loader=loader)
        cls.percy_runner.initialize_build()
    @classmethod
    def tearDownClass(cls):
        super(IntegrationTests, cls).tearDownClass()
        cls.driver.quit()
        cls.percy_runner.finalize_build()
    def setUp(self):
        pass
    def tearDown(self):
        # On Windows the server runs in a thread and is stopped via its
        # /stop route; elsewhere the dedicated process is terminated.
        if platform.system() == 'Windows':
            requests.get('http://localhost:8050/stop')
        else:
            self.server_process.terminate()
        self.clear_log()
        time.sleep(1)
    def startServer(self, app):
        """
        Launch *app* on http://localhost:8050 in the background (thread on
        Windows, process elsewhere) and navigate the shared driver to it.
        :param app:
        :type app: dash.Dash
        :return:
        """
        if 'DASH_TEST_PROCESSES' in os.environ:
            processes = int(os.environ['DASH_TEST_PROCESSES'])
        else:
            processes = 4
        def run():
            app.scripts.config.serve_locally = True
            app.css.config.serve_locally = True
            app.run_server(
                port=8050,
                debug=False,
                processes=processes,
                threaded=False,
            )
        def run_windows():
            app.scripts.config.serve_locally = True
            app.css.config.serve_locally = True
            # Extra route used by tearDown() to shut the werkzeug server down.
            @app.server.route('/stop')
            def _stop_server_windows():
                stopper = flask.request.environ['werkzeug.server.shutdown']
                stopper()
                return 'stop'
            app.run_server(
                port=8050,
                debug=False,
                threaded=True
            )
        # Run on a separate process so that it doesn't block
        system = platform.system()
        if system == 'Windows':
            # multiprocess can't pickle an inner func on windows (closure are not serializable by default on windows)
            self.server_thread = threading.Thread(target=run_windows)
            self.server_thread.start()
        else:
            self.server_process = multiprocessing.Process(target=run)
            self.server_process.start()
        time.sleep(2)
        # Visit the dash page
        self.driver.get('http://localhost:8050')
    def clear_log(self):
        # Remember the newest console-log timestamp so get_log() only
        # returns entries emitted after this point.
        entries = self.driver.get_log("browser")
        if entries:
            self.last_timestamp = entries[-1]["timestamp"]
    def get_log(self):
        entries = self.driver.get_log("browser")
        return [entry for entry in entries if entry["timestamp"] > self.last_timestamp]
    # Class-level default until clear_log() records a real timestamp.
    last_timestamp = 0
152ee315206f92661f32107c612e81ce37732321 | 1,533 | py | Python | dbserver/api/migrations/0014_auto_20191130_2133.py | nancheng58/database_course_design | 890f3880922d8c08a2b3092b2e948148669fbbb1 | [
"MIT"
] | 2 | 2020-12-01T01:09:43.000Z | 2020-12-01T01:32:43.000Z | dbserver/api/migrations/0014_auto_20191130_2133.py | nancheng58/database_course_design | 890f3880922d8c08a2b3092b2e948148669fbbb1 | [
"MIT"
] | null | null | null | dbserver/api/migrations/0014_auto_20191130_2133.py | nancheng58/database_course_design | 890f3880922d8c08a2b3092b2e948148669fbbb1 | [
"MIT"
] | 4 | 2020-09-17T12:54:48.000Z | 2020-10-19T12:08:44.000Z | # Generated by Django 2.2.6 on 2019-11-30 13:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: links Course to Teacher, relaxes a few
    # field constraints, and introduces the Elective (course enrolment) join model.

    dependencies = [
        ('api', '0013_auto_20191128_1714'),
    ]

    operations = [
        migrations.AddField(
            model_name='course',
            name='teacher',
            # default=233666 is only used to back-fill existing rows during the
            # migration; preserve_default=False drops it from the field afterwards.
            field=models.ForeignKey(default=233666, on_delete=django.db.models.deletion.CASCADE, to='api.Teacher'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='admin',
            name='email',
            field=models.CharField(blank=True, max_length=40),
        ),
        migrations.AlterField(
            model_name='admin',
            name='usertype',
            field=models.IntegerField(default=2),
        ),
        migrations.AlterField(
            model_name='teacher',
            name='address',
            field=models.CharField(blank=True, max_length=200),
        ),
        migrations.CreateModel(
            name='Elective',
            fields=[
                ('elective_id', models.AutoField(primary_key=True, serialize=False)),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Course')),
                ('student', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Student')),
            ],
            options={
                'verbose_name': '选课',
                'verbose_name_plural': '选课',
            },
        ),
    ]
| 31.9375 | 115 | 0.560992 |
ec796a6ca72d2c151fe6085bf4730d55546af576 | 1,001 | py | Python | tensorflow/contrib/losses/__init__.py | smrutiranjans/tensorflow | d8e8b872eae63188c75046d5bb068e03a81b3f85 | [
"Apache-2.0"
] | 42 | 2016-08-22T03:49:32.000Z | 2020-06-29T14:00:27.000Z | tensorflow/contrib/losses/__init__.py | smrutiranjans/tensorflow | d8e8b872eae63188c75046d5bb068e03a81b3f85 | [
"Apache-2.0"
] | 6 | 2016-09-02T03:44:35.000Z | 2018-11-27T13:20:36.000Z | tensorflow/contrib/losses/__init__.py | smrutiranjans/tensorflow | d8e8b872eae63188c75046d5bb068e03a81b3f85 | [
"Apache-2.0"
] | 12 | 2017-03-18T22:22:28.000Z | 2020-06-19T20:54:53.000Z | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for building neural network losses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.losses.python.losses import *
from tensorflow.python.util.all_util import make_all
| 38.5 | 80 | 0.723277 |
a8c7a860d663afee6775b298c41dcf921f9180d2 | 6,302 | py | Python | Server.py | danielpiro/Hackathon | 6d5b040fd956a13326b5d451f73e75e3f9080da8 | [
"MIT"
] | null | null | null | Server.py | danielpiro/Hackathon | 6d5b040fd956a13326b5d451f73e75e3f9080da8 | [
"MIT"
] | null | null | null | Server.py | danielpiro/Hackathon | 6d5b040fd956a13326b5d451f73e75e3f9080da8 | [
"MIT"
] | null | null | null | import socket
import time
import struct
from random import randint
from threading import Thread
import os
from scapy.arch import get_if_addr
os.system("")
# Colors for prints
class Colors:
    """ANSI escape codes used to colorize server console output."""
    GREEN = '\033[32m'
    BLUE = '\033[34m'
    PINK = '\033[35m'
# Broadcast destination for the UDP "offer" announcements.
UDP_DEST_IP = '<broadcast>'
UDP_DEST_PORT = 13117
# TCP port clients connect to after hearing the offer.
TCP_DEST_PORT = 2006
MESSAGE_LENGTH = 1024
TIME_UNTIL_GAME = 10  # seconds
TIME_TO_PLAY = 10  # seconds

# Module-level state shared by the broadcaster/acceptor threads.
sockUDP = None
sockTCP = None
CONN_A = None  # first connected player's socket
CONN_B = None  # second connected player's socket
counter = 0  # number of currently connected players (0..2)
def start_udp():
    """Create the broadcast-capable UDP socket and return this host's IP.

    NOTE(review): the IP is read from interface "eth1" via scapy's
    get_if_addr — assumes that interface exists on the host; confirm.
    """
    global sockUDP
    ip = get_if_addr("eth1")
    sockUDP = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)  # UDP
    sockUDP.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sockUDP.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    return ip
def send_broadcast():
    """Broadcast a UDP "offer" packet once per second until two players join.

    Packet layout: magic cookie 0xabcddcba, message type 0x2, TCP port.
    NOTE(review): struct format 'IBH' uses *native* byte order and padding
    (typically 8 bytes); clients must unpack with the exact same format.
    A network protocol would normally use '!IBH' — changing it here would
    break existing clients, so it is left as-is.
    """
    global sockUDP
    ip = start_udp()
    print(Colors.GREEN + "Server started, listening on IP address " + ip)
    while counter < 2:  # counter is incremented by connect_clients()
        buffer = struct.pack('IBH', 0xabcddcba, 0x2, TCP_DEST_PORT)
        sockUDP.sendto(buffer, (UDP_DEST_IP, UDP_DEST_PORT))
        time.sleep(1)  # dont overload
def connect_clients():
    """Accept TCP connections until two players are connected.

    The first accepted socket becomes CONN_A, the second CONN_B; both are
    switched to non-blocking mode so later recv() calls can be polled.
    """
    global counter, sockTCP, CONN_B, CONN_A  # can use address later
    while True:
        if counter < 2:
            try:
                conn, address = sockTCP.accept()
                if counter == 0:
                    CONN_A = conn
                    CONN_A.setblocking(0)
                else:
                    CONN_B = conn
                    CONN_B.setblocking(0)
                counter += 1
            except Exception as e:
                # accept() failures are ignored and retried.
                pass
        else:
            print(Colors.BLUE + "game starts in 10 secs")
            break
def get_group_names():
    """Read each player's group name from their socket.

    Returns (name_a, name_b, ok). Because the sockets are non-blocking,
    recv() raises if a player has not sent a name yet — that is treated
    as "name not entered" and (\"\", \"\", False) is returned.
    """
    try:
        name_a = CONN_A.recv(MESSAGE_LENGTH).decode()
        name_b = CONN_B.recv(MESSAGE_LENGTH).decode()
        return name_a, name_b, True
    except Exception as e:
        print(Colors.GREEN + "group name was not entered so couldn't start the game")
        send_message("group name was not entered so couldn't start the game")
        return "", "", False
def send_message(message):
    """Send *message* to both connected players, ignoring any socket error.

    Note: both sends share one try block, so if sending to player A fails,
    player B is not attempted either (matches the original best-effort intent).
    """
    payload = message.encode()
    try:
        CONN_A.sendall(payload)
        CONN_B.sendall(payload)
    except Exception:
        pass
def receive_char(answer):
    """Poll both players for up to TIME_TO_PLAY seconds for an answer.

    Returns (a_won, got_answer). Whoever answers first wins if their answer
    equals *answer*; otherwise the other player wins. Sockets are
    non-blocking (set in connect_clients), so recv() raising is the normal
    "no data yet" case that drives the polling loop.

    NOTE(review): if player A sends a non-numeric answer, int(data) raises
    and control falls into the outer except, i.e. A's garbage is silently
    treated as "no answer" and B's socket is polled instead — confirm this
    is intended.
    """
    global CONN_A, CONN_B
    a_won = False
    got_answer = False
    timeout = time.time() + TIME_TO_PLAY
    while time.time() < timeout and not got_answer:
        try:
            data = CONN_A.recv(1024)
            if int(data) == answer:
                a_won = True
            else:
                a_won = False
            got_answer = True
            return a_won, got_answer
        except Exception as e:
            try:
                data = CONN_B.recv(1024)
                if int(data) == answer:
                    a_won = False
                else:
                    a_won = True
                got_answer = True
                return a_won, got_answer
            except Exception as e:
                time.sleep(0.1)  # neither player answered yet; back off briefly
    # Timed out with no answer from either player.
    return a_won, got_answer
def send_end_message(name_a, name_b, answer, a_won, got_answer):
    """Announce the game result to the server console and to both players.

    If nobody answered in time the game is declared a draw.
    """
    winner_group = name_a if a_won else name_b
    if got_answer:
        end_message = (
            "Game over!\nThe correct answer was "
            + str(answer)
            + "!\nCongratulations to the winner:"
            + winner_group
        )
    else:
        end_message = "Game over!\nNo one answered - Draw"
    print(Colors.PINK + end_message)
    send_message(end_message)
def send_math_question():
    """Pick a random arithmetic question, send it to both players, and
    return its numeric answer (or None if sending failed)."""
    try:
        # Question text paired with its expected integer answer.
        problems = [
            ("2+3", 5),
            ("4-2", 2),
            ("9-3", 6),
            ("2*4", 8),
            ("1*5", 5),
            ("6/3", 2),
            ("8/4", 2),
            ("ln(e^3)", 3),
        ]
        question, answer = problems[randint(0, 7)]
        send_message("How much is: " + question + "?\n")
        return answer
    except Exception:
        pass
def start_game():
    """Run one full game round: greet, ask, collect the answer, declare a winner.

    Waits TIME_UNTIL_GAME seconds first so clients can get ready. Any failure
    is swallowed and the round simply ends (main() restarts a new one).
    """
    try:
        time.sleep(TIME_UNTIL_GAME)
        # part 1 - only thing that can stop a game, get group names
        name_a, name_b, isValid = get_group_names()
        if isValid:
            # part 2 - send the openning message and random math question
            begin_message = "Welcome to Quick Maths.\nPlayer 1: " + name_a + "\nPlayer 2: " + name_b + "\n====\n Please " \
                            "answer the " \
                            "following " \
                            "question as fast " \
                            "as you can:\n "
            send_message(begin_message)
            answer = send_math_question()
            # part 3 - receive answer
            a_won, got_answer = receive_char(answer)
            # part 4 - declare the winner
            send_end_message(name_a, name_b, answer, a_won, got_answer)
    except Exception as e:
        pass
def start_tcp():
    """Create the listening TCP socket on all interfaces at TCP_DEST_PORT.

    The backlog of 2 matches the two-player game design.
    """
    global sockTCP
    sockTCP = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # init the TCP socket
    sockTCP.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sockTCP.bind(('', TCP_DEST_PORT))
    sockTCP.listen(2)
def reset_params():
    """Reset the connected-player counter so a new round can accept clients."""
    global counter
    counter = 0
def main():
    """Server main loop: broadcast offers, accept two players, play, repeat.

    Each iteration spawns a broadcaster thread and an acceptor thread, waits
    for both to finish (i.e. two players connected), plays one game, then
    resets state and closes the player sockets. All exceptions are swallowed
    so a failed round never kills the server.
    """
    global counter
    start_tcp()
    while True:
        try:
            time.sleep(1)  # not a must, but make sure clients can disconnect
            # part 1 - broadcast
            broadcaster = Thread(target=send_broadcast, args=())
            # part 2 - wait for 2 clients to connect, wait indefinitely
            client_connector = Thread(target=connect_clients, args=())
            broadcaster.start()
            client_connector.start()
            # part 3 - make sure they finish before game starts
            broadcaster.join()
            client_connector.join()
            # part 4 - play the game
            start_game()
            # part 5 - game ended, start anew
            reset_params()
            CONN_A.close()
            CONN_B.close()
        except Exception as e:
            pass
        print("Game over, sending out offer requests...")


if __name__ == "__main__":
    main()
| 28.387387 | 124 | 0.544272 |
1ec3c6228675b2382893b0415928b0d3b865d30c | 1,143 | py | Python | vagabond/ostypes.py | gnulnx/vagabond | d3acbcf01a52271eebcea98f22b1c831ba838d2e | [
"MIT"
] | null | null | null | vagabond/ostypes.py | gnulnx/vagabond | d3acbcf01a52271eebcea98f22b1c831ba838d2e | [
"MIT"
] | null | null | null | vagabond/ostypes.py | gnulnx/vagabond | d3acbcf01a52271eebcea98f22b1c831ba838d2e | [
"MIT"
] | null | null | null | """
This is a list of ostypes from:
VBoxManage list ostypes
"""
OSTYPES = ['Other', 'Other_64', 'Windows31', 'Windows95', 'Windows98', 'WindowsMe', 'WindowsNT4', 'Windows2000', 'WindowsXP', 'WindowsXP_64', 'Windows2003', 'Windows2003_64', 'WindowsVista', 'WindowsVista_64', 'Windows2008', 'Windows2008_64', 'Windows7', 'Windows7_64', 'Windows8', 'Windows8_64', 'Windows81', 'Windows81_64', 'Windows2012_64', 'WindowsNT', 'WindowsNT_64', 'Linux22', 'Linux24', 'Linux24_64', 'Linux26', 'Linux26_64', 'ArchLinux', 'ArchLinux_64', 'Debian', 'Debian_64', 'OpenSUSE', 'OpenSUSE_64', 'Fedora', 'Fedora_64', 'Gentoo', 'Gentoo_64', 'Mandriva', 'Mandriva_64', 'RedHat', 'RedHat_64', 'Turbolinux', 'Turbolinux_64', 'Ubuntu', 'Ubuntu_64', 'Xandros', 'Xandros_64', 'Oracle', 'Oracle_64', 'Linux', 'Linux_64', 'Solaris', 'Solaris_64', 'OpenSolaris', 'OpenSolaris_64', 'Solaris11_64', 'FreeBSD', 'FreeBSD_64', 'OpenBSD', 'OpenBSD_64', 'NetBSD', 'NetBSD_64', 'OS2Warp3', 'OS2Warp4', 'OS2Warp45', 'OS2eCS', 'OS2', 'MacOS', 'MacOS_64', 'MacOS106', 'MacOS106_64', 'MacOS107_64', 'MacOS108_64', 'MacOS109_64', 'DOS', 'Netware', 'L4', 'QNX', 'JRockitVE']
| 163.285714 | 1,069 | 0.685039 |
48810aeaaf6f06b8b45fc0036e14948467b682d8 | 5,891 | py | Python | src/airflow_docker/ext/aws/role_assumption.py | Jwan622/airflow-docker | 55310bc730f94bc1a293ba6e27ecf5bb663052ba | [
"Apache-2.0"
] | 17 | 2019-11-16T13:25:59.000Z | 2022-03-31T02:50:59.000Z | src/airflow_docker/ext/aws/role_assumption.py | Jwan622/airflow-docker | 55310bc730f94bc1a293ba6e27ecf5bb663052ba | [
"Apache-2.0"
] | 14 | 2019-09-13T20:02:15.000Z | 2022-03-16T19:23:13.000Z | src/airflow_docker/ext/aws/role_assumption.py | Jwan622/airflow-docker | 55310bc730f94bc1a293ba6e27ecf5bb663052ba | [
"Apache-2.0"
] | 2 | 2020-02-16T10:46:51.000Z | 2022-03-14T18:52:04.000Z | import os
import textwrap
import boto3
# STS AssumeRole hard limits.
MAX_ROLE_DURATION_SECONDS = 60 * 60 * 12  # 12 hours in seconds
MAX_ROLE_SESSION_NAME_LENGTH = 64

# Session name uniquely identifies the dag run / try / task in CloudTrail.
ROLE_SESSION_NAME_TEMPLATE = "{dag_run_id}__{task_instance_try}__{task_id}"

# Maps AWS credentials-file keys to the corresponding STS response fields.
credential_key_map = {
    "aws_access_key_id": "AccessKeyId",
    "aws_secret_access_key": "SecretAccessKey",
    "aws_session_token": "SessionToken",
}
def generate_role_credentials(
    role_arn, session_name, role_session_duration=MAX_ROLE_DURATION_SECONDS
):
    """Call STS AssumeRole and return the raw API response dict."""
    sts = boto3.client("sts")
    return sts.assume_role(
        RoleArn=role_arn,
        RoleSessionName=session_name,
        DurationSeconds=role_session_duration,
    )
def format_credentials_data(raw_credentials):
    """Project the STS AssumeRole response onto AWS credentials-file keys.

    Returns a dict with aws_access_key_id / aws_secret_access_key /
    aws_session_token, taken from raw_credentials["Credentials"].
    """
    source = raw_credentials["Credentials"]
    return {
        "aws_access_key_id": source["AccessKeyId"],
        "aws_secret_access_key": source["SecretAccessKey"],
        "aws_session_token": source["SessionToken"],
    }
def aws_credentials_file_format(
    aws_access_key_id, aws_secret_access_key, aws_session_token, profile="default"
):
    """Render one profile section of an AWS shared credentials file.

    Output ends with a trailing newline, matching the ini-style format
    expected by the AWS SDKs.
    """
    lines = [
        f"[{profile}]",
        f"aws_access_key_id={aws_access_key_id}",
        f"aws_secret_access_key={aws_secret_access_key}",
        f"aws_session_token={aws_session_token}",
    ]
    return "\n".join(lines) + "\n"
def generate_role_session_name(context):
    """Build the STS session name for this task try, capped at the STS limit."""
    dag_run = context["dag_run"]
    task_instance = context["task_instance"]
    session_name = ROLE_SESSION_NAME_TEMPLATE.format(
        dag_run_id=dag_run.id,
        task_instance_try=task_instance.try_number,
        task_id=task_instance.task_id,
    )
    # RoleSessionName may be at most 64 characters.
    return session_name[:MAX_ROLE_SESSION_NAME_LENGTH]
def get_credentials(context, role_arn, role_session_duration=MAX_ROLE_DURATION_SECONDS):
    """Assume *role_arn* using a session name derived from the task context
    and return the raw STS response."""
    return generate_role_credentials(
        role_arn=role_arn,
        session_name=generate_role_session_name(context),
        role_session_duration=role_session_duration,
    )
def write_credentials(credentials, credentials_path):
    """Write *credentials* (dict of aws_* keys) to an AWS credentials file,
    creating parent directories as needed."""
    contents = aws_credentials_file_format(**credentials)
    os.makedirs(os.path.dirname(credentials_path), exist_ok=True)
    with open(credentials_path, "wb") as handle:
        handle.write(contents.encode("utf-8"))
def find_role_session_duration(role_arn):
    """Look up the role's configured MaxSessionDuration (seconds) via IAM GetRole."""
    iam = boto3.client("iam")
    response = iam.get_role(RoleName=parse_role_name_from_arn(role_arn))
    return response["Role"]["MaxSessionDuration"]
def log_credentials(operator, raw_credentials):
    """Log the assumed role's key id, expiration, and assumed-role id
    (never the secret key or session token) via the operator's logger."""
    credentials = raw_credentials["Credentials"]
    assumed_user = raw_credentials["AssumedRoleUser"]
    operator.log.info("Assumed Role:")
    operator.log.info("  Access Key ID = {}".format(credentials["AccessKeyId"]))
    operator.log.info("  Expiration = {}".format(credentials["Expiration"].isoformat()))
    operator.log.info("  Arn = {}".format(assumed_user["AssumedRoleId"]))
def parse_role_name_from_arn(role_arn):
    """Extract the friendly role name from an IAM role ARN.

    IAM role ARNs have the form ``arn:aws:iam::<account>:role/<path/><name>``.
    IAM's GetRole API expects only ``<name>`` (without the path), so take the
    segment after the last '/'. The previous implementation skipped a fixed
    5 characters past the first occurrence of "role", which returned
    "path/Name" for pathed roles (e.g. role/service-role/MyRole) and broke
    GetRole for them.

    Raises:
        ValueError: if *role_arn* does not contain the ``:role/`` marker
            (the old ``str.index``-based code raised ValueError here too).
    """
    _, marker, resource = role_arn.partition(":role/")
    if not marker:
        raise ValueError("Not an IAM role ARN: {}".format(role_arn))
    return resource.rsplit("/", 1)[-1]
class AWSRoleAssumptionExtension:
    """Assume a role for your task.
    Session will have a session name of the following template:
    {dag_run_id}__{task_instance_try}__{task_id}
    Operator Keyword Arguments:
    role_arn: The role arn you want to assume
    role_session_duration: The number of seconds to assume the role for.
    Min: 900, Max: 43200 secs
    Defaults to Max of 43200 seconds (12 hours).
    role_set_env_vars: Use environment variables to configure AWS credentials (Default: False)
    """

    # Operator kwargs consumed by this extension.
    kwargs = {"role_arn", "role_session_duration", "role_set_env_vars"}

    @classmethod
    def post_prepare_environment(
        cls, operator, config, context, host_tmp_dir, session=None
    ):
        """Assume the configured role (if any) and expose its credentials to the
        operator's container, either via env vars or a mounted credentials file."""
        if "role_arn" in operator.extra_kwargs:
            role_arn = operator.extra_kwargs["role_arn"]
            if "role_session_duration" in operator.extra_kwargs:
                role_session_duration = int(
                    operator.extra_kwargs["role_session_duration"]
                )
            else:
                # No explicit duration: try the role's own MaxSessionDuration,
                # falling back to the STS maximum on any lookup failure.
                try:
                    role_session_duration = find_role_session_duration(role_arn)
                except Exception:
                    operator.log.exception(
                        "Something when wrong with finding the max role session duration."
                    )
                    role_session_duration = MAX_ROLE_DURATION_SECONDS
            operator.log.info("Assuming role: {}".format(role_arn))
            raw_credentials = get_credentials(
                context=context,
                role_arn=role_arn,
                role_session_duration=role_session_duration,
            )
            credentials = format_credentials_data(raw_credentials)
            if operator.extra_kwargs.get("role_set_env_vars"):
                # Inject credentials directly as container environment variables.
                operator.environment["AWS_ACCESS_KEY_ID"] = credentials[
                    "aws_access_key_id"
                ]
                operator.environment["AWS_SECRET_ACCESS_KEY"] = credentials[
                    "aws_secret_access_key"
                ]
                operator.environment["AWS_SESSION_TOKEN"] = credentials[
                    "aws_session_token"
                ]
            else:
                # Write a shared credentials file on the host tmp dir (mounted
                # into the container at operator.tmp_dir) and point the SDKs at it.
                host_credentials_path = os.path.join(
                    host_tmp_dir, ".aws", "credentials"
                )
                container_credentials_path = os.path.join(
                    operator.tmp_dir, ".aws", "credentials"
                )
                write_credentials(
                    credentials=credentials, credentials_path=host_credentials_path
                )
                operator.environment[
                    "AWS_SHARED_CREDENTIALS_FILE"
                ] = container_credentials_path
            log_credentials(operator, raw_credentials)
        else:
            operator.log.info("Not assuming role")
cecca6376416a4b63b9136498cfd11d2b64554c8 | 2,345 | py | Python | bgx/cli/bgx_cli/cli_config.py | DGT-Network/DGT-SDK | 3413ae22e79c13e71264271fa3f82203fd49f0b3 | [
"Apache-2.0"
] | null | null | null | bgx/cli/bgx_cli/cli_config.py | DGT-Network/DGT-SDK | 3413ae22e79c13e71264271fa3f82203fd49f0b3 | [
"Apache-2.0"
] | null | null | null | bgx/cli/bgx_cli/cli_config.py | DGT-Network/DGT-SDK | 3413ae22e79c13e71264271fa3f82203fd49f0b3 | [
"Apache-2.0"
] | 1 | 2021-01-12T21:38:01.000Z | 2021-01-12T21:38:01.000Z | # Copyright 2020 NTRLab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import logging
import os
import toml
LOGGER = logging.getLogger(__name__)
def _load_default_cli_config():
return {
'url': 'http://localhost:8008'
}
def load_cli_config(args):
    """Modifies ARGS in-place to have the attributes defined in the CLI
    config file if it doesn't already have them. Certain default
    values are given if they are not in ARGS or the config file.

    Precedence: explicit command-line values > cli.toml > built-in defaults
    (the toml config is applied first, so by the time the defaults are
    processed any toml-provided attribute is already set and skipped).
    """
    default_cli_config = _load_default_cli_config()
    toml_config = _load_toml_cli_config()
    for config in (toml_config, default_cli_config):
        for key, val in config.items():
            # Only fill in attributes that are missing or explicitly None.
            if key in args and getattr(args, key) is not None:
                pass
            else:
                setattr(args, key, val)
class CliConfigurationError(Exception):
    """Raised when the CLI configuration file exists but cannot be read."""
    pass
def _load_toml_cli_config(filename=None):
    """Load CLI settings from a TOML file (default: <config_dir>/cli.toml).

    Returns an empty dict when the file does not exist; raises
    CliConfigurationError when it exists but cannot be read.
    """
    if filename is None:
        filename = os.path.join(
            _get_config_dir(),
            'cli.toml')
    if not os.path.exists(filename):
        LOGGER.info(
            "Skipping CLI config loading from non-existent config file: %s",
            filename)
        return {}
    LOGGER.info("Loading CLI information from config: %s", filename)
    try:
        with open(filename) as fd:
            raw_config = fd.read()
    except IOError as e:
        raise CliConfigurationError(
            "Unable to load CLI configuration file: {}".format(str(e)))
    return toml.loads(raw_config)
def _get_config_dir():
if 'PEER_HOME' in os.environ:
return os.path.join(os.environ['PEER_HOME'], 'etc')
elif 'SAWTOOTH_HOME' in os.environ:
return os.path.join(os.environ['SAWTOOTH_HOME'], 'etc')
return '/etc/sawtooth'
| 28.253012 | 80 | 0.643497 |
bb10c0dedaf19a8da07fe4cb90805cd89947d271 | 1,608 | py | Python | tests/lint/files_exist.py | aunderwo/tools | dbfc6d8a7afcd7577a021b80e84ed9567ed0a3da | [
"MIT"
] | null | null | null | tests/lint/files_exist.py | aunderwo/tools | dbfc6d8a7afcd7577a021b80e84ed9567ed0a3da | [
"MIT"
] | null | null | null | tests/lint/files_exist.py | aunderwo/tools | dbfc6d8a7afcd7577a021b80e84ed9567ed0a3da | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import yaml
import nf_core.lint
def test_files_exist_missing_config(self):
    """Lint test: critical files missing FAIL"""
    new_pipeline = self._make_pipeline_copy()

    # Removing a required file must surface as a hard lint failure.
    os.remove(os.path.join(new_pipeline, "CHANGELOG.md"))

    lint_obj = nf_core.lint.PipelineLint(new_pipeline)
    lint_obj._load()
    lint_obj.nf_config["manifest.name"] = "testpipeline"

    results = lint_obj.files_exist()
    assert results["failed"] == ["File not found: `CHANGELOG.md`"]
def test_files_exist_missing_main(self):
    """Check if missing main issues warning"""
    new_pipeline = self._make_pipeline_copy()

    # main.nf is optional, so its absence is a warning rather than a failure.
    os.remove(os.path.join(new_pipeline, "main.nf"))

    lint_obj = nf_core.lint.PipelineLint(new_pipeline)
    lint_obj._load()

    results = lint_obj.files_exist()
    assert results["warned"] == ["File not found: `main.nf`"]
def test_files_exist_depreciated_file(self):
    """Check whether depreciated file issues warning"""
    new_pipeline = self._make_pipeline_copy()

    nf = os.path.join(new_pipeline, "parameters.settings.json")
    # Create the deprecated file portably instead of shelling out with
    # os.system("touch ..."), which does not exist on Windows and whose
    # exit status was silently ignored.
    with open(nf, "a"):
        pass

    lint_obj = nf_core.lint.PipelineLint(new_pipeline)
    lint_obj._load()

    results = lint_obj.files_exist()
    assert results["failed"] == ["File must be removed: `parameters.settings.json`"]
def test_files_exist_pass(self):
    """Lint check should pass if all files are there"""
    # An unmodified pipeline copy must produce no files_exist failures.
    new_pipeline = self._make_pipeline_copy()
    lint_obj = nf_core.lint.PipelineLint(new_pipeline)
    lint_obj._load()

    results = lint_obj.files_exist()
    assert results["failed"] == []
d042b6de423e0bb18eba089f16243ba6bdf16240 | 2,699 | py | Python | test/Subst/TypeError.py | datalogics-staylor/scons | 4c48deb6947066e53aac7d86621a7ec17f3b4034 | [
"MIT"
] | 3 | 2017-01-06T09:26:23.000Z | 2017-03-04T04:13:20.000Z | test/Subst/TypeError.py | datalogics-staylor/scons | 4c48deb6947066e53aac7d86621a7ec17f3b4034 | [
"MIT"
] | 2 | 2015-10-27T20:17:24.000Z | 2016-08-04T21:49:56.000Z | test/Subst/TypeError.py | datalogics-staylor/scons | 4c48deb6947066e53aac7d86621a7ec17f3b4034 | [
"MIT"
] | 4 | 2015-03-31T16:09:15.000Z | 2021-08-04T12:41:47.000Z | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify the exit status and error output if variable expansion
throws a TypeError.
"""
import TestSCons
test = TestSCons.TestSCons(match = TestSCons.match_re_dotall)
expect_build = r"""scons: \*\*\*%s TypeError `(unsubscriptable object|'NoneType' object is unsubscriptable)' trying to evaluate `%s'
"""
expect_read = "\n" + expect_build + TestSCons.file_expr
# Type errors at SConscript read time:
test.write('SConstruct', """\
env = Environment(NONE = None)
env.subst('${NONE[0]}')
""")
test.run(status=2, stderr=expect_read % ('', r'\$\{NONE\[0\]\}'))
# Type errors at build time:
test.write('SConstruct', """\
env = Environment(NONE = None)
env.Command('foo.bar', [], '${NONE[0]}')
""")
expect = expect_build % (r' \[foo\.bar\]', r'\$\{NONE\[0\]\}')
test.run(status=2, stderr=expect)
expect_build = r"""scons: \*\*\*%s TypeError `(not enough arguments; expected 3, got 1|func\(\) takes exactly 3 arguments \(1 given\))' trying to evaluate `%s'
"""
expect_read = "\n" + expect_build + TestSCons.file_expr
# Type errors at SConscript read time:
test.write('SConstruct', """\
def func(a, b, c):
pass
env = Environment(func = func)
env.subst('${func(1)}')
""")
test.run(status=2, stderr=expect_read % ('', r'\$\{func\(1\)\}'))
# Type errors at build time:
test.write('SConstruct', """\
def func(a, b, c):
pass
env = Environment(func = func)
env.Command('foo.bar', [], '${func(1)}')
""")
expect = expect_build % (r' \[foo\.bar\]', r'\$\{func\(1\)\}')
test.run(status=2, stderr=expect)
test.pass_test()
| 29.021505 | 159 | 0.701371 |
65193f9a5d38b2d54dd4ac7ff48e162c0544f301 | 13,440 | py | Python | pipe-cli/src/utilities/storage/common.py | cmbkoko1989/cloud-pipeline | 9af1218151ef02f87915726eb92c0cc626f7ab11 | [
"Apache-2.0"
] | null | null | null | pipe-cli/src/utilities/storage/common.py | cmbkoko1989/cloud-pipeline | 9af1218151ef02f87915726eb92c0cc626f7ab11 | [
"Apache-2.0"
] | null | null | null | pipe-cli/src/utilities/storage/common.py | cmbkoko1989/cloud-pipeline | 9af1218151ef02f87915726eb92c0cc626f7ab11 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017-2020 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from abc import abstractmethod, ABCMeta
import click
import jwt
from src.config import Config
from src.model.data_storage_wrapper_type import WrapperType
class StorageOperations:
    """Shared path, tag, and listing helpers used by the storage backends.

    The class is used as a namespace of classmethods; ``__config__`` caches
    the CLI ``Config`` singleton after first use.
    """
    PATH_SEPARATOR = '/'
    DEFAULT_PAGE_SIZE = 100
    MAX_TAGS_NUMBER = 10
    MAX_KEY_LENGTH = 128
    MAX_VALUE_LENGTH = 256
    TAG_SHORTEN_SUFFIX = '...'
    # Matches characters NOT allowed in a tag value; any match rejects the tag.
    TAGS_VALIDATION_PATTERN = re.compile('[^a-zA-Z0-9\s_.\-@:+/\\\]+')
    CP_SOURCE_TAG = 'CP_SOURCE'
    CP_OWNER_TAG = 'CP_OWNER'
    # scheme://bucket/path
    STORAGE_PATH = '%s://%s/%s'
    __config__ = None  # lazily-initialised Config instance

    @classmethod
    def get_proxy_config(cls, target_url=None):
        """Return proxy settings resolved for *target_url*, or None if no proxy."""
        if cls.__config__ is None:
            cls.__config__ = Config.instance()
        if cls.__config__.proxy is None:
            return None
        else:
            return cls.__config__.resolve_proxy(target_url=target_url)

    @classmethod
    def init_wrapper(cls, wrapper, versioning=False):
        """Populate wrapper.exists_flag / wrapper.is_file_flag by listing storage.

        A trailing delimiter on the path means "folder expected", so file
        matches are not accepted in that case.
        """
        delimiter = StorageOperations.PATH_SEPARATOR
        prefix = StorageOperations.get_prefix(wrapper.path)
        check_file = True
        if prefix.endswith(delimiter):
            prefix = prefix[:-1]
            check_file = False
        listing_manager = wrapper.get_list_manager(show_versions=versioning)
        for item in listing_manager.list_items(prefix, show_all=True):
            if prefix.endswith(item.name.rstrip(delimiter)) and (check_file or item.type == 'Folder'):
                wrapper.exists_flag = True
                wrapper.is_file_flag = item.type == 'File'
                break
        return wrapper

    @classmethod
    def get_prefix(cls, path, delimiter=PATH_SEPARATOR):
        """Return *path* without a leading delimiter; bare delimiter if path is empty."""
        if path:
            prefix = path
            if prefix.startswith(delimiter):
                prefix = prefix[1:]
        else:
            prefix = delimiter
        return prefix

    @classmethod
    def get_item_name(cls, path, prefix, delimiter=PATH_SEPARATOR):
        """Compute the display name of *path* relative to *prefix*.

        Keeps the last prefix segment for nested matches and preserves a
        trailing delimiter for folder names.
        """
        possible_folder_name = prefix if prefix.endswith(delimiter) else \
            prefix + StorageOperations.PATH_SEPARATOR
        if prefix and path.startswith(prefix) and path != possible_folder_name and path != prefix:
            if not path == prefix:
                splitted = prefix.split(StorageOperations.PATH_SEPARATOR)
                return splitted[len(splitted) - 1] + path[len(prefix):]
            else:
                return path[len(prefix):]
        elif not path.endswith(StorageOperations.PATH_SEPARATOR) and path == prefix:
            return os.path.basename(path)
        elif path == possible_folder_name:
            return os.path.basename(path.rstrip(StorageOperations.PATH_SEPARATOR)) + StorageOperations.PATH_SEPARATOR
        else:
            return path

    @classmethod
    def normalize_path(cls, destination_wrapper, relative_path, delimiter=PATH_SEPARATOR):
        """Build the destination key for *relative_path*, collapsing duplicate
        delimiters and stripping a leading one.

        When the destination is a file (and has no trailing delimiter) its own
        path is used verbatim.
        """
        if destination_wrapper.path.endswith(delimiter) or not destination_wrapper.is_file():
            if os.path.sep != delimiter:
                # Convert OS-specific separators (e.g. Windows '\\') to '/'.
                relative_path = relative_path.replace(os.path.sep, delimiter)
            skip_separator = destination_wrapper.path.endswith(delimiter)
            if destination_wrapper.path:
                if skip_separator:
                    destination_key = destination_wrapper.path + relative_path
                else:
                    destination_key = destination_wrapper.path + delimiter + relative_path
            else:
                destination_key = relative_path
        else:
            destination_key = destination_wrapper.path
        result = cls.remove_double_slashes(destination_key)
        if result.startswith(delimiter):
            return result[1:]
        else:
            return result

    @classmethod
    def remove_double_slashes(cls, path, delimiter=PATH_SEPARATOR):
        """Collapse runs of delimiters into a single one."""
        return re.sub(delimiter + '+', delimiter, path)

    @classmethod
    def show_progress(cls, quiet, size):
        """Progress bars are shown only for non-quiet transfers of known, non-zero size."""
        return not quiet and size is not None and size != 0

    @classmethod
    def get_local_file_size(cls, path):
        """Size of a local file in bytes, or None if it cannot be stat'ed."""
        try:
            return os.path.getsize(path)
        except OSError:
            return None

    @classmethod
    def without_prefix(cls, string, prefix):
        """Strip *prefix* from *string*; returns None (implicitly) when absent."""
        if string.startswith(prefix):
            return string[len(prefix):]

    @classmethod
    def without_suffix(cls, string, suffix):
        """Strip *suffix* from *string*; returns None (implicitly) when absent."""
        if string.endswith(suffix):
            return string[:-len(suffix)]

    @classmethod
    def is_relative_path(cls, full_path, prefix, delimiter=PATH_SEPARATOR):
        """True when *full_path* equals *prefix* or lies under it as a folder."""
        relative_path = StorageOperations.without_prefix(full_path, prefix)
        return not relative_path or relative_path.startswith(delimiter)

    @classmethod
    def parse_tags(cls, tags):
        """Parse KEY=VALUE tag strings into a dict.

        Raises ValueError for too many tags or malformed pairs; silently skips
        (with a console message) over-long keys and invalid values; over-long
        values are truncated with a '...' suffix.
        """
        if not tags:
            return {}
        if len(tags) > cls.MAX_TAGS_NUMBER:
            raise ValueError(
                "Maximum allowed number of tags is {}. Provided {} tags.".format(cls.MAX_TAGS_NUMBER, len(tags)))
        tags_dict = {}
        for tag in tags:
            if "=" not in tag:
                raise ValueError("Tags must be specified as KEY=VALUE pair.")
            parts = tag.split("=", 1)
            key = parts[0]
            if len(key) > cls.MAX_KEY_LENGTH:
                click.echo("Maximum key value is {}. Provided key {}.".format(cls.MAX_KEY_LENGTH, key))
                continue
            value = parts[1]
            value = value.replace('\\', '/')
            if not value or value.isspace() or bool(StorageOperations.TAGS_VALIDATION_PATTERN.search(value)):
                click.echo("The tag value you have provided is invalid: %s. The tag %s will be skipped." % (value, key))
                continue
            if len(value) > cls.MAX_VALUE_LENGTH:
                value = value[:cls.MAX_VALUE_LENGTH - len(cls.TAG_SHORTEN_SUFFIX)] + cls.TAG_SHORTEN_SUFFIX
            tags_dict[key] = value
        return tags_dict

    @classmethod
    def get_user(cls):
        """Extract the current user name from the JWT access token's 'sub' claim.

        The token signature is deliberately NOT verified here (verify=False):
        the token is only used as a local identity hint.
        """
        config = Config.instance()
        user_info = jwt.decode(config.access_key, verify=False)
        if 'sub' in user_info:
            return user_info['sub']
        raise RuntimeError('Cannot find user info.')

    @classmethod
    def generate_tags(cls, raw_tags, source):
        """Parse user tags and force-set the CP_SOURCE / CP_OWNER system tags."""
        tags = StorageOperations.parse_tags(raw_tags)
        tags[StorageOperations.CP_SOURCE_TAG] = source
        tags[StorageOperations.CP_OWNER_TAG] = StorageOperations.get_user()
        return tags

    @classmethod
    def source_tags(cls, tags, source_path, storage_wrapper):
        """Return default CP_SOURCE / CP_OWNER tags for any not already in *tags*."""
        bucket = storage_wrapper.bucket
        default_tags = {}
        if StorageOperations.CP_SOURCE_TAG not in tags:
            scheme = WrapperType.cloud_scheme(bucket.type)
            default_tags[StorageOperations.CP_SOURCE_TAG] = StorageOperations.STORAGE_PATH \
                % (scheme, bucket.name, source_path)
        if StorageOperations.CP_OWNER_TAG not in tags:
            default_tags[StorageOperations.CP_OWNER_TAG] = StorageOperations.get_user()
        return default_tags

    @classmethod
    def get_items(cls, listing_manager, relative_path, delimiter=PATH_SEPARATOR):
        """Yield ('File', full_path, relative_name, size) tuples under *relative_path*."""
        prefix = StorageOperations.get_prefix(relative_path).rstrip(delimiter)
        for item in listing_manager.list_items(prefix, recursive=True, show_all=True):
            if not StorageOperations.is_relative_path(item.name, prefix):
                continue
            if item.name == relative_path:
                item_relative_path = os.path.basename(item.name)
            else:
                item_relative_path = StorageOperations.get_item_name(item.name, prefix + delimiter)
            yield ('File', item.name, item_relative_path, item.size)

    @classmethod
    def file_is_empty(cls, size):
        """True for missing (None) or zero sizes."""
        return not size or size == 0
class AbstractTransferManager:
    # Python 2 style abstract base declaration; a no-op under Python 3,
    # where abc.ABCMeta would need to be set as the metaclass explicitly.
    __metaclass__ = ABCMeta

    @abstractmethod
    def transfer(self, source_wrapper, destination_wrapper, path=None, relative_path=None, clean=False,
                 quiet=False, size=None, tags=(), skip_existing=False):
        """
        Transfers data from the source storage to the destination storage.

        :param source_wrapper: Source data storage resource wrapper.
        :type source_wrapper: DataStorageWrapper.
        :param destination_wrapper: Destination data storage resource wrapper.
        :type destination_wrapper: DataStorageWrapper.
        :param path: Transfer data full path.
        :param relative_path: Transfer data relative path.
        :param clean: Remove source files after the transferring.
        :param quiet: True if quite mode specified.
        :param size: Size of the transfer source object.
        :param tags: Additional tags that will be included to the transferring object.
        Tags CP_SOURCE and CP_OWNER will be included by default.
        :param skip_existing: Skips transfer objects that already exist in the destination storage.
        """
        pass
class AbstractListingManager:
    # Python 2 style abstract base declaration; a no-op under Python 3.
    __metaclass__ = ABCMeta

    @abstractmethod
    def list_items(self, relative_path=None, recursive=False, page_size=StorageOperations.DEFAULT_PAGE_SIZE,
                   show_all=False):
        """
        Lists files and folders by a relative path in the current storage.

        :param relative_path: Storage relative path to be listed.
        :param recursive: Specifies if the listing has to be recursive.
        :param page_size: Max number of items to return. The argument is ignored if show_all argument is specified.
        :param show_all: Specifies if all items have to be listed.
        """
        pass

    @abstractmethod
    def get_summary_with_depth(self, max_depth, relative_path=None):
        """
        Returns tree with storage usage statistic under the given relative path and according to given depth.

        :param max_depth: returns N or fewer levels below
        :param relative_path: Storage relative path to be processed
        :return: tree with storage usage statistic
        """
        pass

    @abstractmethod
    def get_summary(self, relative_path=None):
        """
        Calculates storage usage statistic according to relative path

        :param relative_path: Storage relative path to be processed
        :return: <Storage path>, <total objects by path>, <total objects size>
        """
        pass

    def get_items(self, relative_path):
        """
        Returns all files under the given relative path in forms of tuples with the following structure:
        ('File', full_path, relative_path, size)

        :param relative_path: Path to a folder or a file.
        :return: Generator of file tuples.
        """
        prefix = StorageOperations.get_prefix(relative_path).rstrip(StorageOperations.PATH_SEPARATOR)
        for item in self.list_items(prefix, recursive=True, show_all=True):
            if not StorageOperations.is_relative_path(item.name, prefix):
                continue
            if item.name == relative_path:
                item_relative_path = os.path.basename(item.name)
            else:
                item_relative_path = StorageOperations.get_item_name(item.name, prefix + StorageOperations.PATH_SEPARATOR)
            yield ('File', item.name, item_relative_path, item.size)

    def folder_exists(self, relative_path, delimiter=StorageOperations.PATH_SEPARATOR):
        """True if a folder exists at *relative_path* (checked via listing)."""
        prefix = StorageOperations.get_prefix(relative_path).rstrip(delimiter) + delimiter
        for item in self.list_items(prefix, show_all=True):
            if prefix.endswith(item.name):
                return True
        return False

    @abstractmethod
    def get_file_tags(self, relative_path):
        # Return the object tags for the file at *relative_path*.
        pass

    def get_file_size(self, relative_path):
        """Size in bytes of the exact file at *relative_path*, or None if absent."""
        items = self.list_items(relative_path, show_all=True, recursive=True)
        for item in items:
            if item.name == relative_path:
                return item.size
        return None
class AbstractDeleteManager:
    """Abstract base for storage deletion backends."""

    __metaclass__ = ABCMeta

    @abstractmethod
    def delete_items(self, relative_path, recursive=False, exclude=[], include=[], version=None, hard_delete=False):
        """Delete every item under the given path.

        NOTE(review): the mutable list defaults for exclude/include are shared
        between calls; implementations should not mutate them in place.

        :param relative_path: Storage relative path to delete.
        :param recursive: Must be True to delete folders (subtree deletion).
        :param exclude: Exclude item pattern.
        :param include: Include item pattern.
        :param version: Specific version to delete.
        :param hard_delete: If True, delete all versions of each item.
        """
        pass
class AbstractRestoreManager:
    """Abstract base for storage version-restore backends."""

    __metaclass__ = ABCMeta

    @abstractmethod
    def restore_version(self, version, exclude, include, recursive):
        """Restore an item version.

        :param version: Version to be restored.
        :param exclude: Exclude item pattern.
        :param include: Include item pattern.
        :param recursive: If True, restore recursively.
        """
        pass
| 39.41349 | 122 | 0.654836 |
41da4301b9f0f3407e688896b9acee7de3c6239f | 2,088 | py | Python | tardis/energy_input/tests/test_calculate_opacity.py | sonachitchyan/tardis | a8bad890d8ccd906993012e954ea7bcd683a96b7 | [
"BSD-3-Clause"
] | null | null | null | tardis/energy_input/tests/test_calculate_opacity.py | sonachitchyan/tardis | a8bad890d8ccd906993012e954ea7bcd683a96b7 | [
"BSD-3-Clause"
] | null | null | null | tardis/energy_input/tests/test_calculate_opacity.py | sonachitchyan/tardis | a8bad890d8ccd906993012e954ea7bcd683a96b7 | [
"BSD-3-Clause"
] | null | null | null | import pytest
import numpy.testing as npt
import tardis.energy_input.calculate_opacity as calculate_opacity
@pytest.mark.parametrize(
    ["electron_number_density", "energy", "expected"],
    [
        (1.0e11, 511.0, 2.865396624016367e-14),
        (1e15, 255.5, 3.743906253489761e-10),
        (1e5, 511.0e7, 4.318577913631238e-26),
    ],
)
def test_compton_opacity_calculation(energy, electron_number_density, expected):
    """Compton opacity matches the precomputed reference value.

    Parameters
    ----------
    energy : float
    electron_number_density : float
    """
    npt.assert_almost_equal(
        calculate_opacity.compton_opacity_calculation(energy, electron_number_density),
        expected,
    )
@pytest.mark.parametrize(
    ["ejecta_density", "energy", "iron_group_fraction", "expected"],
    [
        (1.0, 511.0, 0.0, 0.00015028056615643418),
        (1e-2, 255.5, 0.5, 8.903267700390038e-05),
        (1e-2, 255.5, 0.25, 5.1069068712110425e-05),
        (1e5, 511.0e7, 1.0, 0.0),
    ],
)
def test_photoabsorption_opacity_calculation(
    energy, ejecta_density, iron_group_fraction, expected
):
    """Photoabsorption opacity matches the precomputed reference value.

    Parameters
    ----------
    energy : float
    ejecta_density : float
    iron_group_fraction : float
    """
    npt.assert_almost_equal(
        calculate_opacity.photoabsorption_opacity_calculation(
            energy, ejecta_density, iron_group_fraction
        ),
        expected,
    )
@pytest.mark.parametrize(
    ["ejecta_density", "energy", "iron_group_fraction", "expected"],
    [
        (1.0, 511.0, 0.0, 0.0),
        (1e-2, 1500, 0.5, 2.743980356831218e-06),
        (1e-2, 1200, 0.25, 8.846018943383742e-06),
        (1e5, 511.0e7, 1.0, 1111355719.7411418),
    ],
)
def test_pair_creation_opacity_calculation(
    energy, ejecta_density, iron_group_fraction, expected
):
    """Pair-creation opacity matches the precomputed reference value.

    Parameters
    ----------
    energy : float
    ejecta_density : float
    iron_group_fraction : float
    """
    npt.assert_almost_equal(
        calculate_opacity.pair_creation_opacity_calculation(
            energy, ejecta_density, iron_group_fraction
        ),
        expected,
    )
| 26.43038 | 80 | 0.660441 |
7485175556adf64b9a8bbce3fe5422d7e41dadc3 | 3,362 | py | Python | datasets/samsum/samsum.py | leondz/datasets | 4110fb6034f79c5fb470cf1043ff52180e9c63b7 | [
"Apache-2.0"
] | 3,395 | 2020-05-13T21:16:50.000Z | 2020-09-10T14:36:50.000Z | datasets/samsum/samsum.py | leondz/datasets | 4110fb6034f79c5fb470cf1043ff52180e9c63b7 | [
"Apache-2.0"
] | 370 | 2020-05-13T21:28:57.000Z | 2020-09-10T11:03:38.000Z | datasets/samsum/samsum.py | leondz/datasets | 4110fb6034f79c5fb470cf1043ff52180e9c63b7 | [
"Apache-2.0"
] | 258 | 2020-05-15T01:17:09.000Z | 2020-09-10T12:41:43.000Z | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SAMSum dataset."""
import json
import py7zr
import datasets
# BibTeX entry for citing the SAMSum corpus paper (Gliwa et al., 2019).
_CITATION = """
@article{gliwa2019samsum,
title={SAMSum Corpus: A Human-annotated Dialogue Dataset for Abstractive Summarization},
author={Gliwa, Bogdan and Mochol, Iwona and Biesek, Maciej and Wawer, Aleksander},
journal={arXiv preprint arXiv:1911.12237},
year={2019}
}
"""
# Dataset-card description surfaced by the `datasets` library.
_DESCRIPTION = """
SAMSum Corpus contains over 16k chat dialogues with manually annotated
summaries.
There are two features:
- dialogue: text of dialogue.
- summary: human written summary of the dialogue.
- id: id of a example.
"""
# Landing page of the paper describing the corpus.
_HOMEPAGE = "https://arxiv.org/abs/1911.12237"
# License identifier of the corpus.
_LICENSE = "CC BY-NC-ND 4.0"
# 7z archive containing the train.json / test.json / val.json splits
# (see _split_generators below).
_URL = "https://huggingface.co/datasets/samsum/resolve/main/data/corpus.7z"
class Samsum(datasets.GeneratorBasedBuilder):
    """Dataset builder for the SAMSum dialogue-summarization corpus."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="samsum"),
    ]

    def _info(self):
        """Describe the dataset schema (id/dialogue/summary) and card metadata."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "dialogue": datasets.Value("string"),
                    "summary": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the 7z archive and declare one generator per split."""
        archive_path = dl_manager.download(_URL)
        split_to_file = [
            (datasets.Split.TRAIN, "train.json", "train"),
            (datasets.Split.TEST, "test.json", "test"),
            (datasets.Split.VALIDATION, "val.json", "val"),
        ]
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "filepath": (archive_path, file_name),
                    "split": split_tag,
                },
            )
            for split_name, file_name, split_tag in split_to_file
        ]

    def _generate_examples(self, filepath, split):
        """Yield (id, example) pairs from one JSON member of the 7z archive."""
        archive_path, member_name = filepath
        with open(archive_path, "rb") as raw_archive:
            with py7zr.SevenZipFile(raw_archive, "r") as seven_zip:
                # readall() maps member names to in-memory file objects.
                for name, payload in seven_zip.readall().items():
                    if name != member_name:
                        continue
                    for record in json.load(payload):
                        yield record["id"], record
| 29.752212 | 93 | 0.584176 |
fc2173739e0364c257c70cc0caacc7bd8615f90e | 3,821 | py | Python | python/hsfs/feature_group_validation.py | kouzant/feature-store-api | f6a666e11fd33ae814a79c588ff49547b942b09d | [
"Apache-2.0"
] | 1 | 2021-02-11T10:00:04.000Z | 2021-02-11T10:00:04.000Z | python/hsfs/feature_group_validation.py | kouzant/feature-store-api | f6a666e11fd33ae814a79c588ff49547b942b09d | [
"Apache-2.0"
] | 1 | 2021-02-25T11:47:03.000Z | 2021-02-25T11:47:03.000Z | python/hsfs/feature_group_validation.py | isabella232/feature-store-api | 6f90c6039519422114c35ed47e1ea8765134e7ba | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2020 Logical Clocks AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import humps
import json
from hsfs import util
class FeatureGroupValidation:
    """Metadata object representing the validation result of a feature group.

    The per-expectation outcomes are available through ``expectation_results``.
    """

    def __init__(
        self,
        validation_time,
        expectation_results,
        validation_id=None,
        status=None,
        validation_path=None,
        commit_time=None,
        href=None,
        expand=None,
        items=None,
        count=None,
        type=None,
    ):
        # href/expand/items/count/type show up in REST payloads but are
        # intentionally not stored on the instance.
        self._validation_id = validation_id
        self._validation_time = validation_time
        self._status = status
        self._expectation_results = expectation_results
        self._validation_path = validation_path
        self._commit_time = commit_time

    @classmethod
    def from_response_json(cls, json_dict):
        """Build one instance (or a list of them) from a camelCase REST payload."""
        decamelized = humps.decamelize(json_dict)
        if "count" not in decamelized:
            return cls(**decamelized)
        if decamelized["count"] == 0:
            return []
        return [cls(**validation) for validation in decamelized["items"]]

    def json(self):
        """Serialize this object to a JSON string via the feature store encoder."""
        return json.dumps(self, cls=util.FeatureStoreEncoder)

    def to_dict(self):
        """Return the camelCase dict payload sent to the backend."""
        return {
            "validationId": self._validation_id,
            "validationTime": self._validation_time,
            "expectationResults": self._expectation_results,
        }

    @property
    def validation_id(self):
        """Unique id of this feature group validation."""
        return self._validation_id

    @validation_id.setter
    def validation_id(self, validation_id):
        self._validation_id = validation_id

    @property
    def validation_time(self):
        """Seconds timestamp at which feature validation started."""
        return self._validation_time

    @validation_time.setter
    def validation_time(self, validation_time):
        self._validation_time = validation_time

    @property
    def status(self):
        """Post-ingestion outcome: one of "NONE", "SUCCESS", "WARNING", "FAILURE"."""
        return self._status

    @status.setter
    def status(self, status):
        self._status = status

    @property
    def expectation_results(self):
        """List of per-expectation validation results."""
        return self._expectation_results

    @expectation_results.setter
    def expectation_results(self, expectation_results):
        self._expectation_results = expectation_results

    @property
    def validation_path(self):
        """Hopsworks datasets path where the validation results are persisted."""
        return self._validation_path

    @validation_path.setter
    def validation_path(self, validation_path):
        self._validation_path = validation_path

    @property
    def commit_time(self):
        """Seconds timestamp of the dataframe commit (time-travel FGs only)."""
        return self._commit_time

    @commit_time.setter
    def commit_time(self, commit_time):
        self._commit_time = commit_time
| 30.325397 | 112 | 0.669197 |
4b6696b128e11b8a49d7fc7c468bf4d419754cdb | 5,375 | py | Python | yosaipy2/core/subject/identifier.py | jellybean4/yosaipy2 | 6c57f1ed2eedf25f8c0b06773a4d2c3c2a01dc26 | [
"Apache-2.0"
] | null | null | null | yosaipy2/core/subject/identifier.py | jellybean4/yosaipy2 | 6c57f1ed2eedf25f8c0b06773a4d2c3c2a01dc26 | [
"Apache-2.0"
] | null | null | null | yosaipy2/core/subject/identifier.py | jellybean4/yosaipy2 | 6c57f1ed2eedf25f8c0b06773a4d2c3c2a01dc26 | [
"Apache-2.0"
] | null | null | null | """
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import collections
from yosaipy2.core.utils.utils import get_logger
from yosaipy2.core import (
subject_abcs,
)
class SimpleIdentifierCollection(subject_abcs.MutableIdentifierCollection):
    """Ordered mapping of authentication source name -> identifier for a Subject.

    An *identifier* (a "principal" in Shiro terms) is any identifying
    attribute of a Subject, such as a username or user id. The key is the
    name of the source that contributed the identifier when the Subject was
    authenticated. Use ``from_source`` to fetch a specific source's
    identifier and ``source_names`` to inspect the contributing sources.

    Unlike Shiro, Yosai stores a single scalar identifier per realm rather
    than a set.
    """

    def __init__(self, source_name=None, identifier=None,
                 identifier_collection=None):
        """
        :type source_name: String
        :type identifier: String or ?
        :type identifier_collection: subject_abcs.IdentifierCollection
        """
        self.source_identifiers = collections.OrderedDict()
        self._primary_identifier = None
        self._logger = get_logger()

        if identifier_collection:
            self.add_collection(identifier_collection=identifier_collection)
        elif source_name and identifier:
            self.add(source_name=source_name, identifier=identifier)

    @property
    def primary_identifier(self):
        """The first identifier added at authentication time (cached lazily)."""
        if self._primary_identifier:
            return self._primary_identifier
        try:
            first = next(iter(self.source_identifiers.values()))
        except (AttributeError, TypeError, StopIteration):
            msg = "Failed to obtain primary identifier"
            self._logger.warning(msg)
            return None
        self._primary_identifier = first
        return first

    def add(self, source_name, identifier):
        """Record the identifier contributed by source_name.

        :type source_name: String
        :type identifier: String
        """
        self.source_identifiers[source_name] = identifier

    def add_collection(self, identifier_collection):
        """Merge another collection's source identifiers into this one.

        :type identifier_collection: a SimpleIdentifierCollection
        """
        try:
            self.source_identifiers.update(identifier_collection.source_identifiers)
        except AttributeError:
            msg = "Invalid identifier collection passed as argument"
            raise AttributeError(msg)

    def by_type(self, identifier_class):
        """Return the set of identifiers that are instances of identifier_class.

        :param identifier_class: the class to match identifiers against
        :returns: a set of matching identifiers
        """
        return {ident for ident in self.source_identifiers.values()
                if isinstance(ident, identifier_class)}

    def from_source(self, source_name):
        """Return the identifier contributed by source_name (None if absent)."""
        return self.source_identifiers.get(source_name)

    @property
    def source_names(self):
        """Immutable tuple of the source names that contributed identifiers."""
        return tuple(self.source_identifiers.keys())

    @property
    def is_empty(self):
        """True when no source has contributed an identifier yet."""
        return not self.source_identifiers.keys()

    def clear(self):
        """Drop every recorded source identifier."""
        self.source_identifiers = collections.OrderedDict()

    def __eq__(self, other):
        if self is other:
            return True
        return (isinstance(other, subject_abcs.MutableIdentifierCollection)
                and self.source_identifiers == other.source_identifiers)

    def __repr__(self):
        return "SimpleIdentifierCollection({0}, primary_identifier={1})".format(
            self.source_identifiers, self.primary_identifier)

    def __getstate__(self):
        # Serialize the ordered mapping as a list of [key, value] pairs so
        # insertion order survives round-tripping.
        return {
            'source_identifiers': [[name, ident] for name, ident in
                                   self.source_identifiers.items()],
            '_primary_identifier': self._primary_identifier
        }

    def __setstate__(self, state):
        self.source_identifiers = collections.OrderedDict(state['source_identifiers'])
        self._primary_identifier = state['_primary_identifier']
| 37.587413 | 86 | 0.685581 |
8ba84d23b1aba260732fdae708e0b32e9d3e1eb0 | 99 | py | Python | 21/module3.py | dunjin/DataAnalysisInAction | bec8af3763831b671a84a1355b5d220972f81137 | [
"CNRI-Python",
"Xnet",
"X11"
] | 656 | 2018-12-31T05:35:33.000Z | 2022-03-13T15:37:58.000Z | 21/module3.py | yideng2010/DataAnalysisInAction | 449e9fe961365d07052c16355d86f1a5f4748d7d | [
"CNRI-Python",
"Xnet",
"X11"
] | 14 | 2019-01-02T01:55:07.000Z | 2020-10-12T11:28:01.000Z | 21/module3.py | yideng2010/DataAnalysisInAction | 449e9fe961365d07052c16355d86f1a5f4748d7d | [
"CNRI-Python",
"Xnet",
"X11"
] | 260 | 2019-01-10T01:44:08.000Z | 2022-03-18T11:33:52.000Z | tf = TfidfVectorizer(stop_words=stop_words, max_df=0.5)
features = tf.fit_transform(train_contents) | 49.5 | 55 | 0.828283 |
a89ead4da3946b81cd7a219914a69ea0a85264b0 | 7,729 | py | Python | binarySearchTreeLazyDeletionAVL.py | AndreaG93/BST-Lazy-Deletion | 2c985a148a2bcdd20119f1d6026d48561e04c2b8 | [
"MIT"
] | 2 | 2019-01-08T20:24:28.000Z | 2020-12-08T07:16:00.000Z | binarySearchTreeLazyDeletionAVL.py | AndreaG93/BST-Lazy-Deletion | 2c985a148a2bcdd20119f1d6026d48561e04c2b8 | [
"MIT"
] | null | null | null | binarySearchTreeLazyDeletionAVL.py | AndreaG93/BST-Lazy-Deletion | 2c985a148a2bcdd20119f1d6026d48561e04c2b8 | [
"MIT"
] | 1 | 2020-12-08T07:12:17.000Z | 2020-12-08T07:12:17.000Z | '''
Created on Nov 29, 2017
@author: Andrea Graziani - matricola 0189326
@version: 1.0
'''
from binarySearchTreeNode import binarySearchTreeNodeAVLwithLazyDeletion
from binarySearchTreeAVL import binarySearchTreeAVL
class binarySearchTreeLazyDeletionAVL(binarySearchTreeAVL):
    """
    This class is used to represent a BST AVL with lazy deletion.

    Lazy deletion means 'delete' only flags a node as invalid
    (node._isValid = False) instead of unlinking it; invalid nodes are
    physically removed later by 'search' when restructuring is allowed.
    Duplicate / equal keys are always placed in the LEFT subtree (see
    'insert': key <= node key goes left).
    """

    def __init__(self, rootNode=None):
        '''
        Constructs a newly allocated 'binarySearchTreeLazyDeletionAVL' object.
        @param rootNode: It represents a 'binarySearchTreeNodeAVLwithLazyDeletion' object.
        '''
        binarySearchTreeAVL.__init__(self, rootNode)

    def insert(self, key, value):
        """
        This function is used to insert a new (key, value) pair into tree.
        A lazily-deleted (invalid) node met along the search path may be
        reused: its key/value are overwritten and it is revalidated, so no
        new node is allocated in that case.
        @param key: Represents a key.
        @param value: Represents a value.
        """
        # If tree is empty, newly created node become root...
        # ---------------------------------------------------- #
        if (not self._rootNode):
            self._rootNode = binarySearchTreeNodeAVLwithLazyDeletion(key, value)
            return
        else:
            currentNode = self._rootNode
            parentNode = None
            insertToLeft = True

        # Search parent of newly created node...
        # ---------------------------------------------------- #
        while (currentNode is not None):

            parentNode = currentNode

            # Node is not deleted...
            # ---------------------------------------------------- #
            if (currentNode._isValid):

                # CASE 1: equal keys are routed to the left subtree.
                # ---------------------------------------------------- #
                if (key <= currentNode._key):
                    currentNode = currentNode._leftSon
                    insertToLeft = True

                # CASE 2:
                # ---------------------------------------------------- #
                else:
                    currentNode = currentNode._rightSon
                    insertToLeft = False

            # Node is deleted...
            # ---------------------------------------------------- #
            else:

                # CASE 1: key belongs strictly beyond the right child.
                # ---------------------------------------------------- #
                if (currentNode.hasRightSon() and (key > currentNode._rightSon._key)):
                    currentNode = currentNode._rightSon
                    insertToLeft = False

                # CASE 2: key belongs strictly before the left child.
                # ---------------------------------------------------- #
                elif (currentNode.hasLeftSon() and (key < currentNode._leftSon._key)):
                    currentNode = currentNode._leftSon
                    insertToLeft = True

                # CASE 3: reuse this deleted node in place of allocating one.
                # ---------------------------------------------------- #
                else:

                    # Copy data into deleted node...
                    # ---------------------------------------------------- #
                    currentNode._isValid = True
                    currentNode._key = key
                    currentNode._value = value
                    return

        # Now allocate a new 'binarySearchTreeNodeAVLwithLazyDeletion' object...
        # ---------------------------------------------------- #
        newNode = binarySearchTreeNodeAVLwithLazyDeletion(key, value)

        # Add parent...
        newNode._parent = parentNode

        # Insert new node...
        # ---------------------------------------------------- #
        if (insertToLeft):
            parentNode._leftSon = newNode
            newNode._isLeftSon = True
        else:
            parentNode._rightSon = newNode
            newNode._isLeftSon = False

        # If necessary, update balance factor...
        # (only needed when the parent gained its first child on one side)
        # ---------------------------------------------------- #
        if (parentNode.hasOnlyOneSon()):
            self._updateNodeSubtreesHeightAlongTree(newNode)

    def delete(self, key):
        """
        This function is used to delete first occurrence of a node with specified key from tree.
        The node is only flagged invalid (lazy deletion); the tree structure
        is left untouched.
        @param key: Represents a key.
        @return: A boolean value: 'True' if specified node is correctly deleted, otherwise 'False'.
        """
        currentNode = self._rootNode

        # Searching...
        # ---------------------------------------------------- #
        while(currentNode):

            # CASE 1:
            # ---------------------------------------------------- #
            if (key < currentNode._key):
                currentNode = currentNode._leftSon

            # CASE 2:
            # ---------------------------------------------------- #
            elif(key > currentNode._key):
                currentNode = currentNode._rightSon

            # CASE 3: matching key found.
            # ---------------------------------------------------- #
            else:
                if (currentNode._isValid):
                    currentNode._isValid = False
                    return True
                else:
                    # Already deleted: equal keys live in the left subtree
                    # (see insert), so keep searching there.
                    currentNode = currentNode._leftSon

        return False

    def search(self, key, allowRestructuring):
        """
        This function is used to search first occurrence of a node with specified key into tree.
        When 'allowRestructuring' is True, every invalid node met along the
        search path is physically removed afterwards via self.deleteNode
        (presumably inherited from binarySearchTreeAVL - not visible here).
        @param key: Represents a key.
        @param allowRestructuring: A boolean value used to enable or disable 'tree restructuring'.
        @return: If specified node exists, returns it; otherwise it returns 'None'
        """
        # This list object is used to store met invalid nodes...
        invalidNodeList = list()
        currentNode = self._rootNode

        # Searching...
        # ---------------------------------------------------- #
        while(currentNode):

            # Keeping track invalid nodes...
            if (allowRestructuring and (not currentNode._isValid)):
                invalidNodeList.append(currentNode)

            # CASE 1:
            # ---------------------------------------------------- #
            if (key < currentNode._key):
                currentNode = currentNode._leftSon

            # CASE 2:
            # ---------------------------------------------------- #
            elif(key > currentNode._key):
                currentNode = currentNode._rightSon

            # CASE 3: matching key found.
            # ---------------------------------------------------- #
            else:
                if (currentNode._isValid):
                    break
                else:
                    # Invalid match: a valid duplicate, if any, is in the
                    # left subtree (see insert).
                    currentNode = currentNode._leftSon

        # If requested, delete found invalid nodes physically...
        # ---------------------------------------------------- #
        for item in invalidNodeList:
            self.deleteNode(item)

        return currentNode
| 39.433673 | 99 | 0.379609 |
9685bbfc75f0b1708ce6b0370dd82a916f714228 | 1,628 | py | Python | tests/auth/test_mixins.py | KazakovDenis/django-extensions | ef3b3abe3c3d6563b73633bd25e3ff3ac9716661 | [
"MIT"
] | 4,057 | 2015-01-01T17:56:25.000Z | 2022-03-31T16:32:40.000Z | tests/auth/test_mixins.py | KazakovDenis/django-extensions | ef3b3abe3c3d6563b73633bd25e3ff3ac9716661 | [
"MIT"
] | 1,115 | 2015-01-01T14:59:38.000Z | 2022-03-28T22:05:55.000Z | tests/auth/test_mixins.py | KazakovDenis/django-extensions | ef3b3abe3c3d6563b73633bd25e3ff3ac9716661 | [
"MIT"
] | 951 | 2015-01-02T16:57:26.000Z | 2022-03-28T21:42:22.000Z | # -*- coding: utf-8 -*-
from django.test import TestCase, RequestFactory
from django.http import HttpResponse
from django.views.generic import DetailView
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.core.exceptions import PermissionDenied
from django_extensions.auth.mixins import ModelUserFieldPermissionMixin
from tests.testapp.models import HasOwnerModel
class EmptyResponseView(DetailView):
    """Detail view over HasOwnerModel whose GET always returns an empty 200 response."""

    model = HasOwnerModel

    def get(self, request, *args, **kwargs):
        """Ignore the request details and answer with an empty HttpResponse."""
        return HttpResponse()
class OwnerView(ModelUserFieldPermissionMixin, EmptyResponseView):
    # Name of the model field that the mixin compares against request.user.
    model_permission_user_field = 'owner'
class ModelUserFieldPermissionMixinTests(TestCase):
    """Exercise ModelUserFieldPermissionMixin through the OwnerView test view."""

    factory = RequestFactory()
    User = get_user_model()

    @classmethod
    def setUpTestData(cls):
        # One user owning one HasOwnerModel instance, shared by all tests.
        cls.user = cls.User.objects.create(username="Joe", password="pass")
        cls.ownerModel = HasOwnerModel.objects.create(owner=cls.user)

    def test_permission_pass(self):
        """The owning user is served the protected view (HTTP 200)."""
        request = self.factory.get('/permission-required/' + str(self.ownerModel.id))
        request.user = self.user
        response = OwnerView.as_view()(request)
        self.assertEqual(response.status_code, 200)

    def test_permission_denied_and_redirect(self):
        """A non-owner (anonymous) user is redirected away (HTTP 302)."""
        request = self.factory.get('/permission-required/' + str(self.ownerModel.id))
        request.user = AnonymousUser()
        response = OwnerView.as_view()(request)
        # NOTE(review): assertRaises without a callable or with-block is a
        # no-op as written; kept to preserve the original behavior.
        self.assertRaises(PermissionDenied)
        self.assertEqual(response.status_code, 302)
| 33.916667 | 85 | 0.738329 |
6c8d257ffa32f8071c2eec7d2e6ec00cf1bbdae6 | 782 | py | Python | car_detection.py | nycebyte/machine-learn | 08bd7fb19992dcc15da359e22e860ccb38aeccdf | [
"MIT"
] | 1 | 2022-03-25T08:24:09.000Z | 2022-03-25T08:24:09.000Z | car_detection.py | nycebyte/machine-learn | 08bd7fb19992dcc15da359e22e860ccb38aeccdf | [
"MIT"
] | null | null | null | car_detection.py | nycebyte/machine-learn | 08bd7fb19992dcc15da359e22e860ccb38aeccdf | [
"MIT"
] | null | null | null | # Title: Cars detection using Python and OpenCV.
"""
you need to install OpenCV by using this command in the terminal:
pip install opencv-python
and download "cars.xml" from GitHub.
"""
import cv2
# Haar cascade describing car features (download "cars.xml" separately).
cascade_src = 'cars.xml'
# The path and name of the video file that contains cars.
video_src = 'video.avi'

cap = cv2.VideoCapture(video_src)
car_cascade = cv2.CascadeClassifier(cascade_src)

while True:
    ret, img = cap.read()
    # read() yields img=None once no further frame is available.
    if img is None:
        break

    # The cascade works on grayscale frames.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # detectMultiScale(gray, scaleFactor, minNeighbors) -> (x, y, w, h) boxes.
    cars = car_cascade.detectMultiScale(gray, 1.1, 1)

    # Outline every detection in red (BGR), 2px thick.
    for (x, y, w, h) in cars:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)

    cv2.imshow('nycebyte', img)

    # ~30 fps playback; Esc (key code 27) stops early.
    if cv2.waitKey(33) == 27:
        break

# Release the capture device before tearing down the GUI windows.
cap.release()
cv2.destroyAllWindows()
| 22.342857 | 66 | 0.659847 |
b3b9e7681ab09c4496bf956ea791a9f81dbc1ace | 3,520 | py | Python | egs/aishell/ASR/local/compute_fbank_aishell.py | TIFOSI528/icefall | 6f7860a0a60b53026216fa4ba19048955951333e | [
"Apache-2.0"
] | 173 | 2021-07-01T03:36:53.000Z | 2022-03-30T09:17:51.000Z | egs/aishell/ASR/local/compute_fbank_aishell.py | TIFOSI528/icefall | 6f7860a0a60b53026216fa4ba19048955951333e | [
"Apache-2.0"
] | 200 | 2021-07-01T03:14:19.000Z | 2022-03-31T13:15:07.000Z | egs/aishell/ASR/local/compute_fbank_aishell.py | TIFOSI528/icefall | 6f7860a0a60b53026216fa4ba19048955951333e | [
"Apache-2.0"
] | 57 | 2021-07-15T09:38:09.000Z | 2022-03-29T02:03:48.000Z | #!/usr/bin/env python3
# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file computes fbank features of the aishell dataset.
It looks for manifests in the directory data/manifests.
The generated fbank features are saved in data/fbank.
"""
import argparse
import logging
import os
from pathlib import Path
import torch
from lhotse import CutSet, Fbank, FbankConfig, LilcomHdf5Writer
from lhotse.recipes.utils import read_manifests_if_cached
from icefall.utils import get_executor
# Torch's multithreaded behavior needs to be disabled or
# it wastes a lot of CPU and slow things down.
# Do this outside of main() in case it needs to take effect
# even when we are not invoking the main (e.g. when spawning subprocesses).
# NOTE: set_num_interop_threads must be called before any inter-op parallel
# work starts, which is another reason to run these at import time.
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
def compute_fbank_aishell(num_mel_bins: int = 80):
    """Compute fbank features for the aishell train/dev/test manifests.

    Reads manifests from data/manifests and writes features plus cut
    manifests to data/fbank. Partitions whose cut manifest already exists
    are skipped. The train partition additionally receives 0.9x and 1.1x
    speed-perturbed copies before feature extraction.

    :param num_mel_bins: Number of mel bins for the Fbank extractor.
    """
    src_dir = Path("data/manifests")
    output_dir = Path("data/fbank")
    num_jobs = min(15, os.cpu_count())

    dataset_parts = (
        "train",
        "dev",
        "test",
    )
    manifests = read_manifests_if_cached(
        dataset_parts=dataset_parts, output_dir=src_dir
    )
    assert manifests is not None

    extractor = Fbank(FbankConfig(num_mel_bins=num_mel_bins))

    with get_executor() as ex:  # Initialize the executor only once.
        for part_name, part_manifests in manifests.items():
            cuts_path = output_dir / f"cuts_{part_name}.json.gz"
            if cuts_path.is_file():
                logging.info(f"{part_name} already exists - skipping.")
                continue
            logging.info(f"Processing {part_name}")
            cut_set = CutSet.from_manifests(
                recordings=part_manifests["recordings"],
                supervisions=part_manifests["supervisions"],
            )
            if "train" in part_name:
                cut_set = (
                    cut_set + cut_set.perturb_speed(0.9) + cut_set.perturb_speed(1.1)
                )
            cut_set = cut_set.compute_and_store_features(
                extractor=extractor,
                storage_path=f"{output_dir}/feats_{part_name}",
                # when an executor is specified, make more partitions
                num_jobs=num_jobs if ex is None else 80,
                executor=ex,
                storage_type=LilcomHdf5Writer,
            )
            cut_set.to_json(cuts_path)
def get_args():
    """Parse command-line arguments for the fbank computation script."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--num-mel-bins",
        default=80,
        type=int,
        help="""The number of mel bins for Fbank""",
    )
    return arg_parser.parse_args()
if __name__ == "__main__":
    # Configure logging, then run feature extraction with CLI options.
    log_format = (
        "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
    )
    logging.basicConfig(format=log_format, level=logging.INFO)
    cli_args = get_args()
    compute_fbank_aishell(num_mel_bins=cli_args.num_mel_bins)
| 32 | 75 | 0.653977 |
336cf298a5d3f0c5248fe50d75293f86b51c2ed8 | 3,372 | py | Python | GUI/qt/ZetCode/customwidget.py | archu2020/python-2 | 19c626ca9fd37168db8a7ac075fd80c8e2971313 | [
"Apache-2.0"
] | 48 | 2017-12-24T12:19:55.000Z | 2022-02-26T13:14:27.000Z | GUI/qt/ZetCode/customwidget.py | 17610178081/python | 3975c678d985c468deecd03560d882e9d316bb63 | [
"Apache-2.0"
] | 3 | 2018-12-05T08:48:14.000Z | 2020-07-29T01:56:16.000Z | GUI/qt/ZetCode/customwidget.py | 17610178081/python | 3975c678d985c468deecd03560d882e9d316bb63 | [
"Apache-2.0"
] | 113 | 2017-08-09T03:10:04.000Z | 2022-03-26T16:05:01.000Z | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
ZetCode PyQt5 tutorial
In this example, we create a custom widget.
Author: Jan Bodnar
Website: zetcode.com
Last edited: August 2017
"""
from PyQt5.QtWidgets import (QWidget, QSlider, QApplication,
QHBoxLayout, QVBoxLayout)
from PyQt5.QtCore import QObject, Qt, pyqtSignal
from PyQt5.QtGui import QPainter, QFont, QColor, QPen
import sys
class Communicate(QObject):
updateBW = pyqtSignal(int)
class BurningWidget(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setMinimumSize(1, 30)
self.value = 75
self.num = [75, 150, 225, 300, 375, 450, 525, 600, 675]
def setValue(self, value):
self.value = value
def paintEvent(self, e):
qp = QPainter()
qp.begin(self)
self.drawWidget(qp)
qp.end()
def drawWidget(self, qp):
MAX_CAPACITY = 700
OVER_CAPACITY = 750
font = QFont('Serif', 7, QFont.Light)
qp.setFont(font)
size = self.size()
w = size.width()
h = size.height()
step = int(round(w / 10))
till = int(((w / OVER_CAPACITY) * self.value))
full = int(((w / OVER_CAPACITY) * MAX_CAPACITY))
if self.value >= MAX_CAPACITY:
qp.setPen(QColor(255, 255, 255))
qp.setBrush(QColor(255, 255, 184))
qp.drawRect(0, 0, full, h)
qp.setPen(QColor(255, 175, 175))
qp.setBrush(QColor(255, 175, 175))
qp.drawRect(full, 0, till-full, h)
else:
qp.setPen(QColor(255, 255, 255))
qp.setBrush(QColor(255, 255, 184))
qp.drawRect(0, 0, till, h)
pen = QPen(QColor(20, 20, 20), 1,
Qt.SolidLine)
qp.setPen(pen)
qp.setBrush(Qt.NoBrush)
qp.drawRect(0, 0, w-1, h-1)
j = 0
for i in range(step, 10*step, step):
qp.drawLine(i, 0, i, 5)
metrics = qp.fontMetrics()
fw = metrics.width(str(self.num[j]))
qp.drawText(i-fw/2, h/2, str(self.num[j]))
j = j + 1
class Example(QWidget):
    """Demo window: a slider drives the custom BurningWidget via a signal."""

    def __init__(self):
        super().__init__()

        self.initUI()

    def initUI(self):
        OVER_CAPACITY = 750

        # horizontal slider that supplies the gauge value
        slider = QSlider(Qt.Horizontal, self)
        slider.setFocusPolicy(Qt.NoFocus)
        slider.setRange(1, OVER_CAPACITY)
        slider.setValue(75)
        slider.setGeometry(30, 40, 150, 30)

        # signal carrier and the gauge widget; kept on self so that
        # changeValue() can reach them later
        self.c = Communicate()
        self.wid = BurningWidget()

        # wiring: slider -> changeValue -> updateBW signal -> gauge setValue
        self.c.updateBW[int].connect(self.wid.setValue)
        slider.valueChanged[int].connect(self.changeValue)

        gauge_row = QHBoxLayout()
        gauge_row.addWidget(self.wid)

        column = QVBoxLayout()
        column.addStretch(1)
        column.addLayout(gauge_row)

        self.setLayout(column)

        self.setGeometry(300, 300, 390, 210)
        self.setWindowTitle('Burning widget')
        self.show()

    def changeValue(self, value):
        # forward the new slider value and force an immediate repaint
        self.c.updateBW.emit(value)
        self.wid.repaint()
if __name__ == '__main__':
    # standard Qt boot sequence: create the application, build the window,
    # then hand control to the event loop until the user closes it
    # (stray dataset-extraction residue removed from the final line)
    app = QApplication(sys.argv)
    ex = Example()
    sys.exit(app.exec_())
de92928fdc42f1d53567caa21ba83ade98a1fd56 | 42,867 | py | Python | graynet/run_workflow.py | raamana/graynet | 383e21024e49f8e1a8dbef18eb2706cb1fef25a5 | [
"MIT"
] | 28 | 2017-10-09T10:34:13.000Z | 2021-12-15T14:38:45.000Z | graynet/run_workflow.py | raamana/graynet | 383e21024e49f8e1a8dbef18eb2706cb1fef25a5 | [
"MIT"
] | 30 | 2017-09-28T01:47:30.000Z | 2021-08-11T16:59:01.000Z | graynet/run_workflow.py | raamana/graynet | 383e21024e49f8e1a8dbef18eb2706cb1fef25a5 | [
"MIT"
] | 6 | 2018-05-04T14:59:35.000Z | 2022-02-08T04:24:04.000Z | __all__ = ['extract', 'roiwise_stats_indiv', 'cli_run']
import argparse
import logging
import pickle
import sys
import traceback
import warnings
from functools import partial
from multiprocessing import Manager, Pool
from pathlib import Path
from sys import version_info
import hiwenet
import networkx as nx
import numpy as np
from graynet.utils import (calc_roi_statistics, check_atlas, check_num_procs,
check_params_single_edge, check_stat_methods,
check_subjects, check_weight_params, check_weights,
import_features, mask_background_roi,
save_per_subject_graph, save_summary_stats,
stamp_experiment, stamp_expt_weight, warn_nan)
if version_info.major > 2:
from graynet import utils
from graynet.volumetric import extract_per_subject_volumetric, volumetric_roi_info
from graynet.parcellate import roi_labels_centroids
from graynet import config_graynet as cfg
from graynet import __version__
else:
raise NotImplementedError('graynet supports only Python 3. '
'Upgrade to Python 3.6 or higher is required.')
np.seterr(divide='ignore', invalid='ignore')
# logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def extract(subject_id_list,
            input_dir,
            base_feature=cfg.default_feature_single_edge,
            weight_method_list=cfg.default_weight_method,
            num_bins=cfg.default_num_bins,
            edge_range=cfg.default_edge_range,
            atlas=cfg.default_atlas,
            smoothing_param=cfg.default_smoothing_param,
            node_size=cfg.default_node_size,
            out_dir=None,
            return_results=False,
            num_procs=cfg.default_num_procs):
    """
    Extracts weighted networks (matrix of pair-wise ROI distances) from gray matter features based on Freesurfer processing.

    Parameters
    ----------
    subject_id_list : str or list
        must be path to a file containing subject IDs, or a list of subject IDs

    input_dir : str
        Path to the input directory where features can be read.
        For example, this can be Freesurfer's SUBJECTS_DIR, where output processing is stored.
        Or another directory with a structure that graynet can parse.

    base_feature : str
        Specific type of feature to read for each subject from the input directory.

    weight_method_list : string(s), optional
        Type of distance (or metric) to compute between the pair of histograms.
        It must be one of the following methods:

        - 'chebyshev'
        - 'chebyshev_neg'
        - 'chi_square'
        - 'correlate'
        - 'correlate_1'
        - 'cosine'
        - 'cosine_1'
        - 'cosine_2'
        - 'cosine_alt'
        - 'euclidean'
        - 'fidelity_based'
        - 'histogram_intersection'
        - 'histogram_intersection_1'
        - 'jensen_shannon'
        - 'kullback_leibler'
        - 'manhattan'
        - 'minowski'
        - 'noelle_1'
        - 'noelle_2'
        - 'noelle_3'
        - 'noelle_4'
        - 'noelle_5'
        - 'relative_bin_deviation'
        - 'relative_deviation'

        Note only the following are *metrics*:

        - 'manhattan'
        - 'minowski'
        - 'euclidean'
        - 'noelle_2'
        - 'noelle_4'
        - 'noelle_5'

        The following are *semi- or quasi-metrics*:

        - 'kullback_leibler'
        - 'jensen_shannon'
        - 'chi_square'
        - 'chebyshev'
        - 'cosine_1'
        - 'chebyshev_neg'
        - 'correlate_1'
        - 'histogram_intersection_1'
        - 'relative_deviation'
        - 'relative_bin_deviation'
        - 'noelle_1'
        - 'noelle_3'

        The following are classified to be similarity functions:

        - 'histogram_intersection'
        - 'correlate'
        - 'cosine'
        - 'cosine_2'
        - 'cosine_alt'
        - 'fidelity_based'

        *Default* choice: 'manhattan'.

    num_bins : int
        Number of histogram bins to use when computing pair-wise weights based on histogram distance. Default : 25

    edge_range : tuple or list
        The range of edges (two finite values) within which to build the histogram e.g. ``--edge_range 0 5``.
        This can be helpful (and important) to ensure correspondence across multiple invocations of graynet (e.g. for different subjects), in terms of range across all bins as well as individual bin edges.

        Default :

            - ( 0.0, 5.0) for ``freesurfer_thickness`` and
            - (-0.3, 0.3) for ``freesurfer_curv``.

    atlas : str
        Name of the atlas whose parcellation to be used.
        Choices for cortical parcellation: ['fsaverage', 'glasser2016'], which are primary cortical.
        Volumetric whole-brain atlases will be added soon.

    smoothing_param : scalar
        Smoothing parameter, which could be fwhm for Freesurfer cortical features,
        or another relevant for the chosen base_feature.
        Default: assumed as fwhm=10mm for the default feature choice 'thickness'

    node_size : scalar, optional
        Parameter to indicate the size of the ROIs, subparcels or patches, depending on type of atlas or feature.
        This feature is not implemented yet, just a placeholder and to enable default computation.

    out_dir : str or Path, optional
        Path to output directory to store results.
        Default: None, results are returned, but not saved to disk.
        If this is None, return_results must be true.

    return_results : bool
        Flag to indicate whether to return the results to be returned.
        This flag helps to reduce the memory requirements, when the number of nodes in a parcellation or
        the number of subjects or weight methods are large, as it doesn't retain results for all combinations,
        when running from commmand line interface (or HPC). Default: False
        If this is False, out_dir must be specified to save the results to disk.

    num_procs : int
        Number of parallel processes to use to speed up computation.

    Returns
    -------
    edge_weights_all : dict, None
        If return_results is True, this will be a dictionary keyed in by a tuple: (weight method, subject_ID)
        The value of each edge_weights_all[(weight method, subject_ID)] is
        a numpy array of length p = k*(k-1)/2, with k = number of nodes in the atlas parcellation.
        If return_results is False, this will be None, which is the default.
    """

    # All the checks must happen here, as this is the key function in the API
    check_params_single_edge(base_feature, input_dir, atlas, smoothing_param,
                             node_size, out_dir, return_results)
    atlas, atlas_name = check_atlas(atlas)

    subject_id_list, num_subjects, \
        max_id_width, nd_id = check_subjects(subject_id_list)

    num_bins, edge_range = check_weight_params(num_bins, edge_range)
    weight_method_list, num_weights, \
        max_wtname_width, nd_wm = check_weights(weight_method_list)

    num_procs = check_num_procs(num_procs)
    pretty_print_options = (max_id_width, nd_id, num_weights, max_wtname_width, nd_wm)

    # accept both str and pathlib.Path: the code below (and the per-subject
    # helpers) rely on the Path API (.exists(), .joinpath(), ...)
    if out_dir is not None:
        out_dir = Path(out_dir)

    print('\nProcessing {} features'.format(base_feature))

    if not return_results:
        if out_dir is None:
            raise ValueError('When return_results=False, out_dir must be specified '
                             'to be able to save the results.')
        # exist_ok makes a prior existence check redundant
        out_dir.mkdir(exist_ok=True, parents=True)

    # build a single-argument callable (subject ID) for the process pool,
    # dispatching on whether the feature is surface- or volume-based
    if base_feature in cfg.features_cortical:
        uniq_rois, centroids, roi_labels = roi_labels_centroids(atlas, node_size)
        partial_func_extract = partial(extract_per_subject_cortical, input_dir,
                                       base_feature, roi_labels, centroids,
                                       weight_method_list, atlas, atlas_name,
                                       smoothing_param, node_size, num_bins,
                                       edge_range, out_dir, return_results,
                                       pretty_print_options)
    elif base_feature in cfg.features_volumetric:
        uniq_rois, centroids, roi_labels = volumetric_roi_info(atlas)
        partial_func_extract = partial(extract_per_subject_volumetric,
                                       input_dir, base_feature, roi_labels,
                                       centroids, weight_method_list, atlas,
                                       atlas_name, smoothing_param, node_size,
                                       num_bins, edge_range, out_dir,
                                       return_results, pretty_print_options)
    else:
        # NOTE: original message had two placeholders but one format arg,
        # which raised IndexError instead of the intended error
        raise NotImplementedError('Chosen feature {} is not recognized as '
                                  'either cortical or volumetric! Choose one '
                                  'from the following options: {}'
                                  ''.format(base_feature, cfg.base_feature_list))

    chunk_size = int(np.ceil(num_subjects / num_procs))
    with Manager():
        with Pool(processes=num_procs) as pool:
            edge_weights_list_dicts = pool.map(partial_func_extract, subject_id_list,
                                               chunk_size)

    if return_results:
        edge_weights_all = dict()
        for combo in edge_weights_list_dicts:
            # each element from output of parallel loop is a dict keyed in
            # by (weight method, subject)
            edge_weights_all.update(combo)
    else:
        edge_weights_all = None

    print('\ngraynet computation done.')
    return edge_weights_all
def extract_per_subject_cortical(input_dir, base_feature, roi_labels, centroids,
                                 weight_method_list, atlas_spec, atlas_name,
                                 smoothing_param, node_size,
                                 num_bins, edge_range, out_dir, return_results,
                                 pretty_print_options, subject=None):
    # purposefully leaving subject parameter last to enable partial function creation
    """
    Extracts the requested set of edge weights for one subject (cortical features).

    Parameters
    ----------
    input_dir : str
        Directory the features are read from.
    base_feature : str
        Type of feature to read for the subject.
    roi_labels
        ROI label assignment for the parcellation vertices.
    centroids
        Mapping of ROI -> (x, y, z) centroid; attached to graph nodes for
        later visualization.
    weight_method_list : list
        Histogram distance methods to compute between each pair of ROIs.
    atlas_spec, atlas_name
        Atlas specification (name or path) and its display name.
    smoothing_param, node_size, num_bins, edge_range
        Feature smoothing and histogram-weight parameters.
    out_dir : Path
        Directory where the per-subject graph is saved.
    return_results : bool
        Whether to also accumulate and return the edge weights in memory.
    pretty_print_options : tuple
        Field widths and counts used only for progress printing.
    subject : str
        Subject ID; deliberately the last parameter so functools.partial
        can fix every other argument for the process pool.

    Returns
    -------
    dict or None
        {(weight_method, subject): weight_vector} when return_results is
        True; None otherwise (or when subject is None).
    """

    if subject is None:
        return

    print('')

    try:
        features = import_features(input_dir,
                                   [subject, ],
                                   base_feature,
                                   fwhm=smoothing_param,
                                   atlas=atlas_spec)
    except Exception:
        # narrowed from a bare except: KeyboardInterrupt/SystemExit propagate
        traceback.print_exc()
        warnings.warn('Unable to read {} features for {}\n Skipping it.'.format(
                base_feature, subject), UserWarning)
        return

    data, rois = mask_background_roi(features[subject], roi_labels, cfg.null_roi_name)

    max_id_width, nd_id, num_weights, max_wtname_width, nd_wm = pretty_print_options

    if return_results:
        edge_weights_all = dict()
    else:
        edge_weights_all = None

    for ww, weight_method in enumerate(weight_method_list):
        # unique stamp for each subject and weight
        expt_id = stamp_expt_weight(base_feature, atlas_name, smoothing_param,
                                    node_size, weight_method)
        sys.stdout.write(
                '\nProcessing {sid:{id_width}} -- weight {wm:{wtname_width}} '
                '({wc:{nd_wm}}/{nw:{nd_wm}}) :\n'
                ''.format(sid=subject, wm=weight_method, wc=ww + 1, nw=num_weights,
                          nd_id=nd_id, nd_wm=nd_wm, id_width=max_id_width,
                          wtname_width=max_wtname_width))

        # actual computation of pair-wise features
        try:
            graph = hiwenet.extract(data,
                                    rois,
                                    weight_method=weight_method,
                                    num_bins=num_bins,
                                    edge_range=edge_range,
                                    return_networkx_graph=True)

            # retrieving edge weights
            weight_vec = np.array(list(nx.get_edge_attributes(graph, 'weight').values()))
            warn_nan(weight_vec)

            # adding position info to nodes (for visualization later)
            for roi in centroids:
                graph.nodes[roi]['x'] = float(centroids[roi][0])
                graph.nodes[roi]['y'] = float(centroids[roi][1])
                graph.nodes[roi]['z'] = float(centroids[roi][2])

            if return_results:
                edge_weights_all[(weight_method, subject)] = weight_vec

            # saving to disk
            try:
                save_per_subject_graph(graph, out_dir, subject, expt_id)
            except Exception:
                raise IOError('Unable to save the network or vectorized weights '
                              'to:\n{}'.format(out_dir))

        except (RuntimeError, RuntimeWarning) as runexc:
            print(runexc)
        except KeyboardInterrupt:
            print('Exiting on keyboard interrupt! \n'
                  'Abandoning the remaining processing for {} weights:\n'
                  '{}.'.format(num_weights - ww, weight_method_list[ww:]))
            sys.exit(1)
        except Exception:
            print('Unable to extract {} features for {}'.format(weight_method, subject))
            traceback.print_exc()

    return edge_weights_all
def roiwise_stats_indiv(subject_id_list, input_dir,
                        base_feature=cfg.default_feature_single_edge,
                        chosen_roi_stats=cfg.default_roi_statistic,
                        atlas=cfg.default_atlas,
                        smoothing_param=cfg.default_smoothing_param,
                        node_size=cfg.default_node_size,
                        out_dir=None, return_results=False):
    """
    Computes the chosen summary statistics within each ROI.
    These summary stats (such as median) can serve as a baseline for network-level values produced by graynet.

    Options for summary statistics include 'median', 'entropy', 'kurtosis' and
    any other appropriate summary statistics listed under scipy.stats:
    https://docs.scipy.org/doc/scipy/reference/stats.html#statistical-functions

    Parameters
    ----------
    subject_id_list : str or list
        must be path to a file containing subject IDs, or a list of subject IDs

    input_dir : str
        Path to the input directory where features can be read.
        For example, this can be Freesurfer's SUBJECTS_DIR, where output processing is stored.
        Or another directory with a structure that graynet can parse.

    base_feature : str
        Specific type of feature to read for each subject from the input directory.

    chosen_roi_stats : list of str or callable
        If requested, graynet will compute chosen summary statistics (such as median) within each ROI of the chosen parcellation (and network weight computation is skipped).
        Default: 'median'. Supported summary statistics include 'median', 'mode', 'mean', 'std', 'gmean', 'hmean', 'variation',
        'entropy', 'skew' and 'kurtosis'.

        Other appropriate summary statistics listed under scipy.stats could used
        by passing in a callable with their parameters encapsulated:
        https://docs.scipy.org/doc/scipy/reference/stats.html#statistical-functions
        For example, if you would like to compute 3rd k-statistic, you could construct a callable and passing ``third_kstat`` as in the argument:

        .. code-block:: python

            third_kstat  = lambda array: scipy.stats.kstat(array, n = 3)
            roi_medians = roiwise_stats_indiv(subject_id_list, fs_dir, base_feature, chosen_measure = third_kstat,
                atlas, fwhm, out_dir=None, return_results=True)

        Other possible options could trimmed mean estimator with 5% outliers removed or 3rd k-statistic:

        .. code-block:: python

            trimmed_mean = lambda array: scipy.stats.trim_mean(array, proportiontocut = 0.05)
            third_kstat  = lambda array: scipy.stats.kstat(array, n = 3)

        Notes: 'hmean' requires all values be positive.

    atlas : str
        Name of the atlas whose parcellation to be used.
        Available choices for cortical parcellation: ['fsaverage', 'glasser2016'].
        Volumetric whole-brain atlases will be added soon.

    smoothing_param : scalar
        Smoothing parameter, which could be fwhm for Freesurfer cortical features,
        or another relevant for the chosen base_feature.
        Default: assumed as fwhm=10mm for the default feature choice 'thickness'

    node_size : scalar, optional
        Parameter to indicate the size of the ROIs, subparcels or patches, depending on type of atlas or feature.
        Not implemented.

    out_dir : str or Path
        Path to output directory to store results.
        The computed statistics are always saved to disk, so this must be
        specified; a ValueError is raised when it is None.

    return_results : bool
        Flag indicating whether to also keep the results in memory to return
        to the caller. Setting it to False saves memory when running from the
        command line interface (or HPC). Default: False.

    Returns
    -------
    roi_stats_all : dict, None
        If return_results is True, this will be a dictionary keyed in by subject_ID
        The value of each key roi_summary_all[subject] is
        a numpy array of length k, with k = number of nodes in the atlas parcellation.
        If return_results is False, this will be None, which is the default.
    """

    check_params_single_edge(base_feature, input_dir, atlas, smoothing_param,
                             node_size, out_dir, return_results)
    subject_id_list, num_subjects, max_id_width, nd_id = check_subjects(subject_id_list)
    stat_func_list, stat_func_names, num_stats, \
        max_stat_width, nd_st = check_stat_methods(chosen_roi_stats)

    if base_feature in cfg.features_cortical:
        uniq_rois, centroids, roi_labels = roi_labels_centroids(atlas)
        null_roi_to_be_ignored = cfg.null_roi_name
    elif base_feature in cfg.features_volumetric:
        uniq_rois, centroids, roi_labels = volumetric_roi_info(atlas)
        null_roi_to_be_ignored = cfg.null_roi_index
    else:
        raise ValueError('Unrecognized type of base_feature! Must be one of {}'
                         ''.format(cfg.base_feature_list))

    print('\nProcessing {} features resampled to {} atlas,'
          ' smoothed at {} with node size {}'.format(base_feature, atlas,
                                                     smoothing_param, node_size))

    if return_results:
        roi_stats_all = dict()
    else:
        roi_stats_all = None

    # summary stats are always written to disk, so an output folder is required
    if out_dir is None:
        raise ValueError('out_dir must be specified to save the summary '
                         'statistics to disk.')
    # accept both str and pathlib.Path
    out_dir = Path(out_dir)
    out_dir.mkdir(exist_ok=True, parents=True)

    for sub_idx, subject in enumerate(subject_id_list):

        try:
            features = import_features(input_dir, [subject, ], base_feature,
                                       atlas=atlas,
                                       fwhm=smoothing_param)
        except Exception:
            # narrowed from a bare except: KeyboardInterrupt/SystemExit propagate
            raise IOError(
                    'Unable to read {} features for {}\n'
                    ' Skipping it.'.format(base_feature, subject))

        data, rois = mask_background_roi(features[subject], roi_labels,
                                         null_roi_to_be_ignored)

        # guards against a NameError when every stat computation fails below
        roi_stats = None
        for ss, stat_func in enumerate(stat_func_list):
            sys.stdout.write(
                    '\nProcessing id {sid:{id_width}} '
                    '({sidnum:{nd_id}}/{numsub:{nd_id}}) -- '
                    'statistic {stname:{stat_name_width}} '
                    '({statnum:{nd_st}}/{numst:{nd_st}})'
                    ' :'.format(sid=subject, sidnum=sub_idx + 1, numsub=num_subjects,
                                stname=stat_func_names[ss], statnum=ss + 1, numst=num_stats,
                                id_width=max_id_width, stat_name_width=max_stat_width,
                                nd_id=nd_id, nd_st=nd_st))

            try:
                roi_stats = calc_roi_statistics(data, rois, uniq_rois, stat_func)
                expt_id_no_network = stamp_experiment(base_feature, stat_func_names[ss],
                                                      atlas, smoothing_param, node_size)
                save_summary_stats(roi_stats, uniq_rois, stat_func_names[ss], out_dir,
                                   subject, expt_id_no_network)
                sys.stdout.write('Done.')
            except KeyboardInterrupt:
                print('Exiting on keyboard interrupt! \n'
                      'Abandoning the remaining processing for {} stats:\n'
                      '{}.'.format(num_stats - ss, stat_func_names[ss:]))
                sys.exit(1)
            except Exception:
                traceback.print_exc()
                logging.debug(
                        'Error : unable to compute roi-wise {} for {}.'
                        ' Skipping it.'.format(stat_func_names[ss], subject))

        # NOTE: only the most recently computed statistic is retained per
        # subject (pre-existing behavior, preserved here)
        if return_results and roi_stats is not None:
            roi_stats_all[subject] = roi_stats

    return roi_stats_all
def cli_run():
    """Command line interface entry point for graynet."""

    subject_ids_path, input_dir, base_feature_list, \
        weight_method, do_multi_edge, summary_stats, multi_edge_range, \
        num_bins, edge_range, atlas, out_dir, node_size, smoothing_param, \
        roi_stats, num_procs, overwrite_results = parse_args()

    # save options to out folder for future reference / reproducibility
    try:
        user_opt = [subject_ids_path, input_dir, base_feature_list, weight_method,
                    do_multi_edge, summary_stats, multi_edge_range, num_bins, edge_range,
                    atlas, out_dir, node_size, smoothing_param, roi_stats, num_procs,
                    overwrite_results]
        with open(out_dir.joinpath('user_options.pkl'), 'wb') as of:
            pickle.dump(user_opt, of)
    except Exception:
        # best-effort only: failing to persist options must not stop the run,
        # but do not silently swallow SystemExit/KeyboardInterrupt either
        traceback.print_exc()

    # when run from CLI, results will not be received,
    # so no point in wasting memory maintaining a very big array
    return_results = False

    if do_multi_edge:
        # local import keeps the multi-edge machinery off the common path
        from graynet.multi_edge import extract_multiedge
        print('Computing multiple edges ... ')
        extract_multiedge(subject_ids_path, input_dir,
                          base_feature_list=base_feature_list,
                          weight_method_list=weight_method,
                          summary_stats=summary_stats,
                          num_bins=num_bins, edge_range_dict=multi_edge_range,
                          atlas=atlas, smoothing_param=smoothing_param,
                          node_size=node_size, out_dir=out_dir,
                          return_results=return_results, num_procs=num_procs,
                          overwrite_results=overwrite_results)
    else:
        base_feature = base_feature_list[0]
        if weight_method is not None:
            print('Computing single edge ... ')
            extract(subject_ids_path, input_dir,
                    base_feature=base_feature,
                    weight_method_list=weight_method,
                    num_bins=num_bins, edge_range=edge_range,
                    atlas=atlas, smoothing_param=smoothing_param,
                    node_size=node_size, out_dir=out_dir,
                    return_results=return_results, num_procs=num_procs)
        else:
            print('Computing ROI summary stats --'
                  ' skipping computation of any network weights.')
            roiwise_stats_indiv(subject_ids_path, input_dir, base_feature,
                                roi_stats, atlas, smoothing_param, node_size,
                                out_dir, return_results)

    return
def get_parser():
    """Build and return the argparse parser for the graynet CLI,
    with all arguments, their defaults and help text."""

    # --- help strings for every CLI option ---
    help_text_subject_ids = "Path to file containing list of subject IDs " \
                            "(one per line)"
    help_text_input_dir = "Path to a folder containing input data. It could, " \
                          "for example, be a Freesurfer SUBJECTS_DIR, if the " \
                          "chosen feature is from Freesurfer output."
    help_text_feature = "Type of feature to be used for analysis.\n\n" \
                        "Default: ``{}`` \n\nChoices: {}" \
                        "".format(cfg.default_feature_single_edge[0],
                                  ', '.join(cfg.base_feature_list))
    help_text_multi_edge = "Option to compute multiple edges between ROIs based on " \
                           "different features. Default False. If True, two valid " \
                           "features must be specified. Use --multi_edge_range to " \
                           "specify edge ranges for each feature to be processed."
    help_text_summary_stat = "Summary statistic [one or more] to compute on all " \
                             "the weights from multiple edges.This must be a " \
                             "string representing a method (like 'median', " \
                             "'prod' or 'max'), that is available as a member of " \
                             "numpy or scipy.stats."
    help_text_weight = "List of methods used to estimate the weight of the edge " \
                       "between the pair of nodes.\n\nDefault : {}.\n\n" \
                       "Available options:" \
                       "\n\n **histogram**-based: {}" \
                       "\n\n weights on **original** features : {}" \
                       "".format(cfg.default_weight_method[0],
                                 ', '.join(cfg.histogram_weights),
                                 ', '.join(cfg.weights_on_original_features))
    help_text_num_bins = "Number of bins used to construct the histogram within " \
                         "each ROI or group. " \
                         "Default : {}".format(cfg.default_num_bins)
    help_text_edge_range = "The range of edges (two finite values) within which to " \
                           "bin the given values e.g. ``--edge_range 0.0 5.0`` " \
                           ".Setting this is *crucial* to ensure correspondence " \
                           "across multiple invocations of graynet, for different " \
                           "subjects, in terms of range across all bins as well as " \
                           "individual bin edges.\n\n" \
                           "Default : {}, to automatically compute from the given values." \
                           "".format(cfg.default_edge_range)
    help_text_multi_edge_range = """Set of edge ranges (for each of the features)
    within which to bin the given values - see above. For example, \n\n
    ``-f freesurfer_thickness freesurfer_curv --edge_range 0.0 5.0 -0.3 +0.3``\n\n
    will set the a range of [0.0, 5.0] for thickness and [-0.3, 0.3] for curv.\n\n
    Default : {}.""".format(cfg.edge_range_predefined)

    help_text_roi_stats = "Option to compute summary statistics within each ROI of " \
                          "the chosen parcellation. These statistics (such as the " \
                          "median) can serve as a baseline for network-level " \
                          "values produced by graynet. Options for summary " \
                          "statistics include 'median', 'entropy', 'kurtosis' and " \
                          "any other appropriate summary statistics listed under " \
                          "scipy.stats: " \
                          "https://docs.scipy.org/doc/scipy/reference/stats.html" \
                          "#statistical-functions . When this option is chosen, " \
                          "network computation is not allowed. You need to compute " \
                          "networks and ROI stats separately."
    help_text_atlas = "Name or path to atlas to containing the parcellation of " \
                      "ROIs.\nFor cortical features, you can also specify the " \
                      "absolute path for the Freesurfer parcellation of that " \
                      "atlas. This directory must have the standard Freesurfer " \
                      "structure, with the following key files that must exist: " \
                      "``label/?h.aparc.annot`` and " \
                      "``surf/?h.orig``.\n\n*Cortical* atlases supported: " \
                      "``fsaverage`` and ``glasser2016``. In addition, you can " \
                      "also specify an absolute path to the Freesurfer processing " \
                      "of any arbitrary atlas. Read these instructions before " \
                      "trying: https://raamana.github.io/graynet/cortical.html" \
                      "#using-a-different-atlas\n\n\n*Volumetric* atlases " \
                      "supported for CAT12 features: ``cat_aal``, ``cat_lpba40``, " \
                      "and ``cat_ibsr``.In addition, you can also directly specify " \
                      "an absolute path to a single 3D volume. Make sure name this " \
                      "file properly as itwould be used to encode all the " \
                      "processing i.e. make it clean as well as fully reflective " \
                      "of the properties of the parcellation inside.\n\nDefault: " \
                      "``{}``".format(cfg.default_atlas)
    help_text_parc_size = "Size of individual patch for the atlas parcellation, " \
                          "in terms of minimum number of vertices per patch. " \
                          "This is *ONLY* valid for cortical version of graynet, " \
                          "when using atlas fsaverage only, that has precomputed " \
                          "parcellation for a set of predetermined patch sizes.\n" \
                          "\nAllowed values: (None, 250, 500, 1000, 2000, 3000, " \
                          "5000, 10000). \n" \
                          "\nDefault : {}".format(cfg.default_node_size)
    help_text_smoothing = "Smoothing parameter for feature. " \
                          "Default: FWHM of {} for Freesurfer thickness." \
                          "".format(cfg.default_smoothing_param)
    help_text_num_procs = "Number of CPUs to use in parallel to speed up " \
                          "processing. " \
                          "Default : {}, capping at available number of CPUs in " \
                          "the processing node.".format(cfg.default_num_procs)
    help_text_overwrite_results = "Flag to request overwriting of existing " \
                                  "results, in case of reruns/failed jobs. " \
                                  "By default, if the expected output file exists " \
                                  "and is of non-zero size, " \
                                  "its computation is skipped (assuming the file " \
                                  "is complete, usable and not corrupted)."

    parser = argparse.ArgumentParser(prog="graynet")

    # --- core input/output arguments ---
    parser.add_argument("-s", "--subject_ids_path", action="store",
                        dest="subject_ids_path",
                        required=False, default=None,
                        help=help_text_subject_ids)

    parser.add_argument("-i", "--input_dir", action="store", dest="input_dir",
                        required=True, help=help_text_input_dir)

    parser.add_argument("-f", "--feature", action="store",
                        dest="features",
                        nargs='*',
                        default=cfg.default_feature_single_edge, required=False,
                        help=help_text_feature)

    parser.add_argument("-o", "--out_dir", action="store", dest="out_dir",
                        default=None, required=False,
                        help="Where to save the extracted features. ")

    # --- mutually-exclusive-in-practice computation modes (validated in
    # parse_args, not enforced here) ---
    method_selector = parser.add_argument_group(
            title='Type of computation',
            description='Choose one among single edge, multiedge or simply ROI ' \
                        'stats.')
    # method_selector = parser.add_argument_group(required=True)
    method_selector.add_argument("-w", "--weight_method", action="store",
                                 dest="weight_methods",
                                 nargs='*',
                                 default=None, required=False, help=help_text_weight)

    method_selector.add_argument("-r", "--roi_stats", action="store",
                                 dest="roi_stats",
                                 nargs='*', default=None, help=help_text_roi_stats)

    method_selector.add_argument("-m", "--do_multi_edge", action="store_true",
                                 dest="do_multi_edge",
                                 default=False, required=False,
                                 help=help_text_multi_edge)

    # --- histogram weight parameters ---
    method_params = parser.add_argument_group(title='Weight parameters',
                                              description='Parameters relevant to '
                                                          'histogram edge weight '
                                                          'calculations')
    method_params.add_argument("-b", "--num_bins", action="store", dest="num_bins",
                               default=cfg.default_num_bins, required=False,
                               help=help_text_num_bins)

    method_params.add_argument("-e", "--edge_range", action="store",
                               dest="edge_range",
                               default=cfg.default_edge_range,
                               required=False, #TODO perhaps make this required?
                               # to ensure users compute it from the entire dataset!
                               nargs=2, metavar=('min', 'max'),
                               help=help_text_edge_range)

    # --- multi-edge specific parameters ---
    multiedge_args = parser.add_argument_group(title='Multi-edge',
                                               description='Parameters related to '
                                                           'computation of '
                                                           'multiple edges')
    multiedge_args.add_argument("-t", "--summary_stat", action="store",
                                dest="summary_stat",
                                nargs='*',
                                default=cfg.multi_edge_summary_func_default,
                                required=False,
                                help=help_text_summary_stat)

    multiedge_args.add_argument("-l", "--multi_edge_range", action="store",
                                dest="multi_edge_range",
                                default=None, required=False, metavar=('min max'),
                                nargs='*', help=help_text_multi_edge_range)

    # --- atlas / parcellation parameters ---
    atlas_params = parser.add_argument_group(title='Atlas',
                                             description="Parameters describing "
                                                         "the atlas, "
                                                         "its parcellation and any "
                                                         "smoothing of features.")
    atlas_params.add_argument("-a", "--atlas", action="store", dest="atlas",
                              default=cfg.default_atlas, required=False,
                              help=help_text_atlas)

    atlas_params.add_argument("-n", "--node_size", action="store", dest="node_size",
                              default=cfg.default_node_size, required=False,
                              help=help_text_parc_size)

    atlas_params.add_argument("-p", "--smoothing_param", action="store",
                              dest="smoothing_param",
                              default=cfg.default_smoothing_param, required=False,
                              help=help_text_smoothing)

    # --- parallelization and rerun behavior ---
    computing_params = parser.add_argument_group(title='Computing',
                                                 description='Options related to '
                                                             'computing and '
                                                             'parallelization.')
    computing_params.add_argument('-c', '--num_procs', action='store',
                                  dest='num_procs',
                                  default=cfg.default_num_procs, required=False,
                                  help=help_text_num_procs)

    computing_params.add_argument('-d', '--overwrite_results', action='store_true',
                                  dest='overwrite_results',
                                  required=False, help=help_text_overwrite_results)

    computing_params.add_argument('-v', '--version', action='version',
                                  version='%(prog)s {version}'.format(
                                      version=__version__))

    return parser
def parse_args():
    """Parser/validator for the cmd line args.

    Returns the fully validated tuple of options consumed by cli_run().
    Raises ValueError/IOError on invalid or inconsistent user input.
    """

    parser = get_parser()

    if len(sys.argv) < 2:
        parser.print_help()
        print('\nToo few arguments!')
        parser.exit(1)

    # parsing
    try:
        params = parser.parse_args()
    except Exception as exc:
        print(exc)
        raise ValueError('Unable to parse command-line arguments.')

    feature_list = utils.check_features(params.features)

    input_dir = Path(params.input_dir).resolve()
    if not input_dir.exists():
        raise IOError("Given input directory doesn't exist!")

    out_dir = params.out_dir
    if out_dir is not None:
        out_dir = Path(out_dir).resolve()
    else:
        out_dir = input_dir / "graynet"

    out_dir.mkdir(exist_ok=True, parents=True)

    # allowing auto population of subject IDs for freesurfer directory
    sub_id_list_path = params.subject_ids_path
    if sub_id_list_path is None:
        # this is allowed only when all features are freesurfer-related only
        for feat in feature_list:
            if feat not in cfg.features_freesurfer:
                raise ValueError("Path to subject ID list must be specified "
                                 "when non-Freesurfer features are being processed!")

        # get all IDs in Freesurfer $SUBJECTS_DIR that are folders with surf subdir
        # BUGFIX: keep plain str names -- str.join() over Path objects raises
        # a TypeError, which broke the auto-generated ID list below
        id_list = [sub_id.name for sub_id in input_dir.iterdir()
                   if (sub_id.is_dir() and sub_id.joinpath('surf').is_dir())]
        if len(id_list) < 1:
            raise ValueError('Given Freesurfer folder does not any subjects:\n{}'
                             ''.format(input_dir))

        # write to a file in out folder
        try:
            sub_id_list_path = input_dir / 'id_list_freesurfer_graynet.txt'
            with open(sub_id_list_path, 'w') as idlf:
                idlf.write('\n'.join(id_list))
        except Exception:
            raise IOError('Unable to write auto generated id list (n={}) to disk'
                          ' to\n {}'.format(len(id_list), sub_id_list_path))
    else:
        sub_id_list_path = Path(params.subject_ids_path).resolve()
        if not sub_id_list_path.exists():
            raise IOError("Given subject IDs file doesn't exist.")

    do_multi_edge = bool(params.do_multi_edge)
    summary_stat = params.summary_stat
    # BUGFIX: np.array(None, dtype=float) yields a 0-d nan array, which
    # defeats the "is not None" check below and makes len() fail;
    # convert only when ranges were actually supplied
    if params.multi_edge_range is not None:
        multi_edge_range = np.array(params.multi_edge_range, dtype=float)
    else:
        multi_edge_range = None

    multi_edge_range_out = None
    if do_multi_edge:
        # ensure atleast two features
        num_features = len(feature_list)
        if num_features < 2:
            raise ValueError('To enable multi-edge computation, specify atleast '
                             'two valid features.')

        if multi_edge_range is not None:
            nvals_per_feat = 2
            if len(multi_edge_range) != nvals_per_feat * num_features:
                raise ValueError(
                        'Insufficient specification of edge ranges for multiple features!'
                        '\nNeeded : {} exactly, given : {}'
                        ''.format(nvals_per_feat * num_features, len(multi_edge_range)))
            # split the flat list of min/max values into one pair per feature
            indiv_ranges = np.split(multi_edge_range,
                                    range(nvals_per_feat, len(multi_edge_range),
                                          nvals_per_feat))

            multi_edge_range_out = dict()
            for ix, feat in enumerate(feature_list):
                multi_edge_range_out[feat] = indiv_ranges[ix]

        utils.check_stat_methods(summary_stat)
    else:
        summary_stat = None
        if len(feature_list) > 1:
            raise ValueError('For single edge computation, '
                             'only one feature can be specified.')

    # validating choices and doing only one of the two
    weight_methods = params.weight_methods
    roi_stats = params.roi_stats
    if weight_methods is not None:
        weight_method_list, _, _, _ = check_weights(weight_methods)
        if roi_stats is not None:
            print('ROI stats requested with network weights computation - not allowed.')
            sys.exit(1)
        roi_stats = None
    elif roi_stats is not None:
        roi_stats, _, _, _, _ = check_stat_methods(roi_stats)
        weight_method_list = None
    else:
        raise ValueError('One of weight_method and roi_stats must be chosen.')

    if params.node_size is not None:
        node_size = int(params.node_size)
    else:
        node_size = None

    print('\nData resampled to {} atlas, '
          ' smoothed at {} with node size {}'
          ''.format(params.atlas, params.smoothing_param, params.node_size))

    atlas_spec, _ = check_atlas(params.atlas)

    # num_procs will be validated inside in the functions using it.

    # TODO should we check atlas compatibility with data for two subjects randomly?
    # load data for subjects, check atlas parcellation is compatible in size with data

    return sub_id_list_path, input_dir, \
           feature_list, weight_method_list, \
           do_multi_edge, summary_stat, multi_edge_range_out, \
           params.num_bins, params.edge_range, \
           atlas_spec, out_dir, node_size, params.smoothing_param, roi_stats, \
           params.num_procs, params.overwrite_results
if __name__ == '__main__':
    # allow running this module directly as a script
    cli_run()
| 45.506369 | 205 | 0.582453 |
6ea0e10f0f6e932c6f338b908c6014edf0384f92 | 1,262 | py | Python | domains/nav/problems/training/problem2102_SD.py | patras91/rae_release | 0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30 | [
"BSD-3-Clause"
] | 1 | 2021-09-28T12:56:56.000Z | 2021-09-28T12:56:56.000Z | domains/nav/problems/training/problem2102_SD.py | patras91/rae_release | 0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30 | [
"BSD-3-Clause"
] | null | null | null | domains/nav/problems/training/problem2102_SD.py | patras91/rae_release | 0e5faffb7eb732fdb8e3bbf2c6d2f2cbd520aa30 | [
"BSD-3-Clause"
] | 1 | 2022-03-31T16:30:39.000Z | 2022-03-31T16:30:39.000Z | __author__ = 'patras'
from domain_springDoor import *
from timer import DURATION
from state import state, rv
# Per-action durations used by the timer when actions consume simulated time.
# Keys are action names of the spring-door domain; values are time units.
DURATION.TIME = {
    'unlatch1': 5,
    'unlatch2': 5,
    'holdDoor': 2,
    'passDoor': 3,
    'releaseDoor': 2,
    'closeDoors': 3,
    'move': 7,
    'take': 2,
    'put': 2,
}
# Counter-based durations for the same actions; mirrors DURATION.TIME above.
# NOTE(review): TIME and COUNTER are identical here — presumably two timing
# modes of the simulator; confirm against the timer module.
DURATION.COUNTER = {
    'unlatch1': 5,
    'unlatch2': 5,
    'holdDoor': 2,
    'passDoor': 3,
    'releaseDoor': 2,
    'closeDoors': 3,
    'move': 7,
    'take': 2,
    'put': 2,
}
# Static world layout for this problem instance.
rv.LOCATIONS = [1, 2, 3, 4, 5, 6, 7]
# Adjacency map: location -> list of directly reachable neighbour locations.
rv.EDGES = {1: [7], 2: [6, 7], 3: [7], 4: [5], 5: [4, 6, 7], 6: [2, 5], 7: [1, 2, 3, 5]}
rv.DOORS = ['d1', 'd2', 'd3']
# Edge (as a location pair) -> door blocking that edge.
rv.DOORLOCATIONS = {(1, 7): 'd1', (2, 7): 'd2', (5, 6): 'd3'}
# Ground-truth door types; robots start not knowing these (see state.doorType).
rv.DOORTYPES = {'d1': 'spring', 'd2': 'spring', 'd3': 'spring'}
rv.ROBOTS = ['r1', 'r2', 'r3', 'r4']
def ResetState():
    """Reset the shared world state to this problem's initial configuration."""
    robots = ('r1', 'r2', 'r3', 'r4')
    doors = ('d1', 'd2', 'd3')
    # No robot is carrying anything and all are idle.
    state.load = dict.fromkeys(robots, NIL)
    state.status = dict.fromkeys(robots, 'free')
    # Starting locations of robots and the single object.
    state.loc = dict(zip(robots, (1, 7, 5, 4)))
    state.pos = {'o1': 3}
    # All doors start closed; their types are unknown to the agents.
    state.doorStatus = dict.fromkeys(doors, 'closed')
    state.doorType = dict.fromkeys(doors, UNK)
# Task schedule: maps an integer (presumably the release/arrival time of the
# task — TODO confirm against the test harness) to the list of task
# invocations issued at that point.
tasks = {
    9: [['fetch', 'r1', 'o1', 6]],
    12: [['collision', 'r1']],
}
# No exogenous environment events in this problem instance.
eventsEnv = {
}
a202f803412f25b7b7f88930879a5fa3b2a0c000 | 1,489 | py | Python | tripcore/inventory/forms.py | robotlightsyou/trip | 5a58babe399febb476cfb42a530ead20937597fd | [
"MIT"
] | null | null | null | tripcore/inventory/forms.py | robotlightsyou/trip | 5a58babe399febb476cfb42a530ead20937597fd | [
"MIT"
] | null | null | null | tripcore/inventory/forms.py | robotlightsyou/trip | 5a58babe399febb476cfb42a530ead20937597fd | [
"MIT"
] | null | null | null | from django import forms
from .models import Fixture, Fixture_Type, Source, Source_Type, Manufacturer
class FixtureForm(forms.ModelForm):
    """ModelForm for Fixture with dependent dropdowns.

    ``fixture_type`` choices depend on the selected ``manufacturer`` and
    ``source_type`` choices depend on the selected ``source``.  On a blank
    form both dependent fields start with an empty queryset; once a parent
    value is known (from submitted form data, or from an existing bound
    instance) the child queryset is narrowed to the matching rows.
    """

    class Meta:
        model = Fixture
        fields = ['owner', 'date_added', 'last_rented', 'last_sickbay',
                  'last_service', 'manufacturer', 'fixture_type',
                  'source', 'source_type']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Start with no choices; they are narrowed below when the parent
        # field has a value (standard dependent-dropdown pattern).
        self.fields['source_type'].queryset = Source_Type.objects.none()
        self.fields['fixture_type'].queryset = Fixture_Type.objects.none()
        self._narrow_child_queryset(
            child_field='fixture_type', parent_field='manufacturer',
            child_model=Fixture_Type, fk_name='manufacturer_id',
            related_name='fixture_type_set')
        self._narrow_child_queryset(
            child_field='source_type', parent_field='source',
            child_model=Source_Type, fk_name='source_id',
            related_name='source_type_set')

    def _narrow_child_queryset(self, child_field, parent_field,
                               child_model, fk_name, related_name):
        """Narrow ``child_field``'s queryset to children of the chosen parent.

        Submitted form data takes precedence; when editing an existing
        instance we fall back to the instance's related set.  A missing or
        non-integer submitted parent value leaves the queryset empty.
        """
        if parent_field in self.data:
            try:
                parent_id = int(self.data.get(parent_field))
            except (TypeError, ValueError):
                # Invalid/empty parent value: keep the empty queryset.
                return
            self.fields[child_field].queryset = child_model.objects.filter(
                **{fk_name: parent_id}).order_by('name')
        elif self.instance.pk:
            parent = getattr(self.instance, parent_field)
            related = getattr(parent, related_name)
            self.fields[child_field].queryset = related.order_by('name')
| 43.794118 | 132 | 0.626595 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.