hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f73588a79f039b8db733da153b11843e379f5638 | 38,694 | py | Python | test/functional/test_framework/messages.py | POPChainFoundation/Popchain | 4106c0eab2fe437ae8d12e9f8697e1b6a5bebeaa | [
"MIT"
] | 1 | 2019-05-13T01:58:35.000Z | 2019-05-13T01:58:35.000Z | test/functional/test_framework/messages.py | POPCHAINFoundation/POPCHAIN | 4106c0eab2fe437ae8d12e9f8697e1b6a5bebeaa | [
"MIT"
] | null | null | null | test/functional/test_framework/messages.py | POPCHAINFoundation/POPCHAIN | 4106c0eab2fe437ae8d12e9f8697e1b6a5bebeaa | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin test framework primitive and message strcutures
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
bitcoin/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization."""
from codecs import encode
import copy
import hashlib
from io import BytesIO
import random
import socket
import struct
import time
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str
MIN_VERSION_SUPPORTED = 60001
MY_VERSION = 70914 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_INV_SZ = 50000
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
NODE_NETWORK = (1 << 0)
# NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
# Serialization/deserialization tools
def sha256(s):
    """Return the SHA-256 digest of the bytes *s*."""
    return hashlib.sha256(s).digest()
def ripemd160(s):
    # RIPEMD-160 digest of the bytes s.
    # NOTE(review): depends on hashlib/OpenSSL exposing 'ripemd160'; on
    # OpenSSL 3 this may require the legacy provider -- confirm in the
    # target environment.
    return hashlib.new('ripemd160', s).digest()
def hash256(s):
    """Return the double-SHA-256 digest of *s* (Bitcoin's standard hash)."""
    first_round = hashlib.sha256(s).digest()
    return hashlib.sha256(first_round).digest()
def ser_compact_size(l):
    """Serialize the integer *l* in Bitcoin's CompactSize encoding."""
    if l < 253:
        return struct.pack("B", l)
    if l < 0x10000:
        return struct.pack("<BH", 253, l)
    if l < 0x100000000:
        return struct.pack("<BI", 254, l)
    return struct.pack("<BQ", 255, l)
def deser_compact_size(f):
    """Read a CompactSize-encoded integer from the stream *f*."""
    marker = struct.unpack("<B", f.read(1))[0]
    # Markers 253/254/255 announce a wider little-endian integer.
    widths = {253: ("<H", 2), 254: ("<I", 4), 255: ("<Q", 8)}
    if marker in widths:
        fmt, nbytes = widths[marker]
        return struct.unpack(fmt, f.read(nbytes))[0]
    return marker
def deser_string(f):
    """Read a CompactSize-length-prefixed byte string from *f*."""
    length = deser_compact_size(f)
    return f.read(length)
def ser_string(s):
    """Serialize the byte string *s* with a CompactSize length prefix."""
    prefix = ser_compact_size(len(s))
    return prefix + s
def deser_uint256(f):
    """Read a 256-bit little-endian integer from *f* as eight 32-bit words."""
    value = 0
    for word_index in range(8):
        word = struct.unpack("<I", f.read(4))[0]
        value |= word << (32 * word_index)
    return value
def ser_uint256(u):
    """Serialize the integer *u* as 32 little-endian bytes (8 x 32-bit words)."""
    words = []
    for _ in range(8):
        words.append(struct.pack("<I", u & 0xFFFFFFFF))
        u >>= 32
    return b"".join(words)
def ser_uint64(u):
    """Serialize the integer *u* as 8 little-endian bytes (2 x 32-bit words)."""
    words = []
    for _ in range(2):
        words.append(struct.pack("<I", u & 0xFFFFFFFF))
        u >>= 32
    return b"".join(words)
def uint256_from_str(s):
    """Interpret the first 32 bytes of *s* as a little-endian 256-bit integer."""
    words = struct.unpack("<IIIIIIII", s[:32])
    total = 0
    for pos, word in enumerate(words):
        total |= word << (32 * pos)
    return total
def uint256_from_compact(c):
    """Expand a compact-bits target (nBits) into the full 256-bit target.

    Top byte is the size in bytes, low 3 bytes the mantissa.
    """
    exponent = (c >> 24) & 0xFF
    mantissa = c & 0xFFFFFF
    return mantissa << (8 * (exponent - 3))
def deser_vector(f, c):
    """Read a CompactSize-prefixed vector of objects of class *c* from *f*.

    Each element is constructed with c() and filled via its deserialize().
    """
    count = deser_compact_size(f)
    items = []
    for _ in range(count):
        obj = c()
        obj.deserialize(f)
        items.append(obj)
    return items
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
    """Serialize the list *l* with a CompactSize count prefix.

    ser_function_name optionally names an alternate per-entry serializer
    method (used e.g. for a block's transaction vector).
    """
    parts = [ser_compact_size(len(l))]
    for item in l:
        if ser_function_name:
            parts.append(getattr(item, ser_function_name)())
        else:
            parts.append(item.serialize())
    return b"".join(parts)
def deser_uint256_vector(f):
    """Read a CompactSize-prefixed vector of uint256 values from *f*."""
    count = deser_compact_size(f)
    return [deser_uint256(f) for _ in range(count)]
def ser_uint256_vector(l):
    """Serialize a list of uint256 ints with a CompactSize count prefix."""
    body = b"".join(ser_uint256(u) for u in l)
    return ser_compact_size(len(l)) + body
def deser_string_vector(f):
    """Read a CompactSize-prefixed vector of byte strings from *f*."""
    count = deser_compact_size(f)
    return [deser_string(f) for _ in range(count)]
def ser_string_vector(l):
    """Serialize a list of byte strings with a CompactSize count prefix."""
    body = b"".join(ser_string(sv) for sv in l)
    return ser_compact_size(len(l)) + body
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
    # Fill obj from a hex string (e.g. as returned by RPC) by calling its
    # deserialize() on the decoded bytes; mutates obj and returns it.
    obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
    return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
    # Hex-encode obj's serialize() output (e.g. for submission via RPC).
    return bytes_to_hex_str(obj.serialize())
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress():
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv():
    """Inventory vector entry: an object (type, hash) pair."""

    typemap = {
        0: "Error",
        1: "TX",
        2: "Block",
    }

    def __init__(self, t=0, h=0):
        self.type = t
        self.hash = h

    def deserialize(self, f):
        self.type = struct.unpack("<i", f.read(4))[0]
        self.hash = deser_uint256(f)

    def serialize(self):
        return struct.pack("<i", self.type) + ser_uint256(self.hash)

    def __repr__(self):
        return "CInv(type=%s hash=%064x)" \
            % (self.typemap[self.type], self.hash)
class CBlockLocator():
    """Block locator: protocol version plus a list of block hashes (vHave)."""

    def __init__(self):
        self.nVersion = MY_VERSION
        self.vHave = []

    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.vHave = deser_uint256_vector(f)

    def serialize(self):
        return struct.pack("<i", self.nVersion) + ser_uint256_vector(self.vHave)

    def __repr__(self):
        return "CBlockLocator(nVersion=%i vHave=%s)" \
            % (self.nVersion, repr(self.vHave))
class COutPoint():
    """Reference to a transaction output: (txid as int, output index n)."""

    def __init__(self, hash=0, n=0):
        self.hash = hash
        self.n = n

    def deserialize(self, f):
        self.hash = deser_uint256(f)
        self.n = struct.unpack("<I", f.read(4))[0]

    def serialize(self):
        return ser_uint256(self.hash) + struct.pack("<I", self.n)

    def __repr__(self):
        return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn():
    """Transaction input: prevout reference, unlocking script and sequence."""

    def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
        # Default to a null prevout when none is supplied.
        self.prevout = COutPoint() if outpoint is None else outpoint
        self.scriptSig = scriptSig
        self.nSequence = nSequence

    def deserialize(self, f):
        self.prevout = COutPoint()
        self.prevout.deserialize(f)
        self.scriptSig = deser_string(f)
        self.nSequence = struct.unpack("<I", f.read(4))[0]

    def serialize(self):
        parts = [
            self.prevout.serialize(),
            ser_string(self.scriptSig),
            struct.pack("<I", self.nSequence),
        ]
        return b"".join(parts)

    def __repr__(self):
        return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
            % (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
               self.nSequence)
class CTxOut():
    """Transaction output: value (in base units) and locking script."""

    def __init__(self, nValue=0, scriptPubKey=b""):
        self.nValue = nValue
        self.scriptPubKey = scriptPubKey

    def deserialize(self, f):
        self.nValue = struct.unpack("<q", f.read(8))[0]
        self.scriptPubKey = deser_string(f)

    def serialize(self):
        return struct.pack("<q", self.nValue) + ser_string(self.scriptPubKey)

    def __repr__(self):
        return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
            % (self.nValue // COIN, self.nValue % COIN,
               bytes_to_hex_str(self.scriptPubKey))
class CTransaction():
    """A transaction: nVersion, vin, vout, nLockTime.

    This chain carries no segwit data, so the "with witness" serialization
    is identical to the base one; serialize_with_witness() exists because
    generic callers (PrefilledTransaction, msg_witness_tx,
    CBlock.serialize(with_witness=True)) invoke it by name.
    """
    def __init__(self, tx=None):
        if tx is None:
            self.nVersion = 1
            self.vin = []
            self.vout = []
            self.nLockTime = 0
            self.sha256 = None  # cached txid as an integer
            self.hash = None    # cached txid as a hex string
        else:
            # Copy constructor: deep-copy the mutable vectors so the copy
            # can be modified independently of the original.
            self.nVersion = tx.nVersion
            self.vin = copy.deepcopy(tx.vin)
            self.vout = copy.deepcopy(tx.vout)
            self.nLockTime = tx.nLockTime
            self.sha256 = tx.sha256
            self.hash = tx.hash
    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.vin = deser_vector(f, CTxIn)
        flags = 0
        if len(self.vin) == 0:
            # Segwit-style marker: an empty vin is followed by a flags byte,
            # after which the real vin/vout follow.
            flags = struct.unpack("<B", f.read(1))[0]
            # Not sure why flags can't be zero, but this
            # matches the implementation in bitcoind
            if (flags != 0):
                self.vin = deser_vector(f, CTxIn)
                self.vout = deser_vector(f, CTxOut)
        else:
            self.vout = deser_vector(f, CTxOut)
        self.nLockTime = struct.unpack("<I", f.read(4))[0]
        # Invalidate the cached hashes; they are recomputed on demand.
        self.sha256 = None
        self.hash = None
    def serialize_without_witness(self):
        """Serialize in the base (non-witness) format."""
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += ser_vector(self.vin)
        r += ser_vector(self.vout)
        r += struct.pack("<I", self.nLockTime)
        return r
    def serialize_with_witness(self):
        """Witness serialization; identical to the base format here.

        Added because PrefilledTransaction.serialize, msg_witness_tx and
        CBlock.serialize(with_witness=True) call this method by name and
        would otherwise raise AttributeError.
        """
        return self.serialize_without_witness()
    # Regular serialization is with witness -- must explicitly
    # call serialize_without_witness to exclude witness data.
    def serialize(self):
        return self.serialize_without_witness()
    # Recalculate the txid (transaction hash without witness)
    def rehash(self):
        self.sha256 = None
        self.calc_sha256()
    # We will only cache the serialization without witness in
    # self.sha256 and self.hash -- those are expected to be the txid.
    def calc_sha256(self, with_witness=False):
        if self.sha256 is None:
            self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
            self.hash = encode(hash256(self.serialize_without_witness())[::-1], 'hex_codec').decode('ascii')
    def is_valid(self):
        """Cheap sanity check: every output value within [0, 21M * COIN]."""
        self.calc_sha256()
        for tout in self.vout:
            if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
                return False
        return True
    def __repr__(self):
        return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" \
            % (self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime)
class CBlockHeader():
    """Block header: version, prev-hash, merkle root, time, bits, nonce,
    plus this chain's extra nAccumulatorCheckpoint (a uint256).

    Caches the header hash in self.sha256/self.hash; call rehash() after
    mutating any field.
    """
    def __init__(self, header=None):
        if header is None:
            self.set_null()
        else:
            # Copy constructor; all fields are scalars so plain assignment
            # is sufficient.
            self.nVersion = header.nVersion
            self.hashPrevBlock = header.hashPrevBlock
            self.hashMerkleRoot = header.hashMerkleRoot
            self.nTime = header.nTime
            self.nBits = header.nBits
            self.nNonce = header.nNonce
            self.nAccumulatorCheckpoint = header.nAccumulatorCheckpoint
            self.sha256 = header.sha256
            self.hash = header.hash
            self.calc_sha256()
    def set_null(self):
        # Default state: version 4, all other fields zeroed, hash caches empty.
        self.nVersion = 4
        self.hashPrevBlock = 0
        self.hashMerkleRoot = 0
        self.nTime = 0
        self.nBits = 0
        self.nNonce = 0
        self.nAccumulatorCheckpoint = 0
        self.sha256 = None
        self.hash = None
    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        self.hashPrevBlock = deser_uint256(f)
        self.hashMerkleRoot = deser_uint256(f)
        self.nTime = struct.unpack("<I", f.read(4))[0]
        self.nBits = struct.unpack("<I", f.read(4))[0]
        self.nNonce = struct.unpack("<I", f.read(4))[0]
        self.nAccumulatorCheckpoint = deser_uint256(f)
        # Invalidate cached hashes; recomputed on demand.
        self.sha256 = None
        self.hash = None
    def serialize(self):
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += ser_uint256(self.hashPrevBlock)
        r += ser_uint256(self.hashMerkleRoot)
        r += struct.pack("<I", self.nTime)
        r += struct.pack("<I", self.nBits)
        r += struct.pack("<I", self.nNonce)
        r += ser_uint256(self.nAccumulatorCheckpoint)
        return r
    def calc_sha256(self):
        # Re-serializes the header fields inline rather than calling
        # self.serialize(): CBlock overrides serialize() to append the
        # transactions, and the header hash must cover the header only.
        if self.sha256 is None:
            r = b""
            r += struct.pack("<i", self.nVersion)
            r += ser_uint256(self.hashPrevBlock)
            r += ser_uint256(self.hashMerkleRoot)
            r += struct.pack("<I", self.nTime)
            r += struct.pack("<I", self.nBits)
            r += struct.pack("<I", self.nNonce)
            r += ser_uint256(self.nAccumulatorCheckpoint)
            self.sha256 = uint256_from_str(hash256(r))
            self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
    def rehash(self):
        # Force a recomputation of the cached hash and return it.
        self.sha256 = None
        self.calc_sha256()
        return self.sha256
    # PCH Uniqueness
    def get_uniqueness(self, prevout):
        # Serialized (n, hash) of a prevout; used as part of the PoS hash input.
        r = b""
        r += struct.pack("<I", prevout.n)
        r += ser_uint256(prevout.hash)
        return r
    def solve_stake(self, prevouts):
        # Find a proof-of-stake solution among the candidate inputs.
        # prevouts maps a candidate (a COutPoint, or a serial-hash hex string
        # for zPoS) to (nvalue, txBlockTime, stakeModifier, hashStake).
        # Bumps self.nTime until some candidate's PoS hash meets the
        # value-weighted target, records it in self.prevoutStake and
        # returns True.
        target0 = uint256_from_compact(self.nBits)
        loop = True
        while loop:
            for prevout in prevouts:
                nvalue, txBlockTime, stakeModifier, hashStake = prevouts[prevout]
                # Target scales with stake value (float math, wrapped to 256 bits).
                target = int(target0 * nvalue / 100) % 2**256
                data = b""
                data += ser_uint64(stakeModifier)
                data += struct.pack("<I", txBlockTime)
                # prevout for zPoS is serial hashes hex strings
                if isinstance(prevout, COutPoint):
                    data += self.get_uniqueness(prevout)
                else:
                    data += ser_uint256(uint256_from_str(bytes.fromhex(hashStake)[::-1]))
                data += struct.pack("<I", self.nTime)
                posHash = uint256_from_str(hash256(data))
                if posHash <= target:
                    self.prevoutStake = prevout
                    loop = False
                    break
            if loop:
                self.nTime += 1
        return True
    def __repr__(self):
        return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
            % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
               time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
    """Full block: a CBlockHeader plus its transaction vector (vtx).

    If a vchBlockSig attribute is present (set by sign_block), it is
    appended when serializing.
    """
    def __init__(self, header=None):
        super(CBlock, self).__init__(header)
        self.vtx = []
    def deserialize(self, f):
        super(CBlock, self).deserialize(f)
        self.vtx = deser_vector(f, CTransaction)
    def serialize(self, with_witness=False):
        # with_witness selects which per-transaction serializer is used;
        # the transactions must provide serialize_with_witness /
        # serialize_without_witness by name.
        r = b""
        r += super(CBlock, self).serialize()
        if with_witness:
            r += ser_vector(self.vtx, "serialize_with_witness")
        else:
            r += ser_vector(self.vtx, "serialize_without_witness")
        if hasattr(self, 'vchBlockSig'):
            r += ser_string(self.vchBlockSig)
        return r
    # Calculate the merkle root given a vector of transaction hashes
    @classmethod
    def get_merkle_root(cls, hashes):
        # Standard Bitcoin merkle tree: pair up, duplicating the last entry
        # of an odd level, and double-SHA256 each pair until one root remains.
        while len(hashes) > 1:
            newhashes = []
            for i in range(0, len(hashes), 2):
                i2 = min(i+1, len(hashes)-1)
                newhashes.append(hash256(hashes[i] + hashes[i2]))
            hashes = newhashes
        return uint256_from_str(hashes[0])
    def calc_merkle_root(self):
        # Merkle root over the txids of self.vtx.
        hashes = []
        for tx in self.vtx:
            tx.calc_sha256()
            hashes.append(ser_uint256(tx.sha256))
        return self.get_merkle_root(hashes)
    def calc_witness_merkle_root(self):
        # For witness root purposes, the hash of the
        # coinbase, with witness, is defined to be 0...0
        hashes = [ser_uint256(0)]
        for tx in self.vtx[1:]:
            # Calculate the hashes with witness data
            # NOTE(review): CTransaction.calc_sha256 returns None, so
            # ser_uint256(...) would raise if this is ever invoked --
            # confirm this helper is unused on this chain.
            hashes.append(ser_uint256(tx.calc_sha256(True)))
        return self.get_merkle_root(hashes)
    def is_valid(self):
        # Checks proof of work, per-transaction validity, and the merkle root.
        self.calc_sha256()
        target = uint256_from_compact(self.nBits)
        if self.sha256 > target:
            return False
        for tx in self.vtx:
            if not tx.is_valid():
                return False
        if self.calc_merkle_root() != self.hashMerkleRoot:
            return False
        return True
    def solve(self):
        # Grind nNonce until the header hash meets the nBits target.
        self.rehash()
        target = uint256_from_compact(self.nBits)
        while self.sha256 > target:
            self.nNonce += 1
            self.rehash()
    def sign_block(self, key, low_s=True):
        # Sign the double-SHA256 of the header fields (same byte layout as
        # CBlockHeader.serialize()) and store the signature in vchBlockSig,
        # which serialize() appends when present.
        data = b""
        data += struct.pack("<i", self.nVersion)
        data += ser_uint256(self.hashPrevBlock)
        data += ser_uint256(self.hashMerkleRoot)
        data += struct.pack("<I", self.nTime)
        data += struct.pack("<I", self.nBits)
        data += struct.pack("<I", self.nNonce)
        data += ser_uint256(self.nAccumulatorCheckpoint)
        sha256NoSig = hash256(data)
        self.vchBlockSig = key.sign(sha256NoSig, low_s=low_s)
    def __repr__(self):
        return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
            % (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
               time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class PrefilledTransaction():
    # A (differentially encoded index, transaction) pair used in compact
    # blocks (BIP 152).
    def __init__(self, index=0, tx = None):
        self.index = index
        self.tx = tx
    def deserialize(self, f):
        self.index = deser_compact_size(f)
        self.tx = CTransaction()
        self.tx.deserialize(f)
    def serialize(self, with_witness=True):
        # Requires self.tx to provide serialize_with_witness when
        # with_witness is True.
        r = b""
        r += ser_compact_size(self.index)
        if with_witness:
            r += self.tx.serialize_with_witness()
        else:
            r += self.tx.serialize_without_witness()
        return r
    def serialize_without_witness(self):
        return self.serialize(with_witness=False)
    def serialize_with_witness(self):
        return self.serialize(with_witness=True)
    def __repr__(self):
        return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs():
    # Wire format of a cmpctblock message (BIP 152): header, nonce, 6-byte
    # shortids, and prefilled transactions with differentially encoded
    # indexes.
    def __init__(self):
        self.header = CBlockHeader()
        self.nonce = 0
        self.shortids_length = 0
        self.shortids = []
        self.prefilled_txn_length = 0
        self.prefilled_txn = []
    def deserialize(self, f):
        self.header.deserialize(f)
        self.nonce = struct.unpack("<Q", f.read(8))[0]
        self.shortids_length = deser_compact_size(f)
        for i in range(self.shortids_length):
            # shortids are defined to be 6 bytes in the spec, so append
            # two zero bytes and read it in as an 8-byte number
            self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
        self.prefilled_txn = deser_vector(f, PrefilledTransaction)
        self.prefilled_txn_length = len(self.prefilled_txn)
    # When using version 2 compact blocks, we must serialize with_witness.
    def serialize(self, with_witness=False):
        r = b""
        r += self.header.serialize()
        r += struct.pack("<Q", self.nonce)
        r += ser_compact_size(self.shortids_length)
        for x in self.shortids:
            # We only want the first 6 bytes
            r += struct.pack("<Q", x)[0:6]
        if with_witness:
            r += ser_vector(self.prefilled_txn, "serialize_with_witness")
        else:
            r += ser_vector(self.prefilled_txn, "serialize_without_witness")
        return r
    def __repr__(self):
        return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
    """cmpctblock variant that always serializes with witness data
    (compact block version 2)."""
    def serialize(self):
        return super().serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
    """BIP 152 short transaction id: SipHash-2-4 truncated to 48 bits."""
    return siphash256(k0, k1, tx_hash) & 0x0000ffffffffffff
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs():
    # Convenience form of a compact block: drops the explicit array lengths
    # and converts the differentially encoded prefilled indexes into
    # absolute indexes usable for lookup.
    def __init__(self, p2pheaders_and_shortids = None):
        self.header = CBlockHeader()
        self.nonce = 0
        self.shortids = []
        self.prefilled_txn = []
        self.use_witness = False
        if p2pheaders_and_shortids != None:
            self.header = p2pheaders_and_shortids.header
            self.nonce = p2pheaders_and_shortids.nonce
            self.shortids = p2pheaders_and_shortids.shortids
            last_index = -1
            for x in p2pheaders_and_shortids.prefilled_txn:
                # Decode differential indexes into absolute ones.
                self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
                last_index = self.prefilled_txn[-1].index
    def to_p2p(self):
        # Convert back to the wire representation (re-applying the
        # differential index encoding).
        if self.use_witness:
            ret = P2PHeaderAndShortWitnessIDs()
        else:
            ret = P2PHeaderAndShortIDs()
        ret.header = self.header
        ret.nonce = self.nonce
        ret.shortids_length = len(self.shortids)
        ret.shortids = self.shortids
        ret.prefilled_txn_length = len(self.prefilled_txn)
        ret.prefilled_txn = []
        last_index = -1
        for x in self.prefilled_txn:
            ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
            last_index = x.index
        return ret
    def get_siphash_keys(self):
        # BIP 152: the two SipHash keys are the first 16 bytes of
        # sha256(header || nonce), split into two little-endian uint64s.
        header_nonce = self.header.serialize()
        header_nonce += struct.pack("<Q", self.nonce)
        hash_header_nonce_as_str = sha256(header_nonce)
        key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
        key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
        return [ key0, key1 ]
    # Version 2 compact blocks use wtxid in shortids (rather than txid)
    def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False):
        # NOTE(review): prefill_list has a mutable default; it is only read
        # here, never mutated, so this is safe as written -- but treat with
        # care if this method is ever changed.
        self.header = CBlockHeader(block)
        self.nonce = nonce
        self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
        self.shortids = []
        self.use_witness = use_witness
        [k0, k1] = self.get_siphash_keys()
        for i in range(len(block.vtx)):
            if i not in prefill_list:
                tx_hash = block.vtx[i].sha256
                if use_witness:
                    tx_hash = block.vtx[i].calc_sha256(with_witness=True)
                self.shortids.append(calculate_shortid(k0, k1, tx_hash))
    def __repr__(self):
        return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest():
    """getblocktxn payload: a block hash plus differentially encoded
    transaction indexes (BIP 152)."""

    def __init__(self, blockhash=0, indexes=None):
        self.blockhash = blockhash
        self.indexes = [] if indexes is None else indexes

    def deserialize(self, f):
        self.blockhash = deser_uint256(f)
        for _ in range(deser_compact_size(f)):
            self.indexes.append(deser_compact_size(f))

    def serialize(self):
        parts = [ser_uint256(self.blockhash), ser_compact_size(len(self.indexes))]
        parts.extend(ser_compact_size(x) for x in self.indexes)
        return b"".join(parts)

    # helper to set the differentially encoded indexes from absolute ones
    def from_absolute(self, absolute_indexes):
        self.indexes = []
        prev = -1
        for idx in absolute_indexes:
            self.indexes.append(idx - prev - 1)
            prev = idx

    def to_absolute(self):
        """Decode self.indexes back into absolute positions."""
        absolute = []
        prev = -1
        for delta in self.indexes:
            prev = delta + prev + 1
            absolute.append(prev)
        return absolute

    def __repr__(self):
        return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions():
    """blocktxn payload: a block hash plus the requested transactions."""

    def __init__(self, blockhash=0, transactions=None):
        self.blockhash = blockhash
        self.transactions = [] if transactions is None else transactions

    def deserialize(self, f):
        self.blockhash = deser_uint256(f)
        self.transactions = deser_vector(f, CTransaction)

    def serialize(self, with_witness=True):
        serializer = "serialize_with_witness" if with_witness else "serialize_without_witness"
        return ser_uint256(self.blockhash) + ser_vector(self.transactions, serializer)

    def __repr__(self):
        return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
class CPartialMerkleTree():
    """Partial merkle tree as used in merkleblock messages."""

    def __init__(self):
        self.nTransactions = 0
        self.vHash = []
        self.vBits = []
        self.fBad = False

    def deserialize(self, f):
        self.nTransactions = struct.unpack("<i", f.read(4))[0]
        self.vHash = deser_uint256_vector(f)
        packed = deser_string(f)
        # Unpack the bit field LSB-first, one bool per bit.
        self.vBits = [bool(packed[i // 8] & (1 << (i % 8)))
                      for i in range(len(packed) * 8)]

    def serialize(self):
        # Pack vBits LSB-first into a byte string.
        packed = bytearray((len(self.vBits) + 7) // 8)
        for i, bit in enumerate(self.vBits):
            packed[i // 8] |= bit << (i % 8)
        out = struct.pack("<i", self.nTransactions)
        out += ser_uint256_vector(self.vHash)
        out += ser_string(bytes(packed))
        return out

    def __repr__(self):
        return "CPartialMerkleTree(nTransactions=%d, vHash=%s, vBits=%s)" % (self.nTransactions, repr(self.vHash), repr(self.vBits))
class CMerkleBlock():
    """merkleblock payload: a block header plus a partial merkle tree."""

    def __init__(self):
        self.header = CBlockHeader()
        self.txn = CPartialMerkleTree()

    def deserialize(self, f):
        self.header.deserialize(f)
        self.txn.deserialize(f)

    def serialize(self):
        return self.header.serialize() + self.txn.serialize()

    def __repr__(self):
        return "CMerkleBlock(header=%s, txn=%s)" % (repr(self.header), repr(self.txn))
# Objects that correspond to messages on the wire
class msg_version():
    """version message: announces our protocol version, services and
    addresses to a peer (see BIP 37 for the trailing relay flag)."""
    command = b"version"
    def __init__(self):
        self.nVersion = MY_VERSION
        self.nServices = NODE_NETWORK
        self.nTime = int(time.time())
        self.addrTo = CAddress()
        self.addrFrom = CAddress()
        self.nNonce = random.getrandbits(64)
        self.strSubVer = MY_SUBVERSION
        self.nStartingHeight = -1
        self.nRelay = MY_RELAY
    def deserialize(self, f):
        self.nVersion = struct.unpack("<i", f.read(4))[0]
        if self.nVersion == 10300:
            # Historical quirk: protocol 10300 is treated as 300.
            self.nVersion = 300
        self.nServices = struct.unpack("<Q", f.read(8))[0]
        self.nTime = struct.unpack("<q", f.read(8))[0]
        self.addrTo = CAddress()
        self.addrTo.deserialize(f)
        if self.nVersion >= 106:
            self.addrFrom = CAddress()
            self.addrFrom.deserialize(f)
            self.nNonce = struct.unpack("<Q", f.read(8))[0]
            self.strSubVer = deser_string(f)
        else:
            # Very old protocol versions lack these fields entirely.
            self.addrFrom = None
            self.nNonce = None
            self.strSubVer = None
            self.nStartingHeight = None
        if self.nVersion >= 209:
            self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
        else:
            self.nStartingHeight = None
        if self.nVersion >= 70001:
            # Relay field is optional for version 70001 onwards
            try:
                self.nRelay = struct.unpack("<b", f.read(1))[0]
            except struct.error:
                # Field absent: the short read makes struct.unpack raise.
                # (Narrowed from a bare `except:` so real errors propagate.)
                self.nRelay = 0
        else:
            self.nRelay = 0
    def serialize(self):
        r = b""
        r += struct.pack("<i", self.nVersion)
        r += struct.pack("<Q", self.nServices)
        r += struct.pack("<q", self.nTime)
        r += self.addrTo.serialize()
        r += self.addrFrom.serialize()
        r += struct.pack("<Q", self.nNonce)
        r += ser_string(self.strSubVer)
        r += struct.pack("<i", self.nStartingHeight)
        r += struct.pack("<b", self.nRelay)
        return r
    def __repr__(self):
        return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
            % (self.nVersion, self.nServices, time.ctime(self.nTime),
               repr(self.addrTo), repr(self.addrFrom), self.nNonce,
               self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack():
    """verack message: empty acknowledgement of a peer's version message."""
    command = b"verack"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_verack()"
class msg_addr():
    """addr message: a vector of known peer addresses."""
    command = b"addr"

    def __init__(self):
        self.addrs = []

    def deserialize(self, f):
        self.addrs = deser_vector(f, CAddress)

    def serialize(self):
        return ser_vector(self.addrs)

    def __repr__(self):
        return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_inv():
    """inv message: announces a vector of inventory items."""
    command = b"inv"

    def __init__(self, inv=None):
        self.inv = [] if inv is None else inv

    def deserialize(self, f):
        self.inv = deser_vector(f, CInv)

    def serialize(self):
        return ser_vector(self.inv)

    def __repr__(self):
        return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata():
    """getdata message: requests a vector of inventory items."""
    command = b"getdata"

    def __init__(self, inv=None):
        self.inv = [] if inv is None else inv

    def deserialize(self, f):
        self.inv = deser_vector(f, CInv)

    def serialize(self):
        return ser_vector(self.inv)

    def __repr__(self):
        return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks():
    """getblocks message: a block locator plus a stop hash."""
    command = b"getblocks"

    def __init__(self):
        self.locator = CBlockLocator()
        self.hashstop = 0

    def deserialize(self, f):
        self.locator = CBlockLocator()
        self.locator.deserialize(f)
        self.hashstop = deser_uint256(f)

    def serialize(self):
        return self.locator.serialize() + ser_uint256(self.hashstop)

    def __repr__(self):
        return "msg_getblocks(locator=%s hashstop=%064x)" \
            % (repr(self.locator), self.hashstop)
class msg_tx():
    """tx message carrying a single transaction."""
    command = b"tx"
    def __init__(self, tx=None):
        # The original default `tx=CTransaction()` was a mutable default
        # argument: one shared transaction object for every msg_tx()
        # constructed without an argument. Create a fresh one per call.
        self.tx = tx if tx is not None else CTransaction()
    def deserialize(self, f):
        self.tx.deserialize(f)
    def serialize(self):
        return self.tx.serialize_without_witness()
    def __repr__(self):
        return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
    # Variant of msg_tx that serializes via the transaction's
    # serialize_with_witness method instead of the base format.
    def serialize(self):
        return self.tx.serialize_with_witness()
class msg_block():
    """block message carrying one full block."""
    command = b"block"

    def __init__(self, block=None):
        self.block = CBlock() if block is None else block

    def deserialize(self, f):
        self.block.deserialize(f)

    def serialize(self):
        return self.block.serialize(with_witness=False)

    def __repr__(self):
        return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic():
    """Escape hatch for sending arbitrary bytes under a caller-supplied
    command name."""

    def __init__(self, command, data=None):
        self.command = command
        self.data = data

    def serialize(self):
        return self.data

    def __repr__(self):
        return "msg_generic()"
class msg_witness_block(msg_block):
    """block message variant that serializes the block with witness data."""
    def serialize(self):
        return self.block.serialize(with_witness=True)
class msg_getaddr():
    """getaddr message: empty request for known peer addresses."""
    command = b"getaddr"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_getaddr()"
class msg_ping():
    """ping message carrying a 64-bit nonce (BIP 31)."""
    command = b"ping"

    def __init__(self, nonce=0):
        self.nonce = nonce

    def deserialize(self, f):
        self.nonce = struct.unpack("<Q", f.read(8))[0]

    def serialize(self):
        return struct.pack("<Q", self.nonce)

    def __repr__(self):
        return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong():
    """pong message echoing a ping's 64-bit nonce (BIP 31)."""
    command = b"pong"

    def __init__(self, nonce=0):
        self.nonce = nonce

    def deserialize(self, f):
        self.nonce = struct.unpack("<Q", f.read(8))[0]

    def serialize(self):
        return struct.pack("<Q", self.nonce)

    def __repr__(self):
        return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool():
    """mempool message: empty request for the peer's mempool contents."""
    command = b"mempool"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_mempool()"
class msg_sendheaders():
    """sendheaders message: empty request for direct header announcements."""
    command = b"sendheaders"

    def __init__(self):
        pass

    def deserialize(self, f):
        pass

    def serialize(self):
        return b""

    def __repr__(self):
        return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders():
    """getheaders message: a block locator plus a stop hash (0 means
    'as many as possible')."""
    command = b"getheaders"

    def __init__(self):
        self.locator = CBlockLocator()
        self.hashstop = 0

    def deserialize(self, f):
        self.locator = CBlockLocator()
        self.locator.deserialize(f)
        self.hashstop = deser_uint256(f)

    def serialize(self):
        return self.locator.serialize() + ser_uint256(self.hashstop)

    def __repr__(self):
        return "msg_getheaders(locator=%s, stop=%064x)" \
            % (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers():
    """headers message: count-prefixed vector of block headers."""
    command = b"headers"

    def __init__(self, headers=None):
        self.headers = [] if headers is None else headers

    def deserialize(self, f):
        # comment in bitcoind indicates these should be deserialized as blocks
        for blk in deser_vector(f, CBlock):
            self.headers.append(CBlockHeader(blk))

    def serialize(self):
        return ser_vector([CBlock(hdr) for hdr in self.headers])

    def __repr__(self):
        return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject():
    """reject message: rejected-message name, code, reason, and (for
    block/tx rejections that are not malformed) the object's hash."""
    command = b"reject"
    REJECT_MALFORMED = 1

    def __init__(self):
        self.message = b""
        self.code = 0
        self.reason = b""
        self.data = 0

    def _has_data(self):
        # The hash field is present only for non-malformed block/tx rejects.
        return (self.message in (b"block", b"tx")
                and self.code != self.REJECT_MALFORMED)

    def deserialize(self, f):
        self.message = deser_string(f)
        self.code = struct.unpack("<B", f.read(1))[0]
        self.reason = deser_string(f)
        if self._has_data():
            self.data = deser_uint256(f)

    def serialize(self):
        r = ser_string(self.message)
        r += struct.pack("<B", self.code)
        r += ser_string(self.reason)
        if self._has_data():
            r += ser_uint256(self.data)
        return r

    def __repr__(self):
        return "msg_reject: %s %d %s [%064x]" \
            % (self.message, self.code, self.reason, self.data)
class msg_feefilter():
    """feefilter message: minimum fee rate we want relayed to us."""
    command = b"feefilter"

    def __init__(self, feerate=0):
        self.feerate = feerate

    def deserialize(self, f):
        self.feerate = struct.unpack("<Q", f.read(8))[0]

    def serialize(self):
        return struct.pack("<Q", self.feerate)

    def __repr__(self):
        return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct():
    """sendcmpct message (BIP 152): announce flag plus compact block version."""
    command = b"sendcmpct"

    def __init__(self):
        self.announce = False
        self.version = 1

    def deserialize(self, f):
        self.announce = struct.unpack("<?", f.read(1))[0]
        self.version = struct.unpack("<Q", f.read(8))[0]

    def serialize(self):
        return struct.pack("<?", self.announce) + struct.pack("<Q", self.version)

    def __repr__(self):
        return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock():
    """cmpctblock message wrapping a P2PHeaderAndShortIDs payload."""
    command = b"cmpctblock"

    def __init__(self, header_and_shortids=None):
        self.header_and_shortids = header_and_shortids

    def deserialize(self, f):
        self.header_and_shortids = P2PHeaderAndShortIDs()
        self.header_and_shortids.deserialize(f)

    def serialize(self):
        return self.header_and_shortids.serialize()

    def __repr__(self):
        return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn():
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn():
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=False)
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=True)
return r
| 29.447489 | 262 | 0.596527 |
from codecs import encode
import copy
import hashlib
from io import BytesIO
import random
import socket
import struct
import time
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str
MIN_VERSION_SUPPORTED = 60001
MY_VERSION = 70914
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1
MAX_INV_SZ = 50000
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000
NODE_NETWORK = (1 << 0)
NODE_BLOOM = (1 << 2)
def sha256(s):
return hashlib.new('sha256', s).digest()
def ripemd160(s):
return hashlib.new('ripemd160', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def ser_uint64(u):
rs = b""
for i in range(2):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
def ToHex(obj):
return bytes_to_hex_str(obj.serialize())
class CAddress():
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv():
typemap = {
0: "Error",
1: "TX",
2: "Block",
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator():
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint():
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn():
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
class CTxOut():
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CTransaction():
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# matches the implementation in bitcoind
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
# Regular serialization is with witness -- must explicitly
# call serialize_without_witness to exclude witness data.
def serialize(self):
return self.serialize_without_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
def calc_sha256(self, with_witness=False):
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
self.hash = encode(hash256(self.serialize_without_witness())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime)
class CBlockHeader():
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.nAccumulatorCheckpoint = header.nAccumulatorCheckpoint
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 4
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.nAccumulatorCheckpoint = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.nAccumulatorCheckpoint = deser_uint256(f)
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
r += ser_uint256(self.nAccumulatorCheckpoint)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
r += ser_uint256(self.nAccumulatorCheckpoint)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
# PCH Uniqueness
def get_uniqueness(self, prevout):
r = b""
r += struct.pack("<I", prevout.n)
r += ser_uint256(prevout.hash)
return r
def solve_stake(self, prevouts):
target0 = uint256_from_compact(self.nBits)
loop = True
while loop:
for prevout in prevouts:
nvalue, txBlockTime, stakeModifier, hashStake = prevouts[prevout]
target = int(target0 * nvalue / 100) % 2**256
data = b""
data += ser_uint64(stakeModifier)
data += struct.pack("<I", txBlockTime)
# prevout for zPoS is serial hashes hex strings
if isinstance(prevout, COutPoint):
data += self.get_uniqueness(prevout)
else:
data += ser_uint256(uint256_from_str(bytes.fromhex(hashStake)[::-1]))
data += struct.pack("<I", self.nTime)
posHash = uint256_from_str(hash256(data))
if posHash <= target:
self.prevoutStake = prevout
loop = False
break
if loop:
self.nTime += 1
return True
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx, "serialize_without_witness")
if hasattr(self, 'vchBlockSig'):
r += ser_string(self.vchBlockSig)
return r
# Calculate the merkle root given a vector of transaction hashes
@classmethod
def get_merkle_root(cls, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_sha256(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def sign_block(self, key, low_s=True):
data = b""
data += struct.pack("<i", self.nVersion)
data += ser_uint256(self.hashPrevBlock)
data += ser_uint256(self.hashMerkleRoot)
data += struct.pack("<I", self.nTime)
data += struct.pack("<I", self.nBits)
data += struct.pack("<I", self.nNonce)
data += ser_uint256(self.nAccumulatorCheckpoint)
sha256NoSig = hash256(data)
self.vchBlockSig = key.sign(sha256NoSig, low_s=low_s)
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class PrefilledTransaction():
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=True):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_without_witness(self):
return self.serialize(with_witness=False)
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs():
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn, "serialize_without_witness")
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
def serialize(self):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs():
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
if p2pheaders_and_shortids != None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False):
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest():
def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
self.indexes = indexes if indexes != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions():
def __init__(self, blockhash=0, transactions = None):
self.blockhash = blockhash
self.transactions = transactions if transactions != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=True):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions, "serialize_without_witness")
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
class CPartialMerkleTree():
def __init__(self):
self.nTransactions = 0
self.vHash = []
self.vBits = []
self.fBad = False
def deserialize(self, f):
self.nTransactions = struct.unpack("<i", f.read(4))[0]
self.vHash = deser_uint256_vector(f)
vBytes = deser_string(f)
self.vBits = []
for i in range(len(vBytes) * 8):
self.vBits.append(vBytes[i//8] & (1 << (i % 8)) != 0)
def serialize(self):
r = b""
r += struct.pack("<i", self.nTransactions)
r += ser_uint256_vector(self.vHash)
vBytesArray = bytearray([0x00] * ((len(self.vBits) + 7)//8))
for i in range(len(self.vBits)):
vBytesArray[i // 8] |= self.vBits[i] << (i % 8)
r += ser_string(bytes(vBytesArray))
return r
def __repr__(self):
return "CPartialMerkleTree(nTransactions=%d, vHash=%s, vBits=%s)" % (self.nTransactions, repr(self.vHash), repr(self.vBits))
class CMerkleBlock():
def __init__(self):
self.header = CBlockHeader()
self.txn = CPartialMerkleTree()
def deserialize(self, f):
self.header.deserialize(f)
self.txn.deserialize(f)
def serialize(self):
r = b""
r += self.header.serialize()
r += self.txn.serialize()
return r
def __repr__(self):
return "CMerkleBlock(header=%s, txn=%s)" % (repr(self.header), repr(self.txn))
# Objects that correspond to messages on the wire
class msg_version():
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = NODE_NETWORK
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
except:
self.nRelay = 0
else:
self.nRelay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
r += struct.pack("<b", self.nRelay)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack():
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr():
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_inv():
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata():
command = b"getdata"
def __init__(self, inv=None):
self.inv = inv if inv != None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks():
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx():
command = b"tx"
def __init__(self, tx=CTransaction()):
self.tx = tx
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_without_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
def serialize(self):
return self.tx.serialize_with_witness()
class msg_block():
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize(with_witness=False)
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic():
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_witness_block(msg_block):
def serialize(self):
r = self.block.serialize(with_witness=True)
return r
class msg_getaddr():
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping():
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong():
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool():
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders():
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders():
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers():
command = b"headers"
def __init__(self, headers=None):
self.headers = headers if headers is not None else []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject():
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
class msg_feefilter():
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct():
command = b"sendcmpct"
def __init__(self):
self.announce = False
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock():
command = b"cmpctblock"
def __init__(self, header_and_shortids = None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn():
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn():
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=False)
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=True)
return r
| true | true |
f7358b894c9e2d136a400ac7dfc5cf2f4426b4be | 32,488 | py | Python | benchmarks/analysisScripts/VivadoGraphUtil.py | magic3007/AMF-Placer | b6fbc10c37c3259c2b4f99ce0bb03c9d96bc29bd | [
"Apache-2.0"
] | 37 | 2021-09-25T04:31:27.000Z | 2022-03-24T13:46:52.000Z | benchmarks/analysisScripts/VivadoGraphUtil.py | magic3007/AMF-Placer | b6fbc10c37c3259c2b4f99ce0bb03c9d96bc29bd | [
"Apache-2.0"
] | 5 | 2021-11-01T13:27:46.000Z | 2022-03-10T07:52:51.000Z | benchmarks/analysisScripts/VivadoGraphUtil.py | magic3007/AMF-Placer | b6fbc10c37c3259c2b4f99ce0bb03c9d96bc29bd | [
"Apache-2.0"
] | 7 | 2021-09-26T07:34:09.000Z | 2022-02-15T08:06:20.000Z | import networkx as nx
class VivadoCell(object):
    """A netlist cell parsed from a Vivado design export.

    Holds the cell's pins together with, for each pin, the *name* of the pin
    that drives it; :meth:`bindPinDriveObjs` later resolves those names into
    actual ``VivadoPin`` objects once the whole design has been parsed.
    """

    def __init__(self, id, name, refType, pins, netStr, drivepinStr):
        # id:          integer index of the cell
        # name:        hierarchical cell name
        # refType:     the cell's reference (library) type string
        # pins:        list of VivadoPin objects (entries may be None)
        # netStr:      list of net-name strings associated with the pins
        # drivepinStr: for each pin, the name of the pin driving it
        self.id = id
        self.name = name
        self.refType = refType
        self.pins = pins
        self.netStr = netStr
        self.drivepinStr = drivepinStr
        # Driver pins resolved to objects on other cells;
        # populated by bindPinDriveObjs().
        self.drivepins_fromOthers = set()

    def bindPinDriveObjs(self, pinName2Obj):
        """Resolve the textual driver-pin names into VivadoPin objects.

        pinName2Obj: dict mapping a pin name to its VivadoPin object.

        Fills ``self.drivepins`` with one entry per pin (the resolved driver
        object, or None when unresolved), and records in
        ``self.drivepins_fromOthers`` every resolved driver whose pin has a
        truthy ``dir`` flag.
        """
        self.drivepins = []
        assert len(self.drivepinStr) == len(self.pins)
        # drivepinStr and pins are parallel lists (asserted above), so walk
        # them in lockstep instead of indexing.
        for tmpName, pin in zip(self.drivepinStr, self.pins):
            if tmpName in pinName2Obj and pin is not None:
                self.drivepins.append(pinName2Obj[tmpName])
            else:
                # Power/ground ties and empty names legitimately have no
                # driver object; anything else unresolved is suspicious.
                if (tmpName.find("VCC") < 0 and tmpName.find("GND") < 0 and len(tmpName) > 0):
                    if (tmpName.find("en_sig_1/G") < 0):
                        print("pin might have problem: ", tmpName)
                self.drivepins.append(None)
                continue
            if pin.dir:
                self.drivepins_fromOthers.add(self.drivepins[-1])
class VivadoPin(object):
    """A single pin on a cell.

    ``dir`` is True for input pins ("IN"), False for outputs; ``cell`` is
    the owning VivadoCell.
    """

    def __init__(self, name, refName, nameOnCell, dir, cell):
        self.name = name
        self.refName = refName
        self.nameOnCell = nameOnCell
        self.dir = dir
        self.cell = cell
class VivadoNet(object):
    """A net: its name plus its input-pin and output-pin collections."""

    def __init__(self, name, inputpins, outputpins):
        self.name = name
        self.outputpins = outputpins
        self.inputpins = inputpins
class VivadoCoreCluster(object):
    """A cluster seeded by same-type "core" nodes (e.g. a CARRY8 chain).

    ``patternStr`` and ``nodeInCluster`` are typically None until the
    cluster is extended (see clusterExtendPatterns).
    """

    def __init__(self, coreType, coreNodes, patternStr, nodeInCluster):
        self.nodeInCluster = nodeInCluster
        self.patternStr = patternStr
        self.coreNodes = coreNodes
        self.coreType = coreType
class VivadoPatternCluster(object):
    """One concrete cluster matched by a pattern string."""

    def __init__(self, initPatternStr, unextendedNodes, extendedNodes, clusterId):
        # Strip quoting/escaping artifacts from the pattern trace.
        cleaned = initPatternStr.replace("'", "").replace("\\", "")
        self.patternExtensionTrace = cleaned
        self.unextendedNodes = unextendedNodes
        self.extendedNodes = extendedNodes
        self.disabled = False
        self.clusterId = clusterId
class VivadoPatternClusterSeq(object):
    """All clusters sharing one (cleaned) pattern-extension trace."""

    def __init__(self, initPatternStr, patternClusters):
        # Same quote/backslash cleanup as VivadoPatternCluster.
        self.patternExtensionTrace = initPatternStr.replace("'", "").replace("\\", "")
        self.patternClusters = patternClusters
def loadCellInfoFromFile(textFile):
    """Parse a Vivado cell/pin report (raw bytes) into a list of VivadoCell.

    The report interleaves ``curCell=> <name> type=> <ref>`` lines with
    ``pin=> ... refpin=> ... dir=> ... net=> ... drivepin=> ...`` lines
    belonging to the most recent cell.  Duplicate cell names are warned
    about and skipped.  After parsing, every cell's driver-pin names are
    resolved via bindPinDriveObjs.

    :param textFile: report contents as bytes (decoded as UTF-8 here)
    :return: list of populated VivadoCell objects
    """
    firstCell = True
    VivadoCells = []
    cellName2Obj = dict()
    pinName2Obj = dict()
    idCnt = 0
    for line in textFile.decode('utf-8').split("\n"):
        if (line.find("curCell=> ") >= 0):
            # A new cell starts: flush the previous one (unless it was a
            # duplicate, in which case curCell is None).
            if (not firstCell):
                if (not (curCell is None)):
                    if (not curCell.name in cellName2Obj.keys()):
                        VivadoCells.append(curCell)
                        cellName2Obj[curCell.name] = curCell
                    else:
                        print("WARNING duplicate cell:", curCell.name)
            firstCell = False
            name_type = line.replace("curCell=> ", "").split(" type=> ")
            name = name_type[0]
            refType = name_type[1]
            if (name in cellName2Obj.keys()):
                curCell = None  # duplicate: ignore its pin lines too
            else:
                curCell = VivadoCell(idCnt, name, refType, [], [], [])
                idCnt += 1
            continue
        if (line.find(" pin=> ") >= 0):
            if (curCell is None):
                continue
            # Collapse the field markers into ';' separators:
            # [pin, refpin, dir, net, drivepin]
            pin_refpin_dir_net_drivepin = line.replace(" pin=> ", "").replace(" refpin=> ", ";").replace(
                " dir=> ", ";").replace(" net=> ", ";").replace(" drivepin=> ", ";").split(";")
            if (len(pin_refpin_dir_net_drivepin) > 5):
                assert(False)  # multi-driver pins are not expected in the report
            if (pin_refpin_dir_net_drivepin[4].replace("\n", "") != ""):
                # Only pins with a non-empty driver contribute net/driver
                # entries (note: this means netStr/drivepinStr can be
                # shorter than pins).
                curCell.netStr.append(
                    pin_refpin_dir_net_drivepin[3].replace("\n", ""))
                curCell.drivepinStr.append(
                    pin_refpin_dir_net_drivepin[4].replace("\n", ""))
            if (pin_refpin_dir_net_drivepin[2] == "OUT"):
                # Output pins are registered under their driver-pin name when
                # it differs from the local pin name.
                if (pin_refpin_dir_net_drivepin[0] != pin_refpin_dir_net_drivepin[4].replace("\n", "")):
                    curPin = VivadoPin(pin_refpin_dir_net_drivepin[4].replace(
                        "\n", ""), pin_refpin_dir_net_drivepin[1], pin_refpin_dir_net_drivepin[0], pin_refpin_dir_net_drivepin[2] == "IN", curCell)
                else:
                    curPin = VivadoPin(pin_refpin_dir_net_drivepin[0], pin_refpin_dir_net_drivepin[1],
                                       pin_refpin_dir_net_drivepin[0], pin_refpin_dir_net_drivepin[2] == "IN", curCell)
            else:
                curPin = VivadoPin(pin_refpin_dir_net_drivepin[0], pin_refpin_dir_net_drivepin[1],
                                   pin_refpin_dir_net_drivepin[0], pin_refpin_dir_net_drivepin[2] == "IN", curCell)
            if (curPin.name == ""):
                continue
            if (not curPin.name in pinName2Obj.keys()):
                pinName2Obj[curPin.name] = curPin
            curCell.pins.append(curPin)
    # Flush the final cell.  Guard against an empty input (curCell never
    # bound -> NameError) and against a trailing duplicate (curCell is None
    # -> the old unconditional append inserted None and then crashed on
    # curCell.name).
    if (not firstCell) and (curCell is not None):
        VivadoCells.append(curCell)
        cellName2Obj[curCell.name] = curCell
    for curCell in VivadoCells:
        curCell.bindPinDriveObjs(pinName2Obj)
    return VivadoCells
def VivadoGraphExctractionAndInitialPatternDetect(VivadoCells):
    """Build the weighted cell-connectivity DiGraph for a list of VivadoCell.

    Each cell becomes a node (attributes 'name', 'type', 'id').  For every
    input pin whose driver pin sits on another cell, a driver->sink edge is
    recorded; parallel connections between the same cell pair are folded
    into an integer 'weight' attribute, and a 'driverPinType' string
    ("<driverCellType>-<driverRefPin>") is attached (the last recorded
    connection between a pair wins).

    :param VivadoCells: cells with drivepins already bound
    :return: the populated networkx.DiGraph

    Note: the several pattern-counting dictionaries the original version
    accumulated (inputPatterns, cellTypes, outputPatternCnt, ...) were dead
    code — computed but never read or returned — and have been removed.
    """
    # (driverCellId, sinkCellId) -> number of parallel pin connections
    pairConnectionCnt = dict()
    nodetype = dict()
    nodename = dict()
    node2id = dict()
    VivadoGraph = nx.DiGraph()
    edgeAttributes = []
    for tmpCell in VivadoCells:
        nodetype[tmpCell.id] = tmpCell.refType
        nodename[tmpCell.id] = tmpCell.name
        node2id[tmpCell.id] = tmpCell.id
        VivadoGraph.add_node(tmpCell.id)
        # NOTE(review): netStr/drivepins can be shorter than pins (zip
        # truncates); this alignment is inherited from loadCellInfoFromFile
        # — confirm it is intended.
        for pin, net, drivepin in zip(tmpCell.pins, tmpCell.netStr, tmpCell.drivepins):
            if drivepin is None:
                continue
            # Only input pins attached to a real net contribute edges.
            if pin.dir and net != "":
                edgeKey = (drivepin.cell.id, tmpCell.id)
                edgeAttributes.append(
                    (drivepin.cell.id, tmpCell.id, drivepin.cell.refType + "-" + drivepin.refName))
                if edgeKey not in pairConnectionCnt:
                    pairConnectionCnt[edgeKey] = 0
                pairConnectionCnt[edgeKey] += 1
    netlist = [(src, dst, cnt)
               for (src, dst), cnt in pairConnectionCnt.items()]
    VivadoGraph.add_weighted_edges_from(netlist)
    # Last write wins for parallel connections, as before.
    for a, b, driverPinType in edgeAttributes:
        VivadoGraph[a][b]['driverPinType'] = driverPinType
    nx.set_node_attributes(VivadoGraph, name="name", values=nodename)
    nx.set_node_attributes(VivadoGraph, name="type", values=nodetype)
    nx.set_node_attributes(VivadoGraph, name="id", values=node2id)
    print("#nodes:", VivadoGraph.number_of_nodes())
    print("#edges:", VivadoGraph.number_of_edges())
    return VivadoGraph
def getInitalSingleCorePatterns(VivadoGraph, careTypeList, coreType="CARRY", checkDirection="both", allowOverlap=True, onlyRecordInput=False):
    """Find per-node neighborhood patterns around every core-type node.

    For each node whose 'type' contains ``coreType``, builds a signature
    string "<sorted pred type-counts>=<nodeType>>=<sorted succ type-counts>"
    restricted to neighbor types in ``careTypeList``, and groups nodes by
    that signature.

    :param checkDirection: "both", "in" or "out" — which neighbors to scan
        (anything else trips the assert; note asserts vanish under -O)
    :param allowOverlap: if False, a node already swallowed by an earlier
        pattern is skipped — so the result depends on iteration order
    :param onlyRecordInput: if True, successors contribute to the signature
        but are not added to the pattern's node set
    :return: (dict signature -> [(coreNode, nodeSet), ...],
              same content as a list sorted by descending group size)
    """
    careTypeList = set(careTypeList)
    singleCorePattern = None
    if (checkDirection == "both"):
        inCheck = True
        outCheck = True
    elif (checkDirection == "in"):
        inCheck = True
        outCheck = False
    elif (checkDirection == "out"):
        inCheck = False
        outCheck = True
    else:
        assert(False)
    singleCorePattern = dict()
    traversedNodes = set()
    for nodeId in VivadoGraph.nodes():
        curNode = VivadoGraph.nodes()[nodeId]
        nodeType = curNode['type']
        if (nodeType.find(coreType) >= 0):
            if ((nodeId in traversedNodes) and (not allowOverlap)):
                continue
            nodeInPattern = set()
            nodeInPattern.add(nodeId)
            traversedNodes.add(nodeId)
            # Count predecessor types (weighted by parallel-connection count).
            predTypeCnt = dict()
            if (inCheck):
                for pred in VivadoGraph.predecessors(nodeId):
                    if ((pred in traversedNodes) and (not allowOverlap)):
                        continue
                    nodeInPattern.add(pred)
                    traversedNodes.add(pred)
                    predNode = VivadoGraph.nodes()[pred]
                    if (not predNode['type'] in careTypeList):
                        continue
                    if (not (predNode['type'] in predTypeCnt.keys())):
                        predTypeCnt[predNode['type']] = 0
                    predTypeCnt[predNode['type']
                                ] += VivadoGraph[pred][nodeId]['weight']
            predTypeList = []
            for key in predTypeCnt.keys():
                predTypeList.append(key+"-"+str(predTypeCnt[key]))
            predTypeList.sort()
            predPatternStr = '-'.join(predTypeList) + ">="
            # Count successor types the same way.
            succTypeCnt = dict()
            if (outCheck):
                for succ in VivadoGraph.successors(nodeId):
                    if ((succ in traversedNodes) and (not allowOverlap)):
                        continue
                    if (not onlyRecordInput):
                        nodeInPattern.add(succ)
                        traversedNodes.add(succ)
                    succNode = VivadoGraph.nodes()[succ]
                    if (not succNode['type'] in careTypeList):
                        continue
                    if (not (succNode['type'] in succTypeCnt.keys())):
                        succTypeCnt[succNode['type']] = 0
                    succTypeCnt[succNode['type']
                                ] += VivadoGraph[nodeId][succ]['weight']
            succTypeList = []
            for key in succTypeCnt.keys():
                succTypeList.append(key+"-"+str(succTypeCnt[key]))
            succTypeList.sort()
            succPatternStr = ">=" + '-'.join(succTypeList)
            overallPatternStr = predPatternStr + nodeType + succPatternStr
            # MUXF7 nodes feeding/fed-by MUXF8 are handled elsewhere; skip.
            if (coreType == "MUXF7" and overallPatternStr.find("MUXF8") >= 0):
                continue
            if (not (overallPatternStr in singleCorePattern.keys())):
                singleCorePattern[overallPatternStr] = []
            singleCorePattern[overallPatternStr].append(
                (curNode, nodeInPattern))
    # Sort the groups by how many core nodes share each signature.
    numSingleCorePattern = dict()
    for key in singleCorePattern.keys():
        numSingleCorePattern[key] = len(singleCorePattern[key])
    sortedSingleCorePattern = []
    for w in sorted(numSingleCorePattern, key=numSingleCorePattern.get, reverse=True):
        # if (len(singleCorePattern[w]) >= 5):
        #     print(w, len(singleCorePattern[w]))
        sortedSingleCorePattern.append((w, singleCorePattern[w]))
    return singleCorePattern, sortedSingleCorePattern
def chainBFS(VivadoGraph, nodeId):
    """Breadth-first collect every node reachable from ``nodeId`` through
    neighbors (following edges in either direction) whose 'type' equals the
    start node's type.  Returns the set of reached node ids, including
    ``nodeId`` itself."""
    coreType = VivadoGraph.nodes()[nodeId]['type']
    reached = set()
    frontier = {nodeId}
    while frontier:
        reached |= frontier
        nextFrontier = set()
        for cur in frontier:
            neighbors = list(VivadoGraph.successors(cur)) + \
                list(VivadoGraph.predecessors(cur))
            for nb in neighbors:
                if VivadoGraph.nodes()[nb]['type'] == coreType and nb not in reached:
                    nextFrontier.add(nb)
        frontier = nextFrontier
    return reached
def clusterNodeChain(VivadoGraph, coreType="CARRY8"):
    """Group all nodes whose 'type' contains ``coreType`` into connected
    same-type chains (found via chainBFS).  Returns one VivadoCoreCluster
    per chain, with patternStr/nodeInCluster left as None."""
    seen = set()
    chains = []
    for nid in VivadoGraph.nodes():
        ntype = VivadoGraph.nodes()[nid]['type']
        if ntype.find(coreType) < 0 or nid in seen:
            continue
        members = chainBFS(VivadoGraph, nid)
        seen |= members
        chains.append(VivadoCoreCluster(ntype, members, None, None))
    return chains
def clusterNodeWithCommonFanin(VivadoGraph, VivadoCells, targetType="RAM32M16"):
    """Cluster nodes of ``targetType`` that share the same external fan-in.

    Builds an auxiliary undirected graph over all target-type nodes, linking
    pairs with equal common-fanin counts, then greedily (largest shared
    fan-in first) groups a node with those neighbors whose shared driver-pin
    set matches exactly.  Returns a list of VivadoCoreCluster.

    NOTE(review): cells are indexed as ``VivadoCells[nodeId]`` — this relies
    on cell ids equaling list positions (they are assigned sequentially in
    loadCellInfoFromFile); confirm if cells are ever filtered upstream.
    """
    clusteredSet = set()
    allNodesInType = []
    for nodeId in VivadoGraph.nodes():
        curNode = VivadoGraph.nodes()[nodeId]
        nodeType = curNode['type']
        if (nodeType.find(targetType) >= 0):
            allNodesInType.append(nodeId)
    graphForCluster = nx.Graph()
    for nodeId in allNodesInType:
        graphForCluster.add_node(nodeId)
    # All-pairs shared-driver counts (O(n^2) in the number of target nodes).
    edges = []
    for listId, nodeId in enumerate(allNodesInType):
        for other_nodeId in allNodesInType[listId+1:]:
            driverPins = (VivadoCells[nodeId].drivepins_fromOthers) & (
                VivadoCells[other_nodeId].drivepins_fromOthers)
            edges.append((nodeId, other_nodeId, len(driverPins)))
    edges.sort(key=lambda tup: tup[2], reverse=True)
    # Each node remembers the largest shared-fanin count it was first seen
    # with; only equal-count pairs become candidate cluster edges.
    node2NumCommonFanin = dict()
    for edge in edges:
        nodeA = edge[0]
        nodeB = edge[1]
        numCommonFanin = edge[2]
        if (not nodeA in node2NumCommonFanin.keys()):
            node2NumCommonFanin[nodeA] = numCommonFanin
        if (not nodeB in node2NumCommonFanin.keys()):
            node2NumCommonFanin[nodeB] = numCommonFanin
        if (node2NumCommonFanin[nodeA] == node2NumCommonFanin[nodeB]):
            graphForCluster.add_edge(nodeA, nodeB)
    # Greedy clustering: seed with the endpoint of the strongest unclustered
    # edge, absorb neighbors whose shared driver-pin set matches exactly.
    clusterSet_list = []
    for edge in edges:
        nodeA = edge[0]
        nodeB = edge[1]
        if (not nodeA in clusteredSet):
            overlapDriverPin = None
            newCluster = set()
            clusteredSet.add(nodeA)
            newCluster.add(nodeA)
            for nodeId in graphForCluster.neighbors(nodeA):
                if (not nodeId in clusteredSet):
                    driverPins = (VivadoCells[nodeId].drivepins_fromOthers) & (
                        VivadoCells[nodeA].drivepins_fromOthers)
                    if (overlapDriverPin is None):
                        overlapDriverPin = driverPins
                    if (overlapDriverPin == driverPins):
                        clusteredSet.add(nodeId)
                        newCluster.add(nodeId)
            clusterSet_list.append(newCluster)
            continue
        if (not nodeB in clusteredSet):
            overlapDriverPin = None
            newCluster = set()
            clusteredSet.add(nodeB)
            newCluster.add(nodeB)
            for nodeId in graphForCluster.neighbors(nodeB):
                if (not nodeId in clusteredSet):
                    driverPins = (VivadoCells[nodeId].drivepins_fromOthers) & (
                        VivadoCells[nodeB].drivepins_fromOthers)
                    if (overlapDriverPin is None):
                        overlapDriverPin = driverPins
                    if (overlapDriverPin == driverPins):
                        clusteredSet.add(nodeId)
                        newCluster.add(nodeId)
            clusterSet_list.append(newCluster)
            continue
    clusters = []
    # print("comps:")
    for comp in clusterSet_list:  # nx.connected_components(graphForCluster#
        clusters.append(VivadoCoreCluster(targetType, comp, None, None))
        # for id in comp:
        #     print(node2NumCommonFanin[id], VivadoGraph.nodes()[id]['name'])
        # print("================================")
    # exit()
    return clusters
def clusterExtendPatterns(VivadoGraph, chains, largeCluserIntoPattern=False, allowOverlap=False, largeCluserThredhold=2):
    """Turn core-node chains into pattern-keyed clusters, optionally
    extending each chain with its unvisited graph neighbors.

    Large chains (size > ``largeCluserThredhold``) can be split one node per
    cluster under a unique "largeParallel(i)" pattern key when
    ``largeCluserIntoPattern`` is set; otherwise the whole chain is extended
    with neighbor type-counts into a single pattern string.

    :return: (dict patternStr -> [VivadoCoreCluster], and the same content
        as a list sorted by descending group size)
    """
    patternStr2Chains = dict()
    traversedNodes = set()
    largeCnt = 0
    for chain in chains:
        corePatternStr = chain.coreType + "-" + str(len(chain.coreNodes))
        if (largeCluserIntoPattern and len(chain.coreNodes) > largeCluserThredhold):
            # Split a large chain: every core node becomes its own cluster
            # under a unique pattern key (no neighbor extension).
            corePatternStr += "largeParallel("+str(largeCnt)+")"
            largeCnt += 1
            for nodeId in chain.coreNodes:
                if ((nodeId in traversedNodes) and (not allowOverlap)):
                    continue
                traversedNodes.add(nodeId)
                nodeInPattern = set([nodeId])
                newCluster = VivadoCoreCluster(
                    chain.coreType, set(), None, None)
                # predTypeCnt = dict()
                # for pred in VivadoGraph.predecessors(nodeId):
                #     if ((pred in traversedNodes) and (not allowOverlap)):
                #         continue
                #     traversedNodes.add(pred)
                #     nodeInPattern.add(pred)
                #     predNode = VivadoGraph.nodes()[pred]
                #     if (not (predNode['type'] in predTypeCnt.keys())):
                #         predTypeCnt[predNode['type']] = 0
                #     predTypeCnt[predNode['type']] += VivadoGraph[pred][nodeId]['weight']
                # predTypeList = []
                # for key in predTypeCnt.keys():
                #     predTypeList.append(key+"-"+str(predTypeCnt[key]))
                # predTypeList.sort()
                # predPatternStr = '-'.join(predTypeList) + ">="
                # succTypeCnt = dict()
                # for succ in VivadoGraph.successors(nodeId):
                #     if ((succ in traversedNodes) and (not allowOverlap)):
                #         continue
                #     traversedNodes.add(succ)
                #     nodeInPattern.add(succ)
                #     succNode = VivadoGraph.nodes()[succ]
                #     if (not (succNode['type'] in succTypeCnt.keys())):
                #         succTypeCnt[succNode['type']] = 0
                #     succTypeCnt[succNode['type']] += VivadoGraph[nodeId][succ]['weight']
                # succTypeList = []
                # for key in succTypeCnt.keys():
                #     succTypeList.append(key+"-"+str(succTypeCnt[key]))
                # succTypeList.sort()
                # succPatternStr = ">=" + '-'.join(succTypeList)
                # overallPatternStr = predPatternStr + corePatternStr + succPatternStr
                overallPatternStr = ">=" + corePatternStr + ">="
                newCluster.patternStr = overallPatternStr
                newCluster.nodeInCluster = nodeInPattern
                if (not (overallPatternStr in patternStr2Chains.keys())):
                    patternStr2Chains[overallPatternStr] = []
                patternStr2Chains[overallPatternStr].append(newCluster)
        else:
            # Extend the whole chain with its one-hop neighbors and encode
            # their weighted type counts into the pattern string.
            nodeInPattern = chain.coreNodes.copy() - traversedNodes
            predTypeCnt = dict()
            for nodeId in chain.coreNodes - traversedNodes:
                for pred in VivadoGraph.predecessors(nodeId):
                    if ((pred in traversedNodes) and (not allowOverlap)):
                        continue
                    traversedNodes.add(pred)
                    nodeInPattern.add(pred)
                    predNode = VivadoGraph.nodes()[pred]
                    if (not (predNode['type'] in predTypeCnt.keys())):
                        predTypeCnt[predNode['type']] = 0
                    predTypeCnt[predNode['type']
                                ] += VivadoGraph[pred][nodeId]['weight']
            predTypeList = []
            for key in predTypeCnt.keys():
                predTypeList.append(key+"-"+str(predTypeCnt[key]))
            predTypeList.sort()
            predPatternStr = '-'.join(predTypeList) + ">="
            succTypeCnt = dict()
            for nodeId in chain.coreNodes - traversedNodes:
                for succ in VivadoGraph.successors(nodeId):
                    if ((succ in traversedNodes) and (not allowOverlap)):
                        continue
                    traversedNodes.add(succ)
                    nodeInPattern.add(succ)
                    succNode = VivadoGraph.nodes()[succ]
                    if (not (succNode['type'] in succTypeCnt.keys())):
                        succTypeCnt[succNode['type']] = 0
                    succTypeCnt[succNode['type']
                                ] += VivadoGraph[nodeId][succ]['weight']
            succTypeList = []
            for key in succTypeCnt.keys():
                succTypeList.append(key+"-"+str(succTypeCnt[key]))
            succTypeList.sort()
            succPatternStr = ">=" + '-'.join(succTypeList)
            overallPatternStr = predPatternStr + corePatternStr + succPatternStr
            chain.patternStr = overallPatternStr
            chain.nodeInCluster = nodeInPattern
            if (not (overallPatternStr in patternStr2Chains.keys())):
                patternStr2Chains[overallPatternStr] = []
            patternStr2Chains[overallPatternStr].append(chain)
    # Sort pattern groups by descending number of member clusters.
    numPatternStr2Chains = dict()
    for key in patternStr2Chains.keys():
        numPatternStr2Chains[key] = len(patternStr2Chains[key])
    sortedPatternStr2Chains = []
    for w in sorted(numPatternStr2Chains, key=numPatternStr2Chains.get, reverse=True):
        # if (len(patternStr2Chains[w]) >= 2):
        #     print(w, len(patternStr2Chains[w]))
        sortedPatternStr2Chains.append((w, patternStr2Chains[w]))
    return patternStr2Chains, sortedPatternStr2Chains
def printOutSimplePatterns(VivadoGraph, singleCorePattern):
    """Debug dump of getInitalSingleCorePatterns output.

    For each pattern (largest group first, groups of one skipped) prints the
    group size, its number of weakly-connected components, the nodes shared
    between member patterns, and Vivado Tcl ``highlight_objects`` commands
    for visual inspection.
    """
    numSingleCorePattern = dict()
    for key in singleCorePattern.keys():
        numSingleCorePattern[key] = len(singleCorePattern[key])
    for w in sorted(numSingleCorePattern, key=numSingleCorePattern.get, reverse=True):
        nodeInPattern = set()
        overlappedSet = set()
        for curnode, neighbornodes in singleCorePattern[w]:
            nodeInPattern = nodeInPattern | neighbornodes
        # Pairwise intersection over distinct pattern instances (X > Y only).
        for curNodeIdX, (curnodeX, neighbornodesX) in enumerate(singleCorePattern[w]):
            for curNodeIdY, (curnodeY, neighbornodesY) in enumerate(singleCorePattern[w]):
                if (curNodeIdX <= curNodeIdY):
                    break
                overlappedSet = overlappedSet | (
                    neighbornodesX & neighbornodesY)
        cnt = 0
        for comp in nx.algorithms.weakly_connected_components(VivadoGraph.subgraph(nodeInPattern)):
            cnt += 1
        if (numSingleCorePattern[w] <= 1):
            continue
        print("pattern: ", w, ":", numSingleCorePattern[w], ":", cnt)
        print("  overlap nodes:")
        for nodeId in overlappedSet:
            print("   nodeName:", VivadoGraph.nodes()[nodeId]['name'], "type", VivadoGraph.nodes()[
                  nodeId]['type'], "degree", VivadoGraph.degree(nodeId))
        print("  anchor nodes:")
        for curnode, neighbornodes in singleCorePattern[w]:
            print("    nodeName:",
                  curnode['name'], " id:", curnode['id'], "type: ", curnode['type'])
            for nNode in neighbornodes:
                print("      NeighborNodeName:", VivadoGraph.nodes()[
                    nNode]['name'], " id:", VivadoGraph.nodes()[nNode]['id'], "type: ", VivadoGraph.nodes()[nNode]['type'])
            for nodeId in neighbornodes:
                print(
                    "   highlight_objects -color red [get_cells ", VivadoGraph.nodes()[nodeId]['name'], "]")
def printOutChainPatterns(VivadoGraph, patternStr2Chains):
    """Debug dump of clusterExtendPatterns output.

    For each pattern group (largest first, singleton groups skipped) prints
    group size, weakly-connected component count, highlight commands for the
    first component only (note the ``break``), overlap nodes between member
    clusters, and every chain's core nodes.
    """
    print("======================================\nprinting out chains' patterns")
    numpatternStr2Chains = dict()
    for key in patternStr2Chains.keys():
        numpatternStr2Chains[key] = len(patternStr2Chains[key])
    for w in sorted(numpatternStr2Chains, key=numpatternStr2Chains.get, reverse=True):
        nodeInPattern = set()
        overlappedSet = set()
        for chain in patternStr2Chains[w]:
            nodeInPattern = nodeInPattern | chain.nodeInCluster
        # Pairwise overlap between distinct member clusters (X > Y only).
        for curChainIdX, curChainX in enumerate(patternStr2Chains[w]):
            for curChainIdY, curChainY in enumerate(patternStr2Chains[w]):
                if (curChainIdX <= curChainIdY):
                    break
                overlappedSet = overlappedSet | (
                    curChainX.nodeInCluster & curChainY.nodeInCluster)
        cnt = 0
        for comp in nx.algorithms.weakly_connected_components(VivadoGraph.subgraph(nodeInPattern)):
            cnt += 1
        if (numpatternStr2Chains[w] <= 1):
            continue
        print("pattern: ", w, ":", numpatternStr2Chains[w], ":", cnt)
        # Only the first weakly-connected component is highlighted.
        for comp in nx.algorithms.weakly_connected_components(VivadoGraph.subgraph(nodeInPattern)):
            for nodeId in comp:
                print(
                    "   highlight_objects -color red [get_cells ", VivadoGraph.nodes()[nodeId]['name'], "]")
            break
        print("  overlap nodes:")
        for nodeId in overlappedSet:
            print("   nodeName:", VivadoGraph.nodes()[nodeId]['name'], "type", VivadoGraph.nodes()[
                  nodeId]['type'], "degree", VivadoGraph.degree(nodeId))
        print("  anchor chains:")
        for chain in patternStr2Chains[w]:
            print("     coreNodes:", chain.coreNodes)
            for nodeId in chain.coreNodes:
                print("         ", VivadoGraph.nodes()
                      [nodeId]['name'], " id:", nodeId)
            for nodeId in chain.coreNodes:
                print(
                    "   highlight_objects -color red [get_cells ", VivadoGraph.nodes()[nodeId]['name'], "]")
def instantiatePatternClusters(VivadoGraph, sortedSingleCorePattern, lastClusterId):
    """Materialize VivadoPatternCluster objects from sorted pattern groups.

    Accepts either groups of VivadoCoreCluster (chains) or groups of
    (coreNode, neighborSet) tuples.  Assigns each cluster a fresh id
    starting from ``lastClusterId`` and colors every not-yet-colored member
    node's 'clusterColorId' attribute with it (first assignment wins).

    :return: (list of VivadoPatternClusterSeq, next unused cluster id)
    """
    res = []
    # Reset all node colors to "unassigned".
    clusterColorIdInitial = dict()
    for node in VivadoGraph.nodes():
        clusterColorIdInitial[node] = -1
    nx.set_node_attributes(
        G=VivadoGraph, values=clusterColorIdInitial, name="clusterColorId")
    for w, seqs in sortedSingleCorePattern:
        patternClusters = []
        if (len(seqs) == 0):
            assert(False)
        if (isinstance(seqs[0], VivadoCoreCluster)):
            # Chain-style input: core nodes are "extended", the neighbor
            # extension is "unextended" (falling back to the core set).
            for chain in seqs:
                unextendedNodes = set(chain.nodeInCluster)-set(chain.coreNodes)
                extendedNodes = set(chain.coreNodes)
                if (len(unextendedNodes) == 0):
                    unextendedNodes = extendedNodes
                patternClusters.append(VivadoPatternCluster(initPatternStr=chain.patternStr,
                                                            unextendedNodes=unextendedNodes, extendedNodes=extendedNodes, clusterId=lastClusterId))
                for nodeInSet in patternClusters[-1].extendedNodes | patternClusters[-1].unextendedNodes:
                    if (VivadoGraph.nodes()[nodeInSet]['clusterColorId'] < 0):
                        VivadoGraph.nodes()[
                            nodeInSet]['clusterColorId'] = lastClusterId
                lastClusterId += 1
        else:
            # (coreNode, neighborSet) tuples from getInitalSingleCorePatterns.
            for curNode, neighborNodes in seqs:
                unextendedNodes = set(neighborNodes)-set([curNode['id']])
                extendedNodes = set([curNode['id']])
                if (len(unextendedNodes) == 0):
                    unextendedNodes = extendedNodes
                patternClusters.append(VivadoPatternCluster(
                    initPatternStr=w, unextendedNodes=unextendedNodes, extendedNodes=extendedNodes, clusterId=lastClusterId))
                for nodeInSet in patternClusters[-1].extendedNodes | patternClusters[-1].unextendedNodes:
                    if (VivadoGraph.nodes()[nodeInSet]['clusterColorId'] < 0):
                        VivadoGraph.nodes()[
                            nodeInSet]['clusterColorId'] = lastClusterId
                lastClusterId += 1
        res.append(VivadoPatternClusterSeq(
            initPatternStr=w, patternClusters=patternClusters))
    return res, lastClusterId
def accumulateClusterForce(Patterns):
    """Placeholder for accumulating interconnection forces within each
    pattern cluster; intentionally a no-op for now."""
    pass
def printOutPatterns(patterns):
    """Print one summary line per pattern sequence (cleaned trace, cluster
    count, size of the first cluster) and return the total number of
    clusters across all sequences."""
    print("printOutPatterns:")
    total = 0
    for seq in patterns:
        firstCluster = seq.patternClusters[0]
        firstClusterSize = len(
            firstCluster.extendedNodes | firstCluster.unextendedNodes)
        cleanedTrace = seq.patternExtensionTrace.replace("'", "").replace("\\", "")
        print(cleanedTrace, len(seq.patternClusters), firstClusterSize)
        total += len(seq.patternClusters)
    print("Total Clusters:", total)
    return total
def BFSCheckEdgeAttributes(VivadoGraph, VivadoCells, curCell):
    """Survey bus-style driver-pin types in the fan-in cone of ``curCell``.

    Walks predecessors up to depth 5 (stack-based, so despite the name the
    order is depth-first), skipping high-fanin nodes (>= 32 predecessors).
    Edges whose 'driverPinType' contains '[' (i.e. an indexed/bus ref pin)
    are tallied instead of traversed.  Returns up to the top-4
    (driverPinType, count) pairs, most frequent first.

    NOTE(review): ``visitedSet`` is maintained but never consulted —
    revisits are actually prevented by ``nextSet``.  ``VivadoCells`` is
    unused here.
    """
    edgeAttributeCnt = dict()
    cellQ = [curCell]
    depQ = [0]
    visitedSet = {curCell}
    nextSet = set([curCell])
    while (len(cellQ) > 0):
        curCell = cellQ.pop()
        curDepth = depQ.pop()
        visitedSet.add(curCell)
        if(curDepth >= 5):
            continue
        # if (len(set(VivadoGraph.successors(curCell)))<4):
        #     for succ in VivadoGraph.successors(curCell):
        #         if (not (succ in nextSet)):
        #             if (VivadoGraph[curCell][succ]['driverPinType'].find("[")>=0):
        #                 if (VivadoGraph[curCell][succ]['driverPinType'] in edgeAttributeCnt.keys()):
        #                     edgeAttributeCnt[VivadoGraph[curCell][succ]['driverPinType']] += 1
        #                 else:
        #                     edgeAttributeCnt[VivadoGraph[curCell][succ]['driverPinType']] = 1
        #             else:
        #                 nextSet.add(succ)
        #                 depQ.append(curDepth+1)
        #                 cellQ.append(succ)
        if (len(set(VivadoGraph.predecessors(curCell))) < 32):
            for pred in VivadoGraph.predecessors(curCell):
                if (not (pred in nextSet)):
                    if (VivadoGraph[pred][curCell]['driverPinType'].find("[") >= 0):
                        if (VivadoGraph[pred][curCell]['driverPinType'] in edgeAttributeCnt.keys()):
                            edgeAttributeCnt[VivadoGraph[pred]
                                             [curCell]['driverPinType']] += 1
                        else:
                            edgeAttributeCnt[VivadoGraph[pred]
                                             [curCell]['driverPinType']] = 1
                    else:
                        nextSet.add(pred)
                        depQ.append(curDepth+1)
                        cellQ.append(pred)
    # Sort by count (descending) and keep at most the top four entries.
    sortedEdgeAttributeCnt = []
    for w in sorted(edgeAttributeCnt, key=edgeAttributeCnt.get, reverse=True):
        sortedEdgeAttributeCnt.append((w, edgeAttributeCnt[w]))
    if (len(sortedEdgeAttributeCnt) > 4):
        sortedEdgeAttributeCnt = sortedEdgeAttributeCnt[:4]
    return sortedEdgeAttributeCnt
def loadClusters(name2id, clusterFileName):
    """Load cell-name clusters from a file (one space-separated cluster per line).

    Names found in ``name2id`` are mapped to ids; unknown or empty names are
    ignored.  Every file line yields one (possibly empty) id set, so the
    cluster index equals the line number.

    :param name2id: mapping from cell name to integer id
    :param clusterFileName: path of the cluster list file
    :return: (clusters, id2cluster) — a list of id sets, and a mapping from
        each id to the index of the line it appeared on
    """
    clusters = []
    id2cluster = dict()
    # Context manager so the file handle is always released (the previous
    # version leaked it).
    with open(clusterFileName, "r") as clusterFile:
        for clusterCnt, line in enumerate(clusterFile):
            # Strip the trailing newline: without this the last name on each
            # line carried '\n' and silently failed the name2id lookup
            # (loadClocks/loadFixedBlocks already strip it).
            clusterCellNames = line.rstrip("\n").split(" ")
            ids = set()
            for name in clusterCellNames:
                if name != "" and name in name2id.keys():
                    ids.add(name2id[name])
                    id2cluster[name2id[name]] = clusterCnt
            clusters.append(ids)
    return clusters, id2cluster
def loadClocks(clockFileName):
    """Load clock net names, one per line.

    Drops the trailing newline and any "/O" pin suffix so plain net names
    are returned.

    :param clockFileName: path of the clock list file
    :return: list of clock net name strings
    """
    clockNames = []
    # Context manager so the file handle is always released (the previous
    # version leaked it).
    with open(clockFileName, "r") as clockFile:
        for line in clockFile:
            clockNames.append(line.replace("\n", "").replace("/O", ""))
    return clockNames
def loadFixedBlocks(fixedUnitFileName):
    """Load fixed-placement unit names.

    Each line is expected to contain two space-separated tokens; the second
    one (the cell name) is collected.

    :param fixedUnitFileName: path of the fixed-unit list file
    :return: list of fixed-unit cell names
    """
    fixedUnitNames = []
    # Context manager so the file handle is always released (the previous
    # version leaked it).
    with open(fixedUnitFileName, "r") as fixedUnitFile:
        for line in fixedUnitFile:
            fixedUnitNames.append(line.replace("\n", "").split(" ")[1])
    return fixedUnitNames
| 39.523114 | 151 | 0.566886 | import networkx as nx
class VivadoCell(object):
def __init__(self, id, name, refType, pins, netStr, drivepinStr):
self.id = id
self.name = name
self.refType = refType
self.pins = pins
self.netStr = netStr
self.drivepinStr = drivepinStr
self.drivepins_fromOthers = set()
def bindPinDriveObjs(self, pinName2Obj):
self.drivepins = []
assert(len(self.drivepinStr) == len(self.pins))
for i in range(len(self.drivepinStr)):
tmpName = self.drivepinStr[i]
pin = self.pins[i]
if ((tmpName in pinName2Obj.keys()) and (not pin is None)):
self.drivepins.append(pinName2Obj[tmpName])
else:
if (tmpName.find("VCC") < 0 and tmpName.find("GND") < 0 and len(tmpName) > 0):
if (tmpName.find("en_sig_1/G") < 0):
print("pin might have problem: ", tmpName)
self.drivepins.append(None)
continue
if (pin.dir):
self.drivepins_fromOthers.add(self.drivepins[-1])
class VivadoPin(object):
def __init__(self, name, refName, nameOnCell, dir, cell):
self.name = name
self.dir = dir
self.cell = cell
self.refName = refName
self.nameOnCell = nameOnCell
class VivadoNet(object):
def __init__(self, name, inputpins, outputpins):
self.name = name
self.inputpins = inputpins
self.outputpins = outputpins
class VivadoCoreCluster(object):
def __init__(self, coreType, coreNodes, patternStr, nodeInCluster):
self.coreType = coreType
self.coreNodes = coreNodes
self.patternStr = patternStr
self.nodeInCluster = nodeInCluster
class VivadoPatternCluster(object):
def __init__(self, initPatternStr, unextendedNodes, extendedNodes, clusterId):
self.patternExtensionTrace = initPatternStr.replace(
"\'", "").replace("\\", "")
self.unextendedNodes = unextendedNodes
self.extendedNodes = extendedNodes
self.disabled = False
self.clusterId = clusterId
class VivadoPatternClusterSeq(object):
def __init__(self, initPatternStr, patternClusters):
self.patternExtensionTrace = initPatternStr.replace(
"\'", "").replace("\\", "")
self.patternClusters = patternClusters
def loadCellInfoFromFile(textFile):
firstCell = True
VivadoCells = []
cellName2Obj = dict()
pinName2Obj = dict()
idCnt = 0
for line in textFile.decode('utf-8').split("\n"):
if (line.find("curCell=> ") >= 0):
if (not firstCell):
if (not (curCell is None)):
if (not curCell.name in cellName2Obj.keys()):
VivadoCells.append(curCell)
cellName2Obj[curCell.name] = curCell
else:
print("WARNING duplicate cell:", curCell.name)
firstCell = False
name_type = line.replace("curCell=> ", "").split(" type=> ")
name = name_type[0]
refType = name_type[1]
if (name in cellName2Obj.keys()):
curCell = None
else:
curCell = VivadoCell(idCnt, name, refType, [], [], [])
idCnt += 1
continue
if (line.find(" pin=> ") >= 0):
if (curCell is None):
continue
pin_refpin_dir_net_drivepin = line.replace(" pin=> ", "").replace(" refpin=> ", ";").replace(
" dir=> ", ";").replace(" net=> ", ";").replace(" drivepin=> ", ";").split(";")
if (len(pin_refpin_dir_net_drivepin) > 5):
assert(False)
if (pin_refpin_dir_net_drivepin[4].replace("\n", "") != ""):
curCell.netStr.append(
pin_refpin_dir_net_drivepin[3].replace("\n", ""))
curCell.drivepinStr.append(
pin_refpin_dir_net_drivepin[4].replace("\n", ""))
if (pin_refpin_dir_net_drivepin[2] == "OUT"):
if (pin_refpin_dir_net_drivepin[0] != pin_refpin_dir_net_drivepin[4].replace("\n", "")):
curPin = VivadoPin(pin_refpin_dir_net_drivepin[4].replace(
"\n", ""), pin_refpin_dir_net_drivepin[1], pin_refpin_dir_net_drivepin[0], pin_refpin_dir_net_drivepin[2] == "IN", curCell)
else:
curPin = VivadoPin(pin_refpin_dir_net_drivepin[0], pin_refpin_dir_net_drivepin[1],
pin_refpin_dir_net_drivepin[0], pin_refpin_dir_net_drivepin[2] == "IN", curCell)
else:
curPin = VivadoPin(pin_refpin_dir_net_drivepin[0], pin_refpin_dir_net_drivepin[1],
pin_refpin_dir_net_drivepin[0], pin_refpin_dir_net_drivepin[2] == "IN", curCell)
if (curPin.name == ""):
continue
if (not curPin.name in pinName2Obj.keys()):
pinName2Obj[curPin.name] = curPin
curCell.pins.append(curPin)
VivadoCells.append(curCell)
cellName2Obj[curCell.name] = curCell
for curCell in VivadoCells:
curCell.bindPinDriveObjs(pinName2Obj)
return VivadoCells
def VivadoGraphExctractionAndInitialPatternDetect(VivadoCells):
inputPatterns = dict()
inputPatterns_Cells = dict()
cellTypes = dict()
outputPatternCnt = dict()
netlist = []
netset = set()
netstr2Tuple = dict()
netstr2netnum = dict()
cellname2id = dict()
nodetype = dict()
nodename = dict()
node2id = dict()
for tmpCell in VivadoCells:
outputPatternCnt[tmpCell.id] = dict()
VivadoGraph = nx.DiGraph()
edgeAttributes = []
for tmpCell in VivadoCells:
nodetype[tmpCell.id] = tmpCell.refType
nodename[tmpCell.id] = tmpCell.name
node2id[tmpCell.id] = tmpCell.id
cellname2id[tmpCell.name] = tmpCell.id
VivadoGraph.add_node(tmpCell.id)
if (not(tmpCell.refType in cellTypes.keys())):
cellTypes[tmpCell.refType] = 0
cellTypes[tmpCell.refType] += 1
driverTypeCnt = dict()
driverCheck = set()
for pin, net, drivepin in zip(tmpCell.pins, tmpCell.netStr, tmpCell.drivepins):
if (not (drivepin is None)):
if (pin.dir and net != ""):
# if (tmpCell.name == "design_1_i/face_detect_0/inst/grp_processImage_fu_371/SI_V_10_49_1_reg_26649_reg"):
# print("pin=>", pin.name, "drivepin.cell=>", drivepin.cell.name, "drivepin=>", drivepin.name)
tmp_netTuple = (drivepin.cell.id, tmpCell.id)
edgeAttributes.append(
(drivepin.cell.id, tmpCell.id, drivepin.cell.refType+"-"+drivepin.refName))
tmp_netTuple_Str = tmp_netTuple
if (not(tmp_netTuple_Str in netset)):
netset.add(tmp_netTuple_Str)
netstr2Tuple[tmp_netTuple_Str] = tmp_netTuple
netstr2netnum[tmp_netTuple_Str] = 0
netstr2netnum[tmp_netTuple_Str] += 1
for key in netstr2netnum.keys():
netlist.append(
(netstr2Tuple[key][0], netstr2Tuple[key][1], netstr2netnum[key]))
VivadoGraph.add_weighted_edges_from(netlist)
for a, b, driverPinType in edgeAttributes:
VivadoGraph[a][b]['driverPinType'] = driverPinType
nx.set_node_attributes(VivadoGraph, name="name", values=nodename)
nx.set_node_attributes(VivadoGraph, name="type", values=nodetype)
nx.set_node_attributes(VivadoGraph, name="id", values=node2id)
print("#nodes:", VivadoGraph.number_of_nodes())
print("#edges:", VivadoGraph.number_of_edges())
return VivadoGraph
def getInitalSingleCorePatterns(VivadoGraph, careTypeList, coreType="CARRY", checkDirection="both", allowOverlap=True, onlyRecordInput=False):
careTypeList = set(careTypeList)
singleCorePattern = None
if (checkDirection == "both"):
inCheck = True
outCheck = True
elif (checkDirection == "in"):
inCheck = True
outCheck = False
elif (checkDirection == "out"):
inCheck = False
outCheck = True
else:
assert(False)
singleCorePattern = dict()
traversedNodes = set()
for nodeId in VivadoGraph.nodes():
curNode = VivadoGraph.nodes()[nodeId]
nodeType = curNode['type']
if (nodeType.find(coreType) >= 0):
if ((nodeId in traversedNodes) and (not allowOverlap)):
continue
nodeInPattern = set()
nodeInPattern.add(nodeId)
traversedNodes.add(nodeId)
predTypeCnt = dict()
if (inCheck):
for pred in VivadoGraph.predecessors(nodeId):
if ((pred in traversedNodes) and (not allowOverlap)):
continue
nodeInPattern.add(pred)
traversedNodes.add(pred)
predNode = VivadoGraph.nodes()[pred]
if (not predNode['type'] in careTypeList):
continue
if (not (predNode['type'] in predTypeCnt.keys())):
predTypeCnt[predNode['type']] = 0
predTypeCnt[predNode['type']
] += VivadoGraph[pred][nodeId]['weight']
predTypeList = []
for key in predTypeCnt.keys():
predTypeList.append(key+"-"+str(predTypeCnt[key]))
predTypeList.sort()
predPatternStr = '-'.join(predTypeList) + ">="
succTypeCnt = dict()
if (outCheck):
for succ in VivadoGraph.successors(nodeId):
if ((succ in traversedNodes) and (not allowOverlap)):
continue
if (not onlyRecordInput):
nodeInPattern.add(succ)
traversedNodes.add(succ)
succNode = VivadoGraph.nodes()[succ]
if (not succNode['type'] in careTypeList):
continue
if (not (succNode['type'] in succTypeCnt.keys())):
succTypeCnt[succNode['type']] = 0
succTypeCnt[succNode['type']
] += VivadoGraph[nodeId][succ]['weight']
succTypeList = []
for key in succTypeCnt.keys():
succTypeList.append(key+"-"+str(succTypeCnt[key]))
succTypeList.sort()
succPatternStr = ">=" + '-'.join(succTypeList)
overallPatternStr = predPatternStr + nodeType + succPatternStr
if (coreType == "MUXF7" and overallPatternStr.find("MUXF8") >= 0):
continue
if (not (overallPatternStr in singleCorePattern.keys())):
singleCorePattern[overallPatternStr] = []
singleCorePattern[overallPatternStr].append(
(curNode, nodeInPattern))
numSingleCorePattern = dict()
for key in singleCorePattern.keys():
numSingleCorePattern[key] = len(singleCorePattern[key])
sortedSingleCorePattern = []
for w in sorted(numSingleCorePattern, key=numSingleCorePattern.get, reverse=True):
# if (len(singleCorePattern[w]) >= 5):
# print(w, len(singleCorePattern[w]))
sortedSingleCorePattern.append((w, singleCorePattern[w]))
return singleCorePattern, sortedSingleCorePattern
def chainBFS(VivadoGraph, nodeId):
    """Collect the chain of same-type cells around *nodeId*.

    Performs a breadth-first expansion from *nodeId* through both fan-in and
    fan-out edges, but only steps onto neighbours whose 'type' attribute
    equals the seed cell's type.  Used to discover carry chains and similar
    cascaded primitive structures.

    Returns the set of node ids in the chain (always contains *nodeId*).
    """
    coreType = VivadoGraph.nodes()[nodeId]['type']
    reachSet = set()
    frontier = set([nodeId])
    while frontier:
        reachSet |= frontier
        nextFrontier = set()
        for current in frontier:
            # Expand through both edge directions; keep same-type cells only.
            neighbors = list(VivadoGraph.successors(current)) + \
                list(VivadoGraph.predecessors(current))
            for neighbor in neighbors:
                if VivadoGraph.nodes()[neighbor]['type'] == coreType \
                        and neighbor not in reachSet:
                    nextFrontier.add(neighbor)
        frontier = nextFrontier
    return reachSet
def clusterNodeChain(VivadoGraph, coreType="CARRY8"):
    """Partition all cells whose type name contains *coreType* into chains.

    A chain is the set of same-type cells transitively connected through
    fan-in/fan-out edges (see chainBFS); each matching cell ends up in
    exactly one chain.

    Returns a list of VivadoCoreCluster, one per chain.
    """
    chainedSet = set()  # cells already claimed by an earlier chain
    chains = []
    for nodeId in VivadoGraph.nodes():
        curNode = VivadoGraph.nodes()[nodeId]
        nodeType = curNode['type']
        # Substring match, so vendor-suffixed type names also qualify.
        if (nodeType.find(coreType) >= 0):
            if (nodeId in chainedSet):
                continue
            coreNodes = chainBFS(VivadoGraph, nodeId)
            chainedSet = chainedSet | coreNodes
            chains.append(VivadoCoreCluster(nodeType, coreNodes, None, None))
    return chains
def clusterNodeWithCommonFanin(VivadoGraph, VivadoCells, targetType="RAM32M16"):
    """Cluster cells of *targetType* that share an identical fan-in pin set.

    Algorithm:
      1. collect every node whose type name contains *targetType*;
      2. weight every node pair by how many fan-in driver pins they share;
      3. in an auxiliary undirected graph, connect a pair only when both
         endpoints' first-seen (i.e. maximal, since edges are visited
         heaviest-first) common-fanin weights agree;
      4. walk the edges heaviest-first and greedily grow a cluster from each
         not-yet-clustered endpoint, absorbing neighbours whose pin overlap
         equals the first overlap encountered for that seed.

    Returns a list of VivadoCoreCluster, one per grown cluster.
    """
    clusteredSet = set()
    allNodesInType = []
    for nodeId in VivadoGraph.nodes():
        curNode = VivadoGraph.nodes()[nodeId]
        nodeType = curNode['type']
        if (nodeType.find(targetType) >= 0):
            allNodesInType.append(nodeId)

    graphForCluster = nx.Graph()
    for nodeId in allNodesInType:
        graphForCluster.add_node(nodeId)

    # Pairwise common-fanin weights, sorted heaviest first (stable sort).
    edges = []
    for listId, nodeId in enumerate(allNodesInType):
        for other_nodeId in allNodesInType[listId+1:]:
            driverPins = (VivadoCells[nodeId].drivepins_fromOthers) & (
                VivadoCells[other_nodeId].drivepins_fromOthers)
            edges.append((nodeId, other_nodeId, len(driverPins)))
    edges.sort(key=lambda tup: tup[2], reverse=True)

    # The first (heaviest) edge a node appears in fixes its reference weight;
    # a pair is linked only when both endpoints agree on that weight.
    node2NumCommonFanin = dict()
    for nodeA, nodeB, numCommonFanin in edges:
        if nodeA not in node2NumCommonFanin:
            node2NumCommonFanin[nodeA] = numCommonFanin
        if nodeB not in node2NumCommonFanin:
            node2NumCommonFanin[nodeB] = numCommonFanin
        if (node2NumCommonFanin[nodeA] == node2NumCommonFanin[nodeB]):
            graphForCluster.add_edge(nodeA, nodeB)

    clusterSet_list = []

    def _growClusterFrom(seed):
        # Greedily absorb unclustered neighbours whose pin overlap with the
        # seed equals the first overlap encountered while growing.
        overlapDriverPin = None
        newCluster = set()
        clusteredSet.add(seed)
        newCluster.add(seed)
        for nodeId in graphForCluster.neighbors(seed):
            if nodeId not in clusteredSet:
                driverPins = (VivadoCells[nodeId].drivepins_fromOthers) & (
                    VivadoCells[seed].drivepins_fromOthers)
                if (overlapDriverPin is None):
                    overlapDriverPin = driverPins
                if (overlapDriverPin == driverPins):
                    clusteredSet.add(nodeId)
                    newCluster.add(nodeId)
        clusterSet_list.append(newCluster)

    # Heaviest edges seed clusters first; an edge's second endpoint is only
    # considered when the first one was already clustered (matches the
    # original continue-based control flow).
    for nodeA, nodeB, _weight in edges:
        if nodeA not in clusteredSet:
            _growClusterFrom(nodeA)
        elif nodeB not in clusteredSet:
            _growClusterFrom(nodeB)

    clusters = []
    for comp in clusterSet_list:
        clusters.append(VivadoCoreCluster(targetType, comp, None, None))
    return clusters
def clusterExtendPatterns(VivadoGraph, chains, largeCluserIntoPattern=False, allowOverlap=False, largeCluserThredhold=2):
    """Extend core chains with their immediate fan-in/fan-out and group the
    resulting clusters by a textual pattern signature.

    Parameters
    ----------
    VivadoGraph : directed graph whose nodes carry 'type' and whose edges
        carry 'weight' (networkx-style API).
    chains : list of VivadoCoreCluster produced by the cluster* functions.
    largeCluserIntoPattern : when True, a chain longer than
        largeCluserThredhold gets a unique "largeParallel(i)" suffix and each
        of its core nodes becomes its own single-node cluster (no fan-in/out
        extension).
    allowOverlap : when False, nodes already claimed by an earlier pattern
        are skipped during extension.
    largeCluserThredhold : chain size above which a chain counts as "large".
        (Parameter names keep the original misspelling to preserve the call
        interface.)

    Returns (patternStr2Chains, sortedPatternStr2Chains): a dict mapping
    pattern string -> list of clusters, and the same data as a list sorted by
    descending cluster count.
    """
    patternStr2Chains = dict()
    traversedNodes = set()
    largeCnt = 0
    for chain in chains:
        # Base signature: core type plus chain length, e.g. "CARRY8-4".
        corePatternStr = chain.coreType + "-" + str(len(chain.coreNodes))
        if (largeCluserIntoPattern and len(chain.coreNodes) > largeCluserThredhold):
            # Large chain: unique suffix so each large chain is its own
            # pattern; emit one single-node cluster per core node.
            corePatternStr += "largeParallel("+str(largeCnt)+")"
            largeCnt += 1
            for nodeId in chain.coreNodes:
                if ((nodeId in traversedNodes) and (not allowOverlap)):
                    continue
                traversedNodes.add(nodeId)
                nodeInPattern = set([nodeId])
                newCluster = VivadoCoreCluster(
                    chain.coreType, set(), None, None)
                # (A commented-out variant that also folded the node's
                # immediate fan-in/fan-out into the pattern string was removed
                # here; the else-branch below keeps that logic for small
                # chains.)
                overallPatternStr = ">=" + corePatternStr + ">="
                newCluster.patternStr = overallPatternStr
                newCluster.nodeInCluster = nodeInPattern
                if (not (overallPatternStr in patternStr2Chains.keys())):
                    patternStr2Chains[overallPatternStr] = []
                patternStr2Chains[overallPatternStr].append(newCluster)
        else:
            # Small chain: extend with immediate predecessors/successors and
            # encode their type histogram into the pattern string.
            # NOTE(review): the core nodes themselves are never added to
            # traversedNodes in this branch — confirm chains are disjoint so
            # this cannot double-count.
            nodeInPattern = chain.coreNodes.copy() - traversedNodes
            predTypeCnt = dict()
            for nodeId in chain.coreNodes - traversedNodes:
                for pred in VivadoGraph.predecessors(nodeId):
                    if ((pred in traversedNodes) and (not allowOverlap)):
                        continue
                    traversedNodes.add(pred)
                    nodeInPattern.add(pred)
                    predNode = VivadoGraph.nodes()[pred]
                    if (not (predNode['type'] in predTypeCnt.keys())):
                        predTypeCnt[predNode['type']] = 0
                    predTypeCnt[predNode['type']
                                ] += VivadoGraph[pred][nodeId]['weight']
            # Sorted "type-count" tokens make the signature order-independent.
            predTypeList = []
            for key in predTypeCnt.keys():
                predTypeList.append(key+"-"+str(predTypeCnt[key]))
            predTypeList.sort()
            predPatternStr = '-'.join(predTypeList) + ">="
            succTypeCnt = dict()
            for nodeId in chain.coreNodes - traversedNodes:
                for succ in VivadoGraph.successors(nodeId):
                    if ((succ in traversedNodes) and (not allowOverlap)):
                        continue
                    traversedNodes.add(succ)
                    nodeInPattern.add(succ)
                    succNode = VivadoGraph.nodes()[succ]
                    if (not (succNode['type'] in succTypeCnt.keys())):
                        succTypeCnt[succNode['type']] = 0
                    succTypeCnt[succNode['type']
                                ] += VivadoGraph[nodeId][succ]['weight']
            succTypeList = []
            for key in succTypeCnt.keys():
                succTypeList.append(key+"-"+str(succTypeCnt[key]))
            succTypeList.sort()
            succPatternStr = ">=" + '-'.join(succTypeList)
            # Final signature: "<fanin histogram>=<core>=<fanout histogram>".
            overallPatternStr = predPatternStr + corePatternStr + succPatternStr
            chain.patternStr = overallPatternStr
            chain.nodeInCluster = nodeInPattern
            if (not (overallPatternStr in patternStr2Chains.keys())):
                patternStr2Chains[overallPatternStr] = []
            patternStr2Chains[overallPatternStr].append(chain)
    # Second return value: (patternStr, clusters) pairs sorted by how many
    # clusters share each pattern, most common first.
    numPatternStr2Chains = dict()
    for key in patternStr2Chains.keys():
        numPatternStr2Chains[key] = len(patternStr2Chains[key])
    sortedPatternStr2Chains = []
    for w in sorted(numPatternStr2Chains, key=numPatternStr2Chains.get, reverse=True):
        # if (len(patternStr2Chains[w]) >= 2):
        #     print(w, len(patternStr2Chains[w]))
        sortedPatternStr2Chains.append((w, patternStr2Chains[w]))
    return patternStr2Chains, sortedPatternStr2Chains
def printOutSimplePatterns(VivadoGraph, singleCorePattern):
    """Print a report of single-core patterns: per-pattern occurrence count,
    number of weakly-connected components, overlapping nodes between
    occurrences, and Vivado highlight commands for the member cells.

    singleCorePattern maps a pattern string to a list of
    (anchorNode, neighborNodeIdSet) tuples.  Patterns with a single
    occurrence are skipped.
    """
    numSingleCorePattern = dict()
    for key in singleCorePattern.keys():
        numSingleCorePattern[key] = len(singleCorePattern[key])
    # Most frequent patterns first.
    for w in sorted(numSingleCorePattern, key=numSingleCorePattern.get, reverse=True):
        nodeInPattern = set()
        overlappedSet = set()
        for curnode, neighbornodes in singleCorePattern[w]:
            nodeInPattern = nodeInPattern | neighbornodes
        # Pairwise overlap between occurrences; the break realizes "Y < X",
        # so each unordered pair is compared exactly once.
        for curNodeIdX, (curnodeX, neighbornodesX) in enumerate(singleCorePattern[w]):
            for curNodeIdY, (curnodeY, neighbornodesY) in enumerate(singleCorePattern[w]):
                if (curNodeIdX <= curNodeIdY):
                    break
                overlappedSet = overlappedSet | (
                    neighbornodesX & neighbornodesY)
        # Number of weakly-connected components the pattern's nodes span.
        cnt = 0
        for comp in nx.algorithms.weakly_connected_components(VivadoGraph.subgraph(nodeInPattern)):
            cnt += 1
        if (numSingleCorePattern[w] <= 1):
            continue
        print("pattern: ", w, ":", numSingleCorePattern[w], ":", cnt)
        print("    overlap nodes:")
        for nodeId in overlappedSet:
            print("        nodeName:", VivadoGraph.nodes()[nodeId]['name'], "type", VivadoGraph.nodes()[
                nodeId]['type'], "degree", VivadoGraph.degree(nodeId))
        print("    anchor nodes:")
        for curnode, neighbornodes in singleCorePattern[w]:
            print("        nodeName:",
                  curnode['name'], " id:", curnode['id'], "type: ", curnode['type'])
            for nNode in neighbornodes:
                print("            NeighborNodeName:", VivadoGraph.nodes()[
                    nNode]['name'], " id:", VivadoGraph.nodes()[nNode]['id'], "type: ", VivadoGraph.nodes()[nNode]['type'])
            # Emit Vivado Tcl commands so the cluster can be highlighted in
            # the GUI.
            for nodeId in neighbornodes:
                print(
                    "    highlight_objects -color red  [get_cells ", VivadoGraph.nodes()[nodeId]['name'], "]")
def printOutChainPatterns(VivadoGraph, patternStr2Chains):
    """Print a report of chain patterns: occurrence count, connected-component
    count, overlapping nodes, per-chain core nodes, and Vivado highlight
    commands.  Patterns with a single chain are skipped.
    """
    print("======================================\nprinting out chains' patterns")
    numpatternStr2Chains = dict()
    for key in patternStr2Chains.keys():
        numpatternStr2Chains[key] = len(patternStr2Chains[key])
    # Most frequent patterns first.
    for w in sorted(numpatternStr2Chains, key=numpatternStr2Chains.get, reverse=True):
        nodeInPattern = set()
        overlappedSet = set()
        for chain in patternStr2Chains[w]:
            nodeInPattern = nodeInPattern | chain.nodeInCluster
        # Pairwise overlap; the break realizes "Y < X", so each unordered
        # pair of chains is compared exactly once.
        for curChainIdX, curChainX in enumerate(patternStr2Chains[w]):
            for curChainIdY, curChainY in enumerate(patternStr2Chains[w]):
                if (curChainIdX <= curChainIdY):
                    break
                overlappedSet = overlappedSet | (
                    curChainX.nodeInCluster & curChainY.nodeInCluster)
        cnt = 0
        for comp in nx.algorithms.weakly_connected_components(VivadoGraph.subgraph(nodeInPattern)):
            cnt += 1
        if (numpatternStr2Chains[w] <= 1):
            continue
        print("pattern: ", w, ":", numpatternStr2Chains[w], ":", cnt)
        # Highlight only the first connected component (note the break).
        for comp in nx.algorithms.weakly_connected_components(VivadoGraph.subgraph(nodeInPattern)):
            for nodeId in comp:
                print(
                    "    highlight_objects -color red  [get_cells ", VivadoGraph.nodes()[nodeId]['name'], "]")
            break
        print("    overlap nodes:")
        for nodeId in overlappedSet:
            print("        nodeName:", VivadoGraph.nodes()[nodeId]['name'], "type", VivadoGraph.nodes()[
                nodeId]['type'], "degree", VivadoGraph.degree(nodeId))
        print("    anchor chains:")
        for chain in patternStr2Chains[w]:
            print("        coreNodes:", chain.coreNodes)
            for nodeId in chain.coreNodes:
                print("            ", VivadoGraph.nodes()
                      [nodeId]['name'], " id:", nodeId)
            for nodeId in chain.coreNodes:
                print(
                    "    highlight_objects -color red  [get_cells ", VivadoGraph.nodes()[nodeId]['name'], "]")
def instantiatePatternClusters(VivadoGraph, sortedSingleCorePattern, lastClusterId):
    """Turn pattern groupings into VivadoPatternCluster/Seq objects and color
    the graph nodes with their cluster id.

    Handles both input shapes: lists of VivadoCoreCluster (chain patterns)
    and lists of (anchorNode, neighborNodeIdSet) tuples (simple patterns).
    Each cluster gets a fresh id starting at *lastClusterId*; every member
    node's 'clusterColorId' attribute is set to the first cluster that claims
    it (attribute stays untouched if already >= 0).

    Returns (list of VivadoPatternClusterSeq, next free cluster id).
    """
    res = []
    # Initialize every node as "uncolored" (-1).
    clusterColorIdInitial = dict()
    for node in VivadoGraph.nodes():
        clusterColorIdInitial[node] = -1
    nx.set_node_attributes(
        G=VivadoGraph, values=clusterColorIdInitial, name="clusterColorId")
    for w, seqs in sortedSingleCorePattern:
        patternClusters = []
        if (len(seqs) == 0):
            assert(False)
        if (isinstance(seqs[0], VivadoCoreCluster)):
            # Chain-pattern shape: core nodes are the extended set, the rest
            # of the cluster is the unextended fringe.
            for chain in seqs:
                unextendedNodes = set(chain.nodeInCluster)-set(chain.coreNodes)
                extendedNodes = set(chain.coreNodes)
                # Degenerate cluster (no fringe): reuse the core set.
                if (len(unextendedNodes) == 0):
                    unextendedNodes = extendedNodes
                patternClusters.append(VivadoPatternCluster(initPatternStr=chain.patternStr,
                                                            unextendedNodes=unextendedNodes, extendedNodes=extendedNodes, clusterId=lastClusterId))
                for nodeInSet in patternClusters[-1].extendedNodes | patternClusters[-1].unextendedNodes:
                    if (VivadoGraph.nodes()[nodeInSet]['clusterColorId'] < 0):
                        VivadoGraph.nodes()[
                            nodeInSet]['clusterColorId'] = lastClusterId
                lastClusterId += 1
        else:
            # Simple-pattern shape: the anchor node is the extended set, its
            # neighbours are the unextended fringe.
            for curNode, neighborNodes in seqs:
                unextendedNodes = set(neighborNodes)-set([curNode['id']])
                extendedNodes = set([curNode['id']])
                if (len(unextendedNodes) == 0):
                    unextendedNodes = extendedNodes
                patternClusters.append(VivadoPatternCluster(
                    initPatternStr=w, unextendedNodes=unextendedNodes, extendedNodes=extendedNodes, clusterId=lastClusterId))
                for nodeInSet in patternClusters[-1].extendedNodes | patternClusters[-1].unextendedNodes:
                    if (VivadoGraph.nodes()[nodeInSet]['clusterColorId'] < 0):
                        VivadoGraph.nodes()[
                            nodeInSet]['clusterColorId'] = lastClusterId
                lastClusterId += 1
        res.append(VivadoPatternClusterSeq(
            initPatternStr=w, patternClusters=patternClusters))
    return res, lastClusterId
def accumulateClusterForce(Patterns):
    """Placeholder for cluster-force accumulation; currently a no-op stub."""
    pass
def printOutPatterns(patterns):
    """Print a one-line summary per pattern sequence and return the total
    number of pattern clusters across all sequences.

    Each line shows the (quote/backslash-stripped) extension trace, the
    number of clusters in the sequence, and the node count of the first
    cluster.
    """
    print("printOutPatterns:")
    total = 0
    for seq in patterns:
        clusters = seq.patternClusters
        trace = seq.patternExtensionTrace.replace("\'", "").replace("\\", "")
        firstClusterSize = len(
            clusters[0].extendedNodes | clusters[0].unextendedNodes)
        print(trace, len(clusters), firstClusterSize)
        total += len(clusters)
    print("Total Clusters:", total)
    return total
def BFSCheckEdgeAttributes(VivadoGraph, VivadoCells, curCell):
    """Walk the fan-in cone of *curCell* (depth-limited to 5) and count the
    bus-style driver-pin types encountered on the way.

    Cells with 32 or more predecessors (wide nets, presumably clocks/resets —
    TODO confirm) are not expanded.  An incoming edge whose 'driverPinType'
    contains "[" (an indexed/bus pin) is tallied instead of traversed; other
    predecessors are queued for further exploration.

    Note: the worklist is popped from the end, so despite the name the
    traversal order is LIFO (depth-first); only the depth cut-off is affected
    by this, the resulting counts are order-independent.

    *VivadoCells* is unused; kept for call-site compatibility.

    Returns up to the four most frequent (driverPinType, count) pairs, most
    frequent first.
    """
    edgeAttributeCnt = dict()
    cellQ = [curCell]
    depQ = [0]
    seenSet = set([curCell])  # cells already queued for expansion
    while (len(cellQ) > 0):
        curCell = cellQ.pop()
        curDepth = depQ.pop()
        if (curDepth >= 5):
            continue
        # Skip very wide fan-ins entirely.
        if (len(set(VivadoGraph.predecessors(curCell))) < 32):
            for pred in VivadoGraph.predecessors(curCell):
                if (not (pred in seenSet)):
                    pinType = VivadoGraph[pred][curCell]['driverPinType']
                    if (pinType.find("[") >= 0):
                        # Bus pin: count the edge, do not traverse upstream.
                        edgeAttributeCnt[pinType] = \
                            edgeAttributeCnt.get(pinType, 0) + 1
                    else:
                        seenSet.add(pred)
                        depQ.append(curDepth+1)
                        cellQ.append(pred)
    # (A symmetric, fully commented-out fan-out walk existed here and was
    # removed; restore from history if successor counting is ever needed.)
    sortedEdgeAttributeCnt = []
    for w in sorted(edgeAttributeCnt, key=edgeAttributeCnt.get, reverse=True):
        sortedEdgeAttributeCnt.append((w, edgeAttributeCnt[w]))
    if (len(sortedEdgeAttributeCnt) > 4):
        sortedEdgeAttributeCnt = sortedEdgeAttributeCnt[:4]
    return sortedEdgeAttributeCnt
def loadClusters(name2id, clusterFileName):
    """Load cell clusters from a text file, one whitespace-separated list of
    cell names per line.

    Names missing from *name2id* are ignored.  Every line produces a cluster
    index (even an empty one), so cluster numbering matches line numbering.

    Fixes over the previous version: the file handle is now closed (with
    statement), and names are split on any whitespace — split(" ") used to
    leave the trailing "\\n" glued to the last name of each line, so that
    name could never match a name2id key.

    Returns (clusters, id2cluster): a list of cell-id sets, and a dict
    mapping cell id -> cluster index (last line wins on duplicates).
    """
    clusters = []
    id2cluster = dict()
    clusterCnt = 0
    with open(clusterFileName, "r") as clusterFile:
        for line in clusterFile:
            # split() drops empty tokens and the trailing newline.
            clusterCellNames = line.split()
            ids = set()
            for name in clusterCellNames:
                if name in name2id:
                    ids.add(name2id[name])
                    id2cluster[name2id[name]] = clusterCnt
            clusterCnt += 1
            clusters.append(ids)
    return clusters, id2cluster
def loadClocks(clockFileName):
    """Load clock net names from a text file, one per line.

    Strips the newline and removes every "/O" occurrence (the driver output
    pin suffix).  Fix over the previous version: the file handle is now
    closed via a with statement.
    """
    clockNames = []
    with open(clockFileName, "r") as clockFile:
        for line in clockFile:
            clockNames.append(line.replace("\n", "").replace("/O", ""))
    return clockNames
def loadFixedBlocks(fixedUnitFileName):
    """Load fixed-placement unit names from a text file.

    Each line is expected to hold at least two space-separated tokens; the
    second token is the unit name (a malformed line raises IndexError, as
    before).  Fix over the previous version: the file handle is now closed
    via a with statement.
    """
    fixedUnitNames = []
    with open(fixedUnitFileName, "r") as fixedUnitFile:
        for line in fixedUnitFile:
            fixedUnitNames.append(line.replace("\n", "").split(" ")[1])
    return fixedUnitNames
| true | true |
f7358caf50482925da5a23ef389374bc85f3fa4f | 623 | py | Python | streetteam/apps/twilio_integration/tests/factories.py | alysivji/street-team | fe891d738b449956d56fe5e53535b98fa04d9a3a | [
"MIT"
] | 2 | 2020-01-22T17:49:10.000Z | 2021-06-18T19:35:23.000Z | streetteam/apps/twilio_integration/tests/factories.py | alysivji/street-team | fe891d738b449956d56fe5e53535b98fa04d9a3a | [
"MIT"
] | 41 | 2019-11-08T18:28:16.000Z | 2022-03-12T00:28:51.000Z | streetteam/apps/twilio_integration/tests/factories.py | alysivji/street-team | fe891d738b449956d56fe5e53535b98fa04d9a3a | [
"MIT"
] | null | null | null | from faker_e164.providers import E164Provider
import factory
from apps.twilio_integration.models import PhoneNumber, ReceivedMessage
from apps.users.tests.factories import UserFactory
# Register the E164 provider on the Faker instance that factory.Faker uses,
# so declarations like factory.Faker("safe_e164") resolve below.
faker = factory.Faker._get_faker()
faker.add_provider(E164Provider)
class PhoneNumberFactory(factory.DjangoModelFactory):
    """Factory for PhoneNumber rows with a fake US number and a fresh user."""
    class Meta:
        model = PhoneNumber
    # "safe_e164" is provided by faker_e164's E164Provider (registered above).
    number = factory.Faker("safe_e164", region_code="US")
    user = factory.SubFactory(UserFactory)
class ReceivedMessageFactory(factory.DjangoModelFactory):
    """Factory for ReceivedMessage rows tied to a freshly created phone number."""
    class Meta:
        model = ReceivedMessage
    phone_number = factory.SubFactory(PhoneNumberFactory)
| 25.958333 | 71 | 0.788122 | from faker_e164.providers import E164Provider
import factory
from apps.twilio_integration.models import PhoneNumber, ReceivedMessage
from apps.users.tests.factories import UserFactory
faker = factory.Faker._get_faker()
faker.add_provider(E164Provider)
class PhoneNumberFactory(factory.DjangoModelFactory):
class Meta:
model = PhoneNumber
number = factory.Faker("safe_e164", region_code="US")
user = factory.SubFactory(UserFactory)
class ReceivedMessageFactory(factory.DjangoModelFactory):
class Meta:
model = ReceivedMessage
phone_number = factory.SubFactory(PhoneNumberFactory)
| true | true |
f7358de2b3a85d446fdedfc665ba390ed788f26b | 9,066 | py | Python | themes/qutebrowser/themes/minimal/base16-black-metal-burzum.config.py | base16-fork/base16-fork | 79856b7e6195dde0874a9e6d191101ac6c5c74f5 | [
"MIT"
] | 95 | 2018-05-28T18:06:48.000Z | 2022-03-14T21:36:05.000Z | themes/qutebrowser/themes/minimal/base16-black-metal-burzum.config.py | base16-fork/base16-fork | 79856b7e6195dde0874a9e6d191101ac6c5c74f5 | [
"MIT"
] | 18 | 2018-08-26T00:57:20.000Z | 2022-02-19T08:29:29.000Z | themes/qutebrowser/themes/minimal/base16-black-metal-burzum.config.py | base16-fork/base16-fork | 79856b7e6195dde0874a9e6d191101ac6c5c74f5 | [
"MIT"
] | 20 | 2018-06-21T12:41:47.000Z | 2022-03-04T22:06:20.000Z | # base16-qutebrowser (https://github.com/theova/base16-qutebrowser)
# Base16 qutebrowser template by theova and Daniel Mulford
# Black Metal (Burzum) scheme by metalelf0 (https://github.com/metalelf0)
base00 = "#000000"
base01 = "#121212"
base02 = "#222222"
base03 = "#333333"
base04 = "#999999"
base05 = "#c1c1c1"
base06 = "#999999"
base07 = "#c1c1c1"
base08 = "#5f8787"
base09 = "#aaaaaa"
base0A = "#99bbaa"
base0B = "#ddeecc"
base0C = "#aaaaaa"
base0D = "#888888"
base0E = "#999999"
base0F = "#444444"
# set qutebrowser colors
# Text color of the completion widget. May be a single color to use for
# all columns or a list of three colors, one for each column.
c.colors.completion.fg = base05
# Background color of the completion widget for odd rows.
c.colors.completion.odd.bg = base00
# Background color of the completion widget for even rows.
c.colors.completion.even.bg = base00
# Foreground color of completion widget category headers.
c.colors.completion.category.fg = base0D
# Background color of the completion widget category headers.
c.colors.completion.category.bg = base00
# Top border color of the completion widget category headers.
c.colors.completion.category.border.top = base00
# Bottom border color of the completion widget category headers.
c.colors.completion.category.border.bottom = base00
# Foreground color of the selected completion item.
c.colors.completion.item.selected.fg = base05
# Background color of the selected completion item.
c.colors.completion.item.selected.bg = base02
# Top border color of the selected completion item.
c.colors.completion.item.selected.border.top = base02
# Bottom border color of the selected completion item.
c.colors.completion.item.selected.border.bottom = base02
# Foreground color of the matched text in the selected completion item.
c.colors.completion.item.selected.match.fg = base05
# Foreground color of the matched text in the completion.
c.colors.completion.match.fg = base09
# Color of the scrollbar handle in the completion view.
c.colors.completion.scrollbar.fg = base05
# Color of the scrollbar in the completion view.
c.colors.completion.scrollbar.bg = base00
# Background color of disabled items in the context menu.
c.colors.contextmenu.disabled.bg = base01
# Foreground color of disabled items in the context menu.
c.colors.contextmenu.disabled.fg = base04
# Background color of the context menu. If set to null, the Qt default is used.
c.colors.contextmenu.menu.bg = base00
# Foreground color of the context menu. If set to null, the Qt default is used.
c.colors.contextmenu.menu.fg = base05
# Background color of the context menu’s selected item. If set to null, the Qt default is used.
c.colors.contextmenu.selected.bg = base02
#Foreground color of the context menu’s selected item. If set to null, the Qt default is used.
c.colors.contextmenu.selected.fg = base05
# Background color for the download bar.
c.colors.downloads.bar.bg = base00
# Color gradient start for download text.
c.colors.downloads.start.fg = base00
# Color gradient start for download backgrounds.
c.colors.downloads.start.bg = base0D
# Color gradient end for download text.
c.colors.downloads.stop.fg = base00
# Color gradient stop for download backgrounds.
c.colors.downloads.stop.bg = base0C
# Foreground color for downloads with errors.
c.colors.downloads.error.fg = base08
# Font color for hints.
c.colors.hints.fg = base00
# Background color for hints. Note that you can use a `rgba(...)` value
# for transparency.
c.colors.hints.bg = base0A
# Font color for the matched part of hints.
c.colors.hints.match.fg = base05
# Text color for the keyhint widget.
c.colors.keyhint.fg = base05
# Highlight color for keys to complete the current keychain.
c.colors.keyhint.suffix.fg = base05
# Background color of the keyhint widget.
c.colors.keyhint.bg = base00
# Foreground color of an error message.
c.colors.messages.error.fg = base00
# Background color of an error message.
c.colors.messages.error.bg = base08
# Border color of an error message.
c.colors.messages.error.border = base08
# Foreground color of a warning message.
c.colors.messages.warning.fg = base00
# Background color of a warning message.
c.colors.messages.warning.bg = base0E
# Border color of a warning message.
c.colors.messages.warning.border = base0E
# Foreground color of an info message.
c.colors.messages.info.fg = base05
# Background color of an info message.
c.colors.messages.info.bg = base00
# Border color of an info message.
c.colors.messages.info.border = base00
# Foreground color for prompts.
c.colors.prompts.fg = base05
# Border used around UI elements in prompts.
c.colors.prompts.border = base00
# Background color for prompts.
c.colors.prompts.bg = base00
# Background color for the selected item in filename prompts.
c.colors.prompts.selected.bg = base02
# Foreground color for the selected item in filename prompts.
c.colors.prompts.selected.fg = base05
# Foreground color of the statusbar.
c.colors.statusbar.normal.fg = base05
# Background color of the statusbar.
c.colors.statusbar.normal.bg = base00
# Foreground color of the statusbar in insert mode.
c.colors.statusbar.insert.fg = base0C
# Background color of the statusbar in insert mode.
c.colors.statusbar.insert.bg = base00
# Foreground color of the statusbar in passthrough mode.
c.colors.statusbar.passthrough.fg = base0A
# Background color of the statusbar in passthrough mode.
c.colors.statusbar.passthrough.bg = base00
# Foreground color of the statusbar in private browsing mode.
c.colors.statusbar.private.fg = base0E
# Background color of the statusbar in private browsing mode.
c.colors.statusbar.private.bg = base00
# Foreground color of the statusbar in command mode.
c.colors.statusbar.command.fg = base04
# Background color of the statusbar in command mode.
c.colors.statusbar.command.bg = base01
# Foreground color of the statusbar in private browsing + command mode.
c.colors.statusbar.command.private.fg = base0E
# Background color of the statusbar in private browsing + command mode.
c.colors.statusbar.command.private.bg = base01
# Foreground color of the statusbar in caret mode.
c.colors.statusbar.caret.fg = base0D
# Background color of the statusbar in caret mode.
c.colors.statusbar.caret.bg = base00
# Foreground color of the statusbar in caret mode with a selection.
c.colors.statusbar.caret.selection.fg = base0D
# Background color of the statusbar in caret mode with a selection.
c.colors.statusbar.caret.selection.bg = base00
# Background color of the progress bar.
c.colors.statusbar.progress.bg = base0D
# Default foreground color of the URL in the statusbar.
c.colors.statusbar.url.fg = base05
# Foreground color of the URL in the statusbar on error.
c.colors.statusbar.url.error.fg = base08
# Foreground color of the URL in the statusbar for hovered links.
c.colors.statusbar.url.hover.fg = base09
# Foreground color of the URL in the statusbar on successful load
# (http).
c.colors.statusbar.url.success.http.fg = base0B
# Foreground color of the URL in the statusbar on successful load
# (https).
c.colors.statusbar.url.success.https.fg = base0B
# Foreground color of the URL in the statusbar when there's a warning.
c.colors.statusbar.url.warn.fg = base0E
# Background color of the tab bar.
c.colors.tabs.bar.bg = base00
# Color gradient start for the tab indicator.
c.colors.tabs.indicator.start = base0D
# Color gradient end for the tab indicator.
c.colors.tabs.indicator.stop = base0C
# Color for the tab indicator on errors.
c.colors.tabs.indicator.error = base08
# Foreground color of unselected odd tabs.
c.colors.tabs.odd.fg = base05
# Background color of unselected odd tabs.
c.colors.tabs.odd.bg = base00
# Foreground color of unselected even tabs.
c.colors.tabs.even.fg = base05
# Background color of unselected even tabs.
c.colors.tabs.even.bg = base00
# Background color of pinned unselected even tabs.
c.colors.tabs.pinned.even.bg = base0B
# Foreground color of pinned unselected even tabs.
c.colors.tabs.pinned.even.fg = base00
# Background color of pinned unselected odd tabs.
c.colors.tabs.pinned.odd.bg = base0B
# Foreground color of pinned unselected odd tabs.
c.colors.tabs.pinned.odd.fg = base00
# Background color of pinned selected even tabs.
c.colors.tabs.pinned.selected.even.bg = base02
# Foreground color of pinned selected even tabs.
c.colors.tabs.pinned.selected.even.fg = base05
# Background color of pinned selected odd tabs.
c.colors.tabs.pinned.selected.odd.bg = base02
# Foreground color of pinned selected odd tabs.
c.colors.tabs.pinned.selected.odd.fg = base05
# Foreground color of selected odd tabs.
c.colors.tabs.selected.odd.fg = base05
# Background color of selected odd tabs.
c.colors.tabs.selected.odd.bg = base02
# Foreground color of selected even tabs.
c.colors.tabs.selected.even.fg = base05
# Background color of selected even tabs.
c.colors.tabs.selected.even.bg = base02
# Background color for webpages if unset (or empty to use the theme's
# color).
c.colors.webpage.bg = base00
| 30.119601 | 95 | 0.771785 |
base00 = "#000000"
base01 = "#121212"
base02 = "#222222"
base03 = "#333333"
base04 = "#999999"
base05 = "#c1c1c1"
base06 = "#999999"
base07 = "#c1c1c1"
base08 = "#5f8787"
base09 = "#aaaaaa"
base0A = "#99bbaa"
base0B = "#ddeecc"
base0C = "#aaaaaa"
base0D = "#888888"
base0E = "#999999"
base0F = "#444444"
c.colors.completion.fg = base05
c.colors.completion.odd.bg = base00
c.colors.completion.even.bg = base00
c.colors.completion.category.fg = base0D
c.colors.completion.category.bg = base00
c.colors.completion.category.border.top = base00
c.colors.completion.category.border.bottom = base00
c.colors.completion.item.selected.fg = base05
c.colors.completion.item.selected.bg = base02
c.colors.completion.item.selected.border.top = base02
c.colors.completion.item.selected.border.bottom = base02
c.colors.completion.item.selected.match.fg = base05
c.colors.completion.match.fg = base09
c.colors.completion.scrollbar.fg = base05
c.colors.completion.scrollbar.bg = base00
c.colors.contextmenu.disabled.bg = base01
c.colors.contextmenu.disabled.fg = base04
c.colors.contextmenu.menu.bg = base00
c.colors.contextmenu.menu.fg = base05
c.colors.contextmenu.selected.bg = base02
c.colors.contextmenu.selected.fg = base05
c.colors.downloads.bar.bg = base00
c.colors.downloads.start.fg = base00
c.colors.downloads.start.bg = base0D
c.colors.downloads.stop.fg = base00
c.colors.downloads.stop.bg = base0C
c.colors.downloads.error.fg = base08
c.colors.hints.fg = base00
c.colors.hints.bg = base0A
c.colors.hints.match.fg = base05
c.colors.keyhint.fg = base05
c.colors.keyhint.suffix.fg = base05
c.colors.keyhint.bg = base00
c.colors.messages.error.fg = base00
c.colors.messages.error.bg = base08
c.colors.messages.error.border = base08
c.colors.messages.warning.fg = base00
c.colors.messages.warning.bg = base0E
c.colors.messages.warning.border = base0E
c.colors.messages.info.fg = base05
c.colors.messages.info.bg = base00
c.colors.messages.info.border = base00
c.colors.prompts.fg = base05
c.colors.prompts.border = base00
c.colors.prompts.bg = base00
c.colors.prompts.selected.bg = base02
c.colors.prompts.selected.fg = base05
c.colors.statusbar.normal.fg = base05
c.colors.statusbar.normal.bg = base00
c.colors.statusbar.insert.fg = base0C
c.colors.statusbar.insert.bg = base00
c.colors.statusbar.passthrough.fg = base0A
c.colors.statusbar.passthrough.bg = base00
c.colors.statusbar.private.fg = base0E
c.colors.statusbar.private.bg = base00
c.colors.statusbar.command.fg = base04
c.colors.statusbar.command.bg = base01
c.colors.statusbar.command.private.fg = base0E
c.colors.statusbar.command.private.bg = base01
c.colors.statusbar.caret.fg = base0D
c.colors.statusbar.caret.bg = base00
c.colors.statusbar.caret.selection.fg = base0D
c.colors.statusbar.caret.selection.bg = base00
c.colors.statusbar.progress.bg = base0D
c.colors.statusbar.url.fg = base05
c.colors.statusbar.url.error.fg = base08
c.colors.statusbar.url.hover.fg = base09
c.colors.statusbar.url.success.http.fg = base0B
c.colors.statusbar.url.success.https.fg = base0B
c.colors.statusbar.url.warn.fg = base0E
# Background color of the tab bar.
c.colors.tabs.bar.bg = base00
# Color gradient start for the tab indicator.
c.colors.tabs.indicator.start = base0D
# Color gradient end for the tab indicator.
c.colors.tabs.indicator.stop = base0C
# Color for the tab indicator on errors.
c.colors.tabs.indicator.error = base08
# Foreground color of unselected odd tabs.
c.colors.tabs.odd.fg = base05
# Background color of unselected odd tabs.
c.colors.tabs.odd.bg = base00
# Foreground color of unselected even tabs.
c.colors.tabs.even.fg = base05
# Background color of unselected even tabs.
c.colors.tabs.even.bg = base00
# Background color of pinned unselected even tabs.
c.colors.tabs.pinned.even.bg = base0B
# Foreground color of pinned unselected even tabs.
c.colors.tabs.pinned.even.fg = base00
# Background color of pinned unselected odd tabs.
c.colors.tabs.pinned.odd.bg = base0B
# Foreground color of pinned unselected odd tabs.
c.colors.tabs.pinned.odd.fg = base00
# Background color of pinned selected even tabs.
c.colors.tabs.pinned.selected.even.bg = base02
# Foreground color of pinned selected even tabs.
c.colors.tabs.pinned.selected.even.fg = base05
# Background color of pinned selected odd tabs.
c.colors.tabs.pinned.selected.odd.bg = base02
# Foreground color of pinned selected odd tabs.
c.colors.tabs.pinned.selected.odd.fg = base05
# Foreground color of selected odd tabs.
c.colors.tabs.selected.odd.fg = base05
# Background color of selected odd tabs.
c.colors.tabs.selected.odd.bg = base02
# Foreground color of selected even tabs.
c.colors.tabs.selected.even.fg = base05
# Background color of selected even tabs.
c.colors.tabs.selected.even.bg = base02
# Background color for webpages if unset (or empty to use the theme's
c.colors.webpage.bg = base00
| true | true |
f7358e33875f1666cf2ed6367496e9ffeb2ca861 | 243 | py | Python | bootcamp/__init__.py | jmelloy/bootcamp | 3710324efd4dbc54bb37df13ee589ef8a71c6401 | [
"MIT"
] | null | null | null | bootcamp/__init__.py | jmelloy/bootcamp | 3710324efd4dbc54bb37df13ee589ef8a71c6401 | [
"MIT"
] | null | null | null | bootcamp/__init__.py | jmelloy/bootcamp | 3710324efd4dbc54bb37df13ee589ef8a71c6401 | [
"MIT"
] | null | null | null | __version__ = "2.1.1"
# Parse the dotted version string into a tuple such as (2, 1, 1); numeric
# parts become ints, anything else (e.g. a pre-release tag after "-") stays
# a string.  Only the first "-" is normalized to ".".
__version_info__ = tuple(
    int(part) if part.isdigit() else part
    for part in __version__.replace("-", ".", 1).split(".")
)
import os

# Absolute directory containing this package (used for locating bundled assets).
PHOTOLOGUE_APP_DIR = os.path.dirname(os.path.abspath(__file__))
| 22.090909 | 63 | 0.625514 | __version__ = "2.1.1"
__version_info__ = tuple(
[
int(num) if num.isdigit() else num
for num in __version__.replace("-", ".", 1).split(".")
]
)
import os
PHOTOLOGUE_APP_DIR = os.path.dirname(os.path.abspath(__file__))
| true | true |
f7358e70170d77fd5aaed0097cbc37b6df6184c7 | 2,647 | py | Python | tests/core/test_configuration.py | sirjamesmeddel-gitty/intuition | cd517e6b3b315a743eb4d0d0dc294e264ab913ce | [
"Apache-2.0"
] | 81 | 2015-01-13T15:16:43.000Z | 2021-11-12T20:51:56.000Z | tests/core/test_configuration.py | sirjamesmeddel-gitty/intuition | cd517e6b3b315a743eb4d0d0dc294e264ab913ce | [
"Apache-2.0"
] | 1 | 2015-07-30T06:17:55.000Z | 2015-07-30T08:09:14.000Z | tests/core/test_configuration.py | sirjamesmeddel-gitty/intuition | cd517e6b3b315a743eb4d0d0dc294e264ab913ce | [
"Apache-2.0"
] | 30 | 2015-03-26T11:55:46.000Z | 2021-07-22T22:16:39.000Z | '''
Tests for intuition.core.configuration
'''
import unittest
from nose.tools import raises
import dna.test_utils as test_utils
import pandas as pd
import intuition.core.configuration as configuration
from dna.errors import DynamicImportFailed
from intuition.errors import InvalidConfiguration
class ConfigurationUtilsTestCase(unittest.TestCase):
    """Unit tests for the helpers in intuition.core.configuration."""

    def test_logfile(self):
        """logfile() should point into /tmp/logs or ~/.intuition/logs."""
        path = configuration.logfile('fake_id')
        if 'tmp' not in path:
            self.assertIn('.intuition/logs/fake_id.log', path)
        else:
            self.assertEqual('/tmp/logs/fake_id.log', path)
class ContextLoadTestCase(unittest.TestCase):
    """Exercises configuration.Context against valid and invalid drivers."""

    # Common prefix of every FakeContext driver URI used below.
    _BASE = 'intuition.test_utils.FakeContext://localhost/path'

    def setUp(self):
        test_utils.setup_logger(self)
        self.good_driver = self._BASE + '?valid=true'
        self.bad_driver = \
            'no.file.FileContext://localhost/path?valid=true'
        self.bad_config = self._BASE + '?valid=false'
        self.bad_formatted_config = self._BASE + '?format=false'

    def tearDown(self):
        test_utils.teardown_logger(self)

    def test_load_context(self):
        """A valid driver yields a dict with dict-valued sub-sections."""
        with configuration.Context(self.good_driver) as context:
            self.assertIsInstance(context, dict)
            for section in ('strategy', 'config'):
                self.assertIsInstance(context[section], dict)

    @raises(InvalidConfiguration)
    def test_validate_bad_config(self):
        """An empty configuration must be rejected by _validate()."""
        configuration.Context(self.bad_driver)._validate({})

    def test_validate_good_config(self):
        """A complete configuration passes _validate() silently."""
        ctx = configuration.Context(self.bad_driver)
        good_config = {
            'universe': 'nasdaq,4',
            'index': pd.date_range('2014/2/3', periods=30),
            'modules': {'algorithm': 'dualma'},
        }
        self.assertIsNone(ctx._validate(good_config))

    @raises(InvalidConfiguration)
    def test_load_bad_configuration(self):
        """Entering a context with a badly formatted config must raise."""
        configuration.Context(self.bad_formatted_config).__enter__()

    def test_loaded_configuration(self):
        """The loaded context exposes the expected strategy/config keys."""
        with configuration.Context(self.good_driver) as context:
            for key in ('manager', 'algorithm', 'data'):
                self.assertIn(key, context['strategy'])
            for key in ('index', 'live'):
                self.assertIn(key, context['config'])

    @raises(DynamicImportFailed)
    def test_absent_driver_context_load(self):
        """A driver module that cannot be imported must raise."""
        configuration.Context(self.bad_driver).__enter__()
| 33.506329 | 76 | 0.661504 |
import unittest
from nose.tools import raises
import dna.test_utils as test_utils
import pandas as pd
import intuition.core.configuration as configuration
from dna.errors import DynamicImportFailed
from intuition.errors import InvalidConfiguration
class ConfigurationUtilsTestCase(unittest.TestCase):
def test_logfile(self):
logfile = configuration.logfile('fake_id')
if 'tmp' in logfile:
self.assertEqual('/tmp/logs/fake_id.log', logfile)
else:
self.assertIn('.intuition/logs/fake_id.log', logfile)
class ContextLoadTestCase(unittest.TestCase):
def setUp(self):
test_utils.setup_logger(self)
self.good_driver = \
'intuition.test_utils.FakeContext://localhost/path?valid=true'
self.bad_driver = \
'no.file.FileContext://localhost/path?valid=true'
self.bad_config = \
'intuition.test_utils.FakeContext://localhost/path?valid=false'
self.bad_formatted_config = \
'intuition.test_utils.FakeContext://localhost/path?format=false'
def tearDown(self):
test_utils.teardown_logger(self)
def test_load_context(self):
with configuration.Context(self.good_driver) as context:
self.assertIsInstance(context, dict)
self.assertIsInstance(context['strategy'], dict)
self.assertIsInstance(context['config'], dict)
@raises(InvalidConfiguration)
def test_validate_bad_config(self):
bad_config = {}
ctx = configuration.Context(self.bad_driver)
ctx._validate(bad_config)
def test_validate_good_config(self):
good_config = {
'universe': 'nasdaq,4',
'index': pd.date_range('2014/2/3', periods=30),
'modules': {
'algorithm': 'dualma'
}
}
ctx = configuration.Context(self.bad_driver)
self.assertIsNone(ctx._validate(good_config))
@raises(InvalidConfiguration)
def test_load_bad_configuration(self):
ctx = configuration.Context(self.bad_formatted_config)
ctx.__enter__()
def test_loaded_configuration(self):
with configuration.Context(self.good_driver) as context:
for field in ['manager', 'algorithm', 'data']:
self.assertIn(field, context['strategy'])
for field in ['index', 'live']:
self.assertIn(field, context['config'])
@raises(DynamicImportFailed)
def test_absent_driver_context_load(self):
ctx = configuration.Context(self.bad_driver)
ctx.__enter__()
| true | true |
f7358ed7f0d201bb0afca8493fb1a699d5fdbefa | 1,892 | py | Python | pyheatmap/inc/cf.py | feixuexue731/pyheatmap | 5dcd774113e3fe65c5e891c8c686f991310f4835 | [
"MIT"
] | 90 | 2015-02-25T09:09:45.000Z | 2022-03-22T06:12:41.000Z | pyheatmap/inc/cf.py | feixuexue731/pyheatmap | 5dcd774113e3fe65c5e891c8c686f991310f4835 | [
"MIT"
] | 8 | 2016-01-17T12:30:45.000Z | 2021-02-20T02:19:42.000Z | pyheatmap/inc/cf.py | feixuexue731/pyheatmap | 5dcd774113e3fe65c5e891c8c686f991310f4835 | [
"MIT"
] | 45 | 2015-09-07T03:13:38.000Z | 2021-09-08T09:32:04.000Z | # -*- coding: utf-8 -*-
#
# author: oldj
# blog: http://oldj.net
# email: oldj.wu@gmail.com
#
def get_max_size(data):
    u"""Return (width, height) of the smallest canvas containing every hit.

    Each item of *data* exposes its x coordinate at index 0 and its y
    coordinate at index 1.  Coordinates below 0 are clamped to 0, and an
    empty *data* yields (1, 1), matching the accumulator-style original.
    """
    max_w = max([0] + [hit[0] for hit in data])
    max_h = max([0] + [hit[1] for hit in data])
    return max_w + 1, max_h + 1
def mk_circle(r, w):
    u"""Rasterize a filled circle of radius *r* for an image *w* pixels wide.

    Uses the midpoint (Bresenham) circle algorithm.  Returns dict items
    mapping a flattened pixel offset ``w * y + x`` (relative to the circle
    center) to an integer weight; the weight is larger near the center and
    shrinks to 1 at the rim, so heat stamps fade out toward the edge.

    @see http://oldj.net/article/bresenham-algorithm/
    """
    __tmp = {}
    def c8(ix, iy, v=1):
        # Exploit the 8-fold symmetry of the circle: stamp all eight
        # reflections of the point (ix, iy) in one call.
        ps = (
            (ix, iy),
            (-ix, iy),
            (ix, -iy),
            (-ix, -iy),
            (iy, ix),
            (-iy, ix),
            (iy, -ix),
            (-iy, -ix),
        )
        for x2, y2 in ps:
            p = w * y2 + x2
            # setdefault: keep the weight first written for a pixel so later,
            # overlapping stamps do not overwrite it.
            __tmp.setdefault(p, v)
    # Midpoint circle algorithm: walk one octant; for each column x, fill
    # from the diagonal up to the current rim y, mirroring via c8().
    x = 0
    y = r
    d = 3 - (r << 1)
    while x <= y:
        for _y in range(x, y + 1):
            # Weight y + 1 - _y: 1 at the rim (_y == y), growing inward.
            c8(x, _y, y + 1 - _y)
        if d < 0:
            d += (x << 2) + 6
        else:
            d += ((x - y) << 2) + 10
            y -= 1
        x += 1
    # Py3: a dict items view; callers iterate it once.
    return __tmp.items()
def mk_colors(n=240):
    u"""Build a heat-map palette of *n* CSS ``hsl()`` color strings.

    The first ~40% of the palette stays at hue 240 (blue) while the
    lightness falls from 100% toward 50%; the remaining entries sweep the
    hue from 240 (blue) down toward 0 (red) at constant 50% lightness.

    @see http://oldj.net/article/heat-map-colors/
    """
    n1 = int(n * 0.4)
    n2 = n - n1
    lightness_ramp = [
        "hsl(240, 100%%, %d%%)" % (100 * (n1 - i / 2) / n1) for i in range(n1)
    ]
    hue_sweep = [
        "hsl(%.0f, 100%%, 50%%)" % (240 * (1.0 - float(i) / n2)) for i in range(n2)
    ]
    return lightness_ramp + hue_sweep
def is_num(v):
    u"""Return True if *v* is numeric, False otherwise (Py2/Py3 compatible).

    ints and floats are accepted directly; other values count as numeric
    when they format as digits via ``"%d"`` (the Py2 ``long`` case).

    Fix: the original applied ``"%d" % v`` unconditionally, so a
    non-numeric value such as a string raised TypeError instead of
    returning False; that format attempt is now guarded.
    """
    if type(v) in (int, float):
        return True
    try:
        # Py2 compatibility: long (and bool) render as digits via "%d".
        return ("%d" % v).isdigit()
    except TypeError:
        # Not formattable as an integer -> not a number.
        return False
| 18.732673 | 72 | 0.415433 |
def get_max_size(data):
max_w = 0
max_h = 0
for hit in data:
w = hit[0]
h = hit[1]
if w > max_w:
max_w = w
if h > max_h:
max_h = h
return max_w + 1, max_h + 1
def mk_circle(r, w):
__tmp = {}
def c8(ix, iy, v=1):
ps = (
(ix, iy),
(-ix, iy),
(ix, -iy),
(-ix, -iy),
(iy, ix),
(-iy, ix),
(iy, -ix),
(-iy, -ix),
)
for x2, y2 in ps:
p = w * y2 + x2
__tmp.setdefault(p, v)
x = 0
y = r
d = 3 - (r << 1)
while x <= y:
for _y in range(x, y + 1):
c8(x, _y, y + 1 - _y)
if d < 0:
d += (x << 2) + 6
else:
d += ((x - y) << 2) + 10
y -= 1
x += 1
return __tmp.items()
def mk_colors(n=240):
colors = []
n1 = int(n * 0.4)
n2 = n - n1
for i in range(n1):
color = "hsl(240, 100%%, %d%%)" % (100 * (n1 - i / 2) / n1)
colors.append(color)
for i in range(n2):
color = "hsl(%.0f, 100%%, 50%%)" % (240 * (1.0 - float(i) / n2))
colors.append(color)
return colors
def is_num(v):
if type(v) in (int, float):
return True
if ("%d" % v).isdigit():
return True
return False
| true | true |
f7358f102d8b55267e43a51cd253bac4d62bfe84 | 11,512 | py | Python | aiodocker/images.py | truenas/aiodocker | 8edefe73832d62d70fa11a4846594bc74ae12244 | [
"Apache-2.0"
] | 330 | 2017-04-12T19:36:03.000Z | 2022-03-29T09:24:53.000Z | aiodocker/images.py | truenas/aiodocker | 8edefe73832d62d70fa11a4846594bc74ae12244 | [
"Apache-2.0"
] | 623 | 2017-04-13T02:49:16.000Z | 2022-03-29T12:21:48.000Z | aiodocker/images.py | truenas/aiodocker | 8edefe73832d62d70fa11a4846594bc74ae12244 | [
"Apache-2.0"
] | 76 | 2017-04-22T08:00:18.000Z | 2021-11-23T04:34:06.000Z | import io
import json
import warnings
from typing import (
Any,
AsyncIterator,
BinaryIO,
Dict,
List,
Mapping,
MutableMapping,
Optional,
Union,
overload,
)
from typing_extensions import Literal
from .jsonstream import json_stream_list, json_stream_stream
from .utils import clean_map, compose_auth_header
class DockerImages(object):
    """Namespace of image-related Docker Engine API calls.

    A thin wrapper around the parent client's ``_query*`` helpers; each
    method maps onto one ``/images/...`` (or ``/build``) HTTP endpoint.
    Methods taking ``stream`` return either an async iterator of JSON
    progress objects (``stream=True``) or the fully collected list.
    """
    def __init__(self, docker):
        # Parent Docker client that owns the HTTP session and query helpers.
        self.docker = docker
    async def list(self, **params) -> Mapping:
        """
        List of images
        """
        response = await self.docker._query_json("images/json", "GET", params=params)
        return response
    async def inspect(self, name: str) -> Mapping:
        """
        Return low-level information about an image
        Args:
            name: name of the image
        """
        response = await self.docker._query_json("images/{name}/json".format(name=name))
        return response
    async def get(self, name: str) -> Mapping:
        # Deprecated alias of inspect(), kept for backward compatibility.
        warnings.warn(
            """images.get is deprecated and will be removed in the next release,
            please use images.inspect instead.""",
            DeprecationWarning,
            stacklevel=2,
        )
        return await self.inspect(name)
    async def history(self, name: str) -> Mapping:
        """Return the build history (layer list) of the image *name*."""
        response = await self.docker._query_json(
            "images/{name}/history".format(name=name)
        )
        return response
    # Typing-only overloads: stream=False -> collected dict, stream=True ->
    # async iterator of progress objects.
    @overload
    async def pull(
        self,
        from_image: str,
        *,
        auth: Optional[Union[MutableMapping, str, bytes]] = None,
        tag: str = None,
        repo: str = None,
        stream: Literal[False] = False,
    ) -> Dict[str, Any]:
        pass
    @overload # noqa: F811
    def pull(
        self,
        from_image: str,
        *,
        auth: Optional[Union[MutableMapping, str, bytes]] = None,
        tag: str = None,
        repo: str = None,
        stream: Literal[True],
    ) -> AsyncIterator[Dict[str, Any]]:
        pass
    def pull( # noqa: F811
        self,
        from_image: str,
        *,
        auth: Optional[Union[MutableMapping, str, bytes]] = None,
        tag: str = None,
        repo: str = None,
        stream: bool = False,
    ) -> Any:
        """
        Similar to `docker pull`, pull an image locally
        Args:
            fromImage: name of the image to pull
            repo: repository name given to an image when it is imported
            tag: if empty when pulling an image all tags
                for the given image to be pulled
            auth: special {'auth': base64} pull private repo
        """
        image = from_image # TODO: clean up
        params = {"fromImage": image}
        headers = {}
        if repo:
            params["repo"] = repo
        if tag:
            params["tag"] = tag
        if auth is not None:
            # With credentials the image reference must carry the registry
            # host ("registry.example.com/name"), since the auth header is
            # scoped to that registry.
            registry, has_registry_host, _ = image.partition("/")
            if not has_registry_host:
                raise ValueError(
                    "Image should have registry host "
                    "when auth information is provided"
                )
            # TODO: assert registry == repo?
            headers["X-Registry-Auth"] = compose_auth_header(auth, registry)
        cm = self.docker._query("images/create", "POST", params=params, headers=headers)
        return self._handle_response(cm, stream)
    def _handle_response(self, cm, stream):
        # Dispatch on the requested result style: an async iterator of JSON
        # progress objects, or the whole stream collected into a list.
        if stream:
            return self._handle_stream(cm)
        else:
            return self._handle_list(cm)
    async def _handle_stream(self, cm):
        # Async generator: yield each JSON object as the daemon emits it.
        async with cm as response:
            async for item in json_stream_stream(response):
                yield item
    async def _handle_list(self, cm):
        # Collect the daemon's whole JSON stream before returning.
        async with cm as response:
            return await json_stream_list(response)
    @overload
    async def push(
        self,
        name: str,
        *,
        auth: Union[MutableMapping, str, bytes] = None,
        tag: str = None,
        stream: Literal[False] = False,
    ) -> Dict[str, Any]:
        pass
    @overload # noqa: F811
    def push(
        self,
        name: str,
        *,
        auth: Union[MutableMapping, str, bytes] = None,
        tag: str = None,
        stream: Literal[True],
    ) -> AsyncIterator[Dict[str, Any]]:
        pass
    def push( # noqa: F811
        self,
        name: str,
        *,
        auth: Union[MutableMapping, str, bytes] = None,
        tag: str = None,
        stream: bool = False,
    ) -> Any:
        """Push the image *name* (optionally a single *tag*) to its registry.

        *auth* follows the same rules as pull(): when given, *name* must
        include the registry host so the auth header can be scoped to it.
        """
        params = {}
        headers = {
            # Anonymous push requires a dummy auth header.
            "X-Registry-Auth": "placeholder"
        }
        if tag:
            params["tag"] = tag
        if auth is not None:
            registry, has_registry_host, _ = name.partition("/")
            if not has_registry_host:
                raise ValueError(
                    "Image should have registry host "
                    "when auth information is provided"
                )
            headers["X-Registry-Auth"] = compose_auth_header(auth, registry)
        cm = self.docker._query(
            "images/{name}/push".format(name=name),
            "POST",
            params=params,
            headers=headers,
        )
        return self._handle_response(cm, stream)
    async def tag(self, name: str, repo: str, *, tag: str = None) -> bool:
        """
        Tag the given image so that it becomes part of a repository.
        Args:
            repo: the repository to tag in
            tag: the name for the new tag
        """
        params = {"repo": repo}
        if tag:
            params["tag"] = tag
        async with self.docker._query(
            "images/{name}/tag".format(name=name),
            "POST",
            params=params,
            headers={"content-type": "application/json"},
        ):
            return True
    async def delete(
        self, name: str, *, force: bool = False, noprune: bool = False
    ) -> List:
        """
        Remove an image along with any untagged parent
        images that were referenced by that image
        Args:
            name: name/id of the image to delete
            force: remove the image even if it is being used
                by stopped containers or has other tags
            noprune: don't delete untagged parent images
        Returns:
            List of deleted images
        """
        params = {"force": force, "noprune": noprune}
        return await self.docker._query_json(
            "images/{name}".format(name=name), "DELETE", params=params
        )
    @staticmethod
    async def _stream(fileobj: BinaryIO) -> AsyncIterator[bytes]:
        """Async generator yielding *fileobj* contents in buffered chunks."""
        chunk = fileobj.read(io.DEFAULT_BUFFER_SIZE)
        while chunk:
            yield chunk
            chunk = fileobj.read(io.DEFAULT_BUFFER_SIZE)
    @overload
    async def build(
        self,
        *,
        remote: str = None,
        fileobj: BinaryIO = None,
        path_dockerfile: str = None,
        tag: str = None,
        quiet: bool = False,
        nocache: bool = False,
        buildargs: Mapping = None,
        pull: bool = False,
        rm: bool = True,
        forcerm: bool = False,
        labels: Mapping = None,
        stream: Literal[False] = False,
        encoding: str = None,
    ) -> Dict[str, Any]:
        pass
    @overload # noqa: F811
    def build(
        self,
        *,
        remote: str = None,
        fileobj: BinaryIO = None,
        path_dockerfile: str = None,
        tag: str = None,
        quiet: bool = False,
        nocache: bool = False,
        buildargs: Mapping = None,
        pull: bool = False,
        rm: bool = True,
        forcerm: bool = False,
        labels: Mapping = None,
        stream: Literal[True],
        encoding: str = None,
    ) -> AsyncIterator[Dict[str, Any]]:
        pass
    def build( # noqa: F811
        self,
        *,
        remote: str = None,
        fileobj: BinaryIO = None,
        path_dockerfile: str = None,
        tag: str = None,
        quiet: bool = False,
        nocache: bool = False,
        buildargs: Mapping = None,
        pull: bool = False,
        rm: bool = True,
        forcerm: bool = False,
        labels: Mapping = None,
        stream: bool = False,
        encoding: str = None,
    ) -> Any:
        """
        Build an image given a remote Dockerfile
        or a file object with a Dockerfile inside
        Args:
            path_dockerfile: path within the build context to the Dockerfile
            remote: a Git repository URI or HTTP/HTTPS context URI
            quiet: suppress verbose build output
            nocache: do not use the cache when building the image
            rm: remove intermediate containers after a successful build
            pull: downloads any updates to the FROM image in Dockerfiles
            encoding: set `Content-Encoding` for the file object your send
            forcerm: always remove intermediate containers, even upon failure
            labels: arbitrary key/value labels to set on the image
            fileobj: a tar archive compressed or not
        """
        headers = {}
        # clean_map() later drops the None-valued entries before sending.
        params = {
            "t": tag,
            "rm": rm,
            "q": quiet,
            "pull": pull,
            "remote": remote,
            "nocache": nocache,
            "forcerm": forcerm,
            "dockerfile": path_dockerfile,
        }
        if remote is None and fileobj is None:
            raise ValueError("You need to specify either remote or fileobj")
        if fileobj and remote:
            raise ValueError("You cannot specify both fileobj and remote")
        if fileobj and not encoding:
            raise ValueError("You need to specify an encoding")
        data = None
        if fileobj:
            # Stream the tar context to the daemon chunk by chunk.
            data = self._stream(fileobj)
            headers["content-type"] = "application/x-tar"
        if fileobj and encoding:
            headers["Content-Encoding"] = encoding
        if buildargs:
            params.update({"buildargs": json.dumps(buildargs)})
        if labels:
            params.update({"labels": json.dumps(labels)})
        cm = self.docker._query(
            "build",
            "POST",
            params=clean_map(params),
            headers=headers,
            data=data,
        )
        return self._handle_response(cm, stream)
    def export_image(self, name: str):
        """
        Get a tarball of an image by name or id.
        Args:
            name: name/id of the image to be exported
        Returns:
            Streamreader of tarball image
        """
        return _ExportCM(
            self.docker._query("images/{name}/get".format(name=name), "GET")
        )
    def import_image(self, data, stream: bool = False):
        """
        Import tarball of image to docker.
        Args:
            data: tarball data of image to be imported
        Returns:
            Tarball of the image
        """
        headers = {"Content-Type": "application/x-tar"}
        cm = self.docker._query_chunked_post(
            "images/load", "POST", data=data, headers=headers
        )
        return self._handle_response(cm, stream)
class _ExportCM:
def __init__(self, cm):
self._cm = cm
async def __aenter__(self):
resp = await self._cm.__aenter__()
return resp.content
async def __aexit__(self, exc_type, exc_val, exc_tb):
return await self._cm.__aexit__(exc_type, exc_val, exc_tb)
| 28.997481 | 88 | 0.54491 | import io
import json
import warnings
from typing import (
Any,
AsyncIterator,
BinaryIO,
Dict,
List,
Mapping,
MutableMapping,
Optional,
Union,
overload,
)
from typing_extensions import Literal
from .jsonstream import json_stream_list, json_stream_stream
from .utils import clean_map, compose_auth_header
class DockerImages(object):
def __init__(self, docker):
self.docker = docker
async def list(self, **params) -> Mapping:
response = await self.docker._query_json("images/json", "GET", params=params)
return response
async def inspect(self, name: str) -> Mapping:
response = await self.docker._query_json("images/{name}/json".format(name=name))
return response
async def get(self, name: str) -> Mapping:
warnings.warn(
"""images.get is deprecated and will be removed in the next release,
please use images.inspect instead.""",
DeprecationWarning,
stacklevel=2,
)
return await self.inspect(name)
async def history(self, name: str) -> Mapping:
response = await self.docker._query_json(
"images/{name}/history".format(name=name)
)
return response
@overload
async def pull(
self,
from_image: str,
*,
auth: Optional[Union[MutableMapping, str, bytes]] = None,
tag: str = None,
repo: str = None,
stream: Literal[False] = False,
) -> Dict[str, Any]:
pass
@overload
def pull(
self,
from_image: str,
*,
auth: Optional[Union[MutableMapping, str, bytes]] = None,
tag: str = None,
repo: str = None,
stream: Literal[True],
) -> AsyncIterator[Dict[str, Any]]:
pass
def pull(
self,
from_image: str,
*,
auth: Optional[Union[MutableMapping, str, bytes]] = None,
tag: str = None,
repo: str = None,
stream: bool = False,
) -> Any:
image = from_image
params = {"fromImage": image}
headers = {}
if repo:
params["repo"] = repo
if tag:
params["tag"] = tag
if auth is not None:
registry, has_registry_host, _ = image.partition("/")
if not has_registry_host:
raise ValueError(
"Image should have registry host "
"when auth information is provided"
)
headers["X-Registry-Auth"] = compose_auth_header(auth, registry)
cm = self.docker._query("images/create", "POST", params=params, headers=headers)
return self._handle_response(cm, stream)
def _handle_response(self, cm, stream):
if stream:
return self._handle_stream(cm)
else:
return self._handle_list(cm)
async def _handle_stream(self, cm):
async with cm as response:
async for item in json_stream_stream(response):
yield item
async def _handle_list(self, cm):
async with cm as response:
return await json_stream_list(response)
@overload
async def push(
self,
name: str,
*,
auth: Union[MutableMapping, str, bytes] = None,
tag: str = None,
stream: Literal[False] = False,
) -> Dict[str, Any]:
pass
@overload
def push(
self,
name: str,
*,
auth: Union[MutableMapping, str, bytes] = None,
tag: str = None,
stream: Literal[True],
) -> AsyncIterator[Dict[str, Any]]:
pass
def push(
self,
name: str,
*,
auth: Union[MutableMapping, str, bytes] = None,
tag: str = None,
stream: bool = False,
) -> Any:
params = {}
headers = {
"X-Registry-Auth": "placeholder"
}
if tag:
params["tag"] = tag
if auth is not None:
registry, has_registry_host, _ = name.partition("/")
if not has_registry_host:
raise ValueError(
"Image should have registry host "
"when auth information is provided"
)
headers["X-Registry-Auth"] = compose_auth_header(auth, registry)
cm = self.docker._query(
"images/{name}/push".format(name=name),
"POST",
params=params,
headers=headers,
)
return self._handle_response(cm, stream)
async def tag(self, name: str, repo: str, *, tag: str = None) -> bool:
params = {"repo": repo}
if tag:
params["tag"] = tag
async with self.docker._query(
"images/{name}/tag".format(name=name),
"POST",
params=params,
headers={"content-type": "application/json"},
):
return True
async def delete(
self, name: str, *, force: bool = False, noprune: bool = False
) -> List:
params = {"force": force, "noprune": noprune}
return await self.docker._query_json(
"images/{name}".format(name=name), "DELETE", params=params
)
@staticmethod
async def _stream(fileobj: BinaryIO) -> AsyncIterator[bytes]:
chunk = fileobj.read(io.DEFAULT_BUFFER_SIZE)
while chunk:
yield chunk
chunk = fileobj.read(io.DEFAULT_BUFFER_SIZE)
@overload
async def build(
self,
*,
remote: str = None,
fileobj: BinaryIO = None,
path_dockerfile: str = None,
tag: str = None,
quiet: bool = False,
nocache: bool = False,
buildargs: Mapping = None,
pull: bool = False,
rm: bool = True,
forcerm: bool = False,
labels: Mapping = None,
stream: Literal[False] = False,
encoding: str = None,
) -> Dict[str, Any]:
pass
@overload
def build(
self,
*,
remote: str = None,
fileobj: BinaryIO = None,
path_dockerfile: str = None,
tag: str = None,
quiet: bool = False,
nocache: bool = False,
buildargs: Mapping = None,
pull: bool = False,
rm: bool = True,
forcerm: bool = False,
labels: Mapping = None,
stream: Literal[True],
encoding: str = None,
) -> AsyncIterator[Dict[str, Any]]:
pass
def build(
self,
*,
remote: str = None,
fileobj: BinaryIO = None,
path_dockerfile: str = None,
tag: str = None,
quiet: bool = False,
nocache: bool = False,
buildargs: Mapping = None,
pull: bool = False,
rm: bool = True,
forcerm: bool = False,
labels: Mapping = None,
stream: bool = False,
encoding: str = None,
) -> Any:
headers = {}
params = {
"t": tag,
"rm": rm,
"q": quiet,
"pull": pull,
"remote": remote,
"nocache": nocache,
"forcerm": forcerm,
"dockerfile": path_dockerfile,
}
if remote is None and fileobj is None:
raise ValueError("You need to specify either remote or fileobj")
if fileobj and remote:
raise ValueError("You cannot specify both fileobj and remote")
if fileobj and not encoding:
raise ValueError("You need to specify an encoding")
data = None
if fileobj:
data = self._stream(fileobj)
headers["content-type"] = "application/x-tar"
if fileobj and encoding:
headers["Content-Encoding"] = encoding
if buildargs:
params.update({"buildargs": json.dumps(buildargs)})
if labels:
params.update({"labels": json.dumps(labels)})
cm = self.docker._query(
"build",
"POST",
params=clean_map(params),
headers=headers,
data=data,
)
return self._handle_response(cm, stream)
def export_image(self, name: str):
return _ExportCM(
self.docker._query("images/{name}/get".format(name=name), "GET")
)
def import_image(self, data, stream: bool = False):
headers = {"Content-Type": "application/x-tar"}
cm = self.docker._query_chunked_post(
"images/load", "POST", data=data, headers=headers
)
return self._handle_response(cm, stream)
class _ExportCM:
def __init__(self, cm):
self._cm = cm
async def __aenter__(self):
resp = await self._cm.__aenter__()
return resp.content
async def __aexit__(self, exc_type, exc_val, exc_tb):
return await self._cm.__aexit__(exc_type, exc_val, exc_tb)
| true | true |
f7358f1fc61f6b30105dd15fc7f8b44549bc1982 | 258 | py | Python | blob_upload/file_helpers.py | benchling/integration-examples | 450dc8b712cca9d037f65fd97b2f41ba2010cdc9 | [
"MIT"
] | 22 | 2019-08-01T13:30:34.000Z | 2021-07-05T18:39:48.000Z | blob_upload/file_helpers.py | benchling/integration-examples | 450dc8b712cca9d037f65fd97b2f41ba2010cdc9 | [
"MIT"
] | 6 | 2020-12-11T20:23:18.000Z | 2021-07-14T14:46:09.000Z | blob_upload/file_helpers.py | benchling/integration-examples | 450dc8b712cca9d037f65fd97b2f41ba2010cdc9 | [
"MIT"
] | 10 | 2019-10-24T20:13:04.000Z | 2022-03-07T23:48:53.000Z | import base64
import hashlib
def encode_base64(input: bytes, charset: str = "utf-8") -> str:
    """Return *input* as a MIME base64 string decoded with *charset*.

    Uses ``base64.encodebytes``, so the output is wrapped at 76 characters
    per line and ends with a trailing newline (RFC 2045 style).
    """
    return base64.encodebytes(input).decode(charset)
def calculate_md5(input: bytes) -> str:
    """Return the hex MD5 digest of *input*.

    Used as an integrity checksum for uploads, not for security purposes.
    """
    digest = hashlib.md5(input)
    return digest.hexdigest()
| 21.5 | 63 | 0.717054 | import base64
import hashlib
def encode_base64(input: bytes, charset: str = "utf-8") -> str:
file_bytes = base64.encodebytes(input)
return str(file_bytes, charset)
def calculate_md5(input: bytes) -> str:
return hashlib.md5(input).hexdigest()
| true | true |
f7359067e433351f05d256c61799cc63ae1d5082 | 838 | py | Python | src/hydratk/translation/lib/network/rpc/client/en/messages.py | hydratk/hydratk-lib-network | 79b698998bac9a04b5a345e5d3212c87b5564af3 | [
"BSD-3-Clause"
] | null | null | null | src/hydratk/translation/lib/network/rpc/client/en/messages.py | hydratk/hydratk-lib-network | 79b698998bac9a04b5a345e5d3212c87b5564af3 | [
"BSD-3-Clause"
] | null | null | null | src/hydratk/translation/lib/network/rpc/client/en/messages.py | hydratk/hydratk-lib-network | 79b698998bac9a04b5a345e5d3212c87b5564af3 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""This code is a part of Hydra Toolkit
.. module:: hydratk.translation.lib.network.rpc.client.en.messages
:platform: Unix
:synopsis: English language translation for RPC client messages
.. moduleauthor:: Petr Rašek <bowman@hydratk.org>
"""
# Language descriptor consumed by the hydratk translation machinery.
language = {
    'name': 'English',
    'ISO-639-1': 'en'
}
from hydratk.core import const
# ANSI SGR escape sequences used to emphasize parts of console output.
# _CSI is ESC (chr(27)) followed by '[' (chr(91)), i.e. "\x1b[".
_CSI = chr(27) + chr(91)
HIGHLIGHT_START = _CSI + "1m"  # bold on
HIGHLIGHT_US = _CSI + "4m"     # underline on
HIGHLIGHT_END = _CSI + "0m"    # reset all attributes
# Message catalogue: translation id -> list of English format strings.
msg = {
    'htk_rpc_init_proxy': ["Initializing proxy remote object on URL: '{0}'"],
    'htk_rpc_proxy_initialized': ["Proxy initialized"],
    'htk_rpc_proxy_not_init': ["Proxy not initialized yet"],
    'htk_rpc_call_method': ["Calling remote method: '{0}' with parameters: '{1}'"],
    'htk_rpc_method_called': ["Method returned: '{0}'"]
}
| 27.933333 | 83 | 0.663484 |
language = {
'name': 'English',
'ISO-639-1': 'en'
}
from hydratk.core import const
HIGHLIGHT_START = chr(27) + chr(91) + "1m"
HIGHLIGHT_US = chr(27) + chr(91) + "4m"
HIGHLIGHT_END = chr(27) + chr(91) + "0m"
msg = {
'htk_rpc_init_proxy': ["Initializing proxy remote object on URL: '{0}'"],
'htk_rpc_proxy_initialized': ["Proxy initialized"],
'htk_rpc_proxy_not_init': ["Proxy not initialized yet"],
'htk_rpc_call_method': ["Calling remote method: '{0}' with parameters: '{1}'"],
'htk_rpc_method_called': ["Method returned: '{0}'"]
}
| true | true |
f735914711797365715ac1d0348526c100afbf54 | 3,760 | py | Python | models/PS_FCN_run.py | rkripa/PS-FCN | eb8ddbd60964830c06432a734a2cf6dce34f70f0 | [
"MIT"
] | null | null | null | models/PS_FCN_run.py | rkripa/PS-FCN | eb8ddbd60964830c06432a734a2cf6dce34f70f0 | [
"MIT"
] | null | null | null | models/PS_FCN_run.py | rkripa/PS-FCN | eb8ddbd60964830c06432a734a2cf6dce34f70f0 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from torch.nn.init import kaiming_normal_
from models import model_utils
class FeatExtractor(nn.Module):
    """Per-observation feature extractor of PS-FCN.

    A small conv encoder/decoder (two stride-2 downsamples, one deconv
    upsample) mapping one (image [+ light direction] stack) to a
    128-channel feature map.  forward() returns the map flattened to 1-D
    together with its original shape, so the caller can fuse features
    from several observations and later restore the layout.
    """
    def __init__(self, batchNorm=False, c_in=3, other=None):
        """
        Args:
            batchNorm: insert BatchNorm inside the conv blocks.
            c_in: input channels (3 for image only, 6 when a light map is
                concatenated).
            other: optional dict of extra options, kept for compatibility.
        """
        super(FeatExtractor, self).__init__()
        # Fix: default was ``other={}``, a dict shared by every instance
        # (mutable-default pitfall); each instance now owns its dict.
        self.other = {} if other is None else other
        self.conv1 = model_utils.conv(batchNorm, c_in, 64, k=3, stride=1, pad=1)
        self.conv2 = model_utils.conv(batchNorm, 64, 128, k=3, stride=2, pad=1)
        self.conv3 = model_utils.conv(batchNorm, 128, 128, k=3, stride=1, pad=1)
        self.conv4 = model_utils.conv(batchNorm, 128, 256, k=3, stride=2, pad=1)
        self.conv5 = model_utils.conv(batchNorm, 256, 256, k=3, stride=1, pad=1)
        self.conv6 = model_utils.deconv(256, 128)
        self.conv7 = model_utils.conv(batchNorm, 128, 128, k=3, stride=1, pad=1)
    def forward(self, x):
        out = self.conv1(x)
        out = self.conv2(out)
        out = self.conv3(out)
        out = self.conv4(out)
        out = self.conv5(out)
        out = self.conv6(out)
        out_feat = self.conv7(out)
        n, c, h, w = out_feat.data.shape
        # Flatten so per-view features can be max/mean-fused elementwise.
        out_feat = out_feat.view(-1)
        return out_feat, [n, c, h, w]
class Regressor(nn.Module):
    """Normal-map regressor of PS-FCN: decodes the fused 128-channel feature
    map back to full resolution and emits a unit-length 3-channel normal map.
    """
    def __init__(self, batchNorm=False, other=None):
        """
        Args:
            batchNorm: insert BatchNorm inside the conv blocks.
            other: optional dict of extra options, kept for compatibility.
        """
        super(Regressor, self).__init__()
        # Fixes: the original assigned ``self.other = other`` twice, and
        # used a shared mutable default ``other={}``.
        self.other = {} if other is None else other
        self.deconv1 = model_utils.conv(batchNorm, 128, 128, k=3, stride=1, pad=1)
        self.deconv2 = model_utils.conv(batchNorm, 128, 128, k=3, stride=1, pad=1)
        self.deconv3 = model_utils.deconv(128, 64)
        self.est_normal = self._make_output(64, 3, k=3, stride=1, pad=1)
    def _make_output(self, cin, cout, k=3, stride=1, pad=1):
        """Final conv head: cin -> cout channels, no bias, no activation."""
        return nn.Sequential(
            nn.Conv2d(cin, cout, kernel_size=k, stride=stride, padding=pad, bias=False))
    def forward(self, x, shape):
        # Restore the (n, c, h, w) layout flattened by FeatExtractor.
        x = x.view(shape[0], shape[1], shape[2], shape[3])
        out = self.deconv1(x)
        out = self.deconv2(out)
        out = self.deconv3(out)
        normal = self.est_normal(out)
        # L2-normalize over the channel dim so each pixel is a unit normal.
        normal = torch.nn.functional.normalize(normal, 2, 1)
        return normal
class PS_FCN(nn.Module):
    """PS-FCN photometric-stereo network.

    Runs a shared FeatExtractor over every (image [, light]) observation,
    fuses the per-view feature vectors with an elementwise max or mean,
    and regresses the fused features to a per-pixel unit normal map.
    """
    def __init__(self, fuse_type='max', batchNorm=False, c_in=3, other={}):
        # fuse_type: 'max' or 'mean'.
        # NOTE(review): ``other={}`` is a shared mutable default — callers
        # must not mutate the returned dict.
        super(PS_FCN, self).__init__()
        self.extractor = FeatExtractor(batchNorm, c_in, other)
        self.regressor = Regressor(batchNorm, other)
        self.c_in = c_in
        self.fuse_type = fuse_type
        self.other = other
        # Kaiming init for conv/deconv weights; BatchNorm starts as the
        # identity (weight 1, bias 0).
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                kaiming_normal_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def forward(self, x):
        # x is [images] or [images, lights]; each tensor packs all views
        # along the channel axis in groups of 3 channels per view.
        img = x[0]
        img_split = torch.split(img, 3, 1)
        if len(x) > 1: # Have lighting
            light = x[1]
            light_split = torch.split(light, 3, 1)
        feats = torch.Tensor()
        for i in range(len(img_split)):
            net_in = img_split[i] if len(x) == 1 else torch.cat([img_split[i], light_split[i]], 1)
            feat, shape = self.extractor(net_in)
            if i == 0:
                feats = feat
            else:
                # Running pairwise fusion keeps memory bounded no matter
                # how many views are given.
                if self.fuse_type == 'mean':
                    feats = torch.stack([feats, feat], 1).sum(1)
                elif self.fuse_type == 'max':
                    feats, _ = torch.stack([feats, feat], 1).max(1)
        if self.fuse_type == 'mean':
            feats = feats / len(img_split)
        feat_fused = feats
        # ``shape`` is taken from the last extractor call; all views share
        # the same feature-map shape.  NOTE(review): unbound if img has
        # zero channels — assumes at least one 3-channel view.
        normal = self.regressor(feat_fused, shape)
        return normal
| 40 | 98 | 0.573404 | import torch
import torch.nn as nn
from torch.nn.init import kaiming_normal_
from models import model_utils
class FeatExtractor(nn.Module):
def __init__(self, batchNorm=False, c_in=3, other={}):
super(FeatExtractor, self).__init__()
self.other = other
self.conv1 = model_utils.conv(batchNorm, c_in, 64, k=3, stride=1, pad=1)
self.conv2 = model_utils.conv(batchNorm, 64, 128, k=3, stride=2, pad=1)
self.conv3 = model_utils.conv(batchNorm, 128, 128, k=3, stride=1, pad=1)
self.conv4 = model_utils.conv(batchNorm, 128, 256, k=3, stride=2, pad=1)
self.conv5 = model_utils.conv(batchNorm, 256, 256, k=3, stride=1, pad=1)
self.conv6 = model_utils.deconv(256, 128)
self.conv7 = model_utils.conv(batchNorm, 128, 128, k=3, stride=1, pad=1)
def forward(self, x):
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
out = self.conv4(out)
out = self.conv5(out)
out = self.conv6(out)
out_feat = self.conv7(out)
n, c, h, w = out_feat.data.shape
out_feat = out_feat.view(-1)
return out_feat, [n, c, h, w]
class Regressor(nn.Module):
    """Normal-map regressor for PS-FCN.

    Takes the fused (flattened) feature vector plus its original NCHW
    shape, decodes it through three conv stages, and predicts a per-pixel
    surface normal, L2-normalized along the channel dimension.
    """
    def __init__(self, batchNorm=False, other=None):
        super(Regressor, self).__init__()
        # Avoid the shared-mutable-default pitfall (`other={}`); also drop
        # the redundant second `self.other = other` assignment the original
        # performed after building the layers.
        self.other = other if other is not None else {}
        self.deconv1 = model_utils.conv(batchNorm, 128, 128, k=3, stride=1, pad=1)
        self.deconv2 = model_utils.conv(batchNorm, 128, 128, k=3, stride=1, pad=1)
        self.deconv3 = model_utils.deconv(128, 64)
        self.est_normal = self._make_output(64, 3, k=3, stride=1, pad=1)

    def _make_output(self, cin, cout, k=3, stride=1, pad=1):
        """Build the final bias-free conv that emits 3 normal channels."""
        return nn.Sequential(
            nn.Conv2d(cin, cout, kernel_size=k, stride=stride, padding=pad, bias=False))

    def forward(self, x, shape):
        """Decode flattened features `x` (restored to `shape`) into unit normals."""
        x = x.view(shape[0], shape[1], shape[2], shape[3])
        out = self.deconv1(x)
        out = self.deconv2(out)
        out = self.deconv3(out)
        normal = self.est_normal(out)
        # Unit-length normals: L2-normalize across the 3 channel components.
        normal = torch.nn.functional.normalize(normal, 2, 1)
        return normal
class PS_FCN(nn.Module):
    """Photometric-stereo FCN.

    Runs the shared extractor over each 3-channel image chunk (optionally
    concatenated with its 3-channel light direction), fuses the per-image
    features by element-wise max or mean, and regresses surface normals.
    """
    def __init__(self, fuse_type='max', batchNorm=False, c_in=3, other={}):
        super(PS_FCN, self).__init__()
        self.extractor = FeatExtractor(batchNorm, c_in, other)
        self.regressor = Regressor(batchNorm, other)
        self.c_in = c_in
        self.fuse_type = fuse_type
        self.other = other
        # Kaiming-normal init for conv/deconv weights, zero biases,
        # unit scale / zero shift for batch-norm layers.
        for module in self.modules():
            if isinstance(module, (nn.Conv2d, nn.ConvTranspose2d)):
                kaiming_normal_(module.weight.data)
                if module.bias is not None:
                    module.bias.data.zero_()
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def forward(self, x):
        # x[0]: images stacked channel-wise in groups of 3;
        # x[1] (optional): matching light directions, 3 channels per image.
        img_split = torch.split(x[0], 3, 1)
        light_split = torch.split(x[1], 3, 1) if len(x) > 1 else None
        fused = torch.Tensor()
        shape = None
        for idx, img_chunk in enumerate(img_split):
            if light_split is None:
                net_in = img_chunk
            else:
                net_in = torch.cat([img_chunk, light_split[idx]], 1)
            feat, shape = self.extractor(net_in)
            if idx == 0:
                fused = feat
            elif self.fuse_type == 'mean':
                fused = torch.stack([fused, feat], 1).sum(1)
            elif self.fuse_type == 'max':
                fused, _ = torch.stack([fused, feat], 1).max(1)
        if self.fuse_type == 'mean':
            # Running sum above; divide once at the end to get the mean.
            fused = fused / len(img_split)
        return self.regressor(fused, shape)
| true | true |
f7359199ff6a7aec6e7054079162b69f2e952aac | 5,964 | py | Python | schema_salad/codegen.py | common-workflow-language/schema_salad | 4f7023a177e0fc575812ef5e6b997b575617d826 | [
"Apache-2.0"
] | 58 | 2015-09-08T03:06:59.000Z | 2022-01-26T21:04:54.000Z | schema_salad/codegen.py | common-workflow-language/schema_salad | 4f7023a177e0fc575812ef5e6b997b575617d826 | [
"Apache-2.0"
] | 396 | 2015-08-25T20:22:49.000Z | 2022-03-25T07:32:04.000Z | schema_salad/codegen.py | common-workflow-language/schema_salad | 4f7023a177e0fc575812ef5e6b997b575617d826 | [
"Apache-2.0"
] | 59 | 2015-12-02T16:22:00.000Z | 2022-02-13T19:16:24.000Z | """Generate language specific loaders for a particular SALAD schema."""
import sys
from io import TextIOWrapper
from typing import (
Any,
Dict,
List,
MutableMapping,
MutableSequence,
Optional,
TextIO,
Union,
)
from . import schema
from .codegen_base import CodeGenBase
from .exceptions import SchemaSaladException
from .java_codegen import JavaCodeGen
from .python_codegen import PythonCodeGen
from .ref_resolver import Loader
from .schema import shortname
from .utils import aslist
FIELD_SORT_ORDER = ["id", "class", "name"]
def codegen(
    lang: str,
    i: List[Dict[str, str]],
    schema_metadata: Dict[str, Any],
    loader: Loader,
    target: Optional[str] = None,
    examples: Optional[str] = None,
    package: Optional[str] = None,
    copyright: Optional[str] = None,
) -> None:
    """Generate classes with loaders for the given Schema Salad description.

    :param lang: target language, ``"python"`` or ``"java"``
    :param i: schema records to generate code for
    :param schema_metadata: document metadata (``$base``/``id`` used for Java)
    :param loader: resolver used to extend and specialize the schema
    :param target: output file (Python) or directory (Java); stdout when None
    :param examples: path to example documents (Java generator only)
    :param package: generated package name (Java generator only)
    :param copyright: copyright notice embedded in generated sources
    :raises SchemaSaladException: if ``lang`` is unsupported
    """
    j = schema.extend_and_specialize(i, loader)

    gen = None  # type: Optional[CodeGenBase]
    dest = None  # type: Optional[Union[TextIOWrapper, TextIO]]
    if lang == "python":
        if target:
            dest = open(target, mode="w", encoding="utf-8")
        else:
            dest = sys.stdout
        gen = PythonCodeGen(dest, copyright=copyright)
    elif lang == "java":
        gen = JavaCodeGen(
            schema_metadata.get("$base", schema_metadata.get("id")),
            target=target,
            examples=examples,
            package=package,
            copyright=copyright,
        )
    else:
        raise SchemaSaladException(f"Unsupported code generation language '{lang}'")

    # Ensure the output file we may have opened above is always closed,
    # even if generation fails partway (the original leaked the handle).
    try:
        gen.prologue()

        document_roots = []

        # First pass: register type loaders and vocabulary for every type.
        for rec in j:
            if rec["type"] in ("enum", "record"):
                gen.type_loader(rec)
                gen.add_vocab(shortname(rec["name"]), rec["name"])

        # Second pass: emit enum symbols and one class per record.
        for rec in j:
            if rec["type"] == "enum":
                for symbol in rec["symbols"]:
                    gen.add_vocab(shortname(symbol), symbol)

            if rec["type"] == "record":
                if rec.get("documentRoot"):
                    document_roots.append(rec["name"])

                field_names = []
                optional_fields = set()
                for field in rec.get("fields", []):
                    field_name = shortname(field["name"])
                    field_names.append(field_name)
                    tp = field["type"]
                    # A union whose first member is "null" marks the field optional.
                    if (
                        isinstance(tp, MutableSequence)
                        and tp[0] == "https://w3id.org/cwl/salad#null"
                    ):
                        optional_fields.add(field_name)

                idfield = ""
                for field in rec.get("fields", []):
                    if field.get("jsonldPredicate") == "@id":
                        idfield = field.get("name")

                gen.begin_class(
                    rec["name"],
                    aslist(rec.get("extends", [])),
                    rec.get("doc", ""),
                    rec.get("abstract", False),
                    field_names,
                    idfield,
                    optional_fields,
                )
                gen.add_vocab(shortname(rec["name"]), rec["name"])

                # Keep well-known fields (id/class/name) first; everything
                # else keeps its relative order via the constant key 100.
                sorted_fields = sorted(
                    rec.get("fields", []),
                    key=lambda field_entry: FIELD_SORT_ORDER.index(
                        field_entry["name"].split("/")[-1]
                    )
                    if field_entry["name"].split("/")[-1] in FIELD_SORT_ORDER
                    else 100,
                )

                # Declare the identifier field (at most one) before the rest.
                for field in sorted_fields:
                    if field.get("jsonldPredicate") == "@id":
                        subscope = field.get("subscope")
                        fieldpred = field["name"]
                        optional = bool("https://w3id.org/cwl/salad#null" in field["type"])
                        uri_loader = gen.uri_loader(
                            gen.type_loader(field["type"]), True, False, None
                        )
                        gen.declare_id_field(
                            fieldpred, uri_loader, field.get("doc"), optional, subscope
                        )
                        break

                # Declare the remaining fields, wrapping the base type loader
                # according to the field's jsonldPredicate options.
                for field in sorted_fields:
                    optional = bool("https://w3id.org/cwl/salad#null" in field["type"])
                    type_loader = gen.type_loader(field["type"])
                    jld = field.get("jsonldPredicate")
                    fieldpred = field["name"]
                    subscope = None

                    if isinstance(jld, MutableMapping):
                        ref_scope = jld.get("refScope")

                        if jld.get("typeDSL"):
                            type_loader = gen.typedsl_loader(type_loader, ref_scope)
                        elif jld.get("secondaryFilesDSL"):
                            type_loader = gen.secondaryfilesdsl_loader(type_loader)
                        elif jld.get("subscope"):
                            subscope = jld.get("subscope")
                        elif jld.get("_type") == "@id":
                            type_loader = gen.uri_loader(
                                type_loader, jld.get("identity", False), False, ref_scope
                            )
                        elif jld.get("_type") == "@vocab":
                            type_loader = gen.uri_loader(
                                type_loader, False, True, ref_scope
                            )

                        map_subject = jld.get("mapSubject")
                        if map_subject:
                            type_loader = gen.idmap_loader(
                                field["name"],
                                type_loader,
                                map_subject,
                                jld.get("mapPredicate"),
                            )

                        if "_id" in jld and jld["_id"][0] != "@":
                            fieldpred = jld["_id"]

                    if jld == "@id":
                        # Already handled by declare_id_field above.
                        continue

                    gen.declare_field(fieldpred, type_loader, field.get("doc"), optional)

                gen.end_class(rec["name"], field_names)

        # The top-level loader accepts any document root, or an array of them.
        root_type = list(document_roots)
        root_type.append({"type": "array", "items": document_roots})

        gen.epilogue(gen.type_loader(root_type))
    finally:
        # Close only a file we opened ourselves; never close stdout.
        if target and dest is not None:
            dest.close()
| 34.275862 | 87 | 0.499497 | import sys
from io import TextIOWrapper
from typing import (
Any,
Dict,
List,
MutableMapping,
MutableSequence,
Optional,
TextIO,
Union,
)
from . import schema
from .codegen_base import CodeGenBase
from .exceptions import SchemaSaladException
from .java_codegen import JavaCodeGen
from .python_codegen import PythonCodeGen
from .ref_resolver import Loader
from .schema import shortname
from .utils import aslist
FIELD_SORT_ORDER = ["id", "class", "name"]
def codegen(
    lang: str,
    i: List[Dict[str, str]],
    schema_metadata: Dict[str, Any],
    loader: Loader,
    target: Optional[str] = None,
    examples: Optional[str] = None,
    package: Optional[str] = None,
    copyright: Optional[str] = None,
) -> None:
    """Generate classes with loaders for the given Schema Salad description.

    ``lang`` selects the generator ("python" or "java"); ``target`` is the
    output file (Python) or directory (Java), defaulting to stdout for
    Python. Raises SchemaSaladException for an unsupported language.
    """
    j = schema.extend_and_specialize(i, loader)
    gen = None  # type: Optional[CodeGenBase]
    if lang == "python":
        if target:
            # NOTE(review): this handle is never closed explicitly — confirm
            # whether the process lifetime is relied on for flushing.
            dest: Union[TextIOWrapper, TextIO] = open(
                target, mode="w", encoding="utf-8"
            )
        else:
            dest = sys.stdout
        gen = PythonCodeGen(dest, copyright=copyright)
    elif lang == "java":
        gen = JavaCodeGen(
            schema_metadata.get("$base", schema_metadata.get("id")),
            target=target,
            examples=examples,
            package=package,
            copyright=copyright,
        )
    else:
        raise SchemaSaladException(f"Unsupported code generation language '{lang}'")
    gen.prologue()
    document_roots = []
    # First pass: register a type loader and vocabulary entry for every type.
    for rec in j:
        if rec["type"] in ("enum", "record"):
            gen.type_loader(rec)
            gen.add_vocab(shortname(rec["name"]), rec["name"])
    # Second pass: emit enum symbols and one generated class per record.
    for rec in j:
        if rec["type"] == "enum":
            for symbol in rec["symbols"]:
                gen.add_vocab(shortname(symbol), symbol)
        if rec["type"] == "record":
            if rec.get("documentRoot"):
                document_roots.append(rec["name"])
            field_names = []
            optional_fields = set()
            for field in rec.get("fields", []):
                field_name = shortname(field["name"])
                field_names.append(field_name)
                tp = field["type"]
                # A union whose first member is "null" marks an optional field.
                if (
                    isinstance(tp, MutableSequence)
                    and tp[0] == "https://w3id.org/cwl/salad#null"
                ):
                    optional_fields.add(field_name)
            idfield = ""
            for field in rec.get("fields", []):
                if field.get("jsonldPredicate") == "@id":
                    idfield = field.get("name")
            gen.begin_class(
                rec["name"],
                aslist(rec.get("extends", [])),
                rec.get("doc", ""),
                rec.get("abstract", False),
                field_names,
                idfield,
                optional_fields,
            )
            gen.add_vocab(shortname(rec["name"]), rec["name"])
            # Keep well-known fields (id/class/name, per FIELD_SORT_ORDER)
            # first; all other fields share the sort key 100 and keep their
            # relative order (sorted() is stable).
            sorted_fields = sorted(
                rec.get("fields", []),
                key=lambda i: FIELD_SORT_ORDER.index(i["name"].split("/")[-1])
                if i["name"].split("/")[-1] in FIELD_SORT_ORDER
                else 100,
            )
            # Declare the identifier field (at most one) before the others.
            for field in sorted_fields:
                if field.get("jsonldPredicate") == "@id":
                    subscope = field.get("subscope")
                    fieldpred = field["name"]
                    optional = bool("https://w3id.org/cwl/salad#null" in field["type"])
                    uri_loader = gen.uri_loader(
                        gen.type_loader(field["type"]), True, False, None
                    )
                    gen.declare_id_field(
                        fieldpred, uri_loader, field.get("doc"), optional, subscope
                    )
                    break
            # Declare the remaining fields, wrapping the base type loader
            # according to the field's jsonldPredicate options.
            for field in sorted_fields:
                optional = bool("https://w3id.org/cwl/salad#null" in field["type"])
                type_loader = gen.type_loader(field["type"])
                jld = field.get("jsonldPredicate")
                fieldpred = field["name"]
                subscope = None
                if isinstance(jld, MutableMapping):
                    ref_scope = jld.get("refScope")
                    if jld.get("typeDSL"):
                        type_loader = gen.typedsl_loader(type_loader, ref_scope)
                    elif jld.get("secondaryFilesDSL"):
                        type_loader = gen.secondaryfilesdsl_loader(type_loader)
                    elif jld.get("subscope"):
                        subscope = jld.get("subscope")
                    elif jld.get("_type") == "@id":
                        type_loader = gen.uri_loader(
                            type_loader, jld.get("identity", False), False, ref_scope
                        )
                    elif jld.get("_type") == "@vocab":
                        type_loader = gen.uri_loader(
                            type_loader, False, True, ref_scope
                        )
                    map_subject = jld.get("mapSubject")
                    if map_subject:
                        type_loader = gen.idmap_loader(
                            field["name"],
                            type_loader,
                            map_subject,
                            jld.get("mapPredicate"),
                        )
                    if "_id" in jld and jld["_id"][0] != "@":
                        fieldpred = jld["_id"]
                if jld == "@id":
                    # Already handled by declare_id_field above.
                    continue
                gen.declare_field(fieldpred, type_loader, field.get("doc"), optional)
            gen.end_class(rec["name"], field_names)
    # The top-level loader accepts any document root, or an array of them.
    root_type = list(document_roots)
    root_type.append({"type": "array", "items": document_roots})
    gen.epilogue(gen.type_loader(root_type))
| true | true |
f73593201aaa44d50a228f9e03517ab39c039014 | 487 | py | Python | mepo.d/utilities/shellcmd.py | GEOS-ESM/mepo | 41c20c286e7a9a11995f48fb5f3036e469d935e6 | [
"NASA-1.3",
"ECL-2.0",
"Apache-2.0"
] | null | null | null | mepo.d/utilities/shellcmd.py | GEOS-ESM/mepo | 41c20c286e7a9a11995f48fb5f3036e469d935e6 | [
"NASA-1.3",
"ECL-2.0",
"Apache-2.0"
] | 61 | 2020-01-08T20:01:16.000Z | 2022-03-24T19:44:38.000Z | mepo.d/utilities/shellcmd.py | GEOS-ESM/mepo | 41c20c286e7a9a11995f48fb5f3036e469d935e6 | [
"NASA-1.3",
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-02-28T14:29:12.000Z | 2020-07-08T15:56:55.000Z | import subprocess as sp
def run(cmd, output=None, stdout=None, status=None):
    """Run *cmd* in a subprocess, capturing stdout and stderr as text.

    The flags select what is returned:
      - status: the raw return code (no failure check)
      - stdout: captured stdout, after verifying a zero exit status
      - output: captured stdout + stderr, after verifying a zero exit status
    With no flag set, the exit status is still verified and None is returned.
    A non-zero exit prints the captured stderr and raises CalledProcessError.
    """
    completed = sp.run(
        cmd,
        stdout=sp.PIPE,
        stderr=sp.PIPE,
        universal_newlines=True,  # decode the byte streams into str
    )
    if status:
        return completed.returncode
    if completed.returncode != 0:
        # Surface the captured stderr before check_returncode() raises.
        print(completed.stderr)
        completed.check_returncode()
    if stdout:
        return completed.stdout
    if output:
        return completed.stdout + completed.stderr
| 23.190476 | 66 | 0.609856 | import subprocess as sp
def run(cmd, output=None, stdout=None, status=None):
result = sp.run(
cmd,
stdout = sp.PIPE,
stderr = sp.PIPE,
universal_newlines = True
)
if status:
return result.returncode
elif result.returncode != 0:
print(result.stderr)
result.check_returncode()
if stdout:
return result.stdout
if output:
return result.stdout + result.stderr
| true | true |
f735937b46eaaf17127373cb48ab6ef736904d0a | 752 | py | Python | ExploreDjango/mytestsite/mytestsite/urls.py | Devenc234/RecommenderSystem | dc64bdc132489c681aa672eb1baa1754c3ba5446 | [
"MIT"
] | null | null | null | ExploreDjango/mytestsite/mytestsite/urls.py | Devenc234/RecommenderSystem | dc64bdc132489c681aa672eb1baa1754c3ba5446 | [
"MIT"
] | null | null | null | ExploreDjango/mytestsite/mytestsite/urls.py | Devenc234/RecommenderSystem | dc64bdc132489c681aa672eb1baa1754c3ba5446 | [
"MIT"
] | null | null | null | """mytestsite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
# URL routes for the project; only the Django admin site is wired up.
urlpatterns = [
    path('admin/', admin.site.urls),
]
| 34.181818 | 77 | 0.710106 | from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| true | true |
f7359403d676c6fc69eb4c7737770aa0243ffaac | 27,648 | py | Python | testflows/_core/contrib/pygments/lexers/basic.py | testflows/TestFlows-Core | 0aa17247dffd2f7199465031ab16cc4f12c9cfb0 | [
"Apache-2.0"
] | 3 | 2020-06-25T19:23:19.000Z | 2021-10-20T19:29:56.000Z | testflows/_core/contrib/pygments/lexers/basic.py | testflows/TestFlows-Core | 0aa17247dffd2f7199465031ab16cc4f12c9cfb0 | [
"Apache-2.0"
] | null | null | null | testflows/_core/contrib/pygments/lexers/basic.py | testflows/TestFlows-Core | 0aa17247dffd2f7199465031ab16cc4f12c9cfb0 | [
"Apache-2.0"
] | 1 | 2020-02-24T12:31:45.000Z | 2020-02-24T12:31:45.000Z | # -*- coding: utf-8 -*-
"""
pygments.lexers.basic
~~~~~~~~~~~~~~~~~~~~~
    Lexers for BASIC-like languages (other than VB.net).
:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from testflows._core.contrib.pygments.lexer import RegexLexer, bygroups, default, words, include
from testflows._core.contrib.pygments.token import Comment, Error, Keyword, Name, Number, \
Punctuation, Operator, String, Text, Whitespace
from testflows._core.contrib.pygments.lexers import _vbscript_builtins
__all__ = ['BlitzBasicLexer', 'BlitzMaxLexer', 'MonkeyLexer', 'CbmBasicV2Lexer',
'QBasicLexer', 'VBScriptLexer', 'BBCBasicLexer']
class BlitzMaxLexer(RegexLexer):
    """
    For `BlitzMax <http://blitzbasic.com>`_ source code.
    .. versionadded:: 1.4
    """
    name = 'BlitzMax'
    aliases = ['blitzmax', 'bmax']
    filenames = ['*.bmx']
    mimetypes = ['text/x-bmx']
    # Regex fragments composed into the declaration patterns below:
    # word operators, shorthand type sigils (@ ! # $ %), long-form type
    # keywords, and identifier names.
    bmax_vopwords = r'\b(Shl|Shr|Sar|Mod)\b'
    bmax_sktypes = r'@{1,2}|[!#$%]'
    bmax_lktypes = r'\b(Int|Byte|Short|Float|Double|Long)\b'
    bmax_name = r'[a-z_]\w*'
    # Variable declaration: name, optional type (sigil or ":Type"), optional Ptr.
    bmax_var = (r'(%s)(?:(?:([ \t]*)(%s)|([ \t]*:[ \t]*\b(?:Shl|Shr|Sar|Mod)\b)'
                r'|([ \t]*)(:)([ \t]*)(?:%s|(%s)))(?:([ \t]*)(Ptr))?)') % \
               (bmax_name, bmax_sktypes, bmax_lktypes, bmax_name)
    # Function usage: a variable pattern followed by an opening parenthesis.
    bmax_func = bmax_var + r'?((?:[ \t]|\.\.\n)*)([(])'
    flags = re.MULTILINE | re.IGNORECASE
    tokens = {
        'root': [
            # Text
            (r'[ \t]+', Text),
            (r'\.\.\n', Text),  # Line continuation
            # Comments
            (r"'.*?\n", Comment.Single),
            (r'([ \t]*)\bRem\n(\n|.)*?\s*\bEnd([ \t]*)Rem', Comment.Multiline),
            # Data types
            ('"', String.Double, 'string'),
            # Numbers
            (r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
            (r'\.[0-9]*(?!\.)', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r'\$[0-9a-f]+', Number.Hex),
            (r'\%[10]+', Number.Bin),
            # Other
            (r'(?:(?:(:)?([ \t]*)(:?%s|([+\-*/&|~]))|Or|And|Not|[=<>^]))' %
             (bmax_vopwords), Operator),
            (r'[(),.:\[\]]', Punctuation),
            (r'(?:#[\w \t]*)', Name.Label),
            (r'(?:\?[\w \t]*)', Comment.Preproc),
            # Identifiers
            (r'\b(New)\b([ \t]?)([(]?)(%s)' % (bmax_name),
             bygroups(Keyword.Reserved, Text, Punctuation, Name.Class)),
            (r'\b(Import|Framework|Module)([ \t]+)(%s\.%s)' %
             (bmax_name, bmax_name),
             bygroups(Keyword.Reserved, Text, Keyword.Namespace)),
            (bmax_func, bygroups(Name.Function, Text, Keyword.Type,
                                 Operator, Text, Punctuation, Text,
                                 Keyword.Type, Name.Class, Text,
                                 Keyword.Type, Text, Punctuation)),
            (bmax_var, bygroups(Name.Variable, Text, Keyword.Type, Operator,
                                Text, Punctuation, Text, Keyword.Type,
                                Name.Class, Text, Keyword.Type)),
            (r'\b(Type|Extends)([ \t]+)(%s)' % (bmax_name),
             bygroups(Keyword.Reserved, Text, Name.Class)),
            # Keywords
            (r'\b(Ptr)\b', Keyword.Type),
            (r'\b(Pi|True|False|Null|Self|Super)\b', Keyword.Constant),
            (r'\b(Local|Global|Const|Field)\b', Keyword.Declaration),
            (words((
                'TNullMethodException', 'TNullFunctionException',
                'TNullObjectException', 'TArrayBoundsException',
                'TRuntimeException'), prefix=r'\b', suffix=r'\b'), Name.Exception),
            (words((
                'Strict', 'SuperStrict', 'Module', 'ModuleInfo',
                'End', 'Return', 'Continue', 'Exit', 'Public', 'Private',
                'Var', 'VarPtr', 'Chr', 'Len', 'Asc', 'SizeOf', 'Sgn', 'Abs', 'Min', 'Max',
                'New', 'Release', 'Delete', 'Incbin', 'IncbinPtr', 'IncbinLen',
                'Framework', 'Include', 'Import', 'Extern', 'EndExtern',
                'Function', 'EndFunction', 'Type', 'EndType', 'Extends', 'Method', 'EndMethod',
                'Abstract', 'Final', 'If', 'Then', 'Else', 'ElseIf', 'EndIf',
                'For', 'To', 'Next', 'Step', 'EachIn', 'While', 'Wend', 'EndWhile',
                'Repeat', 'Until', 'Forever', 'Select', 'Case', 'Default', 'EndSelect',
                'Try', 'Catch', 'EndTry', 'Throw', 'Assert', 'Goto', 'DefData', 'ReadData',
                'RestoreData'), prefix=r'\b', suffix=r'\b'),
             Keyword.Reserved),
            # Final resolve (for variable names and such)
            (r'(%s)' % (bmax_name), Name.Variable),
        ],
        'string': [
            (r'""', String.Double),  # doubled quote stays inside the string
            (r'"C?', String.Double, '#pop'),  # closing quote, optional C suffix
            (r'[^"]+', String.Double),
        ],
    }
class BlitzBasicLexer(RegexLexer):
    """
    For `BlitzBasic <http://blitzbasic.com>`_ source code.
    .. versionadded:: 2.0
    """
    name = 'BlitzBasic'
    aliases = ['blitzbasic', 'b3d', 'bplus']
    filenames = ['*.bb', '*.decls']
    mimetypes = ['text/x-bb']
    # Type sigils (@ # $ %) and identifier names; bb_var matches a name with
    # an optional sigil type or ".TypeName" custom-type suffix.
    bb_sktypes = r'@{1,2}|[#$%]'
    bb_name = r'[a-z]\w*'
    bb_var = (r'(%s)(?:([ \t]*)(%s)|([ \t]*)([.])([ \t]*)(?:(%s)))?') % \
             (bb_name, bb_sktypes, bb_name)
    flags = re.MULTILINE | re.IGNORECASE
    tokens = {
        'root': [
            # Text
            (r'[ \t]+', Text),
            # Comments
            (r";.*?\n", Comment.Single),
            # Data types
            ('"', String.Double, 'string'),
            # Numbers
            (r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
            (r'\.[0-9]+(?!\.)', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r'\$[0-9a-f]+', Number.Hex),
            (r'\%[10]+', Number.Bin),
            # Other
            (words(('Shl', 'Shr', 'Sar', 'Mod', 'Or', 'And', 'Not',
                    'Abs', 'Sgn', 'Handle', 'Int', 'Float', 'Str',
                    'First', 'Last', 'Before', 'After'),
                   prefix=r'\b', suffix=r'\b'),
             Operator),
            (r'([+\-*/~=<>^])', Operator),
            (r'[(),:\[\]\\]', Punctuation),
            (r'\.([ \t]*)(%s)' % bb_name, Name.Label),
            # Identifiers
            (r'\b(New)\b([ \t]+)(%s)' % (bb_name),
             bygroups(Keyword.Reserved, Text, Name.Class)),
            (r'\b(Gosub|Goto)\b([ \t]+)(%s)' % (bb_name),
             bygroups(Keyword.Reserved, Text, Name.Label)),
            (r'\b(Object)\b([ \t]*)([.])([ \t]*)(%s)\b' % (bb_name),
             bygroups(Operator, Text, Punctuation, Text, Name.Class)),
            (r'\b%s\b([ \t]*)(\()' % bb_var,
             bygroups(Name.Function, Text, Keyword.Type, Text, Punctuation,
                      Text, Name.Class, Text, Punctuation)),
            (r'\b(Function)\b([ \t]+)%s' % bb_var,
             bygroups(Keyword.Reserved, Text, Name.Function, Text, Keyword.Type,
                      Text, Punctuation, Text, Name.Class)),
            (r'\b(Type)([ \t]+)(%s)' % (bb_name),
             bygroups(Keyword.Reserved, Text, Name.Class)),
            # Keywords
            (r'\b(Pi|True|False|Null)\b', Keyword.Constant),
            (r'\b(Local|Global|Const|Field|Dim)\b', Keyword.Declaration),
            (words((
                'End', 'Return', 'Exit', 'Chr', 'Len', 'Asc', 'New', 'Delete', 'Insert',
                'Include', 'Function', 'Type', 'If', 'Then', 'Else', 'ElseIf', 'EndIf',
                'For', 'To', 'Next', 'Step', 'Each', 'While', 'Wend',
                'Repeat', 'Until', 'Forever', 'Select', 'Case', 'Default',
                'Goto', 'Gosub', 'Data', 'Read', 'Restore'), prefix=r'\b', suffix=r'\b'),
             Keyword.Reserved),
            # Final resolve (for variable names and such)
            # (r'(%s)' % (bb_name), Name.Variable),
            (bb_var, bygroups(Name.Variable, Text, Keyword.Type,
                              Text, Punctuation, Text, Name.Class)),
        ],
        'string': [
            (r'""', String.Double),  # doubled quote stays inside the string
            (r'"C?', String.Double, '#pop'),  # closing quote, optional C suffix
            (r'[^"]+', String.Double),
        ],
    }
class MonkeyLexer(RegexLexer):
    """
    For
    `Monkey <https://en.wikipedia.org/wiki/Monkey_(programming_language)>`_
    source code.
    .. versionadded:: 1.6
    """
    name = 'Monkey'
    aliases = ['monkey']
    filenames = ['*.monkey']
    mimetypes = ['text/x-monkey']
    # Naming conventions used to classify identifiers: lowercase-leading
    # variables, capitalized functions/classes, all-caps constants.
    name_variable = r'[a-z_]\w*'
    name_function = r'[A-Z]\w*'
    name_constant = r'[A-Z_][A-Z0-9_]*'
    name_class = r'[A-Z]\w*'
    name_module = r'[a-z0-9_]*'
    keyword_type = r'(?:Int|Float|String|Bool|Object|Array|Void)'
    # ? == Bool // % == Int // # == Float // $ == String
    keyword_type_special = r'[?%#$]'
    flags = re.MULTILINE
    tokens = {
        'root': [
            # Text
            (r'\s+', Text),
            # Comments
            (r"'.*", Comment),
            (r'(?i)^#rem\b', Comment.Multiline, 'comment'),
            # preprocessor directives
            (r'(?i)^(?:#If|#ElseIf|#Else|#EndIf|#End|#Print|#Error)\b', Comment.Preproc),
            # preprocessor variable (any line starting with '#' that is not a directive)
            (r'^#', Comment.Preproc, 'variables'),
            # String
            ('"', String.Double, 'string'),
            # Numbers
            (r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
            (r'\.[0-9]+(?!\.)', Number.Float),
            (r'[0-9]+', Number.Integer),
            (r'\$[0-9a-fA-Z]+', Number.Hex),
            (r'\%[10]+', Number.Bin),
            # Native data types
            (r'\b%s\b' % keyword_type, Keyword.Type),
            # Exception handling
            (r'(?i)\b(?:Try|Catch|Throw)\b', Keyword.Reserved),
            (r'Throwable', Name.Exception),
            # Builtins
            (r'(?i)\b(?:Null|True|False)\b', Name.Builtin),
            (r'(?i)\b(?:Self|Super)\b', Name.Builtin.Pseudo),
            (r'\b(?:HOST|LANG|TARGET|CONFIG)\b', Name.Constant),
            # Keywords
            (r'(?i)^(Import)(\s+)(.*)(\n)',
             bygroups(Keyword.Namespace, Text, Name.Namespace, Text)),
            (r'(?i)^Strict\b.*\n', Keyword.Reserved),
            (r'(?i)(Const|Local|Global|Field)(\s+)',
             bygroups(Keyword.Declaration, Text), 'variables'),
            (r'(?i)(New|Class|Interface|Extends|Implements)(\s+)',
             bygroups(Keyword.Reserved, Text), 'classname'),
            (r'(?i)(Function|Method)(\s+)',
             bygroups(Keyword.Reserved, Text), 'funcname'),
            (r'(?i)(?:End|Return|Public|Private|Extern|Property|'
             r'Final|Abstract)\b', Keyword.Reserved),
            # Flow Control stuff
            (r'(?i)(?:If|Then|Else|ElseIf|EndIf|'
             r'Select|Case|Default|'
             r'While|Wend|'
             r'Repeat|Until|Forever|'
             r'For|To|Until|Step|EachIn|Next|'
             r'Exit|Continue)\s+', Keyword.Reserved),
            # not used yet
            (r'(?i)\b(?:Module|Inline)\b', Keyword.Reserved),
            # Array
            (r'[\[\]]', Punctuation),
            # Other
            (r'<=|>=|<>|\*=|/=|\+=|-=|&=|~=|\|=|[-&*/^+=<>|~]', Operator),
            (r'(?i)(?:Not|Mod|Shl|Shr|And|Or)', Operator.Word),
            (r'[(){}!#,.:]', Punctuation),
            # catch the rest
            (r'%s\b' % name_constant, Name.Constant),
            (r'%s\b' % name_function, Name.Function),
            (r'%s\b' % name_variable, Name.Variable),
        ],
        'funcname': [
            (r'(?i)%s\b' % name_function, Name.Function),
            (r':', Punctuation, 'classname'),
            (r'\s+', Text),
            (r'\(', Punctuation, 'variables'),
            (r'\)', Punctuation, '#pop')
        ],
        'classname': [
            (r'%s\.' % name_module, Name.Namespace),
            (r'%s\b' % keyword_type, Keyword.Type),
            (r'%s\b' % name_class, Name.Class),
            # array (of given size)
            (r'(\[)(\s*)(\d*)(\s*)(\])',
             bygroups(Punctuation, Text, Number.Integer, Text, Punctuation)),
            # generics
            (r'\s+(?!<)', Text, '#pop'),
            (r'<', Punctuation, '#push'),
            (r'>', Punctuation, '#pop'),
            (r'\n', Text, '#pop'),
            default('#pop')
        ],
        'variables': [
            (r'%s\b' % name_constant, Name.Constant),
            (r'%s\b' % name_variable, Name.Variable),
            (r'%s' % keyword_type_special, Keyword.Type),
            (r'\s+', Text),
            (r':', Punctuation, 'classname'),
            (r',', Punctuation, '#push'),
            default('#pop')
        ],
        'string': [
            (r'[^"~]+', String.Double),
            (r'~q|~n|~r|~t|~z|~~', String.Escape),
            (r'"', String.Double, '#pop'),
        ],
        'comment': [
            # #rem blocks nest, hence the #push/#pop pair below.
            (r'(?i)^#rem.*?', Comment.Multiline, "#push"),
            (r'(?i)^#end.*?', Comment.Multiline, "#pop"),
            (r'\n', Comment.Multiline),
            (r'.+', Comment.Multiline),
        ],
    }
class CbmBasicV2Lexer(RegexLexer):
    """
    For CBM BASIC V2 sources.
    .. versionadded:: 1.6
    """
    name = 'CBM BASIC V2'
    aliases = ['cbmbas']
    filenames = ['*.bas']
    flags = re.IGNORECASE
    tokens = {
        'root': [
            (r'rem.*\n', Comment.Single),
            (r'\s+', Text),
            (r'new|run|end|for|to|next|step|go(to|sub)?|on|return|stop|cont'
             r'|if|then|input#?|read|wait|load|save|verify|poke|sys|print#?'
             r'|list|clr|cmd|open|close|get#?', Keyword.Reserved),
            (r'data|restore|dim|let|def|fn', Keyword.Declaration),
            (r'tab|spc|sgn|int|abs|usr|fre|pos|sqr|rnd|log|exp|cos|sin|tan|atn'
             r'|peek|len|val|asc|(str|chr|left|right|mid)\$', Name.Builtin),
            (r'[-+*/^<>=]', Operator),
            (r'not|and|or', Operator.Word),
            (r'"[^"\n]*.', String),
            (r'\d+|[-+]?\d*\.\d*(e[-+]?\d+)?', Number.Float),
            (r'[(),:;]', Punctuation),
            (r'\w+[$%]?', Name),
        ]
    }

    # Fixed signature: `analyse_text` is wrapped into a static analysator
    # by Pygments' LexerMeta and invoked with the text as its only
    # argument, so it must not take `self` (same convention as
    # QBasicLexer.analyse_text elsewhere in this module). The previous
    # `(self, text)` form shifted the text into `self`.
    def analyse_text(text):
        # if it starts with a line number, it shouldn't be a "modern" Basic
        # like VB.net
        if re.match(r'\d+', text):
            return 0.2
class QBasicLexer(RegexLexer):
    """
    For
    `QBasic <http://en.wikipedia.org/wiki/QBasic>`_
    source code.
    .. versionadded:: 2.0
    """
    name = 'QBasic'
    aliases = ['qbasic', 'basic']
    filenames = ['*.BAS', '*.bas']
    mimetypes = ['text/basic']
    # Word lists folded into alternation regexes by the 'declarations',
    # 'functions', 'metacommands', 'operators', 'statements' and 'keywords'
    # states included from 'root' below.
    declarations = ('DATA', 'LET')
    functions = (
        'ABS', 'ASC', 'ATN', 'CDBL', 'CHR$', 'CINT', 'CLNG',
        'COMMAND$', 'COS', 'CSNG', 'CSRLIN', 'CVD', 'CVDMBF', 'CVI',
        'CVL', 'CVS', 'CVSMBF', 'DATE$', 'ENVIRON$', 'EOF', 'ERDEV',
        'ERDEV$', 'ERL', 'ERR', 'EXP', 'FILEATTR', 'FIX', 'FRE',
        'FREEFILE', 'HEX$', 'INKEY$', 'INP', 'INPUT$', 'INSTR', 'INT',
        'IOCTL$', 'LBOUND', 'LCASE$', 'LEFT$', 'LEN', 'LOC', 'LOF',
        'LOG', 'LPOS', 'LTRIM$', 'MID$', 'MKD$', 'MKDMBF$', 'MKI$',
        'MKL$', 'MKS$', 'MKSMBF$', 'OCT$', 'PEEK', 'PEN', 'PLAY',
        'PMAP', 'POINT', 'POS', 'RIGHT$', 'RND', 'RTRIM$', 'SADD',
        'SCREEN', 'SEEK', 'SETMEM', 'SGN', 'SIN', 'SPACE$', 'SPC',
        'SQR', 'STICK', 'STR$', 'STRIG', 'STRING$', 'TAB', 'TAN',
        'TIME$', 'TIMER', 'UBOUND', 'UCASE$', 'VAL', 'VARPTR',
        'VARPTR$', 'VARSEG'
    )
    metacommands = ('$DYNAMIC', '$INCLUDE', '$STATIC')
    operators = ('AND', 'EQV', 'IMP', 'NOT', 'OR', 'XOR')
    statements = (
        'BEEP', 'BLOAD', 'BSAVE', 'CALL', 'CALL ABSOLUTE',
        'CALL INTERRUPT', 'CALLS', 'CHAIN', 'CHDIR', 'CIRCLE', 'CLEAR',
        'CLOSE', 'CLS', 'COLOR', 'COM', 'COMMON', 'CONST', 'DATA',
        'DATE$', 'DECLARE', 'DEF FN', 'DEF SEG', 'DEFDBL', 'DEFINT',
        'DEFLNG', 'DEFSNG', 'DEFSTR', 'DEF', 'DIM', 'DO', 'LOOP',
        'DRAW', 'END', 'ENVIRON', 'ERASE', 'ERROR', 'EXIT', 'FIELD',
        'FILES', 'FOR', 'NEXT', 'FUNCTION', 'GET', 'GOSUB', 'GOTO',
        'IF', 'THEN', 'INPUT', 'INPUT #', 'IOCTL', 'KEY', 'KEY',
        'KILL', 'LET', 'LINE', 'LINE INPUT', 'LINE INPUT #', 'LOCATE',
        'LOCK', 'UNLOCK', 'LPRINT', 'LSET', 'MID$', 'MKDIR', 'NAME',
        'ON COM', 'ON ERROR', 'ON KEY', 'ON PEN', 'ON PLAY',
        'ON STRIG', 'ON TIMER', 'ON UEVENT', 'ON', 'OPEN', 'OPEN COM',
        'OPTION BASE', 'OUT', 'PAINT', 'PALETTE', 'PCOPY', 'PEN',
        'PLAY', 'POKE', 'PRESET', 'PRINT', 'PRINT #', 'PRINT USING',
        'PSET', 'PUT', 'PUT', 'RANDOMIZE', 'READ', 'REDIM', 'REM',
        'RESET', 'RESTORE', 'RESUME', 'RETURN', 'RMDIR', 'RSET', 'RUN',
        'SCREEN', 'SEEK', 'SELECT CASE', 'SHARED', 'SHELL', 'SLEEP',
        'SOUND', 'STATIC', 'STOP', 'STRIG', 'SUB', 'SWAP', 'SYSTEM',
        'TIME$', 'TIMER', 'TROFF', 'TRON', 'TYPE', 'UEVENT', 'UNLOCK',
        'VIEW', 'WAIT', 'WHILE', 'WEND', 'WIDTH', 'WINDOW', 'WRITE'
    )
    keywords = (
        'ACCESS', 'ALIAS', 'ANY', 'APPEND', 'AS', 'BASE', 'BINARY',
        'BYVAL', 'CASE', 'CDECL', 'DOUBLE', 'ELSE', 'ELSEIF', 'ENDIF',
        'INTEGER', 'IS', 'LIST', 'LOCAL', 'LONG', 'LOOP', 'MOD',
        'NEXT', 'OFF', 'ON', 'OUTPUT', 'RANDOM', 'SIGNAL', 'SINGLE',
        'STEP', 'STRING', 'THEN', 'TO', 'UNTIL', 'USING', 'WEND'
    )
    tokens = {
        'root': [
            (r'\n+', Text),
            (r'\s+', Text.Whitespace),
            # Leading line numbers (with or without a trailing REM comment).
            (r'^(\s*)(\d*)(\s*)(REM .*)$',
             bygroups(Text.Whitespace, Name.Label, Text.Whitespace,
                      Comment.Single)),
            (r'^(\s*)(\d+)(\s*)',
             bygroups(Text.Whitespace, Name.Label, Text.Whitespace)),
            (r'(?=[\s]*)(\w+)(?=[\s]*=)', Name.Variable.Global),
            (r'(?=[^"]*)\'.*$', Comment.Single),
            (r'"[^\n"]*"', String.Double),
            (r'(END)(\s+)(FUNCTION|IF|SELECT|SUB)',
             bygroups(Keyword.Reserved, Text.Whitespace, Keyword.Reserved)),
            (r'(DECLARE)(\s+)([A-Z]+)(\s+)(\S+)',
             bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable,
                      Text.Whitespace, Name)),
            (r'(DIM)(\s+)(SHARED)(\s+)([^\s(]+)',
             bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable,
                      Text.Whitespace, Name.Variable.Global)),
            (r'(DIM)(\s+)([^\s(]+)',
             bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable.Global)),
            (r'^(\s*)([a-zA-Z_]+)(\s*)(\=)',
             bygroups(Text.Whitespace, Name.Variable.Global, Text.Whitespace,
                      Operator)),
            (r'(GOTO|GOSUB)(\s+)(\w+\:?)',
             bygroups(Keyword.Reserved, Text.Whitespace, Name.Label)),
            (r'(SUB)(\s+)(\w+\:?)',
             bygroups(Keyword.Reserved, Text.Whitespace, Name.Label)),
            include('declarations'),
            include('functions'),
            include('metacommands'),
            include('operators'),
            include('statements'),
            include('keywords'),
            # Typed identifiers ($, @, #, &, ! suffixes) and labels.
            (r'[a-zA-Z_]\w*[$@#&!]', Name.Variable.Global),
            (r'[a-zA-Z_]\w*\:', Name.Label),
            (r'\-?\d*\.\d+[@|#]?', Number.Float),
            (r'\-?\d+[@|#]', Number.Float),
            (r'\-?\d+#?', Number.Integer.Long),
            (r'\-?\d+#?', Number.Integer),
            (r'!=|==|:=|\.=|<<|>>|[-~+/\\*%=<>&^|?:!.]', Operator),
            (r'[\[\]{}(),;]', Punctuation),
            (r'[\w]+', Name.Variable.Global),
        ],
        # can't use regular \b because of X$()
        # XXX: use words() here
        'declarations': [
            (r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, declarations)),
             Keyword.Declaration),
        ],
        'functions': [
            (r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, functions)),
             Keyword.Reserved),
        ],
        'metacommands': [
            (r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, metacommands)),
             Keyword.Constant),
        ],
        'operators': [
            (r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, operators)), Operator.Word),
        ],
        'statements': [
            (r'\b(%s)\b' % '|'.join(map(re.escape, statements)),
             Keyword.Reserved),
        ],
        'keywords': [
            (r'\b(%s)\b' % '|'.join(keywords), Keyword),
        ],
    }

    def analyse_text(text):
        # The $DYNAMIC/$STATIC metacommands are QBasic-specific,
        # so their presence is a strong signal for lexer guessing.
        if '$DYNAMIC' in text or '$STATIC' in text:
            return 0.9
class VBScriptLexer(RegexLexer):
    """
    VBScript is scripting language that is modeled on Visual Basic.
    .. versionadded:: 2.4
    """
    name = 'VBScript'
    aliases = ['vbscript']
    filenames = ['*.vbs', '*.VBS']
    flags = re.IGNORECASE
    tokens = {
        'root': [
            (r"'[^\n]*", Comment.Single),
            (r'\s+', Whitespace),
            ('"', String.Double, 'string'),
            ('&h[0-9a-f]+', Number.Hex),
            # Float variant 1, for example: 1., 1.e2, 1.2e3
            (r'[0-9]+\.[0-9]*(e[+-]?[0-9]+)?', Number.Float),
            (r'\.[0-9]+(e[+-]?[0-9]+)?', Number.Float),  # Float variant 2, for example: .1, .1e2
            (r'[0-9]+e[+-]?[0-9]+', Number.Float),  # Float variant 3, for example: 123e45
            (r'\d+', Number.Integer),
            ('#.+#', String),  # date or time value
            # `dim a, b, c` — the extra names are consumed in 'dim_more'.
            (r'(dim)(\s+)([a-z_][a-z0-9_]*)',
             bygroups(Keyword.Declaration, Whitespace, Name.Variable), 'dim_more'),
            (r'(function|sub)(\s+)([a-z_][a-z0-9_]*)',
             bygroups(Keyword.Declaration, Whitespace, Name.Function)),
            (r'(class)(\s+)([a-z_][a-z0-9_]*)', bygroups(Keyword.Declaration, Whitespace, Name.Class)),
            (r'(const)(\s+)([a-z_][a-z0-9_]*)', bygroups(Keyword.Declaration, Whitespace, Name.Constant)),
            (r'(end)(\s+)(class|function|if|property|sub|with)', bygroups(Keyword, Whitespace, Keyword)),
            (r'(on)(\s+)(error)(\s+)(goto)(\s+)(0)',
             bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword, Whitespace, Number.Integer)),
            (r'(on)(\s+)(error)(\s+)(resume)(\s+)(next)',
             bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword, Whitespace, Keyword)),
            (r'(option)(\s+)(explicit)', bygroups(Keyword, Whitespace, Keyword)),
            (r'(property)(\s+)(get|let|set)(\s+)([a-z_][a-z0-9_]*)',
             bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration, Whitespace, Name.Property)),
            (r'rem\s.*[^\n]*', Comment.Single),
            (words(_vbscript_builtins.KEYWORDS, suffix=r'\b'), Keyword),
            (words(_vbscript_builtins.OPERATORS), Operator),
            (words(_vbscript_builtins.OPERATOR_WORDS, suffix=r'\b'), Operator.Word),
            (words(_vbscript_builtins.BUILTIN_CONSTANTS, suffix=r'\b'), Name.Constant),
            (words(_vbscript_builtins.BUILTIN_FUNCTIONS, suffix=r'\b'), Name.Builtin),
            (words(_vbscript_builtins.BUILTIN_VARIABLES, suffix=r'\b'), Name.Builtin),
            (r'[a-z_][a-z0-9_]*', Name),
            (r'\b_\n', Operator),  # line continuation
            (words(r'(),.:'), Punctuation),
            (r'.+(\n)?', Error)  # anything unmatched is an error token
        ],
        'dim_more': [
            (r'(\s*)(,)(\s*)([a-z_][a-z0-9]*)', bygroups(Whitespace, Punctuation, Whitespace, Name.Variable)),
            default('#pop'),
        ],
        'string': [
            (r'[^"\n]+', String.Double),
            (r'\"\"', String.Double),
            (r'"', String.Double, '#pop'),
            (r'\n', Error, '#pop'),  # Unterminated string
        ],
    }
class BBCBasicLexer(RegexLexer):
"""
BBC Basic was supplied on the BBC Micro, and later Acorn RISC OS.
It is also used by BBC Basic For Windows.
.. versionadded:: 2.4
"""
base_keywords = ['OTHERWISE', 'AND', 'DIV', 'EOR', 'MOD', 'OR', 'ERROR',
'LINE', 'OFF', 'STEP', 'SPC', 'TAB', 'ELSE', 'THEN',
'OPENIN', 'PTR', 'PAGE', 'TIME', 'LOMEM', 'HIMEM', 'ABS',
'ACS', 'ADVAL', 'ASC', 'ASN', 'ATN', 'BGET', 'COS', 'COUNT',
'DEG', 'ERL', 'ERR', 'EVAL', 'EXP', 'EXT', 'FALSE', 'FN',
'GET', 'INKEY', 'INSTR', 'INT', 'LEN', 'LN', 'LOG', 'NOT',
'OPENUP', 'OPENOUT', 'PI', 'POINT', 'POS', 'RAD', 'RND',
'SGN', 'SIN', 'SQR', 'TAN', 'TO', 'TRUE', 'USR', 'VAL',
'VPOS', 'CHR$', 'GET$', 'INKEY$', 'LEFT$', 'MID$',
'RIGHT$', 'STR$', 'STRING$', 'EOF', 'PTR', 'PAGE', 'TIME',
'LOMEM', 'HIMEM', 'SOUND', 'BPUT', 'CALL', 'CHAIN', 'CLEAR',
'CLOSE', 'CLG', 'CLS', 'DATA', 'DEF', 'DIM', 'DRAW', 'END',
'ENDPROC', 'ENVELOPE', 'FOR', 'GOSUB', 'GOTO', 'GCOL', 'IF',
'INPUT', 'LET', 'LOCAL', 'MODE', 'MOVE', 'NEXT', 'ON',
'VDU', 'PLOT', 'PRINT', 'PROC', 'READ', 'REM', 'REPEAT',
'REPORT', 'RESTORE', 'RETURN', 'RUN', 'STOP', 'COLOUR',
'TRACE', 'UNTIL', 'WIDTH', 'OSCLI']
basic5_keywords = ['WHEN', 'OF', 'ENDCASE', 'ENDIF', 'ENDWHILE', 'CASE',
'CIRCLE', 'FILL', 'ORIGIN', 'POINT', 'RECTANGLE', 'SWAP',
'WHILE', 'WAIT', 'MOUSE', 'QUIT', 'SYS', 'INSTALL',
'LIBRARY', 'TINT', 'ELLIPSE', 'BEATS', 'TEMPO', 'VOICES',
'VOICE', 'STEREO', 'OVERLAY', 'APPEND', 'AUTO', 'CRUNCH',
'DELETE', 'EDIT', 'HELP', 'LIST', 'LOAD', 'LVAR', 'NEW',
'OLD', 'RENUMBER', 'SAVE', 'TEXTLOAD', 'TEXTSAVE',
'TWIN', 'TWINO', 'INSTALL', 'SUM', 'BEAT']
name = 'BBC Basic'
aliases = ['bbcbasic']
filenames = ['*.bbc']
tokens = {
'root': [
(r"[0-9]+", Name.Label),
(r"(\*)([^\n]*)",
bygroups(Keyword.Pseudo, Comment.Special)),
(r"", Whitespace, 'code'),
],
'code': [
(r"(REM)([^\n]*)",
bygroups(Keyword.Declaration, Comment.Single)),
(r'\n', Whitespace, 'root'),
(r'\s+', Whitespace),
(r':', Comment.Preproc),
# Some special cases to make functions come out nicer
(r'(DEF)(\s*)(FN|PROC)([A-Za-z_@][\w@]*)',
bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration, Name.Function)),
(r'(FN|PROC)([A-Za-z_@][\w@]*)',
bygroups(Keyword, Name.Function)),
(r'(GOTO|GOSUB|THEN|RESTORE)(\s*)(\d+)',
bygroups(Keyword, Whitespace, Name.Label)),
(r'(TRUE|FALSE)', Keyword.Constant),
(r'(PAGE|LOMEM|HIMEM|TIME|WIDTH|ERL|ERR|REPORT\$|POS|VPOS|VOICES)', Keyword.Pseudo),
(words(base_keywords), Keyword),
(words(basic5_keywords), Keyword),
('"', String.Double, 'string'),
('%[01]{1,32}', Number.Bin),
('&[0-9a-f]{1,8}', Number.Hex),
(r'[+-]?[0-9]+\.[0-9]*(E[+-]?[0-9]+)?', Number.Float),
(r'[+-]?\.[0-9]+(E[+-]?[0-9]+)?', Number.Float),
(r'[+-]?[0-9]+E[+-]?[0-9]+', Number.Float),
(r'[+-]?\d+', Number.Integer),
(r'([A-Za-z_@][\w@]*[%$]?)', Name.Variable),
(r'([+\-]=|[$!|?+\-*/%^=><();]|>=|<=|<>|<<|>>|>>>|,)', Operator),
],
'string': [
(r'[^"\n]+', String.Double),
(r'"', String.Double, '#pop'),
(r'\n', Error, 'root'), # Unterminated string
],
}
def analyse_text(text):
if text.startswith('10REM >') or text.startswith('REM >'):
return 0.9
| 41.890909 | 110 | 0.458731 |
import re
from testflows._core.contrib.pygments.lexer import RegexLexer, bygroups, default, words, include
from testflows._core.contrib.pygments.token import Comment, Error, Keyword, Name, Number, \
Punctuation, Operator, String, Text, Whitespace
from testflows._core.contrib.pygments.lexers import _vbscript_builtins
__all__ = ['BlitzBasicLexer', 'BlitzMaxLexer', 'MonkeyLexer', 'CbmBasicV2Lexer',
'QBasicLexer', 'VBScriptLexer', 'BBCBasicLexer']
class BlitzMaxLexer(RegexLexer):
name = 'BlitzMax'
aliases = ['blitzmax', 'bmax']
filenames = ['*.bmx']
mimetypes = ['text/x-bmx']
bmax_vopwords = r'\b(Shl|Shr|Sar|Mod)\b'
bmax_sktypes = r'@{1,2}|[!#$%]'
bmax_lktypes = r'\b(Int|Byte|Short|Float|Double|Long)\b'
bmax_name = r'[a-z_]\w*'
bmax_var = (r'(%s)(?:(?:([ \t]*)(%s)|([ \t]*:[ \t]*\b(?:Shl|Shr|Sar|Mod)\b)'
r'|([ \t]*)(:)([ \t]*)(?:%s|(%s)))(?:([ \t]*)(Ptr))?)') % \
(bmax_name, bmax_sktypes, bmax_lktypes, bmax_name)
bmax_func = bmax_var + r'?((?:[ \t]|\.\.\n)*)([(])'
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'[ \t]+', Text),
(r'\.\.\n', Text),
(r"'.*?\n", Comment.Single),
(r'([ \t]*)\bRem\n(\n|.)*?\s*\bEnd([ \t]*)Rem', Comment.Multiline),
# Data types
('"', String.Double, 'string'),
# Numbers
(r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
(r'\.[0-9]*(?!\.)', Number.Float),
(r'[0-9]+', Number.Integer),
(r'\$[0-9a-f]+', Number.Hex),
(r'\%[10]+', Number.Bin),
# Other
(r'(?:(?:(:)?([ \t]*)(:?%s|([+\-*/&|~]))|Or|And|Not|[=<>^]))' %
(bmax_vopwords), Operator),
(r'[(),.:\[\]]', Punctuation),
(r'(?:#[\w \t]*)', Name.Label),
(r'(?:\?[\w \t]*)', Comment.Preproc),
# Identifiers
(r'\b(New)\b([ \t]?)([(]?)(%s)' % (bmax_name),
bygroups(Keyword.Reserved, Text, Punctuation, Name.Class)),
(r'\b(Import|Framework|Module)([ \t]+)(%s\.%s)' %
(bmax_name, bmax_name),
bygroups(Keyword.Reserved, Text, Keyword.Namespace)),
(bmax_func, bygroups(Name.Function, Text, Keyword.Type,
Operator, Text, Punctuation, Text,
Keyword.Type, Name.Class, Text,
Keyword.Type, Text, Punctuation)),
(bmax_var, bygroups(Name.Variable, Text, Keyword.Type, Operator,
Text, Punctuation, Text, Keyword.Type,
Name.Class, Text, Keyword.Type)),
(r'\b(Type|Extends)([ \t]+)(%s)' % (bmax_name),
bygroups(Keyword.Reserved, Text, Name.Class)),
# Keywords
(r'\b(Ptr)\b', Keyword.Type),
(r'\b(Pi|True|False|Null|Self|Super)\b', Keyword.Constant),
(r'\b(Local|Global|Const|Field)\b', Keyword.Declaration),
(words((
'TNullMethodException', 'TNullFunctionException',
'TNullObjectException', 'TArrayBoundsException',
'TRuntimeException'), prefix=r'\b', suffix=r'\b'), Name.Exception),
(words((
'Strict', 'SuperStrict', 'Module', 'ModuleInfo',
'End', 'Return', 'Continue', 'Exit', 'Public', 'Private',
'Var', 'VarPtr', 'Chr', 'Len', 'Asc', 'SizeOf', 'Sgn', 'Abs', 'Min', 'Max',
'New', 'Release', 'Delete', 'Incbin', 'IncbinPtr', 'IncbinLen',
'Framework', 'Include', 'Import', 'Extern', 'EndExtern',
'Function', 'EndFunction', 'Type', 'EndType', 'Extends', 'Method', 'EndMethod',
'Abstract', 'Final', 'If', 'Then', 'Else', 'ElseIf', 'EndIf',
'For', 'To', 'Next', 'Step', 'EachIn', 'While', 'Wend', 'EndWhile',
'Repeat', 'Until', 'Forever', 'Select', 'Case', 'Default', 'EndSelect',
'Try', 'Catch', 'EndTry', 'Throw', 'Assert', 'Goto', 'DefData', 'ReadData',
'RestoreData'), prefix=r'\b', suffix=r'\b'),
Keyword.Reserved),
# Final resolve (for variable names and such)
(r'(%s)' % (bmax_name), Name.Variable),
],
'string': [
(r'""', String.Double),
(r'"C?', String.Double, '
(r'[^"]+', String.Double),
],
}
class BlitzBasicLexer(RegexLexer):
name = 'BlitzBasic'
aliases = ['blitzbasic', 'b3d', 'bplus']
filenames = ['*.bb', '*.decls']
mimetypes = ['text/x-bb']
bb_sktypes = r'@{1,2}|[#$%]'
bb_name = r'[a-z]\w*'
bb_var = (r'(%s)(?:([ \t]*)(%s)|([ \t]*)([.])([ \t]*)(?:(%s)))?') % \
(bb_name, bb_sktypes, bb_name)
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
# Text
(r'[ \t]+', Text),
# Comments
(r";.*?\n", Comment.Single),
# Data types
('"', String.Double, 'string'),
# Numbers
(r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
(r'\.[0-9]+(?!\.)', Number.Float),
(r'[0-9]+', Number.Integer),
(r'\$[0-9a-f]+', Number.Hex),
(r'\%[10]+', Number.Bin),
# Other
(words(('Shl', 'Shr', 'Sar', 'Mod', 'Or', 'And', 'Not',
'Abs', 'Sgn', 'Handle', 'Int', 'Float', 'Str',
'First', 'Last', 'Before', 'After'),
prefix=r'\b', suffix=r'\b'),
Operator),
(r'([+\-*/~=<>^])', Operator),
(r'[(),:\[\]\\]', Punctuation),
(r'\.([ \t]*)(%s)' % bb_name, Name.Label),
# Identifiers
(r'\b(New)\b([ \t]+)(%s)' % (bb_name),
bygroups(Keyword.Reserved, Text, Name.Class)),
(r'\b(Gosub|Goto)\b([ \t]+)(%s)' % (bb_name),
bygroups(Keyword.Reserved, Text, Name.Label)),
(r'\b(Object)\b([ \t]*)([.])([ \t]*)(%s)\b' % (bb_name),
bygroups(Operator, Text, Punctuation, Text, Name.Class)),
(r'\b%s\b([ \t]*)(\()' % bb_var,
bygroups(Name.Function, Text, Keyword.Type, Text, Punctuation,
Text, Name.Class, Text, Punctuation)),
(r'\b(Function)\b([ \t]+)%s' % bb_var,
bygroups(Keyword.Reserved, Text, Name.Function, Text, Keyword.Type,
Text, Punctuation, Text, Name.Class)),
(r'\b(Type)([ \t]+)(%s)' % (bb_name),
bygroups(Keyword.Reserved, Text, Name.Class)),
# Keywords
(r'\b(Pi|True|False|Null)\b', Keyword.Constant),
(r'\b(Local|Global|Const|Field|Dim)\b', Keyword.Declaration),
(words((
'End', 'Return', 'Exit', 'Chr', 'Len', 'Asc', 'New', 'Delete', 'Insert',
'Include', 'Function', 'Type', 'If', 'Then', 'Else', 'ElseIf', 'EndIf',
'For', 'To', 'Next', 'Step', 'Each', 'While', 'Wend',
'Repeat', 'Until', 'Forever', 'Select', 'Case', 'Default',
'Goto', 'Gosub', 'Data', 'Read', 'Restore'), prefix=r'\b', suffix=r'\b'),
Keyword.Reserved),
# Final resolve (for variable names and such)
# (r'(%s)' % (bb_name), Name.Variable),
(bb_var, bygroups(Name.Variable, Text, Keyword.Type,
Text, Punctuation, Text, Name.Class)),
],
'string': [
(r'""', String.Double),
(r'"C?', String.Double, '#pop'),
(r'[^"]+', String.Double),
],
}
class MonkeyLexer(RegexLexer):
name = 'Monkey'
aliases = ['monkey']
filenames = ['*.monkey']
mimetypes = ['text/x-monkey']
name_variable = r'[a-z_]\w*'
name_function = r'[A-Z]\w*'
name_constant = r'[A-Z_][A-Z0-9_]*'
name_class = r'[A-Z]\w*'
name_module = r'[a-z0-9_]*'
keyword_type = r'(?:Int|Float|String|Bool|Object|Array|Void)'
# ? == Bool // % == Int // # == Float // $ == String
keyword_type_special = r'[?%
flags = re.MULTILINE
tokens = {
'root': [
# Text
(r'\s+', Text),
# Comments
(r"'.*", Comment),
(r'(?i)^#rem\b', Comment.Multiline, 'comment'),
(r'(?i)^(?:#If|#ElseIf|#Else|#EndIf|#End|#Print|#Error)\b', Comment.Preproc),
(r'^#', Comment.Preproc, 'variables'),
('"', String.Double, 'string'),
# Numbers
(r'[0-9]+\.[0-9]*(?!\.)', Number.Float),
(r'\.[0-9]+(?!\.)', Number.Float),
(r'[0-9]+', Number.Integer),
(r'\$[0-9a-fA-Z]+', Number.Hex),
(r'\%[10]+', Number.Bin),
# Native data types
(r'\b%s\b' % keyword_type, Keyword.Type),
# Exception handling
(r'(?i)\b(?:Try|Catch|Throw)\b', Keyword.Reserved),
(r'Throwable', Name.Exception),
# Builtins
(r'(?i)\b(?:Null|True|False)\b', Name.Builtin),
(r'(?i)\b(?:Self|Super)\b', Name.Builtin.Pseudo),
(r'\b(?:HOST|LANG|TARGET|CONFIG)\b', Name.Constant),
# Keywords
(r'(?i)^(Import)(\s+)(.*)(\n)',
bygroups(Keyword.Namespace, Text, Name.Namespace, Text)),
(r'(?i)^Strict\b.*\n', Keyword.Reserved),
(r'(?i)(Const|Local|Global|Field)(\s+)',
bygroups(Keyword.Declaration, Text), 'variables'),
(r'(?i)(New|Class|Interface|Extends|Implements)(\s+)',
bygroups(Keyword.Reserved, Text), 'classname'),
(r'(?i)(Function|Method)(\s+)',
bygroups(Keyword.Reserved, Text), 'funcname'),
(r'(?i)(?:End|Return|Public|Private|Extern|Property|'
r'Final|Abstract)\b', Keyword.Reserved),
# Flow Control stuff
(r'(?i)(?:If|Then|Else|ElseIf|EndIf|'
r'Select|Case|Default|'
r'While|Wend|'
r'Repeat|Until|Forever|'
r'For|To|Until|Step|EachIn|Next|'
r'Exit|Continue)\s+', Keyword.Reserved),
# not used yet
(r'(?i)\b(?:Module|Inline)\b', Keyword.Reserved),
# Array
(r'[\[\]]', Punctuation),
# Other
(r'<=|>=|<>|\*=|/=|\+=|-=|&=|~=|\|=|[-&*/^+=<>|~]', Operator),
(r'(?i)(?:Not|Mod|Shl|Shr|And|Or)', Operator.Word),
(r'[(){}!#,.:]', Punctuation),
# catch the rest
(r'%s\b' % name_constant, Name.Constant),
(r'%s\b' % name_function, Name.Function),
(r'%s\b' % name_variable, Name.Variable),
],
'funcname': [
(r'(?i)%s\b' % name_function, Name.Function),
(r':', Punctuation, 'classname'),
(r'\s+', Text),
(r'\(', Punctuation, 'variables'),
(r'\)', Punctuation, '#pop')
],
'classname': [
(r'%s\.' % name_module, Name.Namespace),
(r'%s\b' % keyword_type, Keyword.Type),
(r'%s\b' % name_class, Name.Class),
# array (of given size)
(r'(\[)(\s*)(\d*)(\s*)(\])',
bygroups(Punctuation, Text, Number.Integer, Text, Punctuation)),
# generics
(r'\s+(?!<)', Text, '#pop'),
(r'<', Punctuation, '#push'),
(r'>', Punctuation, '#pop'),
(r'\n', Text, '#pop'),
default('#pop')
],
'variables': [
(r'%s\b' % name_constant, Name.Constant),
(r'%s\b' % name_variable, Name.Variable),
(r'%s' % keyword_type_special, Keyword.Type),
(r'\s+', Text),
(r':', Punctuation, 'classname'),
(r',', Punctuation, '#push'),
default('#pop')
],
'string': [
(r'[^"~]+', String.Double),
(r'~q|~n|~r|~t|~z|~~', String.Escape),
(r'"', String.Double, '#pop'),
],
'comment': [
(r'(?i)^#rem.*?', Comment.Multiline, "
(r'(?i)^#end.*?', Comment.Multiline, "
(r'\n', Comment.Multiline),
(r'.+', Comment.Multiline),
],
}
class CbmBasicV2Lexer(RegexLexer):
name = 'CBM BASIC V2'
aliases = ['cbmbas']
filenames = ['*.bas']
flags = re.IGNORECASE
tokens = {
'root': [
(r'rem.*\n', Comment.Single),
(r'\s+', Text),
(r'new|run|end|for|to|next|step|go(to|sub)?|on|return|stop|cont'
r'|if|then|input#?|read|wait|load|save|verify|poke|sys|print#?'
r'|list|clr|cmd|open|close|get#?', Keyword.Reserved),
(r'data|restore|dim|let|def|fn', Keyword.Declaration),
(r'tab|spc|sgn|int|abs|usr|fre|pos|sqr|rnd|log|exp|cos|sin|tan|atn'
r'|peek|len|val|asc|(str|chr|left|right|mid)\$', Name.Builtin),
(r'[-+*/^<>=]', Operator),
(r'not|and|or', Operator.Word),
(r'"[^"\n]*.', String),
(r'\d+|[-+]?\d*\.\d*(e[-+]?\d+)?', Number.Float),
(r'[(),:;]', Punctuation),
(r'\w+[$%]?', Name),
]
}
def analyse_text(self, text):
# if it starts with a line number, it shouldn't be a "modern" Basic
# like VB.net
if re.match(r'\d+', text):
return 0.2
class QBasicLexer(RegexLexer):
name = 'QBasic'
aliases = ['qbasic', 'basic']
filenames = ['*.BAS', '*.bas']
mimetypes = ['text/basic']
declarations = ('DATA', 'LET')
functions = (
'ABS', 'ASC', 'ATN', 'CDBL', 'CHR$', 'CINT', 'CLNG',
'COMMAND$', 'COS', 'CSNG', 'CSRLIN', 'CVD', 'CVDMBF', 'CVI',
'CVL', 'CVS', 'CVSMBF', 'DATE$', 'ENVIRON$', 'EOF', 'ERDEV',
'ERDEV$', 'ERL', 'ERR', 'EXP', 'FILEATTR', 'FIX', 'FRE',
'FREEFILE', 'HEX$', 'INKEY$', 'INP', 'INPUT$', 'INSTR', 'INT',
'IOCTL$', 'LBOUND', 'LCASE$', 'LEFT$', 'LEN', 'LOC', 'LOF',
'LOG', 'LPOS', 'LTRIM$', 'MID$', 'MKD$', 'MKDMBF$', 'MKI$',
'MKL$', 'MKS$', 'MKSMBF$', 'OCT$', 'PEEK', 'PEN', 'PLAY',
'PMAP', 'POINT', 'POS', 'RIGHT$', 'RND', 'RTRIM$', 'SADD',
'SCREEN', 'SEEK', 'SETMEM', 'SGN', 'SIN', 'SPACE$', 'SPC',
'SQR', 'STICK', 'STR$', 'STRIG', 'STRING$', 'TAB', 'TAN',
'TIME$', 'TIMER', 'UBOUND', 'UCASE$', 'VAL', 'VARPTR',
'VARPTR$', 'VARSEG'
)
metacommands = ('$DYNAMIC', '$INCLUDE', '$STATIC')
operators = ('AND', 'EQV', 'IMP', 'NOT', 'OR', 'XOR')
statements = (
'BEEP', 'BLOAD', 'BSAVE', 'CALL', 'CALL ABSOLUTE',
'CALL INTERRUPT', 'CALLS', 'CHAIN', 'CHDIR', 'CIRCLE', 'CLEAR',
'CLOSE', 'CLS', 'COLOR', 'COM', 'COMMON', 'CONST', 'DATA',
'DATE$', 'DECLARE', 'DEF FN', 'DEF SEG', 'DEFDBL', 'DEFINT',
'DEFLNG', 'DEFSNG', 'DEFSTR', 'DEF', 'DIM', 'DO', 'LOOP',
'DRAW', 'END', 'ENVIRON', 'ERASE', 'ERROR', 'EXIT', 'FIELD',
'FILES', 'FOR', 'NEXT', 'FUNCTION', 'GET', 'GOSUB', 'GOTO',
'IF', 'THEN', 'INPUT', 'INPUT #', 'IOCTL', 'KEY', 'KEY',
'KILL', 'LET', 'LINE', 'LINE INPUT', 'LINE INPUT #', 'LOCATE',
'LOCK', 'UNLOCK', 'LPRINT', 'LSET', 'MID$', 'MKDIR', 'NAME',
'ON COM', 'ON ERROR', 'ON KEY', 'ON PEN', 'ON PLAY',
'ON STRIG', 'ON TIMER', 'ON UEVENT', 'ON', 'OPEN', 'OPEN COM',
'OPTION BASE', 'OUT', 'PAINT', 'PALETTE', 'PCOPY', 'PEN',
'PLAY', 'POKE', 'PRESET', 'PRINT', 'PRINT #', 'PRINT USING',
'PSET', 'PUT', 'PUT', 'RANDOMIZE', 'READ', 'REDIM', 'REM',
'RESET', 'RESTORE', 'RESUME', 'RETURN', 'RMDIR', 'RSET', 'RUN',
'SCREEN', 'SEEK', 'SELECT CASE', 'SHARED', 'SHELL', 'SLEEP',
'SOUND', 'STATIC', 'STOP', 'STRIG', 'SUB', 'SWAP', 'SYSTEM',
'TIME$', 'TIMER', 'TROFF', 'TRON', 'TYPE', 'UEVENT', 'UNLOCK',
'VIEW', 'WAIT', 'WHILE', 'WEND', 'WIDTH', 'WINDOW', 'WRITE'
)
keywords = (
'ACCESS', 'ALIAS', 'ANY', 'APPEND', 'AS', 'BASE', 'BINARY',
'BYVAL', 'CASE', 'CDECL', 'DOUBLE', 'ELSE', 'ELSEIF', 'ENDIF',
'INTEGER', 'IS', 'LIST', 'LOCAL', 'LONG', 'LOOP', 'MOD',
'NEXT', 'OFF', 'ON', 'OUTPUT', 'RANDOM', 'SIGNAL', 'SINGLE',
'STEP', 'STRING', 'THEN', 'TO', 'UNTIL', 'USING', 'WEND'
)
tokens = {
'root': [
(r'\n+', Text),
(r'\s+', Text.Whitespace),
(r'^(\s*)(\d*)(\s*)(REM .*)$',
bygroups(Text.Whitespace, Name.Label, Text.Whitespace,
Comment.Single)),
(r'^(\s*)(\d+)(\s*)',
bygroups(Text.Whitespace, Name.Label, Text.Whitespace)),
(r'(?=[\s]*)(\w+)(?=[\s]*=)', Name.Variable.Global),
(r'(?=[^"]*)\'.*$', Comment.Single),
(r'"[^\n"]*"', String.Double),
(r'(END)(\s+)(FUNCTION|IF|SELECT|SUB)',
bygroups(Keyword.Reserved, Text.Whitespace, Keyword.Reserved)),
(r'(DECLARE)(\s+)([A-Z]+)(\s+)(\S+)',
bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable,
Text.Whitespace, Name)),
(r'(DIM)(\s+)(SHARED)(\s+)([^\s(]+)',
bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable,
Text.Whitespace, Name.Variable.Global)),
(r'(DIM)(\s+)([^\s(]+)',
bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable.Global)),
(r'^(\s*)([a-zA-Z_]+)(\s*)(\=)',
bygroups(Text.Whitespace, Name.Variable.Global, Text.Whitespace,
Operator)),
(r'(GOTO|GOSUB)(\s+)(\w+\:?)',
bygroups(Keyword.Reserved, Text.Whitespace, Name.Label)),
(r'(SUB)(\s+)(\w+\:?)',
bygroups(Keyword.Reserved, Text.Whitespace, Name.Label)),
include('declarations'),
include('functions'),
include('metacommands'),
include('operators'),
include('statements'),
include('keywords'),
(r'[a-zA-Z_]\w*[$@#&!]', Name.Variable.Global),
(r'[a-zA-Z_]\w*\:', Name.Label),
(r'\-?\d*\.\d+[@|#]?', Number.Float),
(r'\-?\d+[@|#]', Number.Float),
(r'\-?\d+#?', Number.Integer.Long),
(r'\-?\d+#?', Number.Integer),
(r'!=|==|:=|\.=|<<|>>|[-~+/\\*%=<>&^|?:!.]', Operator),
(r'[\[\]{}(),;]', Punctuation),
(r'[\w]+', Name.Variable.Global),
],
# can't use regular \b because of X$()
# XXX: use words() here
'declarations': [
(r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, declarations)),
Keyword.Declaration),
],
'functions': [
(r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, functions)),
Keyword.Reserved),
],
'metacommands': [
(r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, metacommands)),
Keyword.Constant),
],
'operators': [
(r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, operators)), Operator.Word),
],
'statements': [
(r'\b(%s)\b' % '|'.join(map(re.escape, statements)),
Keyword.Reserved),
],
'keywords': [
(r'\b(%s)\b' % '|'.join(keywords), Keyword),
],
}
def analyse_text(text):
if '$DYNAMIC' in text or '$STATIC' in text:
return 0.9
class VBScriptLexer(RegexLexer):
name = 'VBScript'
aliases = ['vbscript']
filenames = ['*.vbs', '*.VBS']
flags = re.IGNORECASE
tokens = {
'root': [
(r"'[^\n]*", Comment.Single),
(r'\s+', Whitespace),
('"', String.Double, 'string'),
('&h[0-9a-f]+', Number.Hex),
(r'[0-9]+\.[0-9]*(e[+-]?[0-9]+)?', Number.Float),
(r'\.[0-9]+(e[+-]?[0-9]+)?', Number.Float),
(r'[0-9]+e[+-]?[0-9]+', Number.Float),
(r'\d+', Number.Integer),
('#.+#', String),
(r'(dim)(\s+)([a-z_][a-z0-9_]*)',
bygroups(Keyword.Declaration, Whitespace, Name.Variable), 'dim_more'),
(r'(function|sub)(\s+)([a-z_][a-z0-9_]*)',
bygroups(Keyword.Declaration, Whitespace, Name.Function)),
(r'(class)(\s+)([a-z_][a-z0-9_]*)', bygroups(Keyword.Declaration, Whitespace, Name.Class)),
(r'(const)(\s+)([a-z_][a-z0-9_]*)', bygroups(Keyword.Declaration, Whitespace, Name.Constant)),
(r'(end)(\s+)(class|function|if|property|sub|with)', bygroups(Keyword, Whitespace, Keyword)),
(r'(on)(\s+)(error)(\s+)(goto)(\s+)(0)',
bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword, Whitespace, Number.Integer)),
(r'(on)(\s+)(error)(\s+)(resume)(\s+)(next)',
bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword, Whitespace, Keyword)),
(r'(option)(\s+)(explicit)', bygroups(Keyword, Whitespace, Keyword)),
(r'(property)(\s+)(get|let|set)(\s+)([a-z_][a-z0-9_]*)',
bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration, Whitespace, Name.Property)),
(r'rem\s.*[^\n]*', Comment.Single),
(words(_vbscript_builtins.KEYWORDS, suffix=r'\b'), Keyword),
(words(_vbscript_builtins.OPERATORS), Operator),
(words(_vbscript_builtins.OPERATOR_WORDS, suffix=r'\b'), Operator.Word),
(words(_vbscript_builtins.BUILTIN_CONSTANTS, suffix=r'\b'), Name.Constant),
(words(_vbscript_builtins.BUILTIN_FUNCTIONS, suffix=r'\b'), Name.Builtin),
(words(_vbscript_builtins.BUILTIN_VARIABLES, suffix=r'\b'), Name.Builtin),
(r'[a-z_][a-z0-9_]*', Name),
(r'\b_\n', Operator),
(words(r'(),.:'), Punctuation),
(r'.+(\n)?', Error)
],
'dim_more': [
(r'(\s*)(,)(\s*)([a-z_][a-z0-9]*)', bygroups(Whitespace, Punctuation, Whitespace, Name.Variable)),
default('#pop'),
],
'string': [
(r'[^"\n]+', String.Double),
(r'\"\"', String.Double),
(r'"', String.Double, '#pop'),
(r'\n', Error, '#pop'),
],
}
class BBCBasicLexer(RegexLexer):
base_keywords = ['OTHERWISE', 'AND', 'DIV', 'EOR', 'MOD', 'OR', 'ERROR',
'LINE', 'OFF', 'STEP', 'SPC', 'TAB', 'ELSE', 'THEN',
'OPENIN', 'PTR', 'PAGE', 'TIME', 'LOMEM', 'HIMEM', 'ABS',
'ACS', 'ADVAL', 'ASC', 'ASN', 'ATN', 'BGET', 'COS', 'COUNT',
'DEG', 'ERL', 'ERR', 'EVAL', 'EXP', 'EXT', 'FALSE', 'FN',
'GET', 'INKEY', 'INSTR', 'INT', 'LEN', 'LN', 'LOG', 'NOT',
'OPENUP', 'OPENOUT', 'PI', 'POINT', 'POS', 'RAD', 'RND',
'SGN', 'SIN', 'SQR', 'TAN', 'TO', 'TRUE', 'USR', 'VAL',
'VPOS', 'CHR$', 'GET$', 'INKEY$', 'LEFT$', 'MID$',
'RIGHT$', 'STR$', 'STRING$', 'EOF', 'PTR', 'PAGE', 'TIME',
'LOMEM', 'HIMEM', 'SOUND', 'BPUT', 'CALL', 'CHAIN', 'CLEAR',
'CLOSE', 'CLG', 'CLS', 'DATA', 'DEF', 'DIM', 'DRAW', 'END',
'ENDPROC', 'ENVELOPE', 'FOR', 'GOSUB', 'GOTO', 'GCOL', 'IF',
'INPUT', 'LET', 'LOCAL', 'MODE', 'MOVE', 'NEXT', 'ON',
'VDU', 'PLOT', 'PRINT', 'PROC', 'READ', 'REM', 'REPEAT',
'REPORT', 'RESTORE', 'RETURN', 'RUN', 'STOP', 'COLOUR',
'TRACE', 'UNTIL', 'WIDTH', 'OSCLI']
basic5_keywords = ['WHEN', 'OF', 'ENDCASE', 'ENDIF', 'ENDWHILE', 'CASE',
'CIRCLE', 'FILL', 'ORIGIN', 'POINT', 'RECTANGLE', 'SWAP',
'WHILE', 'WAIT', 'MOUSE', 'QUIT', 'SYS', 'INSTALL',
'LIBRARY', 'TINT', 'ELLIPSE', 'BEATS', 'TEMPO', 'VOICES',
'VOICE', 'STEREO', 'OVERLAY', 'APPEND', 'AUTO', 'CRUNCH',
'DELETE', 'EDIT', 'HELP', 'LIST', 'LOAD', 'LVAR', 'NEW',
'OLD', 'RENUMBER', 'SAVE', 'TEXTLOAD', 'TEXTSAVE',
'TWIN', 'TWINO', 'INSTALL', 'SUM', 'BEAT']
name = 'BBC Basic'
aliases = ['bbcbasic']
filenames = ['*.bbc']
tokens = {
'root': [
(r"[0-9]+", Name.Label),
(r"(\*)([^\n]*)",
bygroups(Keyword.Pseudo, Comment.Special)),
(r"", Whitespace, 'code'),
],
'code': [
(r"(REM)([^\n]*)",
bygroups(Keyword.Declaration, Comment.Single)),
(r'\n', Whitespace, 'root'),
(r'\s+', Whitespace),
(r':', Comment.Preproc),
(r'(DEF)(\s*)(FN|PROC)([A-Za-z_@][\w@]*)',
bygroups(Keyword.Declaration, Whitespace, Keyword.Declaration, Name.Function)),
(r'(FN|PROC)([A-Za-z_@][\w@]*)',
bygroups(Keyword, Name.Function)),
(r'(GOTO|GOSUB|THEN|RESTORE)(\s*)(\d+)',
bygroups(Keyword, Whitespace, Name.Label)),
(r'(TRUE|FALSE)', Keyword.Constant),
(r'(PAGE|LOMEM|HIMEM|TIME|WIDTH|ERL|ERR|REPORT\$|POS|VPOS|VOICES)', Keyword.Pseudo),
(words(base_keywords), Keyword),
(words(basic5_keywords), Keyword),
('"', String.Double, 'string'),
('%[01]{1,32}', Number.Bin),
('&[0-9a-f]{1,8}', Number.Hex),
(r'[+-]?[0-9]+\.[0-9]*(E[+-]?[0-9]+)?', Number.Float),
(r'[+-]?\.[0-9]+(E[+-]?[0-9]+)?', Number.Float),
(r'[+-]?[0-9]+E[+-]?[0-9]+', Number.Float),
(r'[+-]?\d+', Number.Integer),
(r'([A-Za-z_@][\w@]*[%$]?)', Name.Variable),
(r'([+\-]=|[$!|?+\-*/%^=><();]|>=|<=|<>|<<|>>|>>>|,)', Operator),
],
'string': [
(r'[^"\n]+', String.Double),
(r'"', String.Double, '#pop'),
(r'\n', Error, 'root'), # Unterminated string
],
}
def analyse_text(text):
if text.startswith('10REM >') or text.startswith('REM >'):
return 0.9
| true | true |
f7359464763e4b55530c6e96bb22acbd33f087a1 | 375 | py | Python | onnx_tf/handlers/frontend/reduce_sum.py | jsigee87/onnx-tensorflow | 90a58bed24a18f5260ebe9f22bf4997f9d591f62 | [
"Apache-2.0"
] | 18 | 2020-02-03T07:14:40.000Z | 2021-12-20T18:45:43.000Z | onnx_tf/handlers/frontend/reduce_sum.py | jsigee87/onnx-tensorflow | 90a58bed24a18f5260ebe9f22bf4997f9d591f62 | [
"Apache-2.0"
] | 11 | 2020-01-28T23:16:25.000Z | 2022-02-10T01:04:56.000Z | onnx_tf/handlers/frontend/reduce_sum.py | jsigee87/onnx-tensorflow | 90a58bed24a18f5260ebe9f22bf4997f9d591f62 | [
"Apache-2.0"
] | 2 | 2020-08-20T08:15:09.000Z | 2021-02-23T07:30:40.000Z | from onnx_tf.handlers.frontend_handler import FrontendHandler
from onnx_tf.handlers.handler import onnx_op
from onnx_tf.handlers.handler import tf_op
from .math_mixin import ReductionMixin
@onnx_op("ReduceSum")
@tf_op("Sum")
class ReduceSum(ReductionMixin, FrontendHandler):
@classmethod
def version_1(cls, node, **kwargs):
return cls.reduction_op(node, **kwargs)
| 26.785714 | 61 | 0.8 | from onnx_tf.handlers.frontend_handler import FrontendHandler
from onnx_tf.handlers.handler import onnx_op
from onnx_tf.handlers.handler import tf_op
from .math_mixin import ReductionMixin
@onnx_op("ReduceSum")
@tf_op("Sum")
class ReduceSum(ReductionMixin, FrontendHandler):
@classmethod
def version_1(cls, node, **kwargs):
return cls.reduction_op(node, **kwargs)
| true | true |
f735949721e939de43ac48ed4c1dba57e72e9aed | 3,674 | py | Python | tests/test_words_grammar.py | piller-imre/exprail-python | 1b73a0993d399ef44ada232647845a253b821ecc | [
"MIT"
] | null | null | null | tests/test_words_grammar.py | piller-imre/exprail-python | 1b73a0993d399ef44ada232647845a253b821ecc | [
"MIT"
] | null | null | null | tests/test_words_grammar.py | piller-imre/exprail-python | 1b73a0993d399ef44ada232647845a253b821ecc | [
"MIT"
] | null | null | null | import unittest
from exprail.classifier import Classifier
from exprail.grammar import Grammar
from exprail.parser import Parser
from exprail.source import SourceString
class WsClassifier(Classifier):
"""Classify alphabetic characters and whitespaces"""
@staticmethod
def is_in_class(token_class, token):
"""
Distinguish alphabetic characters and whitespaces.
:param token_class: 'a-Z' or 'ws' set names
:param token: the value of the token
:return: True, when the token is in the class, else False
"""
if token.type == 'empty':
return False
if token_class == 'ws':
return token.value == ' '
else:
return token.value != ' '
class WsParser(Parser):
"""Parse the input text and print the words"""
def __init__(self, grammar, source):
super(WsParser, self).__init__(grammar, source)
self._result = []
@property
def result(self):
return self._result
def operate(self, operation, token):
"""Print the token value on print operation."""
if operation == 'save':
word = ''.join(self._stacks[''])
self._result.append(word)
else:
raise ValueError('The "{}" is an invalid operation!'.format(operation))
class WordsGrammarTest(unittest.TestCase):
"""Words grammar tests with examples"""
def test_empty_source(self):
ws_classifier = WsClassifier()
grammar = Grammar(filename='grammars/words.grammar', classifier=ws_classifier)
source = SourceString(r'')
parser = WsParser(grammar, source)
parser.parse()
self.assertEqual(parser.result, [])
def test_single_word(self):
ws_classifier = WsClassifier()
grammar = Grammar(filename='grammars/words.grammar', classifier=ws_classifier)
source = SourceString(r'single')
parser = WsParser(grammar, source)
parser.parse()
self.assertEqual(parser.result, ['single'])
def test_multiple_words(self):
ws_classifier = WsClassifier()
grammar = Grammar(filename='grammars/words.grammar', classifier=ws_classifier)
source = SourceString(r'Some simple words after each others')
parser = WsParser(grammar, source)
parser.parse()
words = [
'Some', 'simple', 'words', 'after', 'each', 'others'
]
self.assertEqual(parser.result, words)
def test_only_spaces(self):
ws_classifier = WsClassifier()
grammar = Grammar(filename='grammars/words.grammar', classifier=ws_classifier)
source = SourceString(r' ')
parser = WsParser(grammar, source)
parser.parse()
self.assertEqual(parser.result, [])
def test_multiple_separator_spaces(self):
ws_classifier = WsClassifier()
grammar = Grammar(filename='grammars/words.grammar', classifier=ws_classifier)
source = SourceString(r'Some simple words after each others')
parser = WsParser(grammar, source)
parser.parse()
words = [
'Some', 'simple', 'words', 'after', 'each', 'others'
]
self.assertEqual(parser.result, words)
def test_leading_and_trailing_spaces(self):
ws_classifier = WsClassifier()
grammar = Grammar(filename='grammars/words.grammar', classifier=ws_classifier)
source = SourceString(r' Leading and trailing spaces ')
parser = WsParser(grammar, source)
parser.parse()
words = [
'Leading', 'and', 'trailing', 'spaces'
]
self.assertEqual(parser.result, words)
| 34.336449 | 86 | 0.629831 | import unittest
from exprail.classifier import Classifier
from exprail.grammar import Grammar
from exprail.parser import Parser
from exprail.source import SourceString
class WsClassifier(Classifier):
@staticmethod
def is_in_class(token_class, token):
if token.type == 'empty':
return False
if token_class == 'ws':
return token.value == ' '
else:
return token.value != ' '
class WsParser(Parser):
def __init__(self, grammar, source):
super(WsParser, self).__init__(grammar, source)
self._result = []
@property
def result(self):
return self._result
def operate(self, operation, token):
if operation == 'save':
word = ''.join(self._stacks[''])
self._result.append(word)
else:
raise ValueError('The "{}" is an invalid operation!'.format(operation))
class WordsGrammarTest(unittest.TestCase):
def test_empty_source(self):
ws_classifier = WsClassifier()
grammar = Grammar(filename='grammars/words.grammar', classifier=ws_classifier)
source = SourceString(r'')
parser = WsParser(grammar, source)
parser.parse()
self.assertEqual(parser.result, [])
def test_single_word(self):
ws_classifier = WsClassifier()
grammar = Grammar(filename='grammars/words.grammar', classifier=ws_classifier)
source = SourceString(r'single')
parser = WsParser(grammar, source)
parser.parse()
self.assertEqual(parser.result, ['single'])
def test_multiple_words(self):
ws_classifier = WsClassifier()
grammar = Grammar(filename='grammars/words.grammar', classifier=ws_classifier)
source = SourceString(r'Some simple words after each others')
parser = WsParser(grammar, source)
parser.parse()
words = [
'Some', 'simple', 'words', 'after', 'each', 'others'
]
self.assertEqual(parser.result, words)
def test_only_spaces(self):
ws_classifier = WsClassifier()
grammar = Grammar(filename='grammars/words.grammar', classifier=ws_classifier)
source = SourceString(r' ')
parser = WsParser(grammar, source)
parser.parse()
self.assertEqual(parser.result, [])
def test_multiple_separator_spaces(self):
ws_classifier = WsClassifier()
grammar = Grammar(filename='grammars/words.grammar', classifier=ws_classifier)
source = SourceString(r'Some simple words after each others')
parser = WsParser(grammar, source)
parser.parse()
words = [
'Some', 'simple', 'words', 'after', 'each', 'others'
]
self.assertEqual(parser.result, words)
def test_leading_and_trailing_spaces(self):
ws_classifier = WsClassifier()
grammar = Grammar(filename='grammars/words.grammar', classifier=ws_classifier)
source = SourceString(r' Leading and trailing spaces ')
parser = WsParser(grammar, source)
parser.parse()
words = [
'Leading', 'and', 'trailing', 'spaces'
]
self.assertEqual(parser.result, words)
| true | true |
f73594c6ed5d49a981b35e06fa1e6b1b78ca49b5 | 15,405 | py | Python | SurfaceTopography/Uniform/Filtering.py | ComputationalMechanics/SurfaceTopography | 6751be427c89d526ef4857300409596c79119029 | [
"MIT"
] | 5 | 2020-06-03T20:01:36.000Z | 2021-03-03T09:25:03.000Z | SurfaceTopography/Uniform/Filtering.py | ComputationalMechanics/SurfaceTopography | 6751be427c89d526ef4857300409596c79119029 | [
"MIT"
] | 44 | 2020-06-02T12:25:15.000Z | 2021-03-17T15:18:51.000Z | SurfaceTopography/Uniform/Filtering.py | ComputationalMechanics/SurfaceTopography | 6751be427c89d526ef4857300409596c79119029 | [
"MIT"
] | 1 | 2021-03-08T13:15:18.000Z | 2021-03-08T13:15:18.000Z | #
# Copyright 2020-2021 Lars Pastewka
# 2020-2021 Antoine Sanner
#
# ### MIT license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import numpy as np
from scipy.signal import get_window
from ..FFTTricks import get_window_2D
from ..HeightContainer import UniformTopographyInterface
from ..UniformLineScanAndTopography import DecoratedUniformTopography
class WindowedUniformTopography(DecoratedUniformTopography):
    """
    Construct a topography with a window function applied to it.

    Windowing is typically used to suppress edge effects (spectral
    leakage) before Fourier analysis of nonperiodic data.
    """

    name = 'windowed_topography'

    def __init__(self, topography, window=None, direction=None, info=None):
        """
        Parameters
        ----------
        topography : Topography or UniformLineScan
            Topography to be decorated.
        window : str, optional
            Window for eliminating edge effect. See scipy.signal.get_window.
            (Default: no window for periodic Topographies, "hann" window for
            nonperiodic Topographies)
        direction : str, optional
            Direction in which the window is applied. Possible options are
            'x', 'y' and 'radial'. If set to None, it chooses 'x' for line
            scans and 'radial' for topographies. (Default: None)
        info : dict, optional
            Additional entries for the info dictionary. (Default: None)
        """
        # `info=None` instead of a mutable `info={}` default avoids sharing
        # a single dict object between all calls of this constructor.
        super().__init__(topography, info={} if info is None else info)
        self._window_name = window
        self._direction = direction

        # Window array is computed lazily, see the `window_data` property.
        self._window_data = None

    def _make_window(self):
        """Compute and cache the normalized window array for the grid."""
        self._window_data = None

        n = self.parent_topography.nb_grid_pts
        try:
            nx, ny = n
        except ValueError:
            # Line scan: only a single grid dimension
            nx, = n

        window_name = self._window_name
        if not self.parent_topography.is_periodic and window_name is None:
            # Nonperiodic data defaults to a Hann window
            window_name = "hann"

        direction = self._direction
        if direction is None:
            direction = 'x' if self.parent_topography.dim == 1 else 'radial'

        # Construct window
        if window_name is not None and window_name != 'None':
            if direction == 'x':
                # Get window from scipy.signal
                win = get_window(window_name, nx)
                # Normalize window such that its mean square is unity
                win *= np.sqrt(nx / (win ** 2).sum())
            elif direction == 'y':
                if self.parent_topography.dim == 1:
                    raise ValueError("Direction 'y' does not make sense for line scans.")
                # Get window from scipy.signal
                win = get_window(window_name, ny)
                # Normalize window such that its mean square is unity
                win *= np.sqrt(ny / (win ** 2).sum())
            elif direction == 'radial':
                if self.parent_topography.dim == 1:
                    raise ValueError("Direction 'radial' does not make sense for line scans.")
                win = get_window_2D(window_name, nx, ny,
                                    self.parent_topography.physical_sizes)
                # Normalize window such that its mean square is unity
                win *= np.sqrt(nx * ny / (win ** 2).sum())
            else:
                raise ValueError(f"Unknown direction '{self._direction}'.")
            self._window_data = win

    def __getstate__(self):
        """Return the picklable state of the instance."""
        state = super().__getstate__(), \
            self._window_name, self._direction
        return state

    def __setstate__(self, state):
        """Restore the instance from the state returned by `__getstate__`.

        The cached window array is not part of the state; it is rebuilt
        lazily on the next access to `window_data`.
        """
        superstate, self._window_name, self._direction = state
        super().__setstate__(superstate)

    @property
    def window_data(self):
        """Window array, computed on first access and cached afterwards."""
        if self._window_data is None:
            self._make_window()
        return self._window_data

    def heights(self):
        """Compute the windowed topography heights.

        Returns
        -------
        numpy.ndarray
            Heights of the parent topography multiplied by the window,
            or the raw parent heights when no window applies (periodic
            topography without an explicit window).
        """
        if self.window_data is None:
            # Periodic topography and no explicit window: nothing to do
            return self.parent_topography.heights()
        else:
            direction = self._direction
            if direction is None:
                direction = 'x' if self.parent_topography.dim == 1 else 'radial'
            if direction == 'x':
                # Broadcast the 1D window along the first (x) axis
                return (self.window_data * self.parent_topography.heights().T).T
            elif direction == 'y' or direction == 'radial':
                return self.window_data * self.parent_topography.heights()
            else:
                raise ValueError(f"Unknown direction '{self._direction}'.")
class FilteredUniformTopography(DecoratedUniformTopography):
    """
    Topography with a filter function applied to its Fourier spectrum.
    """

    name = 'filtered_topography'

    def __init__(self, topography,
                 filter_function=lambda qx, qy: (np.abs(qx) <= 1) * (np.abs(qy) <= 1),
                 isotropic=True,
                 info=None):
        """
        Parameters
        ----------
        topography : Topography or UniformLineScan
            Topography to be decorated. Must be periodic.
        filter_function : callable, optional
            Filter applied to the Fourier transform of the heights. An
            isotropic filter receives the wavevector magnitude |q|, an
            anisotropic one the components qx, qy.
            (Default: square step filter passing |qx| <= 1 and |qy| <= 1.
            Bug fix: the original default read `(|qx| <= 1) * |qy| <= 1`,
            which Python parses as `((|qx| <= 1) * |qy|) <= 1` — not a
            square filter at all.)
            NOTE(review): the default filter takes two arguments and is
            therefore only usable with `isotropic=False` — confirm intent.
        isotropic : bool, optional
            Whether `filter_function` takes only the magnitude |q|.
            (Default: True)
        info : dict, optional
            Additional entries for the info dictionary. (Default: None)
        """
        if not topography.is_periodic:
            raise ValueError("only implemented for periodic topographies")
        # `info=None` avoids the shared mutable default `info={}`.
        super().__init__(topography, info={} if info is None else info)
        self._filter_function = filter_function
        self._is_filter_isotropic = isotropic
        # TODO: should be deductible from the filter function signature

    def __getstate__(self):
        """Return the picklable state of the instance."""
        state = super().__getstate__(), \
            self._filter_function, self._is_filter_isotropic
        return state

    def __setstate__(self, state):
        """Restore the instance from the state returned by `__getstate__`."""
        superstate, self._filter_function, self._is_filter_isotropic = state
        super().__setstate__(superstate)

    @property
    def is_filter_isotropic(self):
        """Whether the filter takes only the wavevector magnitude |q|."""
        return self._is_filter_isotropic

    def filter_function(self, *args):
        """Evaluate the filter function.

        Parameters
        ----------
        *args :
            For two-dimensional topographies with an anisotropic filter,
            the wavevector components qx, qy; otherwise the wavevector
            magnitude q.

        Raises
        ------
        ValueError
            If the number of arguments does not match the filter type.
        """
        # Bug fix: the original code raised plain strings
        # (`raise ("ValueError: ...")`), which is itself a TypeError in
        # Python 3; raise proper ValueError exceptions instead.
        if self.dim == 2 and not self.is_filter_isotropic \
                and len(args) != 2:
            raise ValueError("qx, qy expected")
        elif self.dim == 1 and len(args) != 1:
            raise ValueError("q expected")
        return self._filter_function(*args)

    def heights(self):
        """Return the filtered heights.

        The heights of the parent topography are transformed to Fourier
        space, multiplied by the filter function and transformed back.
        """
        if self.dim == 2:
            nx, ny = self.parent_topography.nb_grid_pts
            sx, sy = self.parent_topography.physical_sizes

            # Wavevector components for the rfftn half-spectrum layout
            qx = np.arange(0, nx, dtype=np.float64).reshape(-1, 1)
            qx = np.where(qx <= nx // 2, qx / sx, (qx - nx) / sx)
            qx *= 2 * np.pi

            qy = np.arange(0, ny // 2 + 1, dtype=np.float64).reshape(1, -1)
            qy *= 2 * np.pi / sy

            if self.is_filter_isotropic:
                h_qs = np.fft.irfftn(np.fft.rfftn(self.parent_topography.heights()) *
                                     self.filter_function(np.sqrt(qx ** 2 + qy ** 2)))
            else:
                h_qs = np.fft.irfftn(np.fft.rfftn(self.parent_topography.heights()) *
                                     self.filter_function(qx, qy))

            return h_qs
        elif self.dim == 1:
            s, = self.parent_topography.physical_sizes
            n, = self.parent_topography.nb_grid_pts

            q = abs(2 * np.pi * np.fft.rfftfreq(n, s / n))

            h = self.parent_topography.heights()
            h_q = np.fft.rfft(h)
            h_q_filtered = np.fft.irfft(h_q * self.filter_function(q))

            # irfft of a filtered real-input spectrum is real up to
            # round-off; discard the negligible imaginary part.
            return np.real(h_q_filtered)
class ShortCutTopography(FilteredUniformTopography):
    """Topography with short-wavelength (high-frequency) content removed."""

    name = 'shortcut_filtered_topography'

    def __init__(self, topography,
                 cutoff_wavevector=None, cutoff_wavelength=None,
                 kind="circular step",
                 info=None):
        r"""Applies a short wavelength cut filter to the topography using fft.

        for `kind=="circular step"` (default), parts of the spectrum with
        `|q| > cutoff_wavevector` are set to zero

        for `kind=="square step"`, parts of the spectrum with
        `q_x > cutoff_wavevector or q_y > cutoff_wavevector ` are set to zero

        either `cutoff_wavelength` or
        `cutoff_wavevector` :math:`= 2 pi /` `cutoff_wavelength`
        have to be provided.

        Parameters
        ----------
        topography: Topography
        cutoff_wavevector: float
            highest wavevector
        cutoff_wavelength: float
            shortest wavelength
        kind: {"circular step", "square step"}
        info: dict, optional
            Additional entries for the info dictionary.

        Returns
        -------
        Topography with filtered heights

        Examples
        --------
        >>> topography.shortcut(cutoff_wavevector=2 * np.pi / l)
        >>> topography.shortcut(cutoff_wavelength=l) # equivalent
        """
        if not topography.is_periodic:
            raise ValueError("only implemented for periodic topographies")

        # Exactly one of cutoff_wavevector / cutoff_wavelength is accepted;
        # internally only the wavelength is stored.
        if cutoff_wavelength is None:
            if cutoff_wavevector is not None:
                cutoff_wavelength = 2 * np.pi / cutoff_wavevector
            else:
                raise ValueError("cutoff_wavevector "
                                 "or cutoff_wavelength should be provided")
        elif cutoff_wavevector is not None:
            # Bug fix: this branch fires when BOTH cutoffs were given, but
            # the original message claimed one "should be provided".
            raise ValueError("only one of cutoff_wavevector "
                             "and cutoff_wavelength may be provided")

        self._cutoff_wavelength = cutoff_wavelength
        self._kind = kind

        def circular_step(q):
            # Keep only wavevectors inside the cutoff circle
            return q <= self.cutoff_wavevector

        def square_step(qx, qy):
            # Keep only wavevectors inside the cutoff square
            return (np.abs(qx) <= self.cutoff_wavevector) * (
                np.abs(qy) <= self.cutoff_wavevector)

        # `info=None` avoids the shared mutable default `info={}`.
        if self._kind == "circular step":
            super().__init__(topography, info={} if info is None else info,
                             filter_function=circular_step)
        elif self._kind == "square step":
            super().__init__(topography, info={} if info is None else info,
                             filter_function=square_step, isotropic=False)
        else:
            raise ValueError(f"Invalid kind '{kind}'.")

    @property
    def cutoff_wavevector(self):
        """Cutoff wavevector, 2 * pi / cutoff_wavelength."""
        return 2 * np.pi / self._cutoff_wavelength

    @property
    def cutoff_wavelength(self):
        """Shortest wavelength retained by the filter."""
        return self._cutoff_wavelength

    def __getstate__(self):
        """Return the picklable state of the instance."""
        state = super().__getstate__(), self._filter_function, \
            self._kind, self._cutoff_wavelength
        return state

    def __setstate__(self, state):
        """Restore the instance from the state returned by `__getstate__`."""
        superstate, self._filter_function, self._kind, \
            self._cutoff_wavelength = state
        super().__setstate__(superstate)
class LongCutTopography(FilteredUniformTopography):
    """Topography with long-wavelength (low-frequency) content removed."""

    name = 'longcut_filtered_topography'

    def __init__(self, topography,
                 cutoff_wavevector=None, cutoff_wavelength=None,
                 kind="circular step",
                 info=None):
        r"""Applies a long wavelength cut filter to the topography using fft.

        for `kind=="circular step"` (default), parts of the spectrum with
        `|q| < cutoff_wavevector` are set to zero

        for `kind=="square step"`, parts of the spectrum with
        `q_x < cutoff_wavevector or q_y < cutoff_wavevector ` are set to zero

        either `cutoff_wavelength` or
        `cutoff_wavevector` :math:`= 2 pi /` `cutoff_wavelength`
        have to be provided.

        Parameters
        ----------
        topography: Topography
        cutoff_wavevector: float
            highest wavevector
        cutoff_wavelength: float
            shortest wavelength
        kind: {"circular step", "square step"}
        info: dict, optional
            Additional entries for the info dictionary.

        Returns
        -------
        Topography with filtered heights

        Examples
        --------
        >>> topography.longcut(cutoff_wavevector=2 * np.pi / l)
        >>> topography.longcut(cutoff_wavelength=l) # equivalent
        """
        if not topography.is_periodic:
            raise ValueError("only implemented for periodic topographies")

        # Exactly one of cutoff_wavevector / cutoff_wavelength is accepted;
        # internally only the wavelength is stored.
        if cutoff_wavelength is None:
            if cutoff_wavevector is not None:
                cutoff_wavelength = 2 * np.pi / cutoff_wavevector
            else:
                raise ValueError("cutoff_wavevector "
                                 "or cutoff_wavelength should be provided")
        elif cutoff_wavevector is not None:
            # Bug fix: this branch fires when BOTH cutoffs were given, but
            # the original message claimed one "should be provided".
            raise ValueError("only one of cutoff_wavevector "
                             "and cutoff_wavelength may be provided")

        self._cutoff_wavelength = cutoff_wavelength
        self._kind = kind

        def circular_step(q):
            # Keep only wavevectors outside the cutoff circle
            return q >= self.cutoff_wavevector

        def square_step(qx, qy):
            # Keep only wavevectors outside the cutoff square
            return (np.abs(qx) >= self.cutoff_wavevector) * (
                np.abs(qy) >= self.cutoff_wavevector)

        # `info=None` avoids the shared mutable default `info={}`.
        if self._kind == "circular step":
            super().__init__(topography, info={} if info is None else info,
                             filter_function=circular_step)
        elif self._kind == "square step":
            super().__init__(topography, info={} if info is None else info,
                             filter_function=square_step, isotropic=False)
        else:
            raise ValueError(f"Invalid kind '{kind}'.")

    @property
    def cutoff_wavevector(self):
        """Cutoff wavevector, 2 * pi / cutoff_wavelength."""
        return 2 * np.pi / self._cutoff_wavelength

    @property
    def cutoff_wavelength(self):
        """Longest wavelength retained by the filter."""
        return self._cutoff_wavelength

    def __getstate__(self):
        """Return the picklable state of the instance."""
        state = super().__getstate__(), self._filter_function, \
            self._kind, self._cutoff_wavelength
        return state

    def __setstate__(self, state):
        """Restore the instance from the state returned by `__getstate__`."""
        superstate, self._filter_function, self._kind, \
            self._cutoff_wavelength = state
        super().__setstate__(superstate)
# Register the decorator classes as analysis functions on all uniform
# topographies, e.g. `topography.window(...)` or `topography.shortcut(...)`.
for _func_name, _decorator_class in (('window', WindowedUniformTopography),
                                     ('filter', FilteredUniformTopography),
                                     ('shortcut', ShortCutTopography),
                                     ('longcut', LongCutTopography)):
    UniformTopographyInterface.register_function(_func_name, _decorator_class)
del _func_name, _decorator_class
| 35.909091 | 94 | 0.6037 |
om scipy.signal import get_window
from ..FFTTricks import get_window_2D
from ..HeightContainer import UniformTopographyInterface
from ..UniformLineScanAndTopography import DecoratedUniformTopography
class WindowedUniformTopography(DecoratedUniformTopography):
name = 'windowed_topography'
def __init__(self, topography, window=None, direction=None, info={}):
super().__init__(topography, info=info)
self._window_name = window
self._direction = direction
self._window_data = None
def _make_window(self):
self._window_data = None
n = self.parent_topography.nb_grid_pts
try:
nx, ny = n
except ValueError:
nx, = n
window_name = self._window_name
if not self.parent_topography.is_periodic and window_name is None:
window_name = "hann"
direction = self._direction
if direction is None:
direction = 'x' if self.parent_topography.dim == 1 else 'radial'
if window_name is not None and window_name != 'None':
if direction == 'x':
win = get_window(window_name, nx)
win *= np.sqrt(nx / (win ** 2).sum())
elif direction == 'y':
if self.parent_topography.dim == 1:
raise ValueError("Direction 'y' does not make sense for line scans.")
win = get_window(window_name, ny)
win *= np.sqrt(ny / (win ** 2).sum())
elif direction == 'radial':
if self.parent_topography.dim == 1:
raise ValueError("Direction 'radial' does not make sense for line scans.")
win = get_window_2D(window_name, nx, ny,
self.parent_topography.physical_sizes)
win *= np.sqrt(nx * ny / (win ** 2).sum())
else:
raise ValueError(f"Unknown direction '{self._direction}'.")
self._window_data = win
def __getstate__(self):
state = super().__getstate__(), \
self._window_name, self._direction
return state
def __setstate__(self, state):
superstate, self._window_name, self._direction = state
super().__setstate__(superstate)
@property
def window_data(self):
if self._window_data is None:
self._make_window()
return self._window_data
def heights(self):
if self.window_data is None:
return self.parent_topography.heights()
else:
direction = self._direction
if direction is None:
direction = 'x' if self.parent_topography.dim == 1 else 'radial'
if direction == 'x':
return (self.window_data * self.parent_topography.heights().T).T
elif direction == 'y' or direction == 'radial':
return self.window_data * self.parent_topography.heights()
else:
raise ValueError(f"Unknown direction '{self._direction}'.")
class FilteredUniformTopography(DecoratedUniformTopography):
name = 'filtered_topography'
def __init__(self, topography,
filter_function=lambda qx, qy: (np.abs(qx) <= 1) * np.abs(qy) <= 1,
isotropic=True,
info={}):
if not topography.is_periodic:
raise ValueError("only implemented for periodic topographies")
super().__init__(topography, info=info)
self._filter_function = filter_function
self._is_filter_isotropic = isotropic
def __getstate__(self):
state = super().__getstate__(), \
self._filter_function, self._is_filter_isotropic
return state
def __setstate__(self, state):
superstate, self._filter_function, self._is_filter_isotropic = state
super().__setstate__(superstate)
@property
def is_filter_isotropic(self):
return self._is_filter_isotropic
def filter_function(self, *args):
if self.dim == 2 and not self.is_filter_isotropic \
and len(args) != 2:
raise ("ValueError: qx, qy expected")
elif self.dim == 1 and len(args) != 1:
raise ("ValueError: q expected")
return self._filter_function(*args)
def heights(self):
if self.dim == 2:
nx, ny = self.parent_topography.nb_grid_pts
sx, sy = self.parent_topography.physical_sizes
qx = np.arange(0, nx, dtype=np.float64).reshape(-1, 1)
qx = np.where(qx <= nx // 2, qx / sx, (qx - nx) / sx)
qx *= 2 * np.pi
qy = np.arange(0, ny // 2 + 1, dtype=np.float64).reshape(1, -1)
qy *= 2 * np.pi / sy
if self.is_filter_isotropic:
h_qs = np.fft.irfftn(np.fft.rfftn(self.parent_topography.heights()) *
self.filter_function(np.sqrt(qx ** 2 + qy ** 2)))
else:
h_qs = np.fft.irfftn(np.fft.rfftn(self.parent_topography.heights()) *
self.filter_function(qx, qy))
return h_qs
elif self.dim == 1:
s, = self.parent_topography.physical_sizes
n, = self.parent_topography.nb_grid_pts
q = abs(2 * np.pi * np.fft.rfftfreq(n, s / n))
h = self.parent_topography.heights()
h_q = np.fft.rfft(h)
h_q_filtered = np.fft.irfft(h_q * self.filter_function(q))
return np.real(h_q_filtered)
class ShortCutTopography(FilteredUniformTopography):
name = 'shortcut_filtered_topography'
def __init__(self, topography,
cutoff_wavevector=None, cutoff_wavelength=None,
kind="circular step",
info={}):
if not topography.is_periodic:
raise ValueError("only implemented for periodic topographies")
if cutoff_wavelength is None:
if cutoff_wavevector is not None:
cutoff_wavelength = 2 * np.pi / cutoff_wavevector
else:
raise ValueError("cutoff_wavevector "
"or cutoff_wavelength should be provided")
elif cutoff_wavevector is not None:
raise ValueError("cutoff_wavevector "
"or cutoff_wavelength should be provided")
self._cutoff_wavelength = cutoff_wavelength
self._kind = kind
def circular_step(q):
return q <= self.cutoff_wavevector
def square_step(qx, qy):
return (np.abs(qx) <= self.cutoff_wavevector) * (
np.abs(qy) <= self.cutoff_wavevector)
if self._kind == "circular step":
super().__init__(topography, info=info,
filter_function=circular_step)
elif self._kind == "square step":
super().__init__(topography, info=info,
filter_function=square_step, isotropic=False)
else:
raise ValueError("Invalid kind")
@property
def cutoff_wavevector(self):
return 2 * np.pi / self._cutoff_wavelength
@property
def cutoff_wavelength(self):
return self._cutoff_wavelength
def __getstate__(self):
state = super().__getstate__(), self._filter_function, \
self._kind, self._cutoff_wavelength
return state
def __setstate__(self, state):
superstate, self._filter_function, self._kind, \
self._cutoff_wavelength = state
super().__setstate__(superstate)
class LongCutTopography(FilteredUniformTopography):
name = 'longcut_filtered_topography'
def __init__(self, topography,
cutoff_wavevector=None, cutoff_wavelength=None,
kind="circular step",
info={}):
if not topography.is_periodic:
raise ValueError("only implemented for periodic topographies")
if cutoff_wavelength is None:
if cutoff_wavevector is not None:
cutoff_wavelength = 2 * np.pi / cutoff_wavevector
else:
raise ValueError("cutoff_wavevector "
"or cutoff_wavelength should be provided")
elif cutoff_wavevector is not None:
raise ValueError("cutoff_wavevector "
"or cutoff_wavelength should be provided")
self._cutoff_wavelength = cutoff_wavelength
self._kind = kind
def circular_step(q):
return q >= self.cutoff_wavevector
def square_step(qx, qy):
return (np.abs(qx) >= self.cutoff_wavevector) * (
np.abs(qy) >= self.cutoff_wavevector)
if self._kind == "circular step":
super().__init__(topography, info=info,
filter_function=circular_step)
elif self._kind == "square step":
super().__init__(topography, info=info,
filter_function=square_step, isotropic=False)
else:
raise ValueError("Invalid kind")
@property
def cutoff_wavevector(self):
return 2 * np.pi / self._cutoff_wavelength
@property
def cutoff_wavelength(self):
return self._cutoff_wavelength
def __getstate__(self):
state = super().__getstate__(), self._filter_function, \
self._kind, self._cutoff_wavelength
return state
def __setstate__(self, state):
superstate, self._filter_function, self._kind, \
self._cutoff_wavelength = state
super().__setstate__(superstate)
UniformTopographyInterface.register_function("window", WindowedUniformTopography)
UniformTopographyInterface.register_function("filter", FilteredUniformTopography)
UniformTopographyInterface.register_function("shortcut", ShortCutTopography)
UniformTopographyInterface.register_function("longcut", LongCutTopography)
| true | true |
f73596d35f54a80081eb51834b2498b60a9102ed | 8,322 | py | Python | Python/libraries/recognizers-date-time/recognizers_date_time/date_time/french/holiday_parser_config.py | ahmedabuamra/Recognizers-Text | 31193d89d3532839742992a2755c1d8539c68116 | [
"MIT"
] | 10 | 2019-05-11T18:07:14.000Z | 2021-08-20T03:02:47.000Z | Python/libraries/recognizers-date-time/recognizers_date_time/date_time/french/holiday_parser_config.py | ahmedabuamra/Recognizers-Text | 31193d89d3532839742992a2755c1d8539c68116 | [
"MIT"
] | 1 | 2020-07-10T08:25:36.000Z | 2020-07-10T08:25:36.000Z | Python/libraries/recognizers-date-time/recognizers_date_time/date_time/french/holiday_parser_config.py | ahmedabuamra/Recognizers-Text | 31193d89d3532839742992a2755c1d8539c68116 | [
"MIT"
] | 18 | 2019-08-19T12:11:00.000Z | 2021-10-12T09:36:27.000Z | from typing import List, Dict, Callable
from datetime import datetime
from recognizers_text.utilities import RegExpUtility
from ..utilities import DateUtils
from ..base_holiday import BaseHolidayParserConfiguration
from ...resources.french_date_time import FrenchDateTime
class FrenchHolidayParserConfiguration(BaseHolidayParserConfiguration):
    """Holiday parser configuration for French date/time recognition.

    Provides the regular expressions used to detect holiday mentions in
    French text and maps canonical holiday keys to functions that compute
    the holiday's date for a given year.
    """

    @property
    def holiday_names(self) -> Dict[str, List[str]]:
        # Mapping of canonical holiday keys to lists of French names
        return self._holiday_names

    @property
    def holiday_regex_list(self) -> List[str]:
        # NOTE(review): these are compiled regex objects, not plain
        # strings, despite the List[str] annotation — consider List[Pattern].
        return self._holiday_regexes

    @property
    def holiday_func_dictionary(self) -> Dict[str, Callable[[int], datetime]]:
        # Populated by BaseHolidayParserConfiguration via _init_holiday_funcs
        return self._holiday_func_dictionary

    def __init__(self, config):
        """Initialize regexes and holiday names from the French resources.

        The `config` argument is accepted for interface compatibility but
        is not used here.
        """
        super().__init__()
        self._holiday_regexes = [
            RegExpUtility.get_safe_reg_exp(FrenchDateTime.HolidayRegex1),
            RegExpUtility.get_safe_reg_exp(FrenchDateTime.HolidayRegex2),
            RegExpUtility.get_safe_reg_exp(FrenchDateTime.HolidayRegex3),
            RegExpUtility.get_safe_reg_exp(FrenchDateTime.HolidayRegex4)
        ]
        self._holiday_names = FrenchDateTime.HolidayNames

    def _init_holiday_funcs(self) -> Dict[str, Callable[[int], datetime]]:
        """Merge the base-class holiday functions with the local ones.

        Local entries take precedence over identically-named base entries.
        """
        local = dict([
            ('maosbirthday', FrenchHolidayParserConfiguration.mao_birthday),
            ('yuandan', FrenchHolidayParserConfiguration.new_year),
            ('teachersday', FrenchHolidayParserConfiguration.teacher_day),
            ('singleday', FrenchHolidayParserConfiguration.singles_day),
            ('allsaintsday', FrenchHolidayParserConfiguration.halloween_day),
            ('youthday', FrenchHolidayParserConfiguration.youth_day),
            ('childrenday', FrenchHolidayParserConfiguration.children_day),
            ('femaleday', FrenchHolidayParserConfiguration.female_day),
            ('treeplantingday', FrenchHolidayParserConfiguration.tree_plant_day),
            ('arborday', FrenchHolidayParserConfiguration.tree_plant_day),
            ('girlsday', FrenchHolidayParserConfiguration.girls_day),
            ('whiteloverday', FrenchHolidayParserConfiguration.white_lover_day),
            ('loverday', FrenchHolidayParserConfiguration.valentines_day),
            ('christmas', FrenchHolidayParserConfiguration.christmas_day),
            ('xmas', FrenchHolidayParserConfiguration.christmas_day),
            ('newyear', FrenchHolidayParserConfiguration.new_year),
            ('newyearday', FrenchHolidayParserConfiguration.new_year),
            ('newyearsday', FrenchHolidayParserConfiguration.new_year),
            ('inaugurationday', FrenchHolidayParserConfiguration.inauguration_day),
            ('groundhougday', FrenchHolidayParserConfiguration.groundhog_day),
            ('valentinesday', FrenchHolidayParserConfiguration.valentines_day),
            ('stpatrickday', FrenchHolidayParserConfiguration.st_patrick_day),
            ('aprilfools', FrenchHolidayParserConfiguration.fool_day),
            ('stgeorgeday', FrenchHolidayParserConfiguration.st_george_day),
            ('mayday', FrenchHolidayParserConfiguration.mayday),
            ('cincodemayoday', FrenchHolidayParserConfiguration.cinco_de_mayo_day),
            ('baptisteday', FrenchHolidayParserConfiguration.baptiste_day),
            ('usindependenceday', FrenchHolidayParserConfiguration.usa_independence_day),
            ('independenceday', FrenchHolidayParserConfiguration.usa_independence_day),
            ('bastilleday', FrenchHolidayParserConfiguration.bastille_day),
            ('halloweenday', FrenchHolidayParserConfiguration.halloween_day),
            ('allhallowday', FrenchHolidayParserConfiguration.all_hallow_day),
            ('allsoulsday', FrenchHolidayParserConfiguration.all_souls_day),
            ('guyfawkesday', FrenchHolidayParserConfiguration.guyfawkes_day),
            ('veteransday', FrenchHolidayParserConfiguration.veterans_day),
            ('christmaseve', FrenchHolidayParserConfiguration.christmas_eve),
            ('newyeareve', FrenchHolidayParserConfiguration.new_year_eve),
            ('fathersday', FrenchHolidayParserConfiguration.fathers_day),
            ('mothersday', FrenchHolidayParserConfiguration.mothers_day),
            ('labourday', FrenchHolidayParserConfiguration.labour_day)
        ])
        return {**super()._init_holiday_funcs(), **local}

    # Each of the static methods below maps a year to the (fixed) date of
    # the corresponding holiday.

    @staticmethod
    def new_year(year: int) -> datetime:
        return datetime(year, 1, 1)

    @staticmethod
    def new_year_eve(year: int) -> datetime:
        return datetime(year, 12, 31)

    @staticmethod
    def christmas_day(year: int) -> datetime:
        return datetime(year, 12, 25)

    @staticmethod
    def christmas_eve(year: int) -> datetime:
        return datetime(year, 12, 24)

    @staticmethod
    def female_day(year: int) -> datetime:
        return datetime(year, 3, 8)

    @staticmethod
    def children_day(year: int) -> datetime:
        return datetime(year, 6, 1)

    @staticmethod
    def halloween_day(year: int) -> datetime:
        return datetime(year, 10, 31)

    @staticmethod
    def easter_day(year: int) -> datetime:
        # Easter is a movable feast; its computation is not implemented,
        # so the sentinel "minimum date" is returned instead.
        return DateUtils.min_value

    @staticmethod
    def valentines_day(year: int) -> datetime:
        return datetime(year, 2, 14)

    @staticmethod
    def white_lover_day(year: int) -> datetime:
        return datetime(year, 3, 14)

    @staticmethod
    def fool_day(year: int) -> datetime:
        return datetime(year, 4, 1)

    @staticmethod
    def girls_day(year: int) -> datetime:
        return datetime(year, 3, 7)

    @staticmethod
    def tree_plant_day(year: int) -> datetime:
        return datetime(year, 3, 12)

    @staticmethod
    def youth_day(year: int) -> datetime:
        return datetime(year, 5, 4)

    @staticmethod
    def teacher_day(year: int) -> datetime:
        return datetime(year, 9, 10)

    @staticmethod
    def singles_day(year: int) -> datetime:
        return datetime(year, 11, 11)

    @staticmethod
    def mao_birthday(year: int) -> datetime:
        return datetime(year, 12, 26)

    @staticmethod
    def inauguration_day(year: int) -> datetime:
        return datetime(year, 1, 20)

    @staticmethod
    def groundhog_day(year: int) -> datetime:
        return datetime(year, 2, 2)

    @staticmethod
    def st_patrick_day(year: int) -> datetime:
        return datetime(year, 3, 17)

    @staticmethod
    def st_george_day(year: int) -> datetime:
        return datetime(year, 4, 23)

    @staticmethod
    def mayday(year: int) -> datetime:
        return datetime(year, 5, 1)

    @staticmethod
    def cinco_de_mayo_day(year: int) -> datetime:
        return datetime(year, 5, 5)

    @staticmethod
    def baptiste_day(year: int) -> datetime:
        return datetime(year, 6, 24)

    @staticmethod
    def usa_independence_day(year: int) -> datetime:
        return datetime(year, 7, 4)

    @staticmethod
    def bastille_day(year: int) -> datetime:
        return datetime(year, 7, 14)

    @staticmethod
    def all_hallow_day(year: int) -> datetime:
        return datetime(year, 11, 1)

    @staticmethod
    def all_souls_day(year: int) -> datetime:
        return datetime(year, 11, 2)

    @staticmethod
    def guyfawkes_day(year: int) -> datetime:
        return datetime(year, 11, 5)

    @staticmethod
    def veterans_day(year: int) -> datetime:
        return datetime(year, 11, 11)

    @staticmethod
    def fathers_day(year: int) -> datetime:
        # NOTE(review): Father's/Mother's day actually vary by year; these
        # fixed dates mirror the other language configurations — confirm.
        return datetime(year, 6, 17)

    @staticmethod
    def mothers_day(year: int) -> datetime:
        return datetime(year, 5, 27)

    @staticmethod
    def labour_day(year: int) -> datetime:
        return datetime(year, 5, 1)

    def get_swift_year(self, text: str) -> int:
        """Return the relative year offset implied by *text*.

        Returns 1 for next year, -1 for last year, 0 for the current
        year and -10 when no year modifier is recognized.
        """
        trimmed_text = text.strip().lower()
        swift = -10  # sentinel: no year modifier found
        if trimmed_text.endswith('prochain'):  # next - "l'annee prochain"
            swift = 1

        if trimmed_text.endswith('dernier'):  # last - "l'annee dernier"
            swift = -1

        if trimmed_text.startswith('cette'):  # this - "cette annees"
            swift = 0

        return swift

    def sanitize_holiday_token(self, holiday: str) -> str:
        """Normalize a holiday token by removing spaces and apostrophes."""
        return holiday.replace(' ', '').replace('\'', '')
| 36.340611 | 98 | 0.674718 | from typing import List, Dict, Callable
from datetime import datetime
from recognizers_text.utilities import RegExpUtility
from ..utilities import DateUtils
from ..base_holiday import BaseHolidayParserConfiguration
from ...resources.french_date_time import FrenchDateTime
class FrenchHolidayParserConfiguration(BaseHolidayParserConfiguration):
@property
def holiday_names(self) -> Dict[str, List[str]]:
return self._holiday_names
@property
def holiday_regex_list(self) -> List[str]:
return self._holiday_regexes
@property
def holiday_func_dictionary(self) -> Dict[str, Callable[[int], datetime]]:
return self._holiday_func_dictionary
def __init__(self, config):
super().__init__()
self._holiday_regexes = [
RegExpUtility.get_safe_reg_exp(FrenchDateTime.HolidayRegex1),
RegExpUtility.get_safe_reg_exp(FrenchDateTime.HolidayRegex2),
RegExpUtility.get_safe_reg_exp(FrenchDateTime.HolidayRegex3),
RegExpUtility.get_safe_reg_exp(FrenchDateTime.HolidayRegex4)
]
self._holiday_names = FrenchDateTime.HolidayNames
def _init_holiday_funcs(self) -> Dict[str, Callable[[int], datetime]]:
local = dict([
('maosbirthday', FrenchHolidayParserConfiguration.mao_birthday),
('yuandan', FrenchHolidayParserConfiguration.new_year),
('teachersday', FrenchHolidayParserConfiguration.teacher_day),
('singleday', FrenchHolidayParserConfiguration.singles_day),
('allsaintsday', FrenchHolidayParserConfiguration.halloween_day),
('youthday', FrenchHolidayParserConfiguration.youth_day),
('childrenday', FrenchHolidayParserConfiguration.children_day),
('femaleday', FrenchHolidayParserConfiguration.female_day),
('treeplantingday', FrenchHolidayParserConfiguration.tree_plant_day),
('arborday', FrenchHolidayParserConfiguration.tree_plant_day),
('girlsday', FrenchHolidayParserConfiguration.girls_day),
('whiteloverday', FrenchHolidayParserConfiguration.white_lover_day),
('loverday', FrenchHolidayParserConfiguration.valentines_day),
('christmas', FrenchHolidayParserConfiguration.christmas_day),
('xmas', FrenchHolidayParserConfiguration.christmas_day),
('newyear', FrenchHolidayParserConfiguration.new_year),
('newyearday', FrenchHolidayParserConfiguration.new_year),
('newyearsday', FrenchHolidayParserConfiguration.new_year),
('inaugurationday', FrenchHolidayParserConfiguration.inauguration_day),
('groundhougday', FrenchHolidayParserConfiguration.groundhog_day),
('valentinesday', FrenchHolidayParserConfiguration.valentines_day),
('stpatrickday', FrenchHolidayParserConfiguration.st_patrick_day),
('aprilfools', FrenchHolidayParserConfiguration.fool_day),
('stgeorgeday', FrenchHolidayParserConfiguration.st_george_day),
('mayday', FrenchHolidayParserConfiguration.mayday),
('cincodemayoday', FrenchHolidayParserConfiguration.cinco_de_mayo_day),
('baptisteday', FrenchHolidayParserConfiguration.baptiste_day),
('usindependenceday', FrenchHolidayParserConfiguration.usa_independence_day),
('independenceday', FrenchHolidayParserConfiguration.usa_independence_day),
('bastilleday', FrenchHolidayParserConfiguration.bastille_day),
('halloweenday', FrenchHolidayParserConfiguration.halloween_day),
('allhallowday', FrenchHolidayParserConfiguration.all_hallow_day),
('allsoulsday', FrenchHolidayParserConfiguration.all_souls_day),
('guyfawkesday', FrenchHolidayParserConfiguration.guyfawkes_day),
('veteransday', FrenchHolidayParserConfiguration.veterans_day),
('christmaseve', FrenchHolidayParserConfiguration.christmas_eve),
('newyeareve', FrenchHolidayParserConfiguration.new_year_eve),
('fathersday', FrenchHolidayParserConfiguration.fathers_day),
('mothersday', FrenchHolidayParserConfiguration.mothers_day),
('labourday', FrenchHolidayParserConfiguration.labour_day)
])
return {**super()._init_holiday_funcs(), **local}
@staticmethod
def new_year(year: int) -> datetime:
return datetime(year, 1, 1)
@staticmethod
def new_year_eve(year: int) -> datetime:
return datetime(year, 12, 31)
@staticmethod
def christmas_day(year: int) -> datetime:
return datetime(year, 12, 25)
@staticmethod
def christmas_eve(year: int) -> datetime:
return datetime(year, 12, 24)
@staticmethod
def female_day(year: int) -> datetime:
return datetime(year, 3, 8)
@staticmethod
def children_day(year: int) -> datetime:
return datetime(year, 6, 1)
@staticmethod
def halloween_day(year: int) -> datetime:
return datetime(year, 10, 31)
@staticmethod
def easter_day(year: int) -> datetime:
return DateUtils.min_value
@staticmethod
def valentines_day(year: int) -> datetime:
return datetime(year, 2, 14)
@staticmethod
def white_lover_day(year: int) -> datetime:
return datetime(year, 3, 14)
@staticmethod
def fool_day(year: int) -> datetime:
return datetime(year, 4, 1)
@staticmethod
def girls_day(year: int) -> datetime:
return datetime(year, 3, 7)
@staticmethod
def tree_plant_day(year: int) -> datetime:
return datetime(year, 3, 12)
@staticmethod
def youth_day(year: int) -> datetime:
return datetime(year, 5, 4)
@staticmethod
def teacher_day(year: int) -> datetime:
return datetime(year, 9, 10)
@staticmethod
def singles_day(year: int) -> datetime:
return datetime(year, 11, 11)
@staticmethod
def mao_birthday(year: int) -> datetime:
return datetime(year, 12, 26)
@staticmethod
def inauguration_day(year: int) -> datetime:
return datetime(year, 1, 20)
@staticmethod
def groundhog_day(year: int) -> datetime:
return datetime(year, 2, 2)
@staticmethod
def st_patrick_day(year: int) -> datetime:
return datetime(year, 3, 17)
@staticmethod
def st_george_day(year: int) -> datetime:
return datetime(year, 4, 23)
@staticmethod
def mayday(year: int) -> datetime:
return datetime(year, 5, 1)
@staticmethod
def cinco_de_mayo_day(year: int) -> datetime:
return datetime(year, 5, 5)
@staticmethod
def baptiste_day(year: int) -> datetime:
return datetime(year, 6, 24)
@staticmethod
def usa_independence_day(year: int) -> datetime:
return datetime(year, 7, 4)
@staticmethod
def bastille_day(year: int) -> datetime:
return datetime(year, 7, 14)
@staticmethod
def all_hallow_day(year: int) -> datetime:
return datetime(year, 11, 1)
@staticmethod
def all_souls_day(year: int) -> datetime:
return datetime(year, 11, 2)
@staticmethod
def guyfawkes_day(year: int) -> datetime:
return datetime(year, 11, 5)
@staticmethod
def veterans_day(year: int) -> datetime:
return datetime(year, 11, 11)
    @staticmethod
    def fathers_day(year: int) -> datetime:
        # NOTE(review): hard-coded to 17 June, which was the 2018 date; in
        # France Father's Day is the third Sunday of June -- confirm whether
        # a fixed date is the intended approximation here.
        return datetime(year, 6, 17)
    @staticmethod
    def mothers_day(year: int) -> datetime:
        # NOTE(review): hard-coded to 27 May, which was the 2018 date; French
        # Mother's Day is normally the last Sunday of May -- confirm intent.
        return datetime(year, 5, 27)
@staticmethod
def labour_day(year: int) -> datetime:
return datetime(year, 5, 1)
def get_swift_year(self, text: str) -> int:
trimmed_text = text.strip().lower()
swift = -10
if trimmed_text.endswith('prochain'):
swift = 1
if trimmed_text.endswith('dernier'): # last - 'l'annee dernier'
swift = -1
if trimmed_text.startswith('cette'):
swift = 0
return swift
def sanitize_holiday_token(self, holiday: str) -> str:
return holiday.replace(' ', '').replace('\'', '')
| true | true |
f73596d88c15690f02fa3a19c1a461cf4a7e91c2 | 2,282 | py | Python | ros/src/twist_controller/twist_controller.py | aresgtr/CarND-Capstone | 604e448972221e4770dbada9070c90717f2992ee | [
"MIT"
] | null | null | null | ros/src/twist_controller/twist_controller.py | aresgtr/CarND-Capstone | 604e448972221e4770dbada9070c90717f2992ee | [
"MIT"
] | null | null | null | ros/src/twist_controller/twist_controller.py | aresgtr/CarND-Capstone | 604e448972221e4770dbada9070c90717f2992ee | [
"MIT"
] | null | null | null | from pid import PID
from lowpass import LowPassFilter
from yaw_controller import YawController
import rospy
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class Controller(object):
    """Drive-by-wire controller: converts target twist commands into
    throttle, brake and steering actuations."""
    def __init__(self, vehicle_mass, fuel_capacity, brake_deadband, decel_limit,
                 accel_limit, wheel_radius, wheel_base, steer_ratio, max_lat_accel, max_steer_angle):
        # Steering is delegated to a yaw controller; 0.1 m/s is its minimum speed.
        self.yaw_controller = YawController(wheel_base, steer_ratio, 0.1, max_lat_accel, max_steer_angle)
        # Velocity PID with the throttle output clamped to [0, 0.2].
        self.throttle_controller = PID(0.3, 0.1, 0., 0., 0.2)
        # Low-pass filter to smooth the noisy measured velocity
        # (tau = 0.5 -> cutoff frequency 1/(2*pi*tau); 20 ms sample time).
        self.vel_lpf = LowPassFilter(0.5, .02)
        self.vehicle_mass = vehicle_mass
        self.fuel_capacity = fuel_capacity
        self.brake_deadband = brake_deadband
        self.decel_limit = decel_limit
        self.accel_limit = accel_limit
        self.wheel_radius = wheel_radius
        self.last_time = rospy.get_time()
    def control(self, current_vel, dbw_enabled, linear_vel, angular_vel):
        """Return (throttle, brake, steering) for one control cycle."""
        # A human driver has taken over: reset the integrator, command nothing.
        if not dbw_enabled:
            self.throttle_controller.reset()
            return 0., 0., 0.
        filtered_vel = self.vel_lpf.filt(current_vel)
        steering = self.yaw_controller.get_steering(linear_vel, angular_vel, filtered_vel)
        velocity_error = linear_vel - filtered_vel
        self.last_vel = filtered_vel
        now = rospy.get_time()
        elapsed = now - self.last_time
        self.last_time = now
        throttle = self.throttle_controller.step(velocity_error, elapsed)
        brake = 0
        if linear_vel == 0. and filtered_vel < 0.1:
            # Hold the car while stopped (e.g. at a red light): ~400 N*m.
            throttle = 0
            brake = 400
        elif throttle < .1 and velocity_error < 0:
            # Going faster than the target with little throttle: brake with a
            # torque proportional to the (limited) requested deceleration.
            throttle = 0
            decel = max(velocity_error, self.decel_limit)
            brake = abs(decel) * self.vehicle_mass * self.wheel_radius  # N*m
        return throttle, brake, steering
| 33.558824 | 109 | 0.643295 | from pid import PID
from lowpass import LowPassFilter
from yaw_controller import YawController
import rospy
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class Controller(object):
def __init__(self, vehicle_mass, fuel_capacity, brake_deadband, decel_limit,
accel_limit, wheel_radius, wheel_base, steer_ratio, max_lat_accel, max_steer_angle):
self.yaw_controller = YawController(wheel_base, steer_ratio, 0.1, max_lat_accel, max_steer_angle)
kp = 0.3
ki = 0.1
kd = 0.
mn = 0.
mx = 0.2
self.throttle_controller = PID(kp, ki, kd, mn, mx)
tau = 0.5
ts = .02
self.vel_lpf = LowPassFilter(tau, ts)
self.vehicle_mass = vehicle_mass
self.fuel_capacity = fuel_capacity
self.brake_deadband = brake_deadband
self.decel_limit = decel_limit
self.accel_limit = accel_limit
self.wheel_radius = wheel_radius
self.last_time = rospy.get_time()
def control(self, current_vel, dbw_enabled, linear_vel, angular_vel):
if not dbw_enabled:
self.throttle_controller.reset()
return 0., 0., 0.
current_vel = self.vel_lpf.filt(current_vel)
steering = self.yaw_controller.get_steering(linear_vel, angular_vel, current_vel)
vel_error = linear_vel - current_vel
self.last_vel = current_vel
current_time = rospy.get_time()
sample_time = current_time - self.last_time
self.last_time = current_time
throttle = self.throttle_controller.step(vel_error, sample_time)
brake = 0
if linear_vel == 0. and current_vel < 0.1:
throttle = 0
brake = 400
elif throttle < .1 and vel_error < 0:
throttle = 0
decel = max(vel_error, self.decel_limit)
brake = abs(decel) * self.vehicle_mass * self.wheel_radius
return throttle, brake, steering
| true | true |
f73597b85ba8ca40d9733200858fd68397b530e6 | 3,280 | py | Python | final_plots/read_aws.py | georgetown-analytics/DC-Bikeshare | 9f5a6a3256cff15a29f0dca6e9a9d8098ab2df28 | [
"MIT"
] | 11 | 2018-07-01T16:43:05.000Z | 2020-07-17T19:08:16.000Z | final_plots/read_aws.py | noahnewberger/Bikeshare-DC | 42676654d103cdaddfb76db76d1eece533251261 | [
"MIT"
] | 5 | 2021-02-08T20:21:12.000Z | 2021-12-13T19:47:04.000Z | final_plots/read_aws.py | noahnewberger/Bikeshare-DC | 42676654d103cdaddfb76db76d1eece533251261 | [
"MIT"
] | 5 | 2018-10-05T19:54:20.000Z | 2020-10-27T11:54:09.000Z | import psycopg2
import psycopg2.extras
import pandas as pd
import os
import time
from pathlib import Path
from dotenv import load_dotenv
def read_only_connect_aws():
    """Open a read-only connection to the AWS-hosted bikeshare database.

    Credentials are read from ``env_readonly.env``; the host, port and
    database name are fixed. Returns an open psycopg2 connection.
    """
    load_dotenv(dotenv_path='env_readonly.env')
    return psycopg2.connect(
        host="bikeshare-restored.cs9te7lm3pt2.us-east-1.rds.amazonaws.com",
        port=5432,
        database="bikeshare",
        user=os.environ.get("AWS_READONLY_USER"),
        password=os.environ.get("AWS_READONLY_PASS"))
# Function to load cabi data from AWS. Leaving room to add different load
# types. Right now only allowing a load of all the database
class QueryTool:
    """Run canned read-only queries against the bikeshare database.

    Parameters:
        connection -- an open DB-API connection (see read_only_connect_aws)
        table -- optional table name used by the per-table queries
    """
    def __init__(self, connection, table=None):
        self.connection = connection
        self.table = table
    def basic(self):
        """Return every row of ``self.table`` as a DataFrame."""
        query = 'SELECT * from ' + self.table
        return pd.read_sql(query, con=self.connection)
    def missing_check(self):
        """Return trip counts per dockless operator."""
        query = ("""
            SELECT
            COUNT(*) as total_count,
            dt.operator as operator
            FROM dockless_trips as dt
            GROUP BY
            operator;""")
        return pd.read_sql(query, con=self.connection)
    def geo_metric(self, cut):
        """Count trips between start/end regions, grouped by a date part.

        Parameters:
            cut -- a PostgreSQL ``extract`` field name, e.g. 'month' or 'dow'
        """
        self.cut = cut
        # NOTE: ``cut`` and ``self.table`` are interpolated directly into the
        # SQL, so they must come from trusted code, never from user input.
        query = ("""
            SELECT
            stations.end_region_code,
            stations.start_region_code,
            extract({0} from subq_trip.start_date) as {0},
            COUNT(*) as total_trips
            FROM
            (SELECT * FROM {1} LIMIT 25) as subq_trip
            LEFT JOIN cabi_stations_geo_temp AS stations
            ON subq_trip.start_station = stations.start_short_name
            AND subq_trip.end_station = stations.end_short_name
            GROUP BY
            stations.end_region_code,
            stations.start_region_code,
            extract({0} from subq_trip.start_date);""").format(cut, self.table)
        # ^ bug fix: previously formatted with the undefined global ``table``,
        #   which raised NameError; the instance attribute is what was meant.
        return pd.read_sql(query, con=self.connection)
    def annual(self, year):
        """Return all Capital Bikeshare trips that started in ``year``."""
        self.year = year
        query = (
            'SELECT * from cabi_trips '
            'WHERE EXTRACT(YEAR FROM start_date)=') + str(self.year)
        return pd.read_sql(query, con=self.connection)
    def describe_data(self):
        """Print schema, table, column and type for every non-system column."""
        cur = self.connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
        cur.execute("""select *
                    from information_schema.columns
                    where table_schema NOT IN (
                    'information_schema', 'pg_catalog')
                    order by table_schema, table_name""")
        for row in cur:
            print("schema: {schema}, table: {table}, column: {col}, \
            type: {type}".format(
                schema=row['table_schema'], table=row['table_name'],
                col=row['column_name'], type=row['data_type']))
if __name__ == '__main__':
    # Smoke test: connect read-only and print the schema of every user table.
    print('Running')
    conn = read_only_connect_aws()
    CABI_TRIPS = QueryTool(conn, 'cabi_trips')
    CABI_TRIPS.describe_data()
| 32.475248 | 79 | 0.603659 | import psycopg2
import psycopg2.extras
import pandas as pd
import os
import time
from pathlib import Path
from dotenv import load_dotenv
def read_only_connect_aws():
env_path = 'env_readonly.env'
load_dotenv(dotenv_path=env_path)
host = "bikeshare-restored.cs9te7lm3pt2.us-east-1.rds.amazonaws.com"
port = 5432
database = "bikeshare"
user = os.environ.get("AWS_READONLY_USER")
password = os.environ.get("AWS_READONLY_PASS")
conn = psycopg2.connect(
host=host, user=user, port=port, password=password,
database=database)
return conn
class QueryTool:
def __init__(self, connection, table=None):
self.connection = connection
self.table = table
def basic(self):
query = (
'SELECT * from ') + self.table
dataframe = pd.read_sql(query, con=self.connection)
return dataframe
def missing_check(self):
query = ("""
SELECT
COUNT(*) as total_count,
dt.operator as operator
FROM dockless_trips as dt
GROUP BY
operator;""")
dataframe = pd.read_sql(query, con=self.connection)
return dataframe
def geo_metric(self, cut):
self.cut = cut
query = ("""
SELECT
stations.end_region_code,
stations.start_region_code,
extract({0} from subq_trip.start_date) as {0},
COUNT(*) as total_trips
FROM
(SELECT * FROM {1} LIMIT 25) as subq_trip
LEFT JOIN cabi_stations_geo_temp AS stations
ON subq_trip.start_station = stations.start_short_name
AND subq_trip.end_station = stations.end_short_name
GROUP BY
stations.end_region_code,
stations.start_region_code,
extract({0} from subq_trip.start_date);""").format(cut, table)
dataframe = pd.read_sql(query, con=self.connection)
return dataframe
def annual(self, year):
self.year = year
start_string = (
'SELECT * from cabi_trips '
'WHERE EXTRACT(YEAR FROM start_date)=')
query = start_string + str(self.year)
dataframe = pd.read_sql(query, con=self.connection)
return dataframe
def describe_data(self):
cur = self.connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
cur.execute("""select *
from information_schema.columns
where table_schema NOT IN (
'information_schema', 'pg_catalog')
order by table_schema, table_name""")
for row in cur:
print("schema: {schema}, table: {table}, column: {col}, \
type: {type}".format(
schema=row['table_schema'], table=row['table_name'],
col=row['column_name'], type=row['data_type']))
if __name__ == '__main__':
print('Running')
conn = read_only_connect_aws()
CABI_TRIPS = QueryTool(conn, 'cabi_trips')
CABI_TRIPS.describe_data()
| true | true |
f7359a48b1c8cf914b8fb141b4b2ed6ebf6ee390 | 4,053 | py | Python | tests/unit/cli/errors_test.py | Teino1978-Corp/compose | 05a5aae552be9de2ef99c30cc79bd7b0d8bfa62b | [
"Apache-2.0"
] | 1 | 2021-05-04T17:15:12.000Z | 2021-05-04T17:15:12.000Z | tests/unit/cli/errors_test.py | HamidOsouli/compose | 52d2fcc27479ffe3301c332fb971296ab686255a | [
"Apache-2.0"
] | null | null | null | tests/unit/cli/errors_test.py | HamidOsouli/compose | 52d2fcc27479ffe3301c332fb971296ab686255a | [
"Apache-2.0"
] | null | null | null | import pytest
from docker.errors import APIError
from requests.exceptions import ConnectionError
from compose.cli import errors
from compose.cli.errors import handle_connection_errors
from compose.const import IS_WINDOWS_PLATFORM
from tests import mock
@pytest.yield_fixture
def mock_logging():
    # Patch the module-level logger so tests can inspect error() calls.
    with mock.patch('compose.cli.errors.log', autospec=True) as mock_log:
        yield mock_log
def patch_find_executable(side_effect):
    # Replace find_executable so tests control which binaries appear to
    # exist; ``side_effect`` supplies one return value per successive call.
    return mock.patch(
        'compose.cli.errors.find_executable',
        autospec=True,
        side_effect=side_effect)
class TestHandleConnectionErrors(object):
    """Tests for the ``handle_connection_errors`` context manager."""
    def test_generic_connection_error(self, mock_logging):
        """A plain ConnectionError is reported as a daemon-connection failure."""
        with pytest.raises(errors.ConnectionError):
            with patch_find_executable(['/bin/docker', None]):
                with handle_connection_errors(mock.Mock()):
                    raise ConnectionError()
        _, args, _ = mock_logging.error.mock_calls[0]
        assert "Couldn't connect to Docker daemon" in args[0]
    def test_api_error_version_mismatch(self, mock_logging):
        """A bytes 'client is newer' APIError suggests upgrading the Engine."""
        with pytest.raises(errors.ConnectionError):
            with handle_connection_errors(mock.Mock(api_version='1.38')):
                raise APIError(None, None, b"client is newer than server")
        _, args, _ = mock_logging.error.mock_calls[0]
        assert "Docker Engine of version 18.06.0 or greater" in args[0]
    def test_api_error_version_mismatch_unicode_explanation(self, mock_logging):
        """Same as above but with a text (unicode) explanation."""
        with pytest.raises(errors.ConnectionError):
            with handle_connection_errors(mock.Mock(api_version='1.38')):
                raise APIError(None, None, u"client is newer than server")
        _, args, _ = mock_logging.error.mock_calls[0]
        assert "Docker Engine of version 18.06.0 or greater" in args[0]
    def test_api_error_version_other(self, mock_logging):
        """Other APIError bytes explanations are logged verbatim (decoded)."""
        msg = b"Something broke!"
        with pytest.raises(errors.ConnectionError):
            with handle_connection_errors(mock.Mock(api_version='1.22')):
                raise APIError(None, None, msg)
        mock_logging.error.assert_called_once_with(msg.decode('utf-8'))
    def test_api_error_version_other_unicode_explanation(self, mock_logging):
        """Other APIError text explanations are logged verbatim."""
        msg = u"Something broke!"
        with pytest.raises(errors.ConnectionError):
            with handle_connection_errors(mock.Mock(api_version='1.22')):
                raise APIError(None, None, msg)
        mock_logging.error.assert_called_once_with(msg)
    @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='Needs pywin32')
    def test_windows_pipe_error_no_data(self, mock_logging):
        """Pipe-closing error 232 maps to a Compose-file compatibility hint."""
        import pywintypes
        with pytest.raises(errors.ConnectionError):
            with handle_connection_errors(mock.Mock(api_version='1.22')):
                raise pywintypes.error(232, 'WriteFile', 'The pipe is being closed.')
        _, args, _ = mock_logging.error.mock_calls[0]
        assert "The current Compose file version is not compatible with your engine version." in args[0]
    @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='Needs pywin32')
    def test_windows_pipe_error_misc(self, mock_logging):
        """Other pipe errors are logged with their message and code."""
        import pywintypes
        with pytest.raises(errors.ConnectionError):
            with handle_connection_errors(mock.Mock(api_version='1.22')):
                raise pywintypes.error(231, 'WriteFile', 'The pipe is busy.')
        _, args, _ = mock_logging.error.mock_calls[0]
        assert "Windows named pipe error: The pipe is busy. (code: 231)" == args[0]
    @pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='Needs pywin32')
    def test_windows_pipe_error_encoding_issue(self, mock_logging):
        """Non-ASCII pipe error messages survive formatting."""
        import pywintypes
        with pytest.raises(errors.ConnectionError):
            with handle_connection_errors(mock.Mock(api_version='1.22')):
                raise pywintypes.error(9999, 'WriteFile', 'I use weird characters \xe9')
        _, args, _ = mock_logging.error.mock_calls[0]
        assert 'Windows named pipe error: I use weird characters \xe9 (code: 9999)' == args[0]
| 42.21875 | 104 | 0.695781 | import pytest
from docker.errors import APIError
from requests.exceptions import ConnectionError
from compose.cli import errors
from compose.cli.errors import handle_connection_errors
from compose.const import IS_WINDOWS_PLATFORM
from tests import mock
@pytest.yield_fixture
def mock_logging():
with mock.patch('compose.cli.errors.log', autospec=True) as mock_log:
yield mock_log
def patch_find_executable(side_effect):
return mock.patch(
'compose.cli.errors.find_executable',
autospec=True,
side_effect=side_effect)
class TestHandleConnectionErrors(object):
def test_generic_connection_error(self, mock_logging):
with pytest.raises(errors.ConnectionError):
with patch_find_executable(['/bin/docker', None]):
with handle_connection_errors(mock.Mock()):
raise ConnectionError()
_, args, _ = mock_logging.error.mock_calls[0]
assert "Couldn't connect to Docker daemon" in args[0]
def test_api_error_version_mismatch(self, mock_logging):
with pytest.raises(errors.ConnectionError):
with handle_connection_errors(mock.Mock(api_version='1.38')):
raise APIError(None, None, b"client is newer than server")
_, args, _ = mock_logging.error.mock_calls[0]
assert "Docker Engine of version 18.06.0 or greater" in args[0]
def test_api_error_version_mismatch_unicode_explanation(self, mock_logging):
with pytest.raises(errors.ConnectionError):
with handle_connection_errors(mock.Mock(api_version='1.38')):
raise APIError(None, None, u"client is newer than server")
_, args, _ = mock_logging.error.mock_calls[0]
assert "Docker Engine of version 18.06.0 or greater" in args[0]
def test_api_error_version_other(self, mock_logging):
msg = b"Something broke!"
with pytest.raises(errors.ConnectionError):
with handle_connection_errors(mock.Mock(api_version='1.22')):
raise APIError(None, None, msg)
mock_logging.error.assert_called_once_with(msg.decode('utf-8'))
def test_api_error_version_other_unicode_explanation(self, mock_logging):
msg = u"Something broke!"
with pytest.raises(errors.ConnectionError):
with handle_connection_errors(mock.Mock(api_version='1.22')):
raise APIError(None, None, msg)
mock_logging.error.assert_called_once_with(msg)
@pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='Needs pywin32')
def test_windows_pipe_error_no_data(self, mock_logging):
import pywintypes
with pytest.raises(errors.ConnectionError):
with handle_connection_errors(mock.Mock(api_version='1.22')):
raise pywintypes.error(232, 'WriteFile', 'The pipe is being closed.')
_, args, _ = mock_logging.error.mock_calls[0]
assert "The current Compose file version is not compatible with your engine version." in args[0]
@pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='Needs pywin32')
def test_windows_pipe_error_misc(self, mock_logging):
import pywintypes
with pytest.raises(errors.ConnectionError):
with handle_connection_errors(mock.Mock(api_version='1.22')):
raise pywintypes.error(231, 'WriteFile', 'The pipe is busy.')
_, args, _ = mock_logging.error.mock_calls[0]
assert "Windows named pipe error: The pipe is busy. (code: 231)" == args[0]
@pytest.mark.skipif(not IS_WINDOWS_PLATFORM, reason='Needs pywin32')
def test_windows_pipe_error_encoding_issue(self, mock_logging):
import pywintypes
with pytest.raises(errors.ConnectionError):
with handle_connection_errors(mock.Mock(api_version='1.22')):
raise pywintypes.error(9999, 'WriteFile', 'I use weird characters \xe9')
_, args, _ = mock_logging.error.mock_calls[0]
assert 'Windows named pipe error: I use weird characters \xe9 (code: 9999)' == args[0]
| true | true |
f7359b0dca3a0c1fc4f8174997e46c89022028eb | 572 | py | Python | base.py | tomas-barros/speed-test | a95da7a24147f1845bb400c4e9db80a60e1b2df7 | [
"MIT"
] | null | null | null | base.py | tomas-barros/speed-test | a95da7a24147f1845bb400c4e9db80a60e1b2df7 | [
"MIT"
] | null | null | null | base.py | tomas-barros/speed-test | a95da7a24147f1845bb400c4e9db80a60e1b2df7 | [
"MIT"
] | null | null | null | from speedtest import Speedtest
# debugmode
# Flag: set to 1 to print raw speedtest results when this module is imported.
debugmode = 0
st = Speedtest()  # shared speedtest client reused by the helpers below
# Import-time debug dump of download/upload/ping figures.
if debugmode:
    print(f'Download: {st.download()}')
    print(f'Upload: {st.upload()}')
    st.get_best_server([])
    print(f'Ping: {st.results.ping}')
def get_upload_speed():
    """Run the upload benchmark and return the rate in bits/s as an int."""
    print('UPLOAD SPEED: Wait a few seconds...')
    measured = st.upload()
    return int(measured)
def get_download_speed():
    """Run the download benchmark and return the rate in bits/s as an int."""
    print('DOWNLOAD SPEED: Wait a few seconds...')
    measured = st.download()
    return int(measured)
def get_ping():
    """Pick the best server and return the measured ping (ms) as an int."""
    print('Wait a few seconds...')
    st.get_best_server([])
    latency = st.results.ping
    return int(latency)
debugmode = 0
st = Speedtest()
if debugmode:
print(f'Download: {st.download()}')
print(f'Upload: {st.upload()}')
st.get_best_server([])
print(f'Ping: {st.results.ping}')
def get_upload_speed():
print('UPLOAD SPEED: Wait a few seconds...')
return int(st.upload())
def get_download_speed():
print('DOWNLOAD SPEED: Wait a few seconds...')
return int(st.download())
def get_ping():
print('Wait a few seconds...')
st.get_best_server([])
return int(st.results.ping) | true | true |
f7359e65f7dea51a3e8a2bef0ce59b92e89fc7c6 | 508 | py | Python | pacote de dawload/projeto progamas em Python/Aula 19 Dicionarios parte 1.py | wagnersistemalima/Mundo-3-Python-Curso-em-Video | f78aef33e10a236f71850e05fdb9ae8ddbe155c3 | [
"MIT"
] | 1 | 2020-12-11T23:22:19.000Z | 2020-12-11T23:22:19.000Z | pacote de dawload/projeto progamas em Python/Aula 19 Dicionarios parte 1.py | wagnersistemalima/Mundo-3-Python-Curso-em-Video | f78aef33e10a236f71850e05fdb9ae8ddbe155c3 | [
"MIT"
] | null | null | null | pacote de dawload/projeto progamas em Python/Aula 19 Dicionarios parte 1.py | wagnersistemalima/Mundo-3-Python-Curso-em-Video | f78aef33e10a236f71850e05fdb9ae8ddbe155c3 | [
"MIT"
] | null | null | null | # Aula 19 Dicionarios. É assim que tratamos os dicionarios
pessoas = {'nome': 'Gustavo', 'sexo': 'M', 'idade': 22}
print(pessoas['nome'])
print(pessoas['idade'])
print(pessoas['sexo'])
print(f'{pessoas["nome"]} tem {pessoas["idade"]} anos') # Utilizar aspas duplas para a localização [" "]
print(pessoas.keys()) # nome / sexo / idade
print(pessoas.values()) # Gustavo / M / 22
print(pessoas.items()) #Composição de elementos: lista e treis tuplas
| 42.333333 | 105 | 0.608268 |
pessoas = {'nome': 'Gustavo', 'sexo': 'M', 'idade': 22}
print(pessoas['nome'])
print(pessoas['idade'])
print(pessoas['sexo'])
print(f'{pessoas["nome"]} tem {pessoas["idade"]} anos')
print(pessoas.keys())
print(pessoas.values())
print(pessoas.items())
| true | true |
f7359e904faa026a802269f620739354e64f4bbc | 62 | py | Python | app/tests.py | nibinn/Rnd | e306b3da3af5e9da66d11436bc7abf3a77f50573 | [
"MIT"
] | 1 | 2022-03-30T15:28:33.000Z | 2022-03-30T15:28:33.000Z | authentication/tests.py | piyushjain-pj/pneumonia_and_covid_prediction_tool | b5f503b1672b1093c4bd6f9e053d6024e4f73f9d | [
"MIT"
] | 14 | 2020-06-05T18:37:13.000Z | 2022-03-11T23:26:12.000Z | authentication/tests.py | piyushjain-pj/pneumonia_and_covid_prediction_tool | b5f503b1672b1093c4bd6f9e053d6024e4f73f9d | [
"MIT"
] | null | null | null |
from django.test import TestCase
# Create your tests here.
| 10.333333 | 32 | 0.758065 |
from django.test import TestCase
| true | true |
f735a0bd714d0c11768957bfb5be71284da6fc0b | 4,802 | py | Python | tests/test_features.py | aka863/python-geojson | 9cda95ab0e226e01391325fede635983e6b7b207 | [
"BSD-3-Clause"
] | null | null | null | tests/test_features.py | aka863/python-geojson | 9cda95ab0e226e01391325fede635983e6b7b207 | [
"BSD-3-Clause"
] | null | null | null | tests/test_features.py | aka863/python-geojson | 9cda95ab0e226e01391325fede635983e6b7b207 | [
"BSD-3-Clause"
] | null | null | null | try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import unittest
import geojson
class FeaturesTest(unittest.TestCase):
    """Tests of GeoJSON Feature encoding/decoding and the geo interface."""
    def test_protocol(self):
        """
        A dictionary can satisfy the protocol
        """
        f = {
            'type': 'Feature',
            'id': '1',
            'geometry': {'type': 'Point', 'coordinates': [53, -4]},
            'properties': {'title': 'Dict 1'},
        }
        json = geojson.dumps(f, sort_keys=True)
        self.assertEqual(json, '{"geometry":'
                               ' {"coordinates": [53, -4],'
                               ' "type": "Point"},'
                               ' "id": "1",'
                               ' "properties": {"title": "Dict 1"},'
                               ' "type": "Feature"}')
        # Round-trip: loads then dumps must reproduce the same JSON.
        o = geojson.loads(json)
        output = geojson.dumps(o, sort_keys=True)
        self.assertEqual(output, '{"geometry":'
                                 ' {"coordinates": [53, -4],'
                                 ' "type": "Point"},'
                                 ' "id": "1",'
                                 ' "properties": {"title": "Dict 1"},'
                                 ' "type": "Feature"}')
    def test_unicode_properties(self):
        """Objects loaded from a file with unicode properties can be dumped."""
        with open("tests/data.geojson") as file_:
            obj = geojson.load(file_)
        geojson.dump(obj, StringIO())
    def test_feature_class(self):
        """
        Test the Feature class
        """
        from geojson.examples import SimpleWebFeature
        feature = SimpleWebFeature(
            id='1',
            geometry={'type': 'Point', 'coordinates': [53, -4]},
            title='Feature 1', summary='The first feature',
            link='http://example.org/features/1'
        )
        # It satisfies the feature protocol
        self.assertEqual(feature.id, '1')
        self.assertEqual(feature.properties['title'], 'Feature 1')
        self.assertEqual(feature.properties['summary'], 'The first feature')
        self.assertEqual(feature.properties['link'],
                         'http://example.org/features/1')
        self.assertEqual(geojson.dumps(feature.geometry, sort_keys=True),
                         '{"coordinates": [53, -4], "type": "Point"}')
        # Encoding
        json = ('{"geometry": {"coordinates": [53, -4],'
                ' "type": "Point"},'
                ' "id": "1",'
                ' "properties":'
                ' {"link": "http://example.org/features/1",'
                ' "summary": "The first feature",'
                ' "title": "Feature 1"},'
                ' "type": "Feature"}')
        self.assertEqual(geojson.dumps(feature, sort_keys=True), json)
        # Decoding
        factory = geojson.examples.create_simple_web_feature
        json = ('{"geometry": {"type": "Point",'
                ' "coordinates": [53, -4]},'
                ' "id": "1",'
                ' "properties": {"summary": "The first feature",'
                ' "link": "http://example.org/features/1",'
                ' "title": "Feature 1"}}')
        feature = geojson.loads(json, object_hook=factory, encoding="utf-8")
        self.assertEqual(repr(type(feature)),
                         "<class 'geojson.examples.SimpleWebFeature'>")
        self.assertEqual(feature.id, '1')
        self.assertEqual(feature.properties['title'], 'Feature 1')
        self.assertEqual(feature.properties['summary'], 'The first feature')
        self.assertEqual(feature.properties['link'],
                         'http://example.org/features/1')
        self.assertEqual(geojson.dumps(feature.geometry, sort_keys=True),
                         '{"coordinates": [53, -4], "type": "Point"}')
    def test_geo_interface(self):
        """Any object exposing __geo_interface__ serializes like a feature."""
        class Thingy(object):
            # Minimal stand-in for a domain object with a point location.
            def __init__(self, id, title, x, y):
                self.id = id
                self.title = title
                self.x = x
                self.y = y
            @property
            def __geo_interface__(self):
                return ({"id": self.id,
                         "properties": {"title": self.title},
                         "geometry": {"type": "Point",
                                      "coordinates": (self.x, self.y)}})
        ob = Thingy('1', 'thingy one', -106, 40)
        self.assertEqual(geojson.dumps(ob.__geo_interface__['geometry'],
                                       sort_keys=True),
                         '{"coordinates": [-106, 40], "type": "Point"}')
        self.assertEqual(geojson.dumps(ob, sort_keys=True),
                         ('{"geometry": {"coordinates": [-106, 40],'
                          ' "type": "Point"},'
                          ' "id": "1",'
                          ' "properties": {"title": "thingy one"}}'))
| 40.016667 | 76 | 0.466264 | try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import unittest
import geojson
class FeaturesTest(unittest.TestCase):
def test_protocol(self):
f = {
'type': 'Feature',
'id': '1',
'geometry': {'type': 'Point', 'coordinates': [53, -4]},
'properties': {'title': 'Dict 1'},
}
json = geojson.dumps(f, sort_keys=True)
self.assertEqual(json, '{"geometry":'
' {"coordinates": [53, -4],'
' "type": "Point"},'
' "id": "1",'
' "properties": {"title": "Dict 1"},'
' "type": "Feature"}')
o = geojson.loads(json)
output = geojson.dumps(o, sort_keys=True)
self.assertEqual(output, '{"geometry":'
' {"coordinates": [53, -4],'
' "type": "Point"},'
' "id": "1",'
' "properties": {"title": "Dict 1"},'
' "type": "Feature"}')
def test_unicode_properties(self):
with open("tests/data.geojson") as file_:
obj = geojson.load(file_)
geojson.dump(obj, StringIO())
def test_feature_class(self):
from geojson.examples import SimpleWebFeature
feature = SimpleWebFeature(
id='1',
geometry={'type': 'Point', 'coordinates': [53, -4]},
title='Feature 1', summary='The first feature',
link='http://example.org/features/1'
)
self.assertEqual(feature.id, '1')
self.assertEqual(feature.properties['title'], 'Feature 1')
self.assertEqual(feature.properties['summary'], 'The first feature')
self.assertEqual(feature.properties['link'],
'http://example.org/features/1')
self.assertEqual(geojson.dumps(feature.geometry, sort_keys=True),
'{"coordinates": [53, -4], "type": "Point"}')
json = ('{"geometry": {"coordinates": [53, -4],'
' "type": "Point"},'
' "id": "1",'
' "properties":'
' {"link": "http://example.org/features/1",'
' "summary": "The first feature",'
' "title": "Feature 1"},'
' "type": "Feature"}')
self.assertEqual(geojson.dumps(feature, sort_keys=True), json)
factory = geojson.examples.create_simple_web_feature
json = ('{"geometry": {"type": "Point",'
' "coordinates": [53, -4]},'
' "id": "1",'
' "properties": {"summary": "The first feature",'
' "link": "http://example.org/features/1",'
' "title": "Feature 1"}}')
feature = geojson.loads(json, object_hook=factory, encoding="utf-8")
self.assertEqual(repr(type(feature)),
"<class 'geojson.examples.SimpleWebFeature'>")
self.assertEqual(feature.id, '1')
self.assertEqual(feature.properties['title'], 'Feature 1')
self.assertEqual(feature.properties['summary'], 'The first feature')
self.assertEqual(feature.properties['link'],
'http://example.org/features/1')
self.assertEqual(geojson.dumps(feature.geometry, sort_keys=True),
'{"coordinates": [53, -4], "type": "Point"}')
def test_geo_interface(self):
class Thingy(object):
def __init__(self, id, title, x, y):
self.id = id
self.title = title
self.x = x
self.y = y
@property
def __geo_interface__(self):
return ({"id": self.id,
"properties": {"title": self.title},
"geometry": {"type": "Point",
"coordinates": (self.x, self.y)}})
ob = Thingy('1', 'thingy one', -106, 40)
self.assertEqual(geojson.dumps(ob.__geo_interface__['geometry'],
sort_keys=True),
'{"coordinates": [-106, 40], "type": "Point"}')
self.assertEqual(geojson.dumps(ob, sort_keys=True),
('{"geometry": {"coordinates": [-106, 40],'
' "type": "Point"},'
' "id": "1",'
' "properties": {"title": "thingy one"}}'))
| true | true |
f735a0ee2e5e07217279bd25dd0d6ed4955441c8 | 872 | py | Python | core/serializers/review.py | NTUSA/fudez-app | 91c8d85238fb642488ef5616be441e4417f21d0a | [
"MIT"
] | 2 | 2017-04-13T08:52:19.000Z | 2018-05-07T12:14:34.000Z | core/serializers/review.py | NTUSA/fudez-app | 91c8d85238fb642488ef5616be441e4417f21d0a | [
"MIT"
] | null | null | null | core/serializers/review.py | NTUSA/fudez-app | 91c8d85238fb642488ef5616be441e4417f21d0a | [
"MIT"
] | 1 | 2018-07-28T16:09:59.000Z | 2018-07-28T16:09:59.000Z | from django.contrib.auth import get_user_model
from rest_framework import serializers
class Parent(object):
    """Callable serializer-field default that resolves an object from context.

    ``set_context`` is invoked by the framework before validation: it applies
    ``func`` to the serializer context to obtain a primary key and looks the
    object up in the field's queryset. ``__call__`` then hands that object
    back as the field's default value.
    """
    def __init__(self, func):
        # ``func`` maps a serializer context dict to a primary key.
        self.func = func
    def set_context(self, serializer_field):
        pk = self.func(serializer_field.context)
        self.value = serializer_field.queryset.get(pk=pk)
    def __call__(self):
        return self.value
class ReviewSerializer(serializers.Serializer):
    """Base serializer for review actions: defaults the reviewer to the
    requesting user and restricts it to reviewer-capable user kinds."""
    # Only presidents, staff and chiefs may act as reviewers.
    reviewer_kinds = [get_user_model().PRESIDENT, get_user_model().STAFF, get_user_model().CHIEF]
    reviewer = serializers.PrimaryKeyRelatedField(
        default=Parent(lambda context: context['user']),
        queryset=get_user_model().objects.filter(kind__in=reviewer_kinds))
class ApproveSerializer(ReviewSerializer):
    """Approval payload: the granted amount (defaults to 0)."""
    amount = serializers.IntegerField(default=0)
class RejectSerializer(ReviewSerializer):
    """Rejection payload: a mandatory textual reason."""
    reason = serializers.CharField()
| 29.066667 | 97 | 0.741972 | from django.contrib.auth import get_user_model
from rest_framework import serializers
class Parent(object):
def __init__(self, func):
self.func = func
def set_context(self, serializer_field):
self.value = serializer_field.queryset.get(
pk=self.func(serializer_field.context))
def __call__(self):
return self.value
class ReviewSerializer(serializers.Serializer):
reviewer_kinds = [get_user_model().PRESIDENT, get_user_model().STAFF, get_user_model().CHIEF]
reviewer = serializers.PrimaryKeyRelatedField(
default=Parent(lambda context: context['user']),
queryset=get_user_model().objects.filter(kind__in=reviewer_kinds))
class ApproveSerializer(ReviewSerializer):
amount = serializers.IntegerField(default=0)
class RejectSerializer(ReviewSerializer):
reason = serializers.CharField()
| true | true |
f735a1f81d5ca9884293312b3c8981cc9b7d982b | 3,437 | py | Python | src/pix2pix/data/unaligned_dataset.py | Stannislav/pytorch-CycleGAN-and-pix2pix | c6a4927f5d2c49c278694ae7e9ac148b83a3f2d5 | [
"BSD-3-Clause"
] | null | null | null | src/pix2pix/data/unaligned_dataset.py | Stannislav/pytorch-CycleGAN-and-pix2pix | c6a4927f5d2c49c278694ae7e9ac148b83a3f2d5 | [
"BSD-3-Clause"
] | null | null | null | src/pix2pix/data/unaligned_dataset.py | Stannislav/pytorch-CycleGAN-and-pix2pix | c6a4927f5d2c49c278694ae7e9ac148b83a3f2d5 | [
"BSD-3-Clause"
] | null | null | null | import os
import random
from PIL import Image
from .base_dataset import BaseDataset, get_transform
from .image_folder import make_dataset
class UnalignedDataset(BaseDataset):
    """
    Dataset of unpaired images drawn from two domains.

    Expects two directories hosting the images, '/path/to/data/trainA' and
    '/path/to/data/trainB' (or testA/testB at test time), selected with the
    flag '--dataroot /path/to/data'.  Each item serves one image from each
    domain with no fixed pairing between them.
    """

    def __init__(self, opt):
        """Collect image paths for both domains and build their transforms.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)
        # Domain directories, e.g. '/path/to/data/trainA' and '.../trainB'.
        self.dir_A = os.path.join(opt.dataroot, opt.phase + "A")
        self.dir_B = os.path.join(opt.dataroot, opt.phase + "B")
        # Sorted image paths per domain, capped at max_dataset_size.
        self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size))
        self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size))
        self.A_size = len(self.A_paths)
        self.B_size = len(self.B_paths)
        # When direction is BtoA the input/output channel counts swap roles.
        if self.opt.direction == "BtoA":
            input_nc, output_nc = self.opt.output_nc, self.opt.input_nc
        else:
            input_nc, output_nc = self.opt.input_nc, self.opt.output_nc
        self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))
        self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1))

    def __getitem__(self, index):
        """Return one transformed image from each domain plus their paths.

        Parameters:
            index (int) -- a random integer for data indexing

        Returns a dictionary with keys:
            A (tensor)    -- an image in the input domain
            B (tensor)    -- an image in the target domain
            A_paths (str) -- path of the A image
            B_paths (str) -- path of the B image
        """
        A_path = self.A_paths[index % self.A_size]  # wrap index into range
        if self.opt.serial_batches:
            # Deterministic: walk domain B in order as well.
            index_B = index % self.B_size
        else:
            # Random index for B avoids always seeing the same A/B pairs.
            index_B = random.randint(0, self.B_size - 1)
        B_path = self.B_paths[index_B]
        # Load, force RGB, and apply the per-domain transforms.
        A = self.transform_A(Image.open(A_path).convert("RGB"))
        B = self.transform_B(Image.open(B_path).convert("RGB"))
        return {"A": A, "B": B, "A_paths": A_path, "B_paths": B_path}

    def __len__(self):
        """Return max of the two domain sizes so every image gets visited."""
        return max(self.A_size, self.B_size)
| 39.056818 | 104 | 0.620017 | import os
import random
from PIL import Image
from .base_dataset import BaseDataset, get_transform
from .image_folder import make_dataset
class UnalignedDataset(BaseDataset):
def __init__(self, opt):
BaseDataset.__init__(self, opt)
self.dir_A = os.path.join(
opt.dataroot, opt.phase + "A"
)
self.dir_B = os.path.join(
opt.dataroot, opt.phase + "B"
)
self.A_paths = sorted(
make_dataset(self.dir_A, opt.max_dataset_size)
)
self.B_paths = sorted(
make_dataset(self.dir_B, opt.max_dataset_size)
)
self.A_size = len(self.A_paths)
self.B_size = len(self.B_paths)
btoA = self.opt.direction == "BtoA"
input_nc = (
self.opt.output_nc if btoA else self.opt.input_nc
)
output_nc = (
self.opt.input_nc if btoA else self.opt.output_nc
)
self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))
self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1))
def __getitem__(self, index):
A_path = self.A_paths[
index % self.A_size
]
if self.opt.serial_batches:
index_B = index % self.B_size
else:
index_B = random.randint(0, self.B_size - 1)
B_path = self.B_paths[index_B]
A_img = Image.open(A_path).convert("RGB")
B_img = Image.open(B_path).convert("RGB")
A = self.transform_A(A_img)
B = self.transform_B(B_img)
return {"A": A, "B": B, "A_paths": A_path, "B_paths": B_path}
def __len__(self):
return max(self.A_size, self.B_size)
| true | true |
f735a217197b558e078812728fdb87b8c77d9e70 | 2,790 | py | Python | airflow/npmjs_dags3/npmjs_static_61.py | Yanivmd/maloss | af85ac202668da88d0b4a885386a1e56703e37c8 | [
"MIT"
] | 1 | 2022-01-29T16:13:06.000Z | 2022-01-29T16:13:06.000Z | airflow/npmjs_dags3/npmjs_static_61.py | Yanivmd/maloss | af85ac202668da88d0b4a885386a1e56703e37c8 | [
"MIT"
] | null | null | null | airflow/npmjs_dags3/npmjs_static_61.py | Yanivmd/maloss | af85ac202668da88d0b4a885386a1e56703e37c8 | [
"MIT"
] | 1 | 2022-01-29T16:13:07.000Z | 2022-01-29T16:13:07.000Z | import re
import pickle
import logging
import networkx
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from datetime import datetime, timedelta
# Default task arguments shared by every operator in this DAG.
default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'start_date': datetime(2019, 1, 1),
    'email': ['airflow@example.com'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=5),
    # 'queue': 'bash_queue',
    # 'pool': 'backfill',
    'priority_weight': 10,
    # 'end_date': datetime(2016, 1, 1),
}
# don't auto-schedule the dag
# https://airflow.readthedocs.io/en/stable/scheduler.html
dag = DAG('npmjs_static_61', default_args=default_args, schedule_interval=None)
# periodically run the dag
# dag = DAG('tutorial', default_args=default_args, schedule_interval=timedelta(days=1))
# load dep_tree for packages, relative to AIRFLOW_HOME
npmjs_dep_path = "./dags/npmjs.with_stats.dep_graph_61.pickle"
# NOTE(review): the file handle from open() is never closed -- consider a
# with-block; harmless here only because the scheduler process is short-lived.
dep_tree = pickle.load(open(npmjs_dep_path, "rb"))
logging.info("loaded dep_tree with %d nodes", dep_tree.number_of_nodes())
def get_sanitized_pkgname(pkg_name):
    """Replace characters Airflow rejects in task ids with '..'."""
    # Task ids may only contain alphanumerics, '_', '.' and '-'.
    return re.sub(r'[^a-zA-Z0-9_.-]', '..', pkg_name)
def get_bash_op(pkg_name, dag, configpath='/home/maloss/config/astgen_javascript_smt.config', cache_dir='/home/maloss/metadata', outdir='/home/maloss/result'):
    """Build the BashOperator that runs static analysis for one npm package."""
    command = (
        'cd /home/maloss/src/ && '
        'python main.py astfilter --ignore_dep_version -n %s -c %s -d %s -o %s -l javascript'
        % (pkg_name, configpath, cache_dir, outdir))
    return BashOperator(
        task_id=get_sanitized_pkgname(pkg_name=pkg_name),
        execution_timeout=timedelta(hours=2),
        bash_command=command,
        dag=dag)
# Build one Airflow task per package and wire dependency edges so a package's
# analysis runs only after all of its dependencies' analyses finish.
# (leaves could be enumerated via
# https://networkx.github.io/documentation/latest/reference/algorithms/generated/networkx.algorithms.simple_paths.all_simple_paths.html)
# leaves = (v for v, d in dep_tree.out_degree() if d == 0)
pkg2op = {}
for pkg in dep_tree.nodes():
    pkg = str(pkg)
    dep_pkgs = list(dep_tree.successors(pkg))
    logging.debug("%s has %d dep_pkgs", pkg, len(dep_pkgs))
    # NOTE(review): get_sanitized_pkgname() returns an empty string only for
    # empty input, so this guard effectively skips unnamed nodes.
    if not get_sanitized_pkgname(pkg_name=pkg):
        continue
    if pkg not in pkg2op:
        pkg2op[pkg] = get_bash_op(pkg_name=pkg, dag=dag)
    else:
        # Already created as someone's dependency; its own edges are wired
        # when its node is visited, so nothing more to do here.
        continue
    pkg_task = pkg2op[pkg]
    dep_tasks = set()
    for dep_pkg in dep_pkgs:
        dep_pkg = str(dep_pkg)
        # avoid cycles (self-loops) and unnameable dependency nodes
        if dep_pkg == pkg or not get_sanitized_pkgname(pkg_name=dep_pkg):
            continue
        if dep_pkg not in pkg2op:
            pkg2op[dep_pkg] = get_bash_op(pkg_name=dep_pkg, dag=dag)
        dep_tasks.add(pkg2op[dep_pkg])
    # default trigger rule is all_success
    # NOTE(review): the original comment says "use all_done instead", but no
    # trigger_rule is passed to the operators, so all_success still applies.
    pkg_task << list(dep_tasks)
| 34.875 | 175 | 0.699283 | import re
import pickle
import logging
import networkx
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from datetime import datetime, timedelta
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': datetime(2019, 1, 1),
'email': ['airflow@example.com'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5),
'priority_weight': 10,
}
# https://airflow.readthedocs.io/en/stable/scheduler.html
dag = DAG('npmjs_static_61', default_args=default_args, schedule_interval=None)
# periodically run the dag
# dag = DAG('tutorial', default_args=default_args, schedule_interval=timedelta(days=1))
# load dep_tree for packages, relative to AIRFLOW_HOME
npmjs_dep_path = "./dags/npmjs.with_stats.dep_graph_61.pickle"
dep_tree = pickle.load(open(npmjs_dep_path, "rb"))
logging.info("loaded dep_tree with %d nodes", dep_tree.number_of_nodes())
def get_sanitized_pkgname(pkg_name):
invalid_name = re.compile(r'[^a-zA-Z0-9_.-]')
pkg_name = re.sub(invalid_name, '..', pkg_name)
return pkg_name
def get_bash_op(pkg_name, dag, configpath='/home/maloss/config/astgen_javascript_smt.config', cache_dir='/home/maloss/metadata', outdir='/home/maloss/result'):
return BashOperator(
task_id=get_sanitized_pkgname(pkg_name=pkg_name),
execution_timeout=timedelta(hours=2),
bash_command='cd /home/maloss/src/ && python main.py astfilter --ignore_dep_version -n %s -c %s -d %s -o %s -l javascript' % (pkg_name, configpath, cache_dir, outdir),
dag=dag)
# all analysis jobs
# get all leaves
# https://networkx.github.io/documentation/latest/reference/algorithms/generated/networkx.algorithms.simple_paths.all_simple_paths.html
# leaves = (v for v, d in dep_tree.out_degree() if d == 0)
pkg2op = {}
for pkg in dep_tree.nodes():
pkg = str(pkg)
dep_pkgs = list(dep_tree.successors(pkg))
logging.debug("%s has %d dep_pkgs", pkg, len(dep_pkgs))
if not get_sanitized_pkgname(pkg_name=pkg):
continue
if pkg not in pkg2op:
pkg2op[pkg] = get_bash_op(pkg_name=pkg, dag=dag)
else:
continue
pkg_task = pkg2op[pkg]
dep_tasks = set()
for dep_pkg in dep_pkgs:
dep_pkg = str(dep_pkg)
# avoid cycles
if dep_pkg == pkg or not get_sanitized_pkgname(pkg_name=dep_pkg):
continue
if dep_pkg not in pkg2op:
pkg2op[dep_pkg] = get_bash_op(pkg_name=dep_pkg, dag=dag)
dep_tasks.add(pkg2op[dep_pkg])
# default trigger rule is all_success
# use all_done instead
pkg_task << list(dep_tasks)
| true | true |
f735a259c097fe08e9820d06e460b7e690f1af8c | 3,993 | py | Python | Sandbox/qemu-sgx-master/scripts/tracetool/backend/__init__.py | Maxul/sgx_vmx_protocol | b18dcdd6cbbf10c7d609649295676f0163dd9a5e | [
"MIT"
] | 8 | 2020-03-16T06:34:49.000Z | 2021-12-06T01:50:54.000Z | Sandbox/qemu-sgx-master/scripts/tracetool/backend/__init__.py | Maxul/sgx_vmx_protocol | b18dcdd6cbbf10c7d609649295676f0163dd9a5e | [
"MIT"
] | 3 | 2021-09-06T09:14:42.000Z | 2022-03-27T08:09:54.000Z | Sandbox/qemu-sgx-master/scripts/tracetool/backend/__init__.py | Maxul/sgx_vmx_protocol | b18dcdd6cbbf10c7d609649295676f0163dd9a5e | [
"MIT"
] | 1 | 2022-03-18T07:17:40.000Z | 2022-03-18T07:17:40.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Backend management.
Creating new backends
---------------------
A new backend named 'foo-bar' corresponds to Python module
'tracetool/backend/foo_bar.py'.
A backend module should provide a docstring, whose first non-empty line will be
considered its short description.
All backends must generate their contents through the 'tracetool.out' routine.
Backend attributes
------------------
========= ====================================================================
Attribute Description
========= ====================================================================
PUBLIC If exists and is set to 'True', the backend is considered "public".
========= ====================================================================
Backend functions
-----------------
All the following functions are optional, and no output will be generated if
they do not exist.
=============================== ==============================================
Function Description
=============================== ==============================================
generate_<format>_begin(events) Generate backend- and format-specific file
header contents.
generate_<format>_end(events) Generate backend- and format-specific file
footer contents.
generate_<format>(event) Generate backend- and format-specific contents
for the given event.
=============================== ==============================================
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@linux.vnet.ibm.com"
import os
import tracetool
def get_list(only_public = False):
    """Return (name, short description) pairs for every backend module."""
    backends = [("nop", "Tracing disabled.")]
    backend_dir = tracetool.backend.__path__[0]
    modnames = sorted(
        filename.rsplit('.', 1)[0]
        for filename in os.listdir(backend_dir)
        if filename.endswith('.py') and filename != '__init__.py')
    for modname in modnames:
        imported = tracetool.try_import("tracetool.backend." + modname)
        # Should only fail if non-module files end up in the directory.
        if not imported[0]:
            continue
        module = imported[1]
        if only_public and not getattr(module, "PUBLIC", False):
            continue
        # The short description is the first line of the module docstring.
        doc = (module.__doc__ or "").strip().split("\n")[0]
        backends.append((modname.replace("_", "-"), doc))
    return backends
def exists(name):
    """Return a truthy value if the backend called *name* is available."""
    if len(name) == 0:
        return False
    # "nop" is built in and always available.
    if name == "nop":
        return True
    modname = name.replace("-", "_")
    return tracetool.try_import("tracetool.backend." + modname)[1]
class Wrapper:
    """Dispatches format-specific generation hooks to a set of backends.

    Hooks are optional: a backend missing a ``generate_<format>*`` function
    is silently skipped for that call.
    """
    def __init__(self, backends, format):
        self._backends = [name.replace("-", "_") for name in backends]
        self._format = format.replace("-", "_")
        for name in self._backends:
            assert exists(name)
        assert tracetool.format.exists(self._format)

    def _run_function(self, name, *args, **kwargs):
        # Resolve the hook named `name % format` on each backend and call it.
        hook_name = name % self._format
        for backend in self._backends:
            hook = tracetool.try_import("tracetool.backend." + backend,
                                        hook_name, None)[1]
            if hook is not None:
                hook(*args, **kwargs)

    def generate_begin(self, events, group):
        self._run_function("generate_%s_begin", events, group)

    def generate(self, event, group):
        self._run_function("generate_%s", event, group)

    def generate_end(self, events, group):
        self._run_function("generate_%s_end", events, group)
| 32.201613 | 79 | 0.547959 |
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012-2014, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@linux.vnet.ibm.com"
import os
import tracetool
def get_list(only_public = False):
res = [("nop", "Tracing disabled.")]
modnames = []
for filename in os.listdir(tracetool.backend.__path__[0]):
if filename.endswith('.py') and filename != '__init__.py':
modnames.append(filename.rsplit('.', 1)[0])
for modname in sorted(modnames):
module = tracetool.try_import("tracetool.backend." + modname)
if not module[0]:
continue
module = module[1]
public = getattr(module, "PUBLIC", False)
if only_public and not public:
continue
doc = module.__doc__
if doc is None:
doc = ""
doc = doc.strip().split("\n")[0]
name = modname.replace("_", "-")
res.append((name, doc))
return res
def exists(name):
if len(name) == 0:
return False
if name == "nop":
return True
name = name.replace("-", "_")
return tracetool.try_import("tracetool.backend." + name)[1]
class Wrapper:
def __init__(self, backends, format):
self._backends = [backend.replace("-", "_") for backend in backends]
self._format = format.replace("-", "_")
for backend in self._backends:
assert exists(backend)
assert tracetool.format.exists(self._format)
def _run_function(self, name, *args, **kwargs):
for backend in self._backends:
func = tracetool.try_import("tracetool.backend." + backend,
name % self._format, None)[1]
if func is not None:
func(*args, **kwargs)
def generate_begin(self, events, group):
self._run_function("generate_%s_begin", events, group)
def generate(self, event, group):
self._run_function("generate_%s", event, group)
def generate_end(self, events, group):
self._run_function("generate_%s_end", events, group)
| true | true |
f735a2675ac8071006c4b3c0c51d104d738b5589 | 3,992 | py | Python | pygmt/tests/test_datasets_earth_relief.py | daroari/pygmt | e022851d62814a9255ed2bb63ae092b666b832b9 | [
"BSD-3-Clause"
] | 326 | 2019-02-13T09:33:39.000Z | 2022-03-25T17:24:05.000Z | pygmt/tests/test_datasets_earth_relief.py | daroari/pygmt | e022851d62814a9255ed2bb63ae092b666b832b9 | [
"BSD-3-Clause"
] | 1,153 | 2019-01-22T19:14:32.000Z | 2022-03-31T22:07:03.000Z | pygmt/tests/test_datasets_earth_relief.py | daroari/pygmt | e022851d62814a9255ed2bb63ae092b666b832b9 | [
"BSD-3-Clause"
] | 160 | 2019-02-10T15:24:19.000Z | 2022-03-31T09:07:41.000Z | """
Test basic functionality for loading Earth relief datasets.
"""
import numpy as np
import numpy.testing as npt
import pytest
from pygmt.datasets import load_earth_relief
from pygmt.exceptions import GMTInvalidInput
def test_earth_relief_fails():
    """
    Make sure earth relief fails for invalid resolutions.
    """
    bad_resolutions = ["1m", "1d", "bla", "60d", "001m", "03", 60]
    for bad in bad_resolutions:
        with pytest.raises(GMTInvalidInput):
            load_earth_relief(resolution=bad)
# Only test 01d and 30m to avoid downloading large datasets in CI
def test_earth_relief_01d():
    """
    Test some properties of the earth relief 01d data.
    """
    grid = load_earth_relief(resolution="01d", registration="gridline")
    assert grid.shape == (181, 361)
    npt.assert_allclose(grid.lon, np.arange(-180, 181, 1))
    npt.assert_allclose(grid.lat, np.arange(-90, 91, 1))
    npt.assert_allclose(grid.min(), -8592.5)
    npt.assert_allclose(grid.max(), 5559.0)
def test_earth_relief_01d_with_region():
    """
    Test loading low-resolution earth relief with 'region'.
    """
    grid = load_earth_relief(
        resolution="01d", region=[-10, 10, -5, 5], registration="gridline"
    )
    assert grid.shape == (11, 21)
    npt.assert_allclose(grid.lon, np.arange(-10, 11, 1))
    npt.assert_allclose(grid.lat, np.arange(-5, 6, 1))
    npt.assert_allclose(grid.min(), -5145)
    npt.assert_allclose(grid.max(), 805.5)
def test_earth_relief_30m():
    """
    Test some properties of the earth relief 30m data.
    """
    grid = load_earth_relief(resolution="30m", registration="gridline")
    assert grid.shape == (361, 721)
    npt.assert_allclose(grid.lon, np.arange(-180, 180.5, 0.5))
    npt.assert_allclose(grid.lat, np.arange(-90, 90.5, 0.5))
    npt.assert_allclose(grid.min(), -9460.5)
    npt.assert_allclose(grid.max(), 5887.5)
def test_earth_relief_05m_with_region():
    """
    Test loading a subregion of high-resolution earth relief grid.
    """
    west, east, south, north = 120, 160, 30, 60
    grid = load_earth_relief(
        resolution="05m", region=[west, east, south, north], registration="gridline"
    )
    assert grid.coords["lat"].data.min() == south
    assert grid.coords["lat"].data.max() == north
    assert grid.coords["lon"].data.min() == west
    assert grid.coords["lon"].data.max() == east
    assert grid.data.min() == -9633.0
    assert grid.data.max() == 2532.0
    assert grid.sizes["lat"] == 361
    assert grid.sizes["lon"] == 481
def test_earth_relief_05m_without_region():
    """
    Test loading high-resolution earth relief without passing 'region'.
    """
    # High resolutions require an explicit region (avoids huge downloads).
    with pytest.raises(GMTInvalidInput):
        load_earth_relief(resolution="05m")
def test_earth_relief_03s_landonly_srtm():
    """
    Test loading original 3 arc-second land-only SRTM tiles.
    """
    grid = load_earth_relief(
        "03s", region=[135, 136, 35, 36], registration="gridline", use_srtm=True
    )
    assert grid.coords["lat"].data.min() == 35.0
    assert grid.coords["lat"].data.max() == 36.0
    assert grid.coords["lon"].data.min() == 135.0
    assert grid.coords["lon"].data.max() == 136.0
    # With use_srtm=False the combined grid would dip to -305.51846 offshore.
    assert grid.data.min() == -6.0
    assert grid.data.max() == 1191.0
    assert grid.sizes["lat"] == 1201
    assert grid.sizes["lon"] == 1201
def test_earth_relief_incorrect_registration():
    """
    Test loading earth relief with incorrect registration type.
    """
    # Only "gridline" and "pixel" are valid registrations.
    with pytest.raises(GMTInvalidInput):
        load_earth_relief(registration="improper_type")
def test_earth_relief_invalid_resolution_registration_combination():
    """
    Test loading earth relief with invalid combination of resolution and
    registration.
    """
    invalid_combinations = [
        ("15s", "gridline"),
        ("03s", "pixel"),
        ("01s", "pixel"),
    ]
    for resolution, registration in invalid_combinations:
        with pytest.raises(GMTInvalidInput):
            load_earth_relief(resolution=resolution, registration=registration)
| 31.936 | 80 | 0.665832 | import numpy as np
import numpy.testing as npt
import pytest
from pygmt.datasets import load_earth_relief
from pygmt.exceptions import GMTInvalidInput
def test_earth_relief_fails():
resolutions = "1m 1d bla 60d 001m 03".split()
resolutions.append(60)
for resolution in resolutions:
with pytest.raises(GMTInvalidInput):
load_earth_relief(resolution=resolution)
def test_earth_relief_01d():
data = load_earth_relief(resolution="01d", registration="gridline")
assert data.shape == (181, 361)
npt.assert_allclose(data.lat, np.arange(-90, 91, 1))
npt.assert_allclose(data.lon, np.arange(-180, 181, 1))
npt.assert_allclose(data.min(), -8592.5)
npt.assert_allclose(data.max(), 5559.0)
def test_earth_relief_01d_with_region():
data = load_earth_relief(
resolution="01d", region=[-10, 10, -5, 5], registration="gridline"
)
assert data.shape == (11, 21)
npt.assert_allclose(data.lat, np.arange(-5, 6, 1))
npt.assert_allclose(data.lon, np.arange(-10, 11, 1))
npt.assert_allclose(data.min(), -5145)
npt.assert_allclose(data.max(), 805.5)
def test_earth_relief_30m():
data = load_earth_relief(resolution="30m", registration="gridline")
assert data.shape == (361, 721)
npt.assert_allclose(data.lat, np.arange(-90, 90.5, 0.5))
npt.assert_allclose(data.lon, np.arange(-180, 180.5, 0.5))
npt.assert_allclose(data.min(), -9460.5)
npt.assert_allclose(data.max(), 5887.5)
def test_earth_relief_05m_with_region():
data = load_earth_relief(
resolution="05m", region=[120, 160, 30, 60], registration="gridline"
)
assert data.coords["lat"].data.min() == 30.0
assert data.coords["lat"].data.max() == 60.0
assert data.coords["lon"].data.min() == 120.0
assert data.coords["lon"].data.max() == 160.0
assert data.data.min() == -9633.0
assert data.data.max() == 2532.0
assert data.sizes["lat"] == 361
assert data.sizes["lon"] == 481
def test_earth_relief_05m_without_region():
with pytest.raises(GMTInvalidInput):
load_earth_relief("05m")
def test_earth_relief_03s_landonly_srtm():
data = load_earth_relief(
"03s", region=[135, 136, 35, 36], registration="gridline", use_srtm=True
)
assert data.coords["lat"].data.min() == 35.0
assert data.coords["lat"].data.max() == 36.0
assert data.coords["lon"].data.min() == 135.0
assert data.coords["lon"].data.max() == 136.0
assert data.data.min() == -6.0
assert data.data.max() == 1191.0
assert data.sizes["lat"] == 1201
assert data.sizes["lon"] == 1201
def test_earth_relief_incorrect_registration():
with pytest.raises(GMTInvalidInput):
load_earth_relief(registration="improper_type")
def test_earth_relief_invalid_resolution_registration_combination():
for resolution, registration in [
("15s", "gridline"),
("03s", "pixel"),
("01s", "pixel"),
]:
with pytest.raises(GMTInvalidInput):
load_earth_relief(resolution=resolution, registration=registration)
| true | true |
f735a3ebcec803e50af52d4d74ca775eb29ee26a | 11,273 | py | Python | smartRemotes/imports/bt-ble/gapAdvertise.py | HeadHodge/My-SmartHome-Projects | a5db1bf5d54f2840154e7cfb1044b3fb4ec9f77d | [
"MIT"
] | 1 | 2022-02-20T04:28:49.000Z | 2022-02-20T04:28:49.000Z | smartRemotes/imports/bt-ble/gapAdvertise.py | HeadHodge/smartRemotes-v0.2 | 9a3c671dc3c000a911b55ebe2f09e6278a874285 | [
"MIT"
] | null | null | null | smartRemotes/imports/bt-ble/gapAdvertise.py | HeadHodge/smartRemotes-v0.2 | 9a3c671dc3c000a911b55ebe2f09e6278a874285 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# SPDX-License-Identifier: LGPL-2.1-or-later
from __future__ import print_function
import argparse
import dbus
import dbus.exceptions
import dbus.mainloop.glib
import dbus.service
import time
import threading
try:
from gi.repository import GObject # python3
except ImportError:
import gobject as GObject # python2
# GLib main loop; created in main() and stopped by the various callbacks.
mainloop = None
# Well-known BlueZ D-Bus names and interfaces.
BLUEZ_SERVICE_NAME = 'org.bluez'
LE_ADVERTISING_MANAGER_IFACE = 'org.bluez.LEAdvertisingManager1'
DBUS_OM_IFACE = 'org.freedesktop.DBus.ObjectManager'
DBUS_PROP_IFACE = 'org.freedesktop.DBus.Properties'
LE_ADVERTISEMENT_IFACE = 'org.bluez.LEAdvertisement1'
class InvalidArgsException(dbus.exceptions.DBusException):
    """D-Bus error returned when a caller passes malformed arguments."""
    _dbus_error_name = 'org.freedesktop.DBus.Error.InvalidArgs'
class NotSupportedException(dbus.exceptions.DBusException):
    """D-Bus error for operations this service does not support."""
    _dbus_error_name = 'org.bluez.Error.NotSupported'
class NotPermittedException(dbus.exceptions.DBusException):
    """D-Bus error for operations the caller is not permitted to perform."""
    _dbus_error_name = 'org.bluez.Error.NotPermitted'
class InvalidValueLengthException(dbus.exceptions.DBusException):
    """D-Bus error for attribute values of invalid length."""
    _dbus_error_name = 'org.bluez.Error.InvalidValueLength'
class FailedException(dbus.exceptions.DBusException):
    """Generic BlueZ failure error."""
    _dbus_error_name = 'org.bluez.Error.Failed'
try:
from gi.repository import GObject
except ImportError:
import gobject as GObject
# Pairing-agent registration parameters.
BUS_NAME = 'org.bluez'
AGENT_INTERFACE = 'org.bluez.Agent1'
AGENT_PATH = "/org/bluez/justWorks/agent"
# "NoInputNoOutput" requests just-works pairing (no user interaction).
AGENT_CAPABILITY = "NoInputNoOutput"
# Shared state used by the pairing callbacks.
bus = None
# NOTE(review): device_obj and dev_path are never assigned in this file --
# pair_reply()/pair_error() rely on them; confirm they are set elsewhere.
device_obj = None
dev_path = None
def ask(prompt):
    """Prompt the operator and return the entered line.

    Uses ``raw_input`` on Python 2 and falls back to ``input`` on Python 3,
    where ``raw_input`` no longer exists.
    """
    try:
        return raw_input(prompt)
    except NameError:
        # Bare except here previously swallowed KeyboardInterrupt/SystemExit
        # too; only the missing-builtin case should trigger the fallback.
        return input(prompt)
def set_trusted(path):
    """Mark the device at *path* as trusted via the D-Bus properties API."""
    device_props = dbus.Interface(
        bus.get_object("org.bluez", path), "org.freedesktop.DBus.Properties")
    device_props.Set("org.bluez.Device1", "Trusted", True)
def dev_connect(path):
    """Initiate a connection to the Bluetooth device at *path*."""
    device = dbus.Interface(
        bus.get_object("org.bluez", path), "org.bluez.Device1")
    device.Connect()
class Rejected(dbus.DBusException):
    """Raised by the agent to reject a pairing/authorization request."""
    _dbus_error_name = "org.bluez.Error.Rejected"
def pair_reply():
    """Success callback for Device1.Pair(): trust, connect, stop the loop."""
    print("Device paired")
    # NOTE(review): dev_path is initialised to None and never assigned in
    # this file -- confirm it is set before pairing is attempted.
    set_trusted(dev_path)
    dev_connect(dev_path)
    mainloop.quit()
def pair_error(error):
    """Failure callback for Device1.Pair(); cancels pairing on timeout."""
    err_name = error.get_dbus_name()
    if err_name == "org.freedesktop.DBus.Error.NoReply" and device_obj:
        print("Timed out. Cancelling pairing")
        device_obj.CancelPairing()
    else:
        print("Creating device failed: %s" % (error))
def register_ad_cb():
    """Async success callback for RegisterAdvertisement()."""
    print('Advertisement registered')
def register_ad_error_cb(error):
    """Async failure callback for RegisterAdvertisement(): log and stop."""
    print('Failed to register advertisement: ' + str(error))
    mainloop.quit()
def find_adapter(bus):
    """Return the object path of the first adapter that supports LE
    advertising (exposes LEAdvertisingManager1), or None if there is none."""
    object_manager = dbus.Interface(bus.get_object(BLUEZ_SERVICE_NAME, '/'),
                                    DBUS_OM_IFACE)
    for path, interfaces in object_manager.GetManagedObjects().items():
        if LE_ADVERTISING_MANAGER_IFACE in interfaces:
            return path
    return None
def shutdown(timeout):
    """Sleep *timeout* seconds then stop the main loop (run on a thread)."""
    print('Advertising for {} seconds...'.format(timeout))
    time.sleep(timeout)
    mainloop.quit()
class Agent(dbus.service.Object):
    """BlueZ pairing agent (org.bluez.Agent1) driven by console prompts.

    NOTE(review): the agent is registered with capability "NoInputNoOutput"
    (AGENT_CAPABILITY), which requests just-works pairing, yet interactive
    PIN/passkey handlers are implemented here -- confirm whether BlueZ ever
    invokes them with that capability.
    """
    # When True, Release() also terminates the GLib main loop.
    exit_on_release = True
    def set_exit_on_release(self, exit_on_release):
        """Choose whether Release() should stop the main loop."""
        self.exit_on_release = exit_on_release
    @dbus.service.method(AGENT_INTERFACE,
                         in_signature="", out_signature="")
    def Release(self):
        """Called by BlueZ when the agent is unregistered."""
        print("Release")
        if self.exit_on_release:
            mainloop.quit()
    @dbus.service.method(AGENT_INTERFACE,
                         in_signature="os", out_signature="")
    def AuthorizeService(self, device, uuid):
        """Ask the operator to authorize a service connection request."""
        print("AuthorizeService (%s, %s)" % (device, uuid))
        authorize = ask("Authorize connection (yes/no): ")
        if (authorize == "yes"):
            return
        raise Rejected("Connection rejected by user")
    @dbus.service.method(AGENT_INTERFACE,
                         in_signature="o", out_signature="s")
    def RequestPinCode(self, device):
        """Prompt for a PIN code (legacy pairing); trusts the device first."""
        print("RequestPinCode (%s)" % (device))
        set_trusted(device)
        return ask("Enter PIN Code: ")
    @dbus.service.method(AGENT_INTERFACE,
                         in_signature="o", out_signature="u")
    def RequestPasskey(self, device):
        """Prompt for a numeric passkey; returned to BlueZ as uint32."""
        print("RequestPasskey (%s)" % (device))
        set_trusted(device)
        passkey = ask("Enter passkey: ")
        return dbus.UInt32(passkey)
    @dbus.service.method(AGENT_INTERFACE,
                         in_signature="ouq", out_signature="")
    def DisplayPasskey(self, device, passkey, entered):
        """Show the passkey the remote side is typing (display-only flow)."""
        print("DisplayPasskey (%s, %06u entered %u)" %
              (device, passkey, entered))
    @dbus.service.method(AGENT_INTERFACE,
                         in_signature="os", out_signature="")
    def DisplayPinCode(self, device, pincode):
        """Show the PIN code to be typed on the remote device."""
        print("DisplayPinCode (%s, %s)" % (device, pincode))
    @dbus.service.method(AGENT_INTERFACE,
                         in_signature="ou", out_signature="")
    def RequestConfirmation(self, device, passkey):
        """Ask the operator to confirm a numeric-comparison passkey."""
        print("RequestConfirmation (%s, %06d)" % (device, passkey))
        confirm = ask("Confirm passkey (yes/no): ")
        if (confirm == "yes"):
            set_trusted(device)
            return
        raise Rejected("Passkey doesn't match")
    @dbus.service.method(AGENT_INTERFACE,
                         in_signature="o", out_signature="")
    def RequestAuthorization(self, device):
        """Ask the operator to authorize a pairing request."""
        print("RequestAuthorization (%s)" % (device))
        auth = ask("Authorize? (yes/no): ")
        if (auth == "yes"):
            return
        raise Rejected("Pairing rejected")
    @dbus.service.method(AGENT_INTERFACE,
                         in_signature="", out_signature="")
    def Cancel(self):
        """Called by BlueZ to cancel an outstanding agent request."""
        print("Cancel")
class Advertisement(dbus.service.Object):
    """D-Bus object implementing org.bluez.LEAdvertisement1.

    Holds the advertisement payload (UUIDs, local name, vendor data, ...)
    and serves it to BlueZ via the Properties GetAll call, which BlueZ
    performs when the advertisement is registered.
    """
    PATH_BASE = '/org/bluez/example/advertisement'
    def __init__(self, bus, index, advertising_type):
        # Exported at PATH_BASE + index; advertising_type is e.g. 'peripheral'.
        self.path = self.PATH_BASE + str(index)
        self.bus = bus
        self.ad_type = advertising_type
        # Optional payload fields; None means "omit from the advertisement".
        self.service_uuids = None
        self.manufacturer_data = None
        self.solicit_uuids = None
        self.service_data = None
        self.local_name = None
        self.include_tx_power = False
        self.data = None
        dbus.service.Object.__init__(self, bus, self.path)
    def get_properties(self):
        """Build the LEAdvertisement1 property dictionary for BlueZ."""
        properties = dict()
        properties['Type'] = self.ad_type
        if self.service_uuids is not None:
            properties['ServiceUUIDs'] = dbus.Array(self.service_uuids,
                                                    signature='s')
        if self.solicit_uuids is not None:
            properties['SolicitUUIDs'] = dbus.Array(self.solicit_uuids,
                                                    signature='s')
        if self.manufacturer_data is not None:
            properties['ManufacturerData'] = dbus.Dictionary(
                self.manufacturer_data, signature='qv')
        if self.service_data is not None:
            properties['ServiceData'] = dbus.Dictionary(self.service_data,
                                                        signature='sv')
        if self.local_name is not None:
            properties['LocalName'] = dbus.String(self.local_name)
        # NOTE(review): 961 (0x03C1) is the standard GAP "keyboard"
        # appearance value -- confirm it matches the intended device type.
        properties['Appearance'] = dbus.UInt16(961)
        properties['Discoverable'] = dbus.Boolean(True)
        # 0 = stay discoverable with no timeout.
        properties['DiscoverableTimeout'] = dbus.UInt16(0)
        if self.include_tx_power:
            properties['Includes'] = dbus.Array(["tx-power"], signature='s')
        if self.data is not None:
            properties['Data'] = dbus.Dictionary(
                self.data, signature='yv')
        return {LE_ADVERTISEMENT_IFACE: properties}
    def get_path(self):
        """Return this object's D-Bus path."""
        return dbus.ObjectPath(self.path)
    def add_service_uuid(self, uuid):
        """Advertise an additional service UUID."""
        if not self.service_uuids:
            self.service_uuids = []
        self.service_uuids.append(uuid)
    def add_solicit_uuid(self, uuid):
        """Add a solicited service UUID."""
        if not self.solicit_uuids:
            self.solicit_uuids = []
        self.solicit_uuids.append(uuid)
    def add_manufacturer_data(self, manuf_code, data):
        """Set manufacturer-specific data (uint16 company id -> bytes)."""
        if not self.manufacturer_data:
            self.manufacturer_data = dbus.Dictionary({}, signature='qv')
        self.manufacturer_data[manuf_code] = dbus.Array(data, signature='y')
    def add_service_data(self, uuid, data):
        """Set service data bytes for the given service UUID."""
        if not self.service_data:
            self.service_data = dbus.Dictionary({}, signature='sv')
        self.service_data[uuid] = dbus.Array(data, signature='y')
    def add_local_name(self, name):
        """Set the advertised local name."""
        if not self.local_name:
            self.local_name = ""
        self.local_name = dbus.String(name)
    def add_data(self, ad_type, data):
        """Add a raw advertising-data structure (AD type byte -> bytes)."""
        if not self.data:
            self.data = dbus.Dictionary({}, signature='yv')
        self.data[ad_type] = dbus.Array(data, signature='y')
    @dbus.service.method(DBUS_PROP_IFACE,
                         in_signature='s',
                         out_signature='a{sv}')
    def GetAll(self, interface):
        """Properties.GetAll handler; only LEAdvertisement1 is served."""
        print('GetAll')
        if interface != LE_ADVERTISEMENT_IFACE:
            raise InvalidArgsException()
        print('returning props')
        return self.get_properties()[LE_ADVERTISEMENT_IFACE]
    @dbus.service.method(LE_ADVERTISEMENT_IFACE,
                         in_signature='',
                         out_signature='')
    def Release(self):
        """Called by BlueZ when the advertisement is unregistered."""
        print('%s: Released!' % self.path)
class TestAdvertisement(Advertisement):
    """Concrete peripheral advertisement: local name + TX power only.

    The commented-out lines show how to also advertise service UUIDs,
    manufacturer data, service data, or raw AD structures.
    """
    def __init__(self, bus, index):
        Advertisement.__init__(self, bus, index, 'peripheral')
        #self.add_service_uuid('180D')
        #self.add_service_uuid('180F')
        #self.add_manufacturer_data(0xffff, [0x00, 0x01, 0x02, 0x03])
        #self.add_service_data('9999', [0x00, 0x01, 0x02, 0x03, 0x04])
        self.add_local_name('TestAdvertisement')
        self.include_tx_power = True
        #self.add_data(0x26, [0x01, 0x01, 0x00])
def main(timeout=0):
    """Register a pairing agent and a BLE advertisement, then run the loop.

    timeout > 0 advertises for that many seconds (a worker thread stops the
    loop); timeout == 0 advertises until the loop is quit externally.
    """
    global mainloop
    dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
    bus = dbus.SystemBus()
    adapter = find_adapter(bus)
    if not adapter:
        print('LEAdvertisingManager1 interface not found')
        return
    path = AGENT_PATH
    capability = AGENT_CAPABILITY
    # "NoInputNoOutput" agent: pairing succeeds without user interaction.
    agent = Agent(bus, path)
    obj = bus.get_object(BUS_NAME, "/org/bluez");
    manager = dbus.Interface(obj, "org.bluez.AgentManager1")
    manager.RegisterAgent(path, capability)
    manager.RequestDefaultAgent(path)
    print("Agent registered")
    # Power the adapter on before registering the advertisement.
    adapter_props = dbus.Interface(bus.get_object(BLUEZ_SERVICE_NAME, adapter),
                                   "org.freedesktop.DBus.Properties")
    adapter_props.Set("org.bluez.Adapter1", "Powered", dbus.Boolean(1))
    ad_manager = dbus.Interface(bus.get_object(BLUEZ_SERVICE_NAME, adapter),
                                LE_ADVERTISING_MANAGER_IFACE)
    test_advertisement = TestAdvertisement(bus, 0)
    mainloop = GObject.MainLoop()
    ad_manager.RegisterAdvertisement(test_advertisement.get_path(), {},
                                     reply_handler=register_ad_cb,
                                     error_handler=register_ad_error_cb)
    if timeout > 0:
        threading.Thread(target=shutdown, args=(timeout,)).start()
    else:
        print('Advertising forever...')
    mainloop.run()              # blocks until mainloop.quit() is called
    ad_manager.UnregisterAdvertisement(test_advertisement)
    print('Advertisement unregistered')
    dbus.service.Object.remove_from_connection(test_advertisement)
if __name__ == '__main__':
    # CLI entry point: optional --timeout limits how long we advertise.
    parser = argparse.ArgumentParser()
    parser.add_argument('--timeout', default=0, type=int, help="advertise " +
                        "for this many seconds then stop, 0=run forever " +
                        "(default: 0)")
    args = parser.parse_args()
    main(args.timeout)
| 30.550136 | 79 | 0.665661 |
from __future__ import print_function
import argparse
import dbus
import dbus.exceptions
import dbus.mainloop.glib
import dbus.service
import time
import threading
try:
from gi.repository import GObject
except ImportError:
import gobject as GObject
mainloop = None
BLUEZ_SERVICE_NAME = 'org.bluez'
LE_ADVERTISING_MANAGER_IFACE = 'org.bluez.LEAdvertisingManager1'
DBUS_OM_IFACE = 'org.freedesktop.DBus.ObjectManager'
DBUS_PROP_IFACE = 'org.freedesktop.DBus.Properties'
LE_ADVERTISEMENT_IFACE = 'org.bluez.LEAdvertisement1'
# D-Bus error types returned to BlueZ; the error name is what crosses the bus.
class InvalidArgsException(dbus.exceptions.DBusException):
    _dbus_error_name = 'org.freedesktop.DBus.Error.InvalidArgs'
class NotSupportedException(dbus.exceptions.DBusException):
    _dbus_error_name = 'org.bluez.Error.NotSupported'
class NotPermittedException(dbus.exceptions.DBusException):
    _dbus_error_name = 'org.bluez.Error.NotPermitted'
class InvalidValueLengthException(dbus.exceptions.DBusException):
    _dbus_error_name = 'org.bluez.Error.InvalidValueLength'
class FailedException(dbus.exceptions.DBusException):
    _dbus_error_name = 'org.bluez.Error.Failed'
try:
from gi.repository import GObject
except ImportError:
import gobject as GObject
BUS_NAME = 'org.bluez'
AGENT_INTERFACE = 'org.bluez.Agent1'
AGENT_PATH = "/org/bluez/justWorks/agent"
AGENT_CAPABILITY = "NoInputNoOutput"
bus = None
device_obj = None
dev_path = None
def ask(prompt):
    """Prompt on stdin, working on both Python 2 (raw_input) and 3 (input).

    The original used a bare ``except:``, which would also swallow
    KeyboardInterrupt/EOFError raised by the Python 2 path; only NameError
    (raw_input undefined on Python 3) should trigger the fallback.
    """
    try:
        return raw_input(prompt)
    except NameError:
        return input(prompt)
def set_trusted(path):
    """Mark the device at *path* Trusted so later connections skip authorisation."""
    props = dbus.Interface(bus.get_object("org.bluez", path),
                           "org.freedesktop.DBus.Properties")
    props.Set("org.bluez.Device1", "Trusted", True)
def dev_connect(path):
    """Connect to the remote device at the given D-Bus object path."""
    dev = dbus.Interface(bus.get_object("org.bluez", path),
                         "org.bluez.Device1")
    dev.Connect()
class Rejected(dbus.DBusException):
    # Returned to BlueZ when the user declines a pairing/authorisation prompt.
    _dbus_error_name = "org.bluez.Error.Rejected"
def pair_reply():
    """Success callback for Device1.Pair(): trust + connect, then stop the loop."""
    print("Device paired")
    set_trusted(dev_path)
    dev_connect(dev_path)
    mainloop.quit()
def pair_error(error):
    """Error callback for Device1.Pair(); cancels the attempt on timeout."""
    err_name = error.get_dbus_name()
    if err_name == "org.freedesktop.DBus.Error.NoReply" and device_obj:
        print("Timed out. Cancelling pairing")
        device_obj.CancelPairing()
    else:
        print("Creating device failed: %s" % (error))
def register_ad_cb():
    """Success callback for RegisterAdvertisement()."""
    print('Advertisement registered')
def register_ad_error_cb(error):
    """Error callback for RegisterAdvertisement(): abort the main loop."""
    print('Failed to register advertisement: ' + str(error))
    mainloop.quit()
def find_adapter(bus):
    """Return the object path of the first adapter that supports LE advertising.

    Walks the BlueZ object tree via ObjectManager and picks the first object
    exposing org.bluez.LEAdvertisingManager1; returns None when there is none.
    """
    object_manager = dbus.Interface(bus.get_object(BLUEZ_SERVICE_NAME, '/'),
                                    DBUS_OM_IFACE)
    managed = object_manager.GetManagedObjects()
    return next(
        (path for path, interfaces in managed.items()
         if LE_ADVERTISING_MANAGER_IFACE in interfaces),
        None,
    )
def shutdown(timeout):
    """Worker-thread target: sleep *timeout* seconds, then stop the main loop."""
    print('Advertising for {} seconds...'.format(timeout))
    time.sleep(timeout)
    mainloop.quit()
class Agent(dbus.service.Object):
    """BlueZ org.bluez.Agent1 implementation driving pairing via console prompts.

    Each method below is invoked by bluetoothd over D-Bus during pairing;
    raising Rejected aborts the corresponding request.
    """
    # When True, Release() also stops the GLib main loop.
    exit_on_release = True
    def set_exit_on_release(self, exit_on_release):
        self.exit_on_release = exit_on_release
    @dbus.service.method(AGENT_INTERFACE,
                         in_signature="", out_signature="")
    def Release(self):
        # BlueZ unregistered the agent.
        print("Release")
        if self.exit_on_release:
            mainloop.quit()
    @dbus.service.method(AGENT_INTERFACE,
                         in_signature="os", out_signature="")
    def AuthorizeService(self, device, uuid):
        # Ask the operator whether the service connection may proceed.
        print("AuthorizeService (%s, %s)" % (device, uuid))
        authorize = ask("Authorize connection (yes/no): ")
        if (authorize == "yes"):
            return
        raise Rejected("Connection rejected by user")
    @dbus.service.method(AGENT_INTERFACE,
                         in_signature="o", out_signature="s")
    def RequestPinCode(self, device):
        # Legacy PIN pairing: trust the device, then return the typed PIN.
        print("RequestPinCode (%s)" % (device))
        set_trusted(device)
        return ask("Enter PIN Code: ")
    @dbus.service.method(AGENT_INTERFACE,
                         in_signature="o", out_signature="u")
    def RequestPasskey(self, device):
        # Numeric-passkey pairing; the bus expects a uint32.
        print("RequestPasskey (%s)" % (device))
        set_trusted(device)
        passkey = ask("Enter passkey: ")
        return dbus.UInt32(passkey)
    @dbus.service.method(AGENT_INTERFACE,
                         in_signature="ouq", out_signature="")
    def DisplayPasskey(self, device, passkey, entered):
        # Show the passkey the remote side must type; 'entered' is progress.
        print("DisplayPasskey (%s, %06u entered %u)" %
              (device, passkey, entered))
    @dbus.service.method(AGENT_INTERFACE,
                         in_signature="os", out_signature="")
    def DisplayPinCode(self, device, pincode):
        print("DisplayPinCode (%s, %s)" % (device, pincode))
    @dbus.service.method(AGENT_INTERFACE,
                         in_signature="ou", out_signature="")
    def RequestConfirmation(self, device, passkey):
        # Numeric-comparison pairing: operator confirms both sides match.
        print("RequestConfirmation (%s, %06d)" % (device, passkey))
        confirm = ask("Confirm passkey (yes/no): ")
        if (confirm == "yes"):
            set_trusted(device)
            return
        raise Rejected("Passkey doesn't match")
    @dbus.service.method(AGENT_INTERFACE,
                         in_signature="o", out_signature="")
    def RequestAuthorization(self, device):
        # Just-works pairing authorisation.
        print("RequestAuthorization (%s)" % (device))
        auth = ask("Authorize? (yes/no): ")
        if (auth == "yes"):
            return
        raise Rejected("Pairing rejected")
    @dbus.service.method(AGENT_INTERFACE,
                         in_signature="", out_signature="")
    def Cancel(self):
        # BlueZ cancelled an outstanding request.
        print("Cancel")
class Advertisement(dbus.service.Object):
    """Base implementation of org.bluez.LEAdvertisement1.

    Subclasses populate the optional payload fields (service UUIDs,
    manufacturer data, local name, ...); BlueZ reads them back through the
    DBus.Properties GetAll handler below.
    """
    PATH_BASE = '/org/bluez/example/advertisement'
    def __init__(self, bus, index, advertising_type):
        self.path = self.PATH_BASE + str(index)
        self.bus = bus
        self.ad_type = advertising_type
        # Optional fields stay None until set so get_properties() only
        # reports what was explicitly configured.
        self.service_uuids = None
        self.manufacturer_data = None
        self.solicit_uuids = None
        self.service_data = None
        self.local_name = None
        self.include_tx_power = False
        self.data = None
        dbus.service.Object.__init__(self, bus, self.path)
    def get_properties(self):
        """Build the property dictionary BlueZ expects for this advertisement."""
        properties = dict()
        properties['Type'] = self.ad_type
        if self.service_uuids is not None:
            properties['ServiceUUIDs'] = dbus.Array(self.service_uuids,
                                                    signature='s')
        if self.solicit_uuids is not None:
            properties['SolicitUUIDs'] = dbus.Array(self.solicit_uuids,
                                                    signature='s')
        if self.manufacturer_data is not None:
            properties['ManufacturerData'] = dbus.Dictionary(
                self.manufacturer_data, signature='qv')
        if self.service_data is not None:
            properties['ServiceData'] = dbus.Dictionary(self.service_data,
                                                        signature='sv')
        if self.local_name is not None:
            properties['LocalName'] = dbus.String(self.local_name)
        properties['Appearance'] = dbus.UInt16(961)
        properties['Discoverable'] = dbus.Boolean(True)
        properties['DiscoverableTimeout'] = dbus.UInt16(0)
        if self.include_tx_power:
            properties['Includes'] = dbus.Array(["tx-power"], signature='s')
        if self.data is not None:
            properties['Data'] = dbus.Dictionary(
                self.data, signature='yv')
        return {LE_ADVERTISEMENT_IFACE: properties}
    def get_path(self):
        """Return the D-Bus object path this advertisement is exported at."""
        return dbus.ObjectPath(self.path)
    def add_service_uuid(self, uuid):
        if not self.service_uuids:
            self.service_uuids = []
        self.service_uuids.append(uuid)
    def add_solicit_uuid(self, uuid):
        if not self.solicit_uuids:
            self.solicit_uuids = []
        self.solicit_uuids.append(uuid)
    def add_manufacturer_data(self, manuf_code, data):
        # 'qv': uint16 company id -> variant; payload marshalled as bytes.
        if not self.manufacturer_data:
            self.manufacturer_data = dbus.Dictionary({}, signature='qv')
        self.manufacturer_data[manuf_code] = dbus.Array(data, signature='y')
    def add_service_data(self, uuid, data):
        if not self.service_data:
            self.service_data = dbus.Dictionary({}, signature='sv')
        self.service_data[uuid] = dbus.Array(data, signature='y')
    def add_local_name(self, name):
        # Fix: the old "initialise to ''" guard was a dead store -- the value
        # was unconditionally overwritten on the next line. Behaviour unchanged.
        self.local_name = dbus.String(name)
    def add_data(self, ad_type, data):
        if not self.data:
            self.data = dbus.Dictionary({}, signature='yv')
        self.data[ad_type] = dbus.Array(data, signature='y')
    @dbus.service.method(DBUS_PROP_IFACE,
                         in_signature='s',
                         out_signature='a{sv}')
    def GetAll(self, interface):
        # BlueZ fetches the advertisement content through this handler.
        print('GetAll')
        if interface != LE_ADVERTISEMENT_IFACE:
            raise InvalidArgsException()
        print('returning props')
        return self.get_properties()[LE_ADVERTISEMENT_IFACE]
    @dbus.service.method(LE_ADVERTISEMENT_IFACE,
                         in_signature='',
                         out_signature='')
    def Release(self):
        # Called by BlueZ when the advertisement is dropped.
        print('%s: Released!' % self.path)
class TestAdvertisement(Advertisement):
    """Concrete peripheral advertisement: local name plus TX power only."""
    def __init__(self, bus, index):
        Advertisement.__init__(self, bus, index, 'peripheral')
        #self.add_service_uuid('180D')
        #self.add_service_uuid('180F')
        #self.add_manufacturer_data(0xffff, [0x00, 0x01, 0x02, 0x03])
        #self.add_service_data('9999', [0x00, 0x01, 0x02, 0x03, 0x04])
        self.add_local_name('TestAdvertisement')
        self.include_tx_power = True
        #self.add_data(0x26, [0x01, 0x01, 0x00])
def main(timeout=0):
    """Register a pairing agent and a BLE advertisement, then run the loop.

    timeout > 0 advertises for that many seconds (a worker thread stops the
    loop); timeout == 0 advertises until the loop is quit externally.
    """
    global mainloop
    dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
    bus = dbus.SystemBus()
    adapter = find_adapter(bus)
    if not adapter:
        print('LEAdvertisingManager1 interface not found')
        return
    path = AGENT_PATH
    capability = AGENT_CAPABILITY
    # "NoInputNoOutput" agent: pairing succeeds without user interaction.
    agent = Agent(bus, path)
    obj = bus.get_object(BUS_NAME, "/org/bluez");
    manager = dbus.Interface(obj, "org.bluez.AgentManager1")
    manager.RegisterAgent(path, capability)
    manager.RequestDefaultAgent(path)
    print("Agent registered")
    # Power the adapter on before registering the advertisement.
    adapter_props = dbus.Interface(bus.get_object(BLUEZ_SERVICE_NAME, adapter),
                                   "org.freedesktop.DBus.Properties")
    adapter_props.Set("org.bluez.Adapter1", "Powered", dbus.Boolean(1))
    ad_manager = dbus.Interface(bus.get_object(BLUEZ_SERVICE_NAME, adapter),
                                LE_ADVERTISING_MANAGER_IFACE)
    test_advertisement = TestAdvertisement(bus, 0)
    mainloop = GObject.MainLoop()
    ad_manager.RegisterAdvertisement(test_advertisement.get_path(), {},
                                     reply_handler=register_ad_cb,
                                     error_handler=register_ad_error_cb)
    if timeout > 0:
        threading.Thread(target=shutdown, args=(timeout,)).start()
    else:
        print('Advertising forever...')
    mainloop.run()              # blocks until mainloop.quit() is called
    ad_manager.UnregisterAdvertisement(test_advertisement)
    print('Advertisement unregistered')
    dbus.service.Object.remove_from_connection(test_advertisement)
if __name__ == '__main__':
    # CLI entry point: optional --timeout limits how long we advertise.
    parser = argparse.ArgumentParser()
    parser.add_argument('--timeout', default=0, type=int, help="advertise " +
                        "for this many seconds then stop, 0=run forever " +
                        "(default: 0)")
    args = parser.parse_args()
    main(args.timeout)
| true | true |
f735a4d7d2546a807d88f3af2e34e631513c4dc3 | 1,102 | py | Python | Trabalhando com Data e Hora/manipulando_data_hora.py | GlennSR/Curso-Python-Udemy | f97ae77deeed0c66607b3e1346a5fc20730e04a5 | [
"MIT"
] | null | null | null | Trabalhando com Data e Hora/manipulando_data_hora.py | GlennSR/Curso-Python-Udemy | f97ae77deeed0c66607b3e1346a5fc20730e04a5 | [
"MIT"
] | null | null | null | Trabalhando com Data e Hora/manipulando_data_hora.py | GlennSR/Curso-Python-Udemy | f97ae77deeed0c66607b3e1346a5fc20730e04a5 | [
"MIT"
] | null | null | null | '''
Manipulando Data e Hora
Python tem um módulo built-in para se trabalhar com data e hora chamado datetime
'''
import datetime
print(dir(datetime))
# Return the current date and time
print(datetime.datetime.now())
# datetime.datetime(YYYY, MM, DD, Hour, Minute, Second, microsecond)
print(repr(datetime.datetime.now()))
# replace() to adjust the date/time
inicio = datetime.datetime.now()
print(inicio)
# Change the time to 16 h, 0 min, 0 s, 0 microseconds
inicio = inicio.replace(hour=16, minute=0, second=0, microsecond=0)
print(inicio)
print()
# Read input from the user and convert it to a date
# (the prompt asks for a DD/MM/YYYY birth date, in Portuguese)
nascimento = input('Digite a sua data de nascimento (DD/MM/AAAA): ').split('/')
nascimento = datetime.datetime(int(nascimento[2]), int(nascimento[1]), int(nascimento[0]))
print(nascimento)
print(type(nascimento))
# Individual access to the date and time components
evento = datetime.datetime.now()
print()
print(evento.year)  # year
print(evento.month)  # month
print(evento.day)  # day
print(evento.hour)  # hour
print(evento.minute)
# ...and so on, down to microseconds
print(dir(evento))
| 25.045455 | 90 | 0.741379 |
import datetime
print(dir(datetime))
# Current date and time.
print(datetime.datetime.now())
print(repr(datetime.datetime.now()))
# replace() adjusts individual fields of an existing datetime.
inicio = datetime.datetime.now()
print(inicio)
inicio = inicio.replace(hour=16, minute=0, second=0, microsecond=0)
print(inicio)
print()
# Parse a DD/MM/YYYY birth date typed by the user into a datetime.
nascimento = input('Digite a sua data de nascimento (DD/MM/AAAA): ').split('/')
nascimento = datetime.datetime(int(nascimento[2]), int(nascimento[1]), int(nascimento[0]))
print(nascimento)
print(type(nascimento))
# Date/time components are plain attributes.
evento = datetime.datetime.now()
print()
print(evento.year)
print(evento.month)
print(evento.day)
print(evento.hour)
print(evento.minute)
print(dir(evento))
| true | true |
f735a6cbc7a8b59b969dc938e7b1436ece8b6720 | 2,387 | py | Python | Lab-5-SimulatedAnnealing/puzzle_utils.py | Abhishek-dev2/Artificial-Intelligence-Lab | c66778eefcabc6b21fa1f19f641987ac8a7a455a | [
"MIT"
] | 2 | 2020-12-16T07:43:03.000Z | 2021-11-06T12:45:18.000Z | Lab-5-SimulatedAnnealing/puzzle_utils.py | as2d3/Artificial-Intelligence-Lab | c66778eefcabc6b21fa1f19f641987ac8a7a455a | [
"MIT"
] | null | null | null | Lab-5-SimulatedAnnealing/puzzle_utils.py | as2d3/Artificial-Intelligence-Lab | c66778eefcabc6b21fa1f19f641987ac8a7a455a | [
"MIT"
] | 3 | 2020-12-16T07:44:31.000Z | 2021-09-14T17:44:09.000Z | import os
import math
def print_simulated_annealing(start, goal, parent_list, optimal_path_cost, string_to_matrix_mapping, number_states_explored):
    """Report the outcome of a simulated-annealing search on the 8-puzzle.

    Prints the start/goal boards, how many configurations were explored and,
    when a path exists, the path reconstructed from *parent_list*. A cost of
    0 means start == goal; a negative cost means no path was found.
    """
    if optimal_path_cost > 0:
        print("Goal found successfully.")
    else:
        print("Goal NOT found")
    print("Start state: ")
    print_configuration(start)
    print("\nGoal state: ")
    print_configuration(goal)
    print(f'Total configurations explored: {number_states_explored}')
    if optimal_path_cost > 0:
        print('printing path...')
        print_optimal_path(parent_list, 0,
                           goal, start, string_to_matrix_mapping, 1)
        print_configuration(goal)
    elif optimal_path_cost == 0:
        # Degenerate case: the start already equals the goal.
        print("Total number of states on optimal path:", 1)
        print_configuration(goal)
        print(" v ")
        print_configuration(goal)
def file_input(directory, args):
    """Read the start and goal 3x3 puzzle boards from the file named in args[1].

    The file layout places the start rows at lines 1-3 and the goal rows at
    lines 6-8 (0-based). Exits the process on a missing argument or an
    unreadable file.
    """
    if len(args) < 2:
        print("Please add input file name to the python run command.")
        exit(1)  # was exit(0): a missing argument is a failure, not success
    try:
        # 'with' guarantees the handle is closed even if readlines() fails;
        # the original leaked the handle on a read error.
        with open(os.path.join(directory, args[1])) as input_file:
            input_data = input_file.readlines()
    except IOError:
        print("ERROR : IOERROR occurred while opening file")
        exit(1)  # was exit(0): signal the failure to the shell
    start = convert_to_matrix(input_data[1], input_data[2], input_data[3])
    goal = convert_to_matrix(input_data[6], input_data[7], input_data[8])
    return start, goal
def convert_to_matrix(row1, row2, row3):
    """Parse three whitespace-separated text rows into a matrix of ints."""
    return [[int(token) for token in row.split()] for row in (row1, row2, row3)]
def print_configuration(matrix):
    """Print a puzzle board, one space-separated row per line."""
    for row in matrix:
        # Matches the original output exactly: each value followed by a
        # space, then a newline at the end of the row.
        print(" ".join(str(value) for value in row), end=" \n")
def print_optimal_path(parent_list, optimal_path_len, goal, start, string_to_matrix_mapping, total_states_on_optimal_path):
    """Recursively walk parent links from *goal* back to *start*, printing the path.

    parent_list maps a board's flattened string key to its parent's key;
    string_to_matrix_mapping turns a key back into a board. Boards print
    start-first because output happens while the recursion unwinds.
    """
    if goal == start:
        print("Total number of states on optimal path:",
              total_states_on_optimal_path)
    else:
        # Rebuild the parent board from the flattened string key of *goal*.
        node = parent_list[''.join(str(val) for row in goal for val in row)]
        node = string_to_matrix_mapping[node]
        print_optimal_path(parent_list, optimal_path_len,
                           node, start, string_to_matrix_mapping, total_states_on_optimal_path + 1)
        print_configuration(node)
        print(" v ")
| 33.152778 | 125 | 0.645999 | import os
import math
def print_simulated_annealing(start, goal, parent_list, optimal_path_cost, string_to_matrix_mapping, number_states_explored):
    """Report the outcome of a simulated-annealing search on the 8-puzzle.

    A positive cost prints the reconstructed path; cost 0 means start equals
    goal; a negative cost means no path was found.
    """
    if optimal_path_cost > 0:
        print("Goal found successfully.")
    else:
        print("Goal NOT found")
    print("Start state: ")
    print_configuration(start)
    print("\nGoal state: ")
    print_configuration(goal)
    print(f'Total configurations explored: {number_states_explored}')
    if optimal_path_cost > 0:
        print('printing path...')
        print_optimal_path(parent_list, 0,
                           goal, start, string_to_matrix_mapping, 1)
        print_configuration(goal)
    elif optimal_path_cost == 0:
        # Degenerate case: the start already equals the goal.
        print("Total number of states on optimal path:", 1)
        print_configuration(goal)
        print(" v ")
        print_configuration(goal)
def file_input(directory, args):
    """Read the start and goal 3x3 puzzle boards from the file named in args[1].

    Start rows sit at file lines 1-3 and goal rows at lines 6-8 (0-based).
    Exits the process on a missing argument or an unreadable file.
    """
    if len(args) < 2:
        print("Please add input file name to the python run command.")
        exit(1)  # was exit(0): a missing argument is a failure, not success
    try:
        # 'with' guarantees the handle is closed even if readlines() fails;
        # the original leaked the handle on a read error.
        with open(os.path.join(directory, args[1])) as input_file:
            input_data = input_file.readlines()
    except IOError:
        print("ERROR : IOERROR occurred while opening file")
        exit(1)  # was exit(0): signal the failure to the shell
    start = convert_to_matrix(input_data[1], input_data[2], input_data[3])
    goal = convert_to_matrix(input_data[6], input_data[7], input_data[8])
    return start, goal
def convert_to_matrix(row1, row2, row3):
    """Parse three whitespace-separated text rows into a matrix of ints."""
    return [[int(token) for token in row.split()] for row in (row1, row2, row3)]
def print_configuration(matrix):
    """Print a puzzle board, one space-separated row per line."""
    for row in matrix:
        # Matches the original output exactly: each value followed by a
        # space, then a newline at the end of the row.
        print(" ".join(str(value) for value in row), end=" \n")
def print_optimal_path(parent_list, optimal_path_len, goal, start, string_to_matrix_mapping, total_states_on_optimal_path):
    """Recursively walk parent links from *goal* back to *start*, printing the path.

    parent_list maps a board's flattened string key to its parent's key;
    string_to_matrix_mapping turns a key back into a board.
    """
    if goal == start:
        print("Total number of states on optimal path:",
              total_states_on_optimal_path)
    else:
        # Rebuild the parent board from the flattened string key of *goal*.
        node = parent_list[''.join(str(val) for row in goal for val in row)]
        node = string_to_matrix_mapping[node]
        print_optimal_path(parent_list, optimal_path_len,
                           node, start, string_to_matrix_mapping, total_states_on_optimal_path + 1)
        print_configuration(node)
        print(" v ")
| true | true |
f735a947169f85c3ec07ebdedc271ad1cfae990a | 4,418 | py | Python | ckan/cli/user.py | doc22940/ckan | fb0174b77a5ac1c614717643d9b1b2a0c82ee088 | [
"Apache-2.0"
] | 1 | 2020-02-08T16:16:51.000Z | 2020-02-08T16:16:51.000Z | ckan/cli/user.py | doc22940/ckan | fb0174b77a5ac1c614717643d9b1b2a0c82ee088 | [
"Apache-2.0"
] | null | null | null | ckan/cli/user.py | doc22940/ckan | fb0174b77a5ac1c614717643d9b1b2a0c82ee088 | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
import logging
import sys
from pprint import pprint
import six
import click
from six import text_type
import ckan.logic as logic
import ckan.plugins as plugin
from ckan.cli import error_shout
log = logging.getLogger(__name__)
@click.group(name=u'user', short_help=u'Manage user commands')
@click.help_option(u'-h', u'--help')
def user():
    # Root `ckan user` command group; subcommands attach via @user.command().
    # (Deliberately no docstring: click would surface it as help text.)
    pass
@user.command(u'add', short_help=u'Add new user')
@click.argument(u'username')
@click.argument(u'args', nargs=-1)
@click.pass_context
def add_user(ctx, username, args):
    u'''Add new user if we use ckan sysadmin add
    or ckan user add
    '''
    # parse "<option>=<value>" args into data_dict
    data_dict = {u'name': username}
    for arg in args:
        try:
            field, value = arg.split(u'=', 1)
            data_dict[field] = value
        except ValueError:
            raise ValueError(
                u'Could not parse arg: %r (expected "<option>=<value>)"' % arg
            )
    # Required fields: prompt interactively when not given on the CLI.
    if u'email' not in data_dict:
        data_dict['email'] = click.prompt(u'Email address ').strip()
    if u'password' not in data_dict:
        data_dict['password'] = click.prompt(u'Password ', hide_input=True,
                                             confirmation_prompt=True)
    # Optional: only bytes need decoding. On Python 3 the value is already
    # str, and the old unconditional .decode() raised AttributeError here.
    if u'fullname' in data_dict:
        if isinstance(data_dict['fullname'], bytes):
            data_dict['fullname'] = data_dict['fullname'].decode(
                sys.getfilesystemencoding()
            )
    try:
        import ckan.logic as logic
        import ckan.model as model
        # Run the create as the site user so authorization always passes.
        site_user = logic.get_action(u'get_site_user')({
            u'model': model,
            u'ignore_auth': True},
            {}
        )
        context = {
            u'model': model,
            u'session': model.Session,
            u'ignore_auth': True,
            u'user': site_user['name'],
        }
        flask_app = ctx.meta['flask_app']
        # Current user is tested against sysadmin role during model
        # dictization, thus we need request context
        with flask_app.test_request_context():
            user_dict = logic.get_action(u'user_create')(context, data_dict)
        click.secho(u"Successfully created user: %s" % user_dict['name'],
                    fg=u'green', bold=True)
    except logic.ValidationError as e:
        error_shout(e)
        raise click.Abort()
def get_user_str(user):
    """Format a one-line console summary of a user account."""
    parts = [u'name=%s' % user.name]
    if user.display_name != user.name:
        parts.append(u'display=%s' % user.display_name)
    return u' '.join(parts)
@user.command(u'list', short_help=u'List all users')
def list_users():
    # Print every active user account, one per line, preceded by a count.
    import ckan.model as model
    click.secho(u'Users:')
    users = model.Session.query(model.User).filter_by(state=u'active')
    click.secho(u'count = %i' % users.count())
    for user in users:
        click.secho(get_user_str(user))
@user.command(u'remove', short_help=u'Remove user')
@click.argument(u'username')
@click.pass_context
def remove_user(ctx, username):
    # Delete a user account via the user_delete action, acting as the site
    # user. (The unused `import ckan.model as model` has been removed.)
    if not username:
        error_shout(u'Please specify the username to be removed')
        return
    site_user = logic.get_action(u'get_site_user')({u'ignore_auth': True}, {})
    context = {u'user': site_user[u'name']}
    with ctx.meta['flask_app'].test_request_context():
        plugin.toolkit.get_action(u'user_delete')(context, {u'id': username})
    click.secho(u'Deleted user: %s' % username, fg=u'green', bold=True)
@user.command(u'show', short_help=u'Show user')
@click.argument(u'username')
def show_user(username):
    # Look up and print a single user record.
    import ckan.model as model
    if not username:
        error_shout(u'Please specify the username for the user')
        return
    user = model.User.get(text_type(username))
    if not user:
        # Previously this printed "User: None" for unknown names; report the
        # miss explicitly, matching set_password's behaviour.
        error_shout(u"User not found!")
        return
    click.secho(u'User: %s' % user)
@user.command(u'setpass', short_help=u'Set password for the user')
@click.argument(u'username')
def set_password(username):
    # Interactively reset a user's password (input hidden, typed twice).
    import ckan.model as model
    if not username:
        error_shout(u'Need name of the user.')
        return
    user = model.User.get(username)
    if not user:
        error_shout(u"User not found!")
        return
    click.secho(u'Editing user: %r' % user.name, fg=u'yellow')
    password = click.prompt(u'Password', hide_input=True,
                            confirmation_prompt=True)
    # Assigning to .password hashes the value; commit persists it.
    user.password = password
    model.repo.commit_and_remove()
    click.secho(u'Password updated!', fg=u'green', bold=True)
| 30.054422 | 78 | 0.630602 |
import logging
import sys
from pprint import pprint
import six
import click
from six import text_type
import ckan.logic as logic
import ckan.plugins as plugin
from ckan.cli import error_shout
log = logging.getLogger(__name__)
@click.group(name=u'user', short_help=u'Manage user commands')
@click.help_option(u'-h', u'--help')
def user():
    # Root `ckan user` command group; subcommands attach via @user.command().
    # (Deliberately no docstring: click would surface it as help text.)
    pass
@user.command(u'add', short_help=u'Add new user')
@click.argument(u'username')
@click.argument(u'args', nargs=-1)
@click.pass_context
def add_user(ctx, username, args):
    # Create a user account; extra CLI args are "<option>=<value>" pairs.
    # (No docstring on purpose: click would surface it as help text.)
    data_dict = {u'name': username}
    for arg in args:
        try:
            field, value = arg.split(u'=', 1)
            data_dict[field] = value
        except ValueError:
            raise ValueError(
                u'Could not parse arg: %r (expected "<option>=<value>)"' % arg
            )
    # Required fields: prompt interactively when not given on the CLI.
    if u'email' not in data_dict:
        data_dict['email'] = click.prompt(u'Email address ').strip()
    if u'password' not in data_dict:
        data_dict['password'] = click.prompt(u'Password ', hide_input=True,
                                             confirmation_prompt=True)
    # Optional: only bytes need decoding. On Python 3 the value is already
    # str, and the old unconditional .decode() raised AttributeError here.
    if u'fullname' in data_dict:
        if isinstance(data_dict['fullname'], bytes):
            data_dict['fullname'] = data_dict['fullname'].decode(
                sys.getfilesystemencoding()
            )
    try:
        import ckan.logic as logic
        import ckan.model as model
        # Run the create as the site user so authorization always passes.
        site_user = logic.get_action(u'get_site_user')({
            u'model': model,
            u'ignore_auth': True},
            {}
        )
        context = {
            u'model': model,
            u'session': model.Session,
            u'ignore_auth': True,
            u'user': site_user['name'],
        }
        flask_app = ctx.meta['flask_app']
        # Sysadmin role is checked during model dictization, so a request
        # context is required.
        with flask_app.test_request_context():
            user_dict = logic.get_action(u'user_create')(context, data_dict)
        click.secho(u"Successfully created user: %s" % user_dict['name'],
                    fg=u'green', bold=True)
    except logic.ValidationError as e:
        error_shout(e)
        raise click.Abort()
def get_user_str(user):
    """Format a one-line console summary of a user account."""
    parts = [u'name=%s' % user.name]
    if user.display_name != user.name:
        parts.append(u'display=%s' % user.display_name)
    return u' '.join(parts)
@user.command(u'list', short_help=u'List all users')
def list_users():
    # Print every active user account, one per line, preceded by a count.
    import ckan.model as model
    click.secho(u'Users:')
    users = model.Session.query(model.User).filter_by(state=u'active')
    click.secho(u'count = %i' % users.count())
    for user in users:
        click.secho(get_user_str(user))
@user.command(u'remove', short_help=u'Remove user')
@click.argument(u'username')
@click.pass_context
def remove_user(ctx, username):
    # Delete a user account via the user_delete action, acting as the site
    # user. (The unused `import ckan.model as model` has been removed.)
    if not username:
        error_shout(u'Please specify the username to be removed')
        return
    site_user = logic.get_action(u'get_site_user')({u'ignore_auth': True}, {})
    context = {u'user': site_user[u'name']}
    with ctx.meta['flask_app'].test_request_context():
        plugin.toolkit.get_action(u'user_delete')(context, {u'id': username})
    click.secho(u'Deleted user: %s' % username, fg=u'green', bold=True)
@user.command(u'show', short_help=u'Show user')
@click.argument(u'username')
def show_user(username):
    # Look up and print a single user record.
    import ckan.model as model
    if not username:
        error_shout(u'Please specify the username for the user')
        return
    user = model.User.get(text_type(username))
    if not user:
        # Previously this printed "User: None" for unknown names; report the
        # miss explicitly, matching set_password's behaviour.
        error_shout(u"User not found!")
        return
    click.secho(u'User: %s' % user)
@user.command(u'setpass', short_help=u'Set password for the user')
@click.argument(u'username')
def set_password(username):
    # Interactively reset a user's password (input hidden, typed twice).
    import ckan.model as model
    if not username:
        error_shout(u'Need name of the user.')
        return
    user = model.User.get(username)
    if not user:
        error_shout(u"User not found!")
        return
    click.secho(u'Editing user: %r' % user.name, fg=u'yellow')
    password = click.prompt(u'Password', hide_input=True,
                            confirmation_prompt=True)
    # Assigning to .password hashes the value; commit persists it.
    user.password = password
    model.repo.commit_and_remove()
    click.secho(u'Password updated!', fg=u'green', bold=True)
| true | true |
f735a9e0967ace13566be5b598fd75aff4b94dba | 167 | py | Python | src/setup.py | nineinchnick/pystok-fdw | 47b94054e3d3f3c27ae0a43aac34f4af3c9e0216 | [
"MIT"
] | null | null | null | src/setup.py | nineinchnick/pystok-fdw | 47b94054e3d3f3c27ae0a43aac34f4af3c9e0216 | [
"MIT"
] | null | null | null | src/setup.py | nineinchnick/pystok-fdw | 47b94054e3d3f3c27ae0a43aac34f4af3c9e0216 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from setuptools import setup
setup(
    name='pystok-fdw',
    version='0.0.1',
    author=u'Jan Waś',
    license='MIT',
    # NOTE(review): 'pystok-fdw' contains a hyphen, which is not a valid
    # importable Python package name -- confirm the actual directory name.
    packages=['pystok-fdw']
)
| 15.181818 | 28 | 0.616766 |
from setuptools import setup
setup(
    name='pystok-fdw',
    version='0.0.1',
    author=u'Jan Waś',
    license='MIT',
    # NOTE(review): 'pystok-fdw' contains a hyphen, which is not a valid
    # importable Python package name -- confirm the actual directory name.
    packages=['pystok-fdw']
)
| true | true |
f735aa6deaafb3dd4088c283930aece8d645525f | 1,870 | py | Python | tests/test_likeness.py | herbeeg/famitracker-instrument-generator | c07f0cc97124195aeafbdfb248fb30d6e4e6c3f6 | [
"MIT"
] | null | null | null | tests/test_likeness.py | herbeeg/famitracker-instrument-generator | c07f0cc97124195aeafbdfb248fb30d6e4e6c3f6 | [
"MIT"
] | null | null | null | tests/test_likeness.py | herbeeg/famitracker-instrument-generator | c07f0cc97124195aeafbdfb248fb30d6e4e6c3f6 | [
"MIT"
] | null | null | null | import pytest
import wave.data.likeness as likeness
class TestLikenessFDS:
    """WaveLikeness tests for FDS-sized waves (64 samples)."""
    def testRoundedLikenessFDS(self):
        likeness_instance = likeness.WaveLikeness(
            base=[i for i in range(64)],
            comparison=[0 for i in range(64)],
            ceiling=64
        )
        assert 31.46 == likeness_instance.getLikeness()
    def testExactRoundedLikenessFDS(self):
        likeness_instance = likeness.WaveLikeness(
            base=[i for i in range(64)],
            comparison=[i for i in range(64)],
            ceiling=64
        )
        assert 100.00 == likeness_instance.getLikeness()
    def testSinglePercentageFDS(self):
        # Renamed from testSinglePercentageNamco: this case exercises the
        # 64-sample FDS configuration, not the Namco one (copy-paste slip).
        likeness_instance = likeness.WaveLikeness(
            base=[i for i in range(64)],
            comparison=[0 for i in range(64)],
            ceiling=64
        )
        assert 100.00 == likeness_instance.getPercentage(0, 0)
        assert 0.00 == likeness_instance.getPercentage(63, 63)
class TestLikenessNamco:
    """WaveLikeness tests for Namco-sized waves (16 samples)."""
    def testExactRoundedLikenessNamco(self):
        # Identical waves must score a perfect 100%.
        likeness_instance = likeness.WaveLikeness(
            base=[i for i in range(16)],
            comparison=[i for i in range(16)],
            ceiling=16
        )
        assert 100.00 == likeness_instance.getLikeness()
    def testRoundedLikenessNamco(self):
        # A ramp against a flat wave scores a fixed, rounded partial value.
        likeness_instance = likeness.WaveLikeness(
            base=[i for i in range(16)],
            comparison=[0 for i in range(16)],
            ceiling=16
        )
        assert 33.71 == likeness_instance.getLikeness()
    def testSinglePercentageNamco(self):
        # Per-sample percentage: equal samples -> 100%, maximally far -> 0%.
        likeness_instance = likeness.WaveLikeness(
            base=[i for i in range(16)],
            comparison=[0 for i in range(16)],
            ceiling=16
        )
        assert 100.00 == likeness_instance.getPercentage(0, 0)
        assert 0.00 == likeness_instance.getPercentage(15, 15)
| 30.655738 | 62 | 0.606417 | import pytest
import wave.data.likeness as likeness
class TestLikenessFDS:
    """WaveLikeness tests for FDS-sized waves (64 samples)."""
    def testRoundedLikenessFDS(self):
        likeness_instance = likeness.WaveLikeness(
            base=[i for i in range(64)],
            comparison=[0 for i in range(64)],
            ceiling=64
        )
        assert 31.46 == likeness_instance.getLikeness()
    def testExactRoundedLikenessFDS(self):
        likeness_instance = likeness.WaveLikeness(
            base=[i for i in range(64)],
            comparison=[i for i in range(64)],
            ceiling=64
        )
        assert 100.00 == likeness_instance.getLikeness()
    def testSinglePercentageFDS(self):
        # Renamed from testSinglePercentageNamco: this case exercises the
        # 64-sample FDS configuration, not the Namco one (copy-paste slip).
        likeness_instance = likeness.WaveLikeness(
            base=[i for i in range(64)],
            comparison=[0 for i in range(64)],
            ceiling=64
        )
        assert 100.00 == likeness_instance.getPercentage(0, 0)
        assert 0.00 == likeness_instance.getPercentage(63, 63)
class TestLikenessNamco:
    """WaveLikeness tests for Namco-sized waves (16 samples)."""
    def testExactRoundedLikenessNamco(self):
        # Identical waves must score a perfect 100%.
        likeness_instance = likeness.WaveLikeness(
            base=[i for i in range(16)],
            comparison=[i for i in range(16)],
            ceiling=16
        )
        assert 100.00 == likeness_instance.getLikeness()
    def testRoundedLikenessNamco(self):
        # A ramp against a flat wave scores a fixed, rounded partial value.
        likeness_instance = likeness.WaveLikeness(
            base=[i for i in range(16)],
            comparison=[0 for i in range(16)],
            ceiling=16
        )
        assert 33.71 == likeness_instance.getLikeness()
    def testSinglePercentageNamco(self):
        # Per-sample percentage: equal samples -> 100%, maximally far -> 0%.
        likeness_instance = likeness.WaveLikeness(
            base=[i for i in range(16)],
            comparison=[0 for i in range(16)],
            ceiling=16
        )
        assert 100.00 == likeness_instance.getPercentage(0, 0)
        assert 0.00 == likeness_instance.getPercentage(15, 15)
| true | true |
f735ab9b4b3d13ffd1e045001731cea5346ade5e | 475 | py | Python | tests/datasets/test_tafeng.py | shiodat/Sequential-Recommendation-Datasets | 198c43962e5f15a9d99d0b17c156a9603d91a1c2 | [
"Apache-2.0"
] | 74 | 2020-08-19T15:57:03.000Z | 2022-03-20T08:41:40.000Z | tests/datasets/test_tafeng.py | shiodat/Sequential-Recommendation-Datasets | 198c43962e5f15a9d99d0b17c156a9603d91a1c2 | [
"Apache-2.0"
] | null | null | null | tests/datasets/test_tafeng.py | shiodat/Sequential-Recommendation-Datasets | 198c43962e5f15a9d99d0b17c156a9603d91a1c2 | [
"Apache-2.0"
] | 18 | 2020-08-05T07:52:32.000Z | 2022-03-17T16:53:48.000Z | import os
from srdatasets.datasets import TaFeng
from srdatasets.utils import __warehouse__
def test_download_and_transform():
    # Renamed from "trandform" (typo). Downloads the TaFeng dataset into the
    # warehouse (network access!) then checks the transformed frame's schema.
    rawdir = __warehouse__.joinpath("TaFeng", "raw")
    os.makedirs(rawdir, exist_ok=True)
    tafeng = TaFeng(rawdir)
    tafeng.download()
    assert all(rawdir.joinpath(cf).exists() for cf in tafeng.__corefile__)
    df = tafeng.transform()
    assert all(c in df.columns for c in ["user_id", "item_id", "timestamp"])
    assert len(df) > 0
| 29.6875 | 76 | 0.717895 | import os
from srdatasets.datasets import TaFeng
from srdatasets.utils import __warehouse__
def test_download_and_transform():
    # Renamed from "trandform" (typo). Downloads the TaFeng dataset into the
    # warehouse (network access!) then checks the transformed frame's schema.
    rawdir = __warehouse__.joinpath("TaFeng", "raw")
    os.makedirs(rawdir, exist_ok=True)
    tafeng = TaFeng(rawdir)
    tafeng.download()
    assert all(rawdir.joinpath(cf).exists() for cf in tafeng.__corefile__)
    df = tafeng.transform()
    assert all(c in df.columns for c in ["user_id", "item_id", "timestamp"])
    assert len(df) > 0
| true | true |
f735ad0a59f3f1c340dc45bb136c4f80e6a41251 | 807 | py | Python | medium/python3/c0185_368_largest-divisible-subset/00_leetcode_0185.py | drunkwater/leetcode | 8cc4a07763e71efbaedb523015f0c1eff2927f60 | [
"Ruby"
] | null | null | null | medium/python3/c0185_368_largest-divisible-subset/00_leetcode_0185.py | drunkwater/leetcode | 8cc4a07763e71efbaedb523015f0c1eff2927f60 | [
"Ruby"
] | null | null | null | medium/python3/c0185_368_largest-divisible-subset/00_leetcode_0185.py | drunkwater/leetcode | 8cc4a07763e71efbaedb523015f0c1eff2927f60 | [
"Ruby"
] | 3 | 2018-02-09T02:46:48.000Z | 2021-02-20T08:32:03.000Z | # DRUNKWATER TEMPLATE(add description and prototypes)
# Question Title and Description on leetcode.com
# Function Declaration and Function Prototypes on leetcode.com
#368. Largest Divisible Subset
#Given a set of distinct positive integers, find the largest subset such that every pair (Si, Sj) of elements in this subset satisfies: Si % Sj = 0 or Sj % Si = 0.
#If there are multiple solutions, return any subset is fine.
#Example 1:
#nums: [1,2,3]
#Result: [1,2] (of course, [1,3] will also be ok)
#Example 2:
#nums: [1,2,4,8]
#Result: [1,2,4,8]
#Credits:
#Special thanks to @Stomach_ache for adding this problem and creating all test cases.
#class Solution:
# def largestDivisibleSubset(self, nums):
# """
# :type nums: List[int]
# :rtype: List[int]
# """
# Time Is Money | 33.625 | 163 | 0.696406 |
# :type nums: List[int]
# :rtype: List[int]
# """
| true | true |
f735ade660f32bb64e0d8d758d07b55ab389ad04 | 7,154 | py | Python | src/2_robot_picam_test_NCS2_mobilenet.py | lesliewright1977/RPi3_NCS2 | 8aad7a4869980798dcb800dcf1a35ee26ba93c61 | [
"Apache-2.0"
] | 44 | 2019-01-09T12:15:08.000Z | 2022-01-30T05:16:29.000Z | src/2_robot_picam_test_NCS2_mobilenet.py | lesliewright1977/RPi3_NCS2 | 8aad7a4869980798dcb800dcf1a35ee26ba93c61 | [
"Apache-2.0"
] | 3 | 2019-01-25T18:32:11.000Z | 2020-09-11T21:05:38.000Z | src/2_robot_picam_test_NCS2_mobilenet.py | lesliewright1977/RPi3_NCS2 | 8aad7a4869980798dcb800dcf1a35ee26ba93c61 | [
"Apache-2.0"
] | 16 | 2019-01-25T10:50:16.000Z | 2021-08-11T12:39:24.000Z | import cv2
import time
import numpy
import random
from multiprocessing import Process
from multiprocessing import Queue
from picamera.array import PiRGBArray
from picamera import PiCamera
#hacked from:
#https://software.intel.com/articles/OpenVINO-Install-RaspberryPI
#https://opencv2-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html
#https://github.com/PINTO0309/MobileNet-SSD-RealSense/blob/master/SingleStickSSDwithUSBCamera_OpenVINO_NCS2.py
#https://raspberrypi.stackexchange.com/questions/87062/overhead-counter
#Les Wright Dec 24 2018
#modified to support picam 30 Dec 2018
#Robot code incorportated on 17 Jan 2019
# import curses and GPIO
import RPi.GPIO as GPIO
#set GPIO numbering mode and define output pins
GPIO.setmode(GPIO.BCM)
GPIO.setup(18,GPIO.OUT) #Left track fwd
GPIO.setup(23,GPIO.OUT) #lefttrack backwards
GPIO.setup(24,GPIO.OUT) #right track backwards
GPIO.setup(25,GPIO.OUT) #right track fwd
def motion(xminQueue,xmaxQueue):
    """Track-drive control loop run in a child process.

    Consumes bounding-box x-coordinates (xmin/xmax of the detected object)
    from the two queues and steers the robot so the object stays centred
    and at a target apparent width. If no detection arrives for a while,
    the robot rotates in place to re-acquire the target.

    GPIO pins (set up at module level): 18 = left track fwd, 23 = left track
    back, 24 = right track back, 25 = right track fwd.
    """
    # Pulse both tracks in opposite directions for `stime` seconds -> turn left.
    def left(stime):
        GPIO.output(18,False)
        GPIO.output(25,True)
        GPIO.output(23,True)
        GPIO.output(24,False)
        sustain(stime)
    # Mirror of left(): opposite track polarity -> turn right.
    def right(stime):
        GPIO.output(18,True)
        GPIO.output(25,False)
        GPIO.output(23,False)
        GPIO.output(24,True)
        sustain(stime)
    # Hold the current drive state for `stime` seconds, then stop.
    def sustain(stime):
        time.sleep(stime)
        stop()
    # Both tracks forward; runs until stop() is called.
    def forward():
        GPIO.output(18,True)
        GPIO.output(25,True)
        GPIO.output(23,False)
        GPIO.output(24,False)
    # Both tracks backward; runs until stop() is called.
    def backward():
        GPIO.output(18,False)
        GPIO.output(25,False)
        GPIO.output(23,True)
        GPIO.output(24,True)
    # De-energise all four pins.
    def stop():
        GPIO.output(18,False)
        GPIO.output(25,False)
        GPIO.output(23,False)
        GPIO.output(24,False)
    # Target lost: nudge right a little and look again.
    def hunt():
        right(0.2)
        stop()
    stop()
    start = time.time() #start a timer
    while True:
        if not xminQueue.empty():
            xmin = xminQueue.get()
            xmax = xmaxQueue.get()
            #print(str(xmin)+' '+str(xmax))
            midpoint = (xmin+xmax)/2
            width = xmax-xmin
            #print("M:"+str(midpoint))
            #print("W:"+str(width))
            # Turn duration is proportional to the horizontal error from the
            # frame centre (frame is 320px wide, so centre ~160; 150 here).
            stime = abs(150-midpoint)/3000
            #print(str(stime))
            #align midoint with middle of the frame
            # Dead band 130..170: no turn when roughly centred.
            if midpoint < 130:
                left(stime)
            if midpoint > 170:
                right(stime)
            # Apparent width is a distance proxy: too small -> approach,
            # too large -> back off, otherwise hold position.
            if width:
                if width < 50:
                    forward()
                elif width > 90:
                    backward()
                else:
                    stop()
            start = time.time() #reset the timer
        if xminQueue.empty():
            seconds = time.time()-start
            if seconds > 0.8: #if we are empty for longer than 0.8 sec, we probably lost the target...
                #print('Hunting...')
                hunt()
                start = time.time() #reset the timer
# initialize the input queue (frames), output queue (out),
# and the list of actual detections returned by the child process
xminQueue = Queue(maxsize=1)
xmaxQueue = Queue(maxsize=1)
# construct a child process indepedent from our main process
print("[INFO] starting motion handling process...")
p2 = Process(target=motion, args=(xminQueue,xmaxQueue))
p2.daemon = True
p2.start()
# Note cv2.dnn.blobFromImage, the size is present in the XML files, we could write a preamble to go get that data,
# Then we dont have to explicitly set it!
# Load the model
net = cv2.dnn.readNet('models/MobileNetSSD_deploy.xml', 'models/MobileNetSSD_deploy.bin')
# Specify target device
net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)
#Misc vars
font = cv2.FONT_HERSHEY_SIMPLEX
frameWidth = 320
frameHeight = 240
framesPerSec = 24
secPerFrame = 0.0
detections = 0.0
confThreshold = 0.5
#initialize the camera and grab a reference to the raw camera capture
#well this is interesting, we can closely match the input of the network!
#this 'seems' to have improved accuracy!
camera = PiCamera()
camera.resolution = (320, 240)
camera.framerate = 20
rawCapture = PiRGBArray(camera, size=(320, 240))
# allow the camera to warmup
time.sleep(0.1)
labels_file = 'models/labels.txt'
with open(labels_file, 'r') as f:
labels = [x.strip() for x in f]
#print(labels)
#define the function that handles our processing thread
def classify_frame(net, inputQueue, outputQueue):
    """Inference worker: pull frames from inputQueue, run the SSD network,
    and push the raw detection tensor onto outputQueue. Runs forever."""
    while True:
        # Busy-poll until the main process hands us a frame.
        if inputQueue.empty():
            continue
        image = inputQueue.get()
        resized = cv2.resize(image, (300, 300))
        # Scale factor and mean values match MobileNet-SSD preprocessing.
        blob = cv2.dnn.blobFromImage(
            resized,
            0.007843,
            size=(300, 300),
            mean=(127.5, 127.5, 127.5),
            swapRB=False,
            crop=False,
        )
        net.setInput(blob)
        outputQueue.put(net.forward())
# initialize the input queue (frames), output queue (out),
# and the list of actual detections returned by the child process
inputQueue = Queue(maxsize=1)
outputQueue = Queue(maxsize=1)
out = None
# construct a child process *indepedent* from our main process of
# execution
print("[INFO] starting inference process...")
p = Process(target=classify_frame, args=(net,inputQueue,outputQueue,))
p.daemon = True
p.start()
print("[INFO] starting capture...")
#time the frame rate....
start = time.time()
frames = 0
# Main capture/display loop: grab frames from the Pi camera, feed the
# inference worker, draw any detections, and forward bottle bounding boxes
# to the motion-control process.
for frame in camera.capture_continuous(rawCapture, format="rgb", use_video_port=True):
    # Capture frame-by-frame
    frame = frame.array
    # if the input queue *is* empty, give the current frame to
    # classify
    if inputQueue.empty():
        inputQueue.put(frame)
    # if the output queue *is not* empty, grab the detections
    if not outputQueue.empty():
        out = outputQueue.get()
    # check to see if 'out' is not empty
    if out is not None:
        # loop over the detections
        # Draw detections on the frame
        # Each detection row: [img_id, class, confidence, xmin, ymin, xmax, ymax]
        for detection in out.reshape(-1, 7):
            confidence = float(detection[2])
            obj_type = int(detection[1]-1)
            # Scale normalised coordinates back to pixel space.
            xmin = int(detection[3] * frame.shape[1])
            ymin = int(detection[4] * frame.shape[0])
            xmax = int(detection[5] * frame.shape[1])
            ymax = int(detection[6] * frame.shape[0])
            #bottle = 4, person = 14 , dog = 11
            if obj_type == 4: #Our object
                if confidence > confThreshold:
                    #bounding box
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color=(0, 255, 255))
                    #label
                    cv2.rectangle(frame, (xmin-1, ymin-1),\
                        (xmin+70, ymin-10), (0,255,255), -1)
                    #labeltext
                    cv2.putText(frame,labels[obj_type]+' '+str(round(confidence,2)),\
                        (xmin,ymin-2), font, 0.3,(0,0,0),1,cv2.LINE_AA)
                    detections += 1
                    # Hand the box edges to the motion process for steering.
                    xmaxQueue.put(xmax)
                    xminQueue.put(xmin)
    # Display the resulting frame
    cv2.putText(frame,'Threshold: '+str(round(confThreshold,1)), (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3,(0, 0, 0), 1, cv2.LINE_AA)
    cv2.namedWindow('frame',cv2.WINDOW_NORMAL)
    cv2.resizeWindow('frame',frameWidth,frameHeight)
    cv2.imshow('frame',frame)
    frames+=1
    # clear the stream in preparation for the next frame
    rawCapture.truncate(0)
    # Key handling: 113 is ord('q') -> quit; 82/84 adjust the confidence
    # threshold (presumably up/down arrow codes on this platform -- confirm).
    keyPress = cv2.waitKey(1)
    if keyPress == 113:
        break
    if keyPress == 82:
        confThreshold += 0.1
    if keyPress == 84:
        confThreshold -= 0.1
    # Clamp the threshold to [0, 1].
    if confThreshold >1:
        confThreshold = 1
    if confThreshold <0:
        confThreshold = 0
end = time.time()
seconds = end-start
fps = frames/seconds
print("Avg Frames Per Sec: "+str(fps))
dts = detections/seconds
print("Avg detections Per Sec: "+str(dts))
cv2.destroyAllWindows()
GPIO.cleanup()
| 24.087542 | 128 | 0.704082 | import cv2
import time
import numpy
import random
from multiprocessing import Process
from multiprocessing import Queue
from picamera.array import PiRGBArray
from picamera import PiCamera
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setup(18,GPIO.OUT)
GPIO.setup(23,GPIO.OUT)
GPIO.setup(24,GPIO.OUT)
GPIO.setup(25,GPIO.OUT)
def motion(xminQueue,xmaxQueue):
def left(stime):
GPIO.output(18,False)
GPIO.output(25,True)
GPIO.output(23,True)
GPIO.output(24,False)
sustain(stime)
def right(stime):
GPIO.output(18,True)
GPIO.output(25,False)
GPIO.output(23,False)
GPIO.output(24,True)
sustain(stime)
def sustain(stime):
time.sleep(stime)
stop()
def forward():
GPIO.output(18,True)
GPIO.output(25,True)
GPIO.output(23,False)
GPIO.output(24,False)
def backward():
GPIO.output(18,False)
GPIO.output(25,False)
GPIO.output(23,True)
GPIO.output(24,True)
def stop():
GPIO.output(18,False)
GPIO.output(25,False)
GPIO.output(23,False)
GPIO.output(24,False)
def hunt():
right(0.2)
stop()
stop()
start = time.time()
while True:
if not xminQueue.empty():
xmin = xminQueue.get()
xmax = xmaxQueue.get()
midpoint = (xmin+xmax)/2
width = xmax-xmin
stime = abs(150-midpoint)/3000
if midpoint < 130:
left(stime)
if midpoint > 170:
right(stime)
if width:
if width < 50:
forward()
elif width > 90:
backward()
else:
stop()
start = time.time()
if xminQueue.empty():
seconds = time.time()-start
if seconds > 0.8:
hunt()
start = time.time()
xminQueue = Queue(maxsize=1)
xmaxQueue = Queue(maxsize=1)
print("[INFO] starting motion handling process...")
p2 = Process(target=motion, args=(xminQueue,xmaxQueue))
p2.daemon = True
p2.start()
net = cv2.dnn.readNet('models/MobileNetSSD_deploy.xml', 'models/MobileNetSSD_deploy.bin')
net.setPreferableTarget(cv2.dnn.DNN_TARGET_MYRIAD)
font = cv2.FONT_HERSHEY_SIMPLEX
frameWidth = 320
frameHeight = 240
framesPerSec = 24
secPerFrame = 0.0
detections = 0.0
confThreshold = 0.5
camera = PiCamera()
camera.resolution = (320, 240)
camera.framerate = 20
rawCapture = PiRGBArray(camera, size=(320, 240))
time.sleep(0.1)
labels_file = 'models/labels.txt'
with open(labels_file, 'r') as f:
labels = [x.strip() for x in f]
def classify_frame(net, inputQueue, outputQueue):
while True:
if not inputQueue.empty():
frame = inputQueue.get()
resframe = cv2.resize(frame, (300, 300))
blob = cv2.dnn.blobFromImage(resframe, 0.007843, size=(300, 300),\
mean=(127.5,127.5,127.5), swapRB=False, crop=False)
net.setInput(blob)
out = net.forward()
outputQueue.put(out)
inputQueue = Queue(maxsize=1)
outputQueue = Queue(maxsize=1)
out = None
print("[INFO] starting inference process...")
p = Process(target=classify_frame, args=(net,inputQueue,outputQueue,))
p.daemon = True
p.start()
print("[INFO] starting capture...")
start = time.time()
frames = 0
for frame in camera.capture_continuous(rawCapture, format="rgb", use_video_port=True):
frame = frame.array
if inputQueue.empty():
inputQueue.put(frame)
if not outputQueue.empty():
out = outputQueue.get()
if out is not None:
for detection in out.reshape(-1, 7):
confidence = float(detection[2])
obj_type = int(detection[1]-1)
xmin = int(detection[3] * frame.shape[1])
ymin = int(detection[4] * frame.shape[0])
xmax = int(detection[5] * frame.shape[1])
ymax = int(detection[6] * frame.shape[0])
if obj_type == 4:
if confidence > confThreshold:
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color=(0, 255, 255))
cv2.rectangle(frame, (xmin-1, ymin-1),\
(xmin+70, ymin-10), (0,255,255), -1)
cv2.putText(frame,labels[obj_type]+' '+str(round(confidence,2)),\
(xmin,ymin-2), font, 0.3,(0,0,0),1,cv2.LINE_AA)
detections += 1
xmaxQueue.put(xmax)
xminQueue.put(xmin)
cv2.putText(frame,'Threshold: '+str(round(confThreshold,1)), (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.3,(0, 0, 0), 1, cv2.LINE_AA)
cv2.namedWindow('frame',cv2.WINDOW_NORMAL)
cv2.resizeWindow('frame',frameWidth,frameHeight)
cv2.imshow('frame',frame)
frames+=1
rawCapture.truncate(0)
keyPress = cv2.waitKey(1)
if keyPress == 113:
break
if keyPress == 82:
confThreshold += 0.1
if keyPress == 84:
confThreshold -= 0.1
if confThreshold >1:
confThreshold = 1
if confThreshold <0:
confThreshold = 0
end = time.time()
seconds = end-start
fps = frames/seconds
print("Avg Frames Per Sec: "+str(fps))
dts = detections/seconds
print("Avg detections Per Sec: "+str(dts))
cv2.destroyAllWindows()
GPIO.cleanup()
| true | true |
f735aed00824c375e7c9692215548a31c8120db7 | 2,399 | py | Python | samples/client/petstore/python/client/models/User.py | cranberyxl/swagger-codegen | bae40ded74853583e18ac643876eb5beba04a874 | [
"Apache-2.0"
] | null | null | null | samples/client/petstore/python/client/models/User.py | cranberyxl/swagger-codegen | bae40ded74853583e18ac643876eb5beba04a874 | [
"Apache-2.0"
] | 1 | 2021-01-21T01:34:10.000Z | 2021-01-21T01:34:10.000Z | samples/client/petstore/python/client/models/User.py | cranberyxl/swagger-codegen | bae40ded74853583e18ac643876eb5beba04a874 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Copyright 2015 Reverb Technologies, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class User(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually."""

    def __init__(self):
        """
        Attributes:
          swaggerTypes (dict): The key is attribute name and the value is attribute type.
          attributeMap (dict): The key is attribute name and the value is json key in definition.
        """
        # (name, swagger type) pairs for every model field, in declaration order.
        field_types = (
            ('id', 'long'),
            ('username', 'str'),
            ('firstName', 'str'),
            ('lastName', 'str'),
            ('email', 'str'),
            ('password', 'str'),
            ('phone', 'str'),
            ('userStatus', 'int'),  # User Status
        )
        self.swaggerTypes = dict(field_types)
        # Every attribute serializes under its own name in this model.
        self.attributeMap = {name: name for name, _ in field_types}
        # All fields start out unset.
        for name, _ in field_types:
            setattr(self, name, None)
| 23.519608 | 97 | 0.457691 |
class User(object):
def __init__(self):
self.swaggerTypes = {
'id': 'long',
'username': 'str',
'firstName': 'str',
'lastName': 'str',
'email': 'str',
'password': 'str',
'phone': 'str',
'userStatus': 'int'
}
self.attributeMap = {
'id': 'id',
'username': 'username',
'firstName': 'firstName',
'lastName': 'lastName',
'email': 'email',
'password': 'password',
'phone': 'phone',
'userStatus': 'userStatus'
}
self.id = None
self.username = None
self.firstName = None
self.lastName = None
self.email = None
self.password = None
self.phone = None
self.userStatus = None
| true | true |
f735afbf3acae6f5812aed3ea540a8ad6adf1015 | 6,656 | py | Python | source_server/video_stream/main4.py | caganselim/thermal-camera-assisted-baby-monitor | 7d1d948eb4c029777d1359ad9c53b5918e9753dd | [
"MIT"
] | null | null | null | source_server/video_stream/main4.py | caganselim/thermal-camera-assisted-baby-monitor | 7d1d948eb4c029777d1359ad9c53b5918e9753dd | [
"MIT"
] | null | null | null | source_server/video_stream/main4.py | caganselim/thermal-camera-assisted-baby-monitor | 7d1d948eb4c029777d1359ad9c53b5918e9753dd | [
"MIT"
] | 1 | 2019-08-15T00:24:08.000Z | 2019-08-15T00:24:08.000Z | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 10 16:45:42 2019
@author: ASUS PC
"""
from flask import Flask, render_template, Response
import time
from threading import Lock, Thread
import queue
import socket
from threading import Thread
# emulated camera
from camera3 import Camera
# Raspberry Pi camera module (requires picamera package)
# from camera_pi import Camera
#server_socket = socket.socket()
#server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#server_socket.bind(('0.0.0.0', 2000))
#server_socket.listen(0)
#connection = server_socket.accept()[0] #.makefile('rb')
#connection_file = connection.makefile('rb')
server_socket = None
connection_file = None
connection = None
frame_queue = queue.Queue(maxsize=5)
stream_entered = False
socket_open = False
lock = Lock()
app = Flask(__name__)
import urllib.request
import json
def send_device_status(cond):
    """POST this device's streaming status to the monitoring API.

    Args:
        cond (bool): True when the camera socket is connected/streaming.

    Raises:
        urllib.error.URLError: if the API endpoint is unreachable.
    """
    body = {'device_id': 26082007, 'status': cond}
    myurl = "http://167.99.215.27:8000/api/updateDeviceStatus"
    payload = json.dumps(body).encode('utf-8')
    req = urllib.request.Request(myurl)
    req.add_header('Content-Type', 'application/json')
    req.add_header('Content-Length', len(payload))
    print(payload)
    # Use the response as a context manager so the HTTP connection is
    # closed; the original bound it to an unused variable and leaked it.
    with urllib.request.urlopen(req, payload) as response:
        response.read()
def _accept_camera_connection(announce=False):
    """Bind port 2000 and block until the camera client connects.

    Retries accept() until it succeeds (printing any intermediate errors).
    Returns (server_socket, connection, file-like reader over the stream).
    When *announce* is True, prints a success marker (matches the original
    first-connect behaviour).
    """
    srv = socket.socket()
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind(('0.0.0.0', 2000))
    while True:
        try:
            srv.listen(0)
            conn = srv.accept()[0]
            if announce:
                print('I am here')
            break
        except Exception as e:
            print(e)
    return srv, conn, conn.makefile('rb')


def bar(camera, t):
    """Camera-socket worker: accept the Pi camera connection, read frames,
    and push them onto the shared frame_queue. Reconnects on any read error
    and reports connection status to the monitoring API each time it flips.

    Args:
        camera: Camera instance whose get_frame(file, time) yields JPEG bytes.
        t: the time module (passed in for sleep/timing).
    """
    global stream_entered
    global lock
    global frame_queue
    global server_socket
    global connection_file
    global connection
    global socket_open
    first_time = True
    while True:
        if not socket_open:
            if first_time:
                # Give the rest of the app a moment to start before the
                # first status report.
                t.sleep(2)
                # Bug fix: the original never cleared this flag, so every
                # reconnect paid the 2s startup delay.
                first_time = False
            send_device_status(socket_open)
            server_socket, connection, connection_file = \
                _accept_camera_connection(announce=True)
            socket_open = True
            send_device_status(socket_open)
        elif stream_entered:
            # Bug fix: initialise before the try block. Previously, if the
            # very first get_frame() raised, frame_temp was referenced below
            # while still unbound -> NameError.
            frame_temp = None
            try:
                frame_temp = camera.get_frame(connection_file, time)
            except Exception as e:
                print("An exception occured")
                print(e)
                socket_open = False
                stream_entered = False
                send_device_status(socket_open)
                print("Waiting for socket")
                server_socket, connection, connection_file = \
                    _accept_camera_connection()
                socket_open = True
                send_device_status(socket_open)
                print("Socket opened")
            if frame_temp is not None:
                lock.acquire()
                frame_queue.put(frame_temp)
                lock.release()
@app.route('/')
def index():
    """Render the video streaming home page."""
    print('x')
    page = render_template('index.html')
    return page
def gen():
    """Yield multipart MJPEG chunks from the shared frame queue, forever.

    A None entry in the queue means "no live frame"; in that case the
    bundled RefreshImage placeholder is streamed instead so the client
    connection stays alive.
    """
    global frame_queue
    global lock
    while True:
        frame = frame_queue.get()
        if frame is None:
            # Try the deployed placeholder path first, then fall back to
            # the local working directory.
            try:
                with open("root/video_stream/RefreshImage.jpg", 'rb') as img_file:
                    frame = img_file.read()
            except Exception as e:
                print(e)
                with open("RefreshImage.jpg", 'rb') as img_file:
                    frame = img_file.read()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
def gen2(frame):
    """Yield a single multipart MJPEG chunk wrapping *frame* (JPEG bytes)."""
    header = b'--frame\r\nContent-Type: image/jpeg\r\n\r\n'
    yield header + frame + b'\r\n'
@app.route('/video_feed')
def video_feed():
    # Streaming endpoint consumed by an <img src="/video_feed"> tag.
    # Three states, driven by the module-level flags:
    #   1. no camera socket yet -> serve the static placeholder image once
    #   2. socket open but stream not started -> run the b'w'/b's' start
    #      handshake with the camera client
    #   3. streaming -> serve the live MJPEG generator
    print('in')
    global stream_entered
    global socket_open
    global connection
    if not stream_entered and not socket_open:
        # No camera connected: stream the bundled "refresh" placeholder.
        try:
            file_name = "root/video_stream/RefreshImage.jpg"
            with open(file_name, 'rb') as img_file:
                frame_temp = img_file.read()
        except Exception as e:
            print(e)
            # Deployed path missing; fall back to the working directory.
            file_name = "RefreshImage.jpg"
            with open(file_name, 'rb') as img_file:
                frame_temp = img_file.read()
        return Response(gen2(frame_temp),
                        mimetype='multipart/x-mixed-replace; boundary=frame')
    elif not stream_entered and socket_open:
        # Start streaming
        # Handshake: send b'w' and only start streaming once the camera
        # client answers b's'.
        connection.sendall(b'w')
        print('I am in01')
        data = connection.recv(128)
        print('I am in1')
        if data == b's':
            print('I am in')
            stream_entered = True
    if stream_entered:
        print(stream_entered)
    """Video streaming route. Put this in the src attribute of an img tag."""
    return Response(gen(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
def foo():
    """Run the Flask app over HTTPS using the Let's Encrypt certificate pair."""
    cert_chain = '/etc/letsencrypt/live/vestelagu.site/fullchain.pem'
    private_key = '/etc/letsencrypt/live/vestelagu.site/privkey.pem'
    app.run('0.0.0.0', port=5000, debug=False, threaded=True,
            ssl_context=(cert_chain, private_key))
if __name__ == '__main__':
dum = Thread(target= bar, args=(Camera(), time))
dum.start()
foo()
| 27.618257 | 180 | 0.550631 |
from flask import Flask, render_template, Response
import time
from threading import Lock, Thread
import queue
import socket
from threading import Thread
from camera3 import Camera
server_socket = None
connection_file = None
connection = None
frame_queue = queue.Queue(maxsize=5)
stream_entered = False
socket_open = False
lock = Lock()
app = Flask(__name__)
import urllib.request
import json
def send_device_status(cond):
body = {'device_id': 26082007, 'status': cond}
myurl = "http://167.99.215.27:8000/api/updateDeviceStatus"
req = urllib.request.Request(myurl)
req.add_header('Content-Type', 'application/json')
jsondata = json.dumps(body)
jsondataasbytes = jsondata.encode('utf-8')
req.add_header('Content-Length', len(jsondataasbytes))
print (jsondataasbytes)
response = urllib.request.urlopen(req, jsondataasbytes)
def bar(camera, t):
global stream_entered
global lock
global frame_queue
global server_socket
global connection_file
global connection
global socket_open
first_time = True
while True:
if not socket_open:
if first_time:
t.sleep(2)
send_device_status(socket_open)
server_socket = socket.socket()
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(('0.0.0.0', 2000))
keep_on = True
while keep_on:
try:
keep_on = False
server_socket.listen(0)
connection = server_socket.accept()[0]
print('I am here')
except Exception as e:
print(e)
keep_on = True
connection_file = connection.makefile('rb')
socket_open = True
send_device_status(socket_open)
elif stream_entered:
start = t.time()
try:
frame_temp = camera.get_frame(connection_file, time)
except Exception as e:
print("An exception occured")
print(e)
socket_open = False
stream_entered = False
send_device_status(socket_open)
print("Waiting for socket")
server_socket = socket.socket()
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(('0.0.0.0', 2000))
keep_on = True
while keep_on:
try:
keep_on = False
server_socket.listen(0)
connection = server_socket.accept()[0]
except Exception as e:
print(e)
keep_on = True
connection_file = connection.makefile('rb')
socket_open = True
send_device_status(socket_open)
print("Socket opened")
if frame_temp is not None:
lock.acquire()
frame_queue.put(frame_temp)
lock.release()
finish = t.time()
@app.route('/')
def index():
print('x')
return render_template('index.html')
def gen():
global frame_queue
global lock
while True:
frame = frame_queue.get()
if frame is not None:
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
else:
try:
file_name = "root/video_stream/RefreshImage.jpg"
with open(file_name, 'rb') as img_file:
frame_temp = img_file.read()
except Exception as e:
print(e)
file_name = "RefreshImage.jpg"
with open(file_name, 'rb') as img_file:
frame_temp = img_file.read()
frame = frame_temp
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
def gen2(frame):
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
@app.route('/video_feed')
def video_feed():
print('in')
global stream_entered
global socket_open
global connection
if not stream_entered and not socket_open:
try:
file_name = "root/video_stream/RefreshImage.jpg"
with open(file_name, 'rb') as img_file:
frame_temp = img_file.read()
except Exception as e:
print(e)
file_name = "RefreshImage.jpg"
with open(file_name, 'rb') as img_file:
frame_temp = img_file.read()
return Response(gen2(frame_temp),
mimetype='multipart/x-mixed-replace; boundary=frame')
elif not stream_entered and socket_open:
connection.sendall(b'w')
print('I am in01')
data = connection.recv(128)
print('I am in1')
if data == b's':
print('I am in')
stream_entered = True
if stream_entered:
print(stream_entered)
return Response(gen(),
mimetype='multipart/x-mixed-replace; boundary=frame')
def foo():
app.run('0.0.0.0', port=5000, debug=False, threaded=True,ssl_context=('/etc/letsencrypt/live/vestelagu.site/fullchain.pem','/etc/letsencrypt/live/vestelagu.site/privkey.pem'))
if __name__ == '__main__':
dum = Thread(target= bar, args=(Camera(), time))
dum.start()
foo()
| true | true |
f735b007affed977c83d7016dcea7c249f5339c5 | 2,706 | py | Python | pytorch_ssim.py | minhmanho/rrdncnn | f09ef7d92e31bfd43a548bb476970cfe38d32508 | [
"X11"
] | 22 | 2019-10-07T07:54:46.000Z | 2022-03-22T14:22:52.000Z | pytorch_ssim.py | minhmanho/rrdncnn | f09ef7d92e31bfd43a548bb476970cfe38d32508 | [
"X11"
] | 2 | 2020-09-27T06:45:19.000Z | 2022-03-18T07:48:54.000Z | pytorch_ssim.py | minhmanho/rrdncnn | f09ef7d92e31bfd43a548bb476970cfe38d32508 | [
"X11"
] | 3 | 2021-01-18T03:46:14.000Z | 2021-09-07T09:24:58.000Z | import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from math import exp
def gaussian(window_size, sigma):
    """Return a 1-D Gaussian kernel of length *window_size*, normalised to sum to 1."""
    center = window_size // 2
    weights = []
    for x in range(window_size):
        weights.append(exp(-(x - center) ** 2 / float(2 * sigma ** 2)))
    kernel = torch.Tensor(weights)
    return kernel / kernel.sum()
def create_window(window_size, channel):
    """Build a (channel, 1, window_size, window_size) Gaussian window tensor
    suitable for depthwise conv2d (groups=channel), using sigma=1.5."""
    kernel_1d = gaussian(window_size, 1.5).unsqueeze(1)
    # Outer product turns the 1-D kernel into the separable 2-D Gaussian.
    kernel_2d = kernel_1d.mm(kernel_1d.t()).float().unsqueeze(0).unsqueeze(0)
    # One identical kernel per channel.
    return Variable(kernel_2d.expand(channel, 1, window_size, window_size).contiguous())
def _ssim(img1, img2, window, window_size, channel, size_average = True):
mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1*mu2
sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2
C1 = 0.01**2
C2 = 0.03**2
ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
class SSIM(torch.nn.Module):
    """Structural-similarity module that caches its Gaussian window.

    The window built in __init__ (for ``self.channel`` input channels) is
    reused across forward() calls and rebuilt only when the incoming
    tensor's channel count or data type no longer matches the cache.
    """
    def __init__(self, window_size = 11, size_average = True):
        super(SSIM, self).__init__()
        # Side length of the Gaussian window.
        self.window_size = window_size
        # True -> forward() returns a scalar mean; False -> per-image values.
        self.size_average = size_average
        # Channel count the cached window was built for.
        self.channel = 1
        self.window = create_window(window_size, self.channel)
    def forward(self, img1, img2):
        (_, channel, _, _) = img1.size()
        # Cache hit: same channel count and same tensor type as last time.
        if channel == self.channel and self.window.data.type() == img1.data.type():
            window = self.window
        else:
            # Cache miss: rebuild for the new channel count, move it to the
            # input's device/dtype, and remember it for next time.
            window = create_window(self.window_size, channel)
            if img1.is_cuda:
                window = window.cuda(img1.get_device())
            window = window.type_as(img1)
            self.window = window
            self.channel = channel
        return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
def ssim(img1, img2, window_size = 11, size_average = True):
(_, channel, _, _) = img1.size()
window = create_window(window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
return _ssim(img1, img2, window, window_size, channel, size_average) | 37.068493 | 105 | 0.629712 | import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from math import exp
def gaussian(window_size, sigma):
gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])
return gauss/gauss.sum()
def create_window(window_size, channel):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
return window
def _ssim(img1, img2, window, window_size, channel, size_average = True):
mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1*mu2
sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2
C1 = 0.01**2
C2 = 0.03**2
ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
class SSIM(torch.nn.Module):
def __init__(self, window_size = 11, size_average = True):
super(SSIM, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.channel = 1
self.window = create_window(window_size, self.channel)
def forward(self, img1, img2):
(_, channel, _, _) = img1.size()
if channel == self.channel and self.window.data.type() == img1.data.type():
window = self.window
else:
window = create_window(self.window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
self.window = window
self.channel = channel
return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
def ssim(img1, img2, window_size = 11, size_average = True):
(_, channel, _, _) = img1.size()
window = create_window(window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
return _ssim(img1, img2, window, window_size, channel, size_average) | true | true |
f735b19427ad7d8a33dc3bf0e10c2bd58c3010b7 | 2,766 | py | Python | tags/errors.py | Myst1c-a/phen-cogs | 672f9022ddbbd9a84b0a05357347e99e64a776fc | [
"MIT"
] | null | null | null | tags/errors.py | Myst1c-a/phen-cogs | 672f9022ddbbd9a84b0a05357347e99e64a776fc | [
"MIT"
] | null | null | null | tags/errors.py | Myst1c-a/phen-cogs | 672f9022ddbbd9a84b0a05357347e99e64a776fc | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2020-present phenom4n4n
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from typing import Optional
from redbot.core.commands import UserFeedbackCheckFailure
from redbot.core.utils.chat_formatting import humanize_number as hn
__all__ = (
"TagError",
"MissingTagPermissions",
"RequireCheckFailure",
"WhitelistCheckFailure",
"BlacklistCheckFailure",
"TagFeedbackError",
"TagAliasError",
)
class TagError(Exception):
"""Base exception class."""
class MissingTagPermissions(TagError):
"""Raised when a user doesn't have permissions to use a block in a tag."""
class RequireCheckFailure(TagError):
    """
    Raised during tag invocation if the user fails to fulfill
    blacklist or whitelist requirements.
    """

    def __init__(self, response: Optional[str] = None):
        # Store the configured failure message (may be None) so handlers
        # can relay it to the user; it is also forwarded to the base class.
        self.response = response
        super().__init__(response)
class WhitelistCheckFailure(RequireCheckFailure):
    """Raised when the invoker is outside a whitelisted channel and holds no whitelisted role."""
class BlacklistCheckFailure(RequireCheckFailure):
    """Raised when the invoker is in a blacklisted channel or holds a blacklisted role."""
class TagFeedbackError(UserFeedbackCheckFailure, TagError):
    """Error whose message is shown directly to the user while running tag commands."""
class TagAliasError(TagFeedbackError):
    """User-facing error raised when adding or removing a tag alias fails."""
class BlockCompileError(TagError):
    """Raised when a TagScript block cannot be compiled."""
class TagCharacterLimitReached(TagError):
    """Raised when a TagScript body exceeds the allowed character limit."""

    def __init__(self, limit: int, length: int):
        message = f"TagScript cannot be longer than {hn(limit)} (**{hn(length)}**)."
        super().__init__(message)
| 32.541176 | 92 | 0.754519 |
from typing import Optional
from redbot.core.commands import UserFeedbackCheckFailure
from redbot.core.utils.chat_formatting import humanize_number as hn
__all__ = (
"TagError",
"MissingTagPermissions",
"RequireCheckFailure",
"WhitelistCheckFailure",
"BlacklistCheckFailure",
"TagFeedbackError",
"TagAliasError",
)
class TagError(Exception):
class MissingTagPermissions(TagError):
class RequireCheckFailure(TagError):
def __init__(self, response: Optional[str] = None):
self.response = response
super().__init__(response)
class WhitelistCheckFailure(RequireCheckFailure):
class BlacklistCheckFailure(RequireCheckFailure):
class TagFeedbackError(UserFeedbackCheckFailure, TagError):
class TagAliasError(TagFeedbackError):
class BlockCompileError(TagError):
class TagCharacterLimitReached(TagError):
def __init__(self, limit: int, length: int):
super().__init__(f"TagScript cannot be longer than {hn(limit)} (**{hn(length)}**).")
| true | true |
f735b1c40eb715c9d12e3f92d00bc2e92e50fa58 | 4,754 | py | Python | src/converter/gen_boundary_data.py | Kento75/mozc | 0abed62d6f9cd9c6ce2142407a1f80e02a3230f1 | [
"BSD-3-Clause"
] | 1,144 | 2015-04-23T16:18:45.000Z | 2022-03-29T19:37:33.000Z | src/converter/gen_boundary_data.py | kirameister/mozc | 18b2b32b4d3fe585d38134606773239781b6be82 | [
"BSD-3-Clause"
] | 291 | 2015-05-04T07:53:37.000Z | 2022-03-22T00:09:05.000Z | src/converter/gen_boundary_data.py | kirameister/mozc | 18b2b32b4d3fe585d38134606773239781b6be82 | [
"BSD-3-Clause"
] | 301 | 2015-05-03T00:07:18.000Z | 2022-03-21T10:48:29.000Z | # -*- coding: utf-8 -*-
# Copyright 2010-2021, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A tool to generate boundary data.
Boundary data binary image is an array of uint16 whose length is 2N, where N is
the number of POS IDs including special POS. The array has the following
structure:
-------------------------------------
prefix penalty of POS ID 0 (2 bytes)
-------------------------------------
suffix penalty of POS ID 0 (2 bytes)
-------------------------------------
prefix penalty of POS ID 1 (2 bytes)
-------------------------------------
suffix penalty of POS ID 1 (2 bytes)
-------------------------------------
.
.
.
-------------------------------------
prefix penalty of POS ID N (2 bytes)
-------------------------------------
suffix penalty of POS ID N (2 bytes)
-------------------------------------
See converter/segmenter.cc for how it's used.
"""
from __future__ import absolute_import
from __future__ import print_function
import codecs
import optparse
import re
import struct
import sys
from six.moves import range
def PatternToRegexp(pattern):
  """Converts a boundary-rule pattern to an anchored regexp string.

  Each '*' wildcard becomes '[^,]+' (one comma-delimited feature field) and
  the whole pattern is anchored at the start with '^'.
  """
  return '^{}'.format(pattern.replace('*', '[^,]+'))
def LoadPatterns(file):
  """Loads PREFIX/SUFFIX penalty patterns from a boundary definition file.

  Each non-comment line has the form: LABEL FEATURE_PATTERN COST, where
  LABEL is PREFIX or SUFFIX, FEATURE_PATTERN may contain '*' wildcards and
  COST must fit in uint16 (it is later packed with struct '<H').

  Args:
    file: path to the boundary definition file.

  Returns:
    A (prefix, suffix) pair of lists of [compiled_regexp, cost] entries.
  """
  prefix = []
  suffix = []
  for line in codecs.open(file, 'r', encoding='utf-8'):
    if len(line) <= 1 or line[0] == '#':
      continue
    fields = line.split()
    label = fields[0]
    feature = fields[1]
    cost = int(fields[2])
    if cost < 0 or cost > 0xffff:
      # The cost must fit in uint16; say which line is bad before aborting.
      print('cost out of range: %s' % (line), file=sys.stderr)
      sys.exit(-1)
    if label == 'PREFIX':
      prefix.append([re.compile(PatternToRegexp(feature)), cost])
    elif label == 'SUFFIX':
      suffix.append([re.compile(PatternToRegexp(feature)), cost])
    else:
      print('format error %s' % (line), file=sys.stderr)
      # Exit with a non-zero status: the previous sys.exit(0) reported
      # success to the caller even though the input was malformed.
      sys.exit(1)
  return (prefix, suffix)
def GetCost(patterns, feature):
  """Returns the cost of the first pattern matching feature, or 0 if none do."""
  for pattern, cost in patterns:
    if pattern.match(feature):
      return cost
  return 0
def LoadFeatures(filename):
  """Reads the feature string (second whitespace-separated column) of every
  line in a POS ID definition file."""
  return [line.split()[1]
          for line in codecs.open(filename, 'r', encoding='utf-8')]
def CountSpecialPos(filename):
  """Counts the non-empty, non-comment lines of a special POS definition file."""
  count = 0
  for raw in codecs.open(filename, 'r', encoding='utf-8'):
    entry = raw.rstrip()
    if entry and not entry.startswith('#'):
      count += 1
  return count
def ParseOptions():
  """Defines and parses the command line flags for this tool.

  Returns:
    An optparse options object with boundary_def, id_def, special_pos and
    output attributes.
  """
  parser = optparse.OptionParser()
  parser.add_option('--boundary_def', dest='boundary_def',
                    help='Boundary definition file')
  # The help text used to be a copy-paste of --boundary_def's; this flag
  # actually takes the POS ID definition file (see LoadFeatures/main).
  parser.add_option('--id_def', dest='id_def',
                    help='POS ID definition file')
  parser.add_option('--special_pos', dest='special_pos',
                    help='Special POS definition file')
  parser.add_option('--output', dest='output',
                    help='Output binary file')
  return parser.parse_args()[0]
def main():
  """Generates the boundary penalty binary from the definition files.

  For each POS ID in the id file, writes its prefix and suffix penalties as
  a little-endian uint16 pair, then appends all-zero pairs for the special
  POS entries.  See the module docstring for the exact layout.
  """
  opts = ParseOptions()
  prefix, suffix = LoadPatterns(opts.boundary_def)
  features = LoadFeatures(opts.id_def)
  num_special_pos = CountSpecialPos(opts.special_pos)
  with open(opts.output, 'wb') as f:
    for feature in features:
      # '<H' packs an unsigned 16-bit integer in little-endian byte order.
      f.write(struct.pack('<H', GetCost(prefix, feature)))
      f.write(struct.pack('<H', GetCost(suffix, feature)))
    for _ in range(num_special_pos):
      # Special POS entries always get zero prefix/suffix penalties.
      f.write(struct.pack('<H', 0))
      f.write(struct.pack('<H', 0))
if __name__ == '__main__':
  main()
| 30.670968 | 78 | 0.649769 |
from __future__ import absolute_import
from __future__ import print_function
import codecs
import optparse
import re
import struct
import sys
from six.moves import range
def PatternToRegexp(pattern):
return '^' + pattern.replace('*', '[^,]+')
def LoadPatterns(file):
prefix = []
suffix = []
for line in codecs.open(file, 'r', encoding='utf-8'):
if len(line) <= 1 or line[0] == '#':
continue
fields = line.split()
label = fields[0]
feature = fields[1]
cost = int(fields[2])
if cost < 0 or cost > 0xffff:
sys.exit(-1)
if label == 'PREFIX':
prefix.append([re.compile(PatternToRegexp(feature)), cost])
elif label == 'SUFFIX':
suffix.append([re.compile(PatternToRegexp(feature)), cost])
else:
print('format error %s' % (line))
sys.exit(0)
return (prefix, suffix)
def GetCost(patterns, feature):
for p in patterns:
pat = p[0]
cost = p[1]
if pat.match(feature):
return cost
return 0
def LoadFeatures(filename):
features = []
for line in codecs.open(filename, 'r', encoding='utf-8'):
fields = line.split()
features.append(fields[1])
return features
def CountSpecialPos(filename):
count = 0
for line in codecs.open(filename, 'r', encoding='utf-8'):
line = line.rstrip()
if not line or line[0] == '#':
continue
count += 1
return count
def ParseOptions():
parser = optparse.OptionParser()
parser.add_option('--boundary_def', dest='boundary_def',
help='Boundary definition file')
parser.add_option('--id_def', dest='id_def',
help='Boundary definition file')
parser.add_option('--special_pos', dest='special_pos',
help='Special POS definition file')
parser.add_option('--output', dest='output',
help='Output binary file')
return parser.parse_args()[0]
def main():
opts = ParseOptions()
prefix, suffix = LoadPatterns(opts.boundary_def)
features = LoadFeatures(opts.id_def)
num_special_pos = CountSpecialPos(opts.special_pos)
with open(opts.output, 'wb') as f:
for feature in features:
f.write(struct.pack('<H', GetCost(prefix, feature)))
f.write(struct.pack('<H', GetCost(suffix, feature)))
for _ in range(num_special_pos):
f.write(struct.pack('<H', 0))
f.write(struct.pack('<H', 0))
if __name__ == '__main__':
main()
| true | true |
f735b38333c649bea49b0c547e0ecbe745f5c70d | 1,590 | py | Python | gym_minigrid/envs/blockedunlockpickup.py | kanaadp/gym-minigrid | b8e95a3b5ac86f791c529d12561f73e19b9b5192 | [
"Apache-2.0"
] | null | null | null | gym_minigrid/envs/blockedunlockpickup.py | kanaadp/gym-minigrid | b8e95a3b5ac86f791c529d12561f73e19b9b5192 | [
"Apache-2.0"
] | null | null | null | gym_minigrid/envs/blockedunlockpickup.py | kanaadp/gym-minigrid | b8e95a3b5ac86f791c529d12561f73e19b9b5192 | [
"Apache-2.0"
] | null | null | null | from gym_minigrid.minigrid import *
from gym_minigrid.roomgrid import RoomGrid
from gym_minigrid.register import register
class BlockedUnlockPickup(RoomGrid):
    """
    Unlock a door blocked by a ball, then pick up a box
    in another room
    """
    def __init__(self, seed=None):
        # Two 6x6 rooms in a single row; the step budget scales with room area.
        room_size = 6
        super().__init__(
            num_rows=1,
            num_cols=2,
            room_size=room_size,
            max_steps=16*room_size**2,
            seed=seed
        )
    def _gen_grid(self, width, height):
        # NOTE(review): the add_*/place_* calls below presumably consume the
        # seeded RNG in order, so reordering them would change the generated
        # layouts for a given seed -- confirm before restructuring.
        super()._gen_grid(width, height)
        # Add a box to the room on the right
        obj, _ = self.add_object(1, 0, kind="box")
        # Make sure the two rooms are directly connected by a locked door
        door, pos = self.add_door(0, 0, 0, locked=True)
        # Block the door with a ball
        color = self._rand_color()
        self.grid.set(pos[0]-1, pos[1], Ball(color))
        # Add a key to unlock the door
        self.add_object(0, 0, 'key', door.color)
        self.place_agent(0, 0)
        # The box is the goal object; the mission string names its color/type.
        self.obj = obj
        self.mission = "pick up the %s %s" % (obj.color, obj.type)
    def step(self, action):
        obs, reward, done, info = super().step(action)
        # The episode succeeds when the default agent picks up the goal box.
        if action == self.actions.pickup:
            if self.agents[DEFAULT_AGENT_ID].carrying and self.agents[DEFAULT_AGENT_ID].carrying == self.obj:
                reward = self._reward()
                done = True
        return obs, reward, done, info
# Register the environment under a stable id so it can be instantiated via
# gym.make('MiniGrid-BlockedUnlockPickup-v0').
register(
    id='MiniGrid-BlockedUnlockPickup-v0',
    entry_point='gym_minigrid.envs:BlockedUnlockPickup'
)
| 29.444444 | 109 | 0.604403 | from gym_minigrid.minigrid import *
from gym_minigrid.roomgrid import RoomGrid
from gym_minigrid.register import register
class BlockedUnlockPickup(RoomGrid):
def __init__(self, seed=None):
room_size = 6
super().__init__(
num_rows=1,
num_cols=2,
room_size=room_size,
max_steps=16*room_size**2,
seed=seed
)
def _gen_grid(self, width, height):
super()._gen_grid(width, height)
obj, _ = self.add_object(1, 0, kind="box")
door, pos = self.add_door(0, 0, 0, locked=True)
color = self._rand_color()
self.grid.set(pos[0]-1, pos[1], Ball(color))
self.add_object(0, 0, 'key', door.color)
self.place_agent(0, 0)
self.obj = obj
self.mission = "pick up the %s %s" % (obj.color, obj.type)
def step(self, action):
obs, reward, done, info = super().step(action)
if action == self.actions.pickup:
if self.agents[DEFAULT_AGENT_ID].carrying and self.agents[DEFAULT_AGENT_ID].carrying == self.obj:
reward = self._reward()
done = True
return obs, reward, done, info
register(
id='MiniGrid-BlockedUnlockPickup-v0',
entry_point='gym_minigrid.envs:BlockedUnlockPickup'
)
| true | true |
f735b3f97d1d978d00f3e1fa7e560ec79438faf4 | 35,417 | py | Python | python/ccxt/hitbtc.py | longniao/ccxt | 3af5494bd16865f9e141f902adbd1deba7a3135e | [
"MIT"
] | 1 | 2021-03-01T17:45:33.000Z | 2021-03-01T17:45:33.000Z | python/ccxt/hitbtc.py | longniao/ccxt | 3af5494bd16865f9e141f902adbd1deba7a3135e | [
"MIT"
] | 4 | 2020-09-06T22:46:57.000Z | 2021-05-10T08:35:02.000Z | python/ccxt/hitbtc.py | longniao/ccxt | 3af5494bd16865f9e141f902adbd1deba7a3135e | [
"MIT"
] | 1 | 2019-11-08T12:36:13.000Z | 2019-11-08T12:36:13.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
class hitbtc (Exchange):
def describe(self):
return self.deep_extend(super(hitbtc, self).describe(), {
'id': 'hitbtc',
'name': 'HitBTC',
'countries': ['HK'],
'rateLimit': 1500,
'version': '1',
'has': {
'CORS': False,
'fetchTrades': True,
'fetchTickers': True,
'fetchOrder': True,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchOrderTrades': True,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766555-8eaec20e-5edc-11e7-9c5b-6dc69fc42f5e.jpg',
'api': 'https://api.hitbtc.com',
'www': 'https://hitbtc.com',
'referral': 'https://hitbtc.com/?ref_id=5a5d39a65d466',
'doc': 'https://github.com/hitbtc-com/hitbtc-api/blob/master/APIv1.md',
'fees': [
'https://hitbtc.com/fees-and-limits',
'https://support.hitbtc.com/hc/en-us/articles/115005148605-Fees-and-limits',
],
},
'api': {
'public': {
'get': [
'{symbol}/orderbook',
'{symbol}/ticker',
'{symbol}/trades',
'{symbol}/trades/recent',
'symbols',
'ticker',
'time',
],
},
'trading': {
'get': [
'balance',
'orders/active',
'orders/recent',
'order',
'trades/by/order',
'trades',
],
'post': [
'new_order',
'cancel_order',
'cancel_orders',
],
},
'payment': {
'get': [
'balance',
'address/{currency}',
'transactions',
'transactions/{transaction}',
],
'post': [
'transfer_to_trading',
'transfer_to_main',
'address/{currency}',
'payout',
],
},
},
# hardcoded fees are deprecated and should only be used when there's no other way to get fee info
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': -0.01 / 100,
'taker': 0.1 / 100,
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {
'BTC': 0.001,
'BCC': 0.0018,
'ETH': 0.00215,
'BCH': 0.0018,
'USDT': 100,
'DASH': 0.03,
'BTG': 0.0005,
'LTC': 0.003,
'ZEC': 0.0001,
'XMR': 0.09,
'1ST': 0.84,
'ADX': 5.7,
'AE': 6.7,
'AEON': 0.01006,
'AIR': 565,
'AMP': 9,
'ANT': 6.7,
'ARDR': 1,
'ARN': 18.5,
'ART': 26,
'ATB': 0.0004,
'ATL': 27,
'ATM': 504,
'ATS': 860,
'AVT': 1.9,
'BAS': 113,
'BCN': 0.1,
'DAO.Casino': 124, # id = 'BET'
'BKB': 46,
'BMC': 32,
'BMT': 100,
'BNT': 2.57,
'BQX': 4.7,
'BTM': 40,
'BTX': 0.04,
'BUS': 0.004,
'CCT': 115,
'CDT': 100,
'CDX': 30,
'CFI': 61,
'CLD': 0.88,
'CND': 574,
'CNX': 0.04,
'COSS': 65,
'CSNO': 16,
'CTR': 15,
'CTX': 146,
'CVC': 8.46,
'DBIX': 0.0168,
'DCN': 120000,
'DCT': 0.02,
'DDF': 342,
'DENT': 6240,
'DGB': 0.4,
'DGD': 0.01,
'DICE': 0.32,
'DLT': 0.26,
'DNT': 0.21,
'DOGE': 2,
'DOV': 34,
'DRPU': 24,
'DRT': 240,
'DSH': 0.017,
'EBET': 84,
'EBTC': 20,
'EBTCOLD': 6.6,
'ECAT': 14,
'EDG': 2,
'EDO': 2.9,
'ELE': 0.00172,
'ELM': 0.004,
'EMC': 0.03,
'EMGO': 14,
'ENJ': 163,
'EOS': 1.5,
'ERO': 34,
'ETBS': 15,
'ETC': 0.002,
'ETP': 0.004,
'EVX': 5.4,
'EXN': 456,
'FRD': 65,
'FUEL': 123.00105,
'FUN': 202.9598309,
'FYN': 1.849,
'FYP': 66.13,
'GNO': 0.0034,
'GUP': 4,
'GVT': 1.2,
'HAC': 144,
'HDG': 7,
'HGT': 1082,
'HPC': 0.4,
'HVN': 120,
'ICN': 0.55,
'ICO': 34,
'ICOS': 0.35,
'IND': 76,
'INDI': 5913,
'ITS': 15.0012,
'IXT': 11,
'KBR': 143,
'KICK': 112,
'LA': 41,
'LAT': 1.44,
'LIFE': 13000,
'LRC': 27,
'LSK': 0.3,
'LUN': 0.34,
'MAID': 5,
'MANA': 143,
'MCAP': 5.44,
'MIPS': 43,
'MNE': 1.33,
'MSP': 121,
'MTH': 92,
'MYB': 3.9,
'NDC': 165,
'NEBL': 0.04,
'NET': 3.96,
'NTO': 998,
'NXC': 13.39,
'NXT': 3,
'OAX': 15,
'ODN': 0.004,
'OMG': 2,
'OPT': 335,
'ORME': 2.8,
'OTN': 0.57,
'PAY': 3.1,
'PIX': 96,
'PLBT': 0.33,
'PLR': 114,
'PLU': 0.87,
'POE': 784,
'POLL': 3.5,
'PPT': 2,
'PRE': 32,
'PRG': 39,
'PRO': 41,
'PRS': 60,
'PTOY': 0.5,
'QAU': 63,
'QCN': 0.03,
'QTUM': 0.04,
'QVT': 64,
'REP': 0.02,
'RKC': 15,
'RVT': 14,
'SAN': 2.24,
'SBD': 0.03,
'SCL': 2.6,
'SISA': 1640,
'SKIN': 407,
'SMART': 0.4,
'SMS': 0.0375,
'SNC': 36,
'SNGLS': 4,
'SNM': 48,
'SNT': 233,
'STEEM': 0.01,
'STRAT': 0.01,
'STU': 14,
'STX': 11,
'SUB': 17,
'SUR': 3,
'SWT': 0.51,
'TAAS': 0.91,
'TBT': 2.37,
'TFL': 15,
'TIME': 0.03,
'TIX': 7.1,
'TKN': 1,
'TKR': 84,
'TNT': 90,
'TRST': 1.6,
'TRX': 1395,
'UET': 480,
'UGT': 15,
'VEN': 14,
'VERI': 0.037,
'VIB': 50,
'VIBE': 145,
'VOISE': 618,
'WEALTH': 0.0168,
'WINGS': 2.4,
'WTC': 0.75,
'XAUR': 3.23,
'XDN': 0.01,
'XEM': 15,
'XUC': 0.9,
'YOYOW': 140,
'ZAP': 24,
'ZRX': 23,
'ZSC': 191,
},
'deposit': {
'BTC': 0.0006,
'ETH': 0.003,
'BCH': 0,
'USDT': 0,
'BTG': 0,
'LTC': 0,
'ZEC': 0,
'XMR': 0,
'1ST': 0,
'ADX': 0,
'AE': 0,
'AEON': 0,
'AIR': 0,
'AMP': 0,
'ANT': 0,
'ARDR': 0,
'ARN': 0,
'ART': 0,
'ATB': 0,
'ATL': 0,
'ATM': 0,
'ATS': 0,
'AVT': 0,
'BAS': 0,
'BCN': 0,
'DAO.Casino': 0, # id = 'BET'
'BKB': 0,
'BMC': 0,
'BMT': 0,
'BNT': 0,
'BQX': 0,
'BTM': 0,
'BTX': 0,
'BUS': 0,
'CCT': 0,
'CDT': 0,
'CDX': 0,
'CFI': 0,
'CLD': 0,
'CND': 0,
'CNX': 0,
'COSS': 0,
'CSNO': 0,
'CTR': 0,
'CTX': 0,
'CVC': 0,
'DBIX': 0,
'DCN': 0,
'DCT': 0,
'DDF': 0,
'DENT': 0,
'DGB': 0,
'DGD': 0,
'DICE': 0,
'DLT': 0,
'DNT': 0,
'DOGE': 0,
'DOV': 0,
'DRPU': 0,
'DRT': 0,
'DSH': 0,
'EBET': 0,
'EBTC': 0,
'EBTCOLD': 0,
'ECAT': 0,
'EDG': 0,
'EDO': 0,
'ELE': 0,
'ELM': 0,
'EMC': 0,
'EMGO': 0,
'ENJ': 0,
'EOS': 0,
'ERO': 0,
'ETBS': 0,
'ETC': 0,
'ETP': 0,
'EVX': 0,
'EXN': 0,
'FRD': 0,
'FUEL': 0,
'FUN': 0,
'FYN': 0,
'FYP': 0,
'GNO': 0,
'GUP': 0,
'GVT': 0,
'HAC': 0,
'HDG': 0,
'HGT': 0,
'HPC': 0,
'HVN': 0,
'ICN': 0,
'ICO': 0,
'ICOS': 0,
'IND': 0,
'INDI': 0,
'ITS': 0,
'IXT': 0,
'KBR': 0,
'KICK': 0,
'LA': 0,
'LAT': 0,
'LIFE': 0,
'LRC': 0,
'LSK': 0,
'LUN': 0,
'MAID': 0,
'MANA': 0,
'MCAP': 0,
'MIPS': 0,
'MNE': 0,
'MSP': 0,
'MTH': 0,
'MYB': 0,
'NDC': 0,
'NEBL': 0,
'NET': 0,
'NTO': 0,
'NXC': 0,
'NXT': 0,
'OAX': 0,
'ODN': 0,
'OMG': 0,
'OPT': 0,
'ORME': 0,
'OTN': 0,
'PAY': 0,
'PIX': 0,
'PLBT': 0,
'PLR': 0,
'PLU': 0,
'POE': 0,
'POLL': 0,
'PPT': 0,
'PRE': 0,
'PRG': 0,
'PRO': 0,
'PRS': 0,
'PTOY': 0,
'QAU': 0,
'QCN': 0,
'QTUM': 0,
'QVT': 0,
'REP': 0,
'RKC': 0,
'RVT': 0,
'SAN': 0,
'SBD': 0,
'SCL': 0,
'SISA': 0,
'SKIN': 0,
'SMART': 0,
'SMS': 0,
'SNC': 0,
'SNGLS': 0,
'SNM': 0,
'SNT': 0,
'STEEM': 0,
'STRAT': 0,
'STU': 0,
'STX': 0,
'SUB': 0,
'SUR': 0,
'SWT': 0,
'TAAS': 0,
'TBT': 0,
'TFL': 0,
'TIME': 0,
'TIX': 0,
'TKN': 0,
'TKR': 0,
'TNT': 0,
'TRST': 0,
'TRX': 0,
'UET': 0,
'UGT': 0,
'VEN': 0,
'VERI': 0,
'VIB': 0,
'VIBE': 0,
'VOISE': 0,
'WEALTH': 0,
'WINGS': 0,
'WTC': 0,
'XAUR': 0,
'XDN': 0,
'XEM': 0,
'XUC': 0,
'YOYOW': 0,
'ZAP': 0,
'ZRX': 0,
'ZSC': 0,
},
},
},
'commonCurrencies': {
'BET': 'DAO.Casino',
'CAT': 'BitClave',
'DRK': 'DASH',
'EMGO': 'MGO',
'GET': 'Themis',
'HSR': 'HC',
'LNC': 'LinkerCoin',
'UNC': 'Unigame',
'USD': 'USDT',
'XBT': 'BTC',
},
'options': {
'defaultTimeInForce': 'FOK',
},
})
    def fetch_markets(self, params={}):
        """Fetches GET /api/1/public/symbols and maps each entry onto the
        unified ccxt market structure.

        'lot' is the minimum tradable unit and 'step' the price increment;
        the same strings also drive the precision fields below.
        """
        response = self.publicGetSymbols(params)
        markets = self.safe_value(response, 'symbols')
        result = []
        for i in range(0, len(markets)):
            market = markets[i]
            id = self.safe_string(market, 'symbol')
            baseId = self.safe_string(market, 'commodity')
            quoteId = self.safe_string(market, 'currency')
            lot = self.safe_float(market, 'lot')
            step = self.safe_float(market, 'step')
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            symbol = base + '/' + quote
            result.append({
                'info': market,
                'id': id,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'baseId': baseId,
                'quoteId': quoteId,
                'lot': lot,
                'step': step,
                'active': True,  # hardcoded; no activity field of the payload is inspected here
                'maker': self.safe_float(market, 'provideLiquidityRate'),
                'taker': self.safe_float(market, 'takeLiquidityRate'),
                'precision': {
                    'amount': self.precision_from_string(market['lot']),
                    'price': self.precision_from_string(market['step']),
                },
                'limits': {
                    'amount': {
                        'min': lot,
                        'max': None,
                    },
                    'price': {
                        'min': step,
                        'max': None,
                    },
                    'cost': {
                        'min': None,
                        'max': None,
                    },
                },
            })
        return result
    def fetch_balance(self, params={}):
        """Fetches account balances.

        params['type'] selects the API section ('trading' by default, or
        'payment'); it is removed from the query before dispatching to the
        corresponding <type>GetBalance method.
        """
        self.load_markets()
        method = self.safe_string(params, 'type', 'trading')
        method += 'GetBalance'
        query = self.omit(params, 'type')
        response = getattr(self, method)(query)
        balances = self.safe_value(response, 'balance', [])
        result = {'info': response}
        for i in range(0, len(balances)):
            balance = balances[i]
            currencyId = self.safe_string(balance, 'currency_code')
            code = self.safe_currency_code(currencyId)
            account = self.account()
            # the payload carries the free amount under either 'cash' or 'balance'
            account['free'] = self.safe_float_2(balance, 'cash', 'balance')
            account['used'] = self.safe_float(balance, 'reserved')
            result[code] = account
        return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
orderbook = self.publicGetSymbolOrderbook(self.extend({
'symbol': self.market_id(symbol),
}, params))
return self.parse_order_book(orderbook)
    def parse_ticker(self, ticker, market=None):
        """Maps a raw v1 ticker payload onto the unified ticker structure.

        'volume' is mapped to baseVolume and 'volume_quote' to quoteVolume;
        fields the payload does not provide are returned as None.
        """
        timestamp = self.safe_integer(ticker, 'timestamp')
        symbol = None
        if market:
            symbol = market['symbol']
        last = self.safe_float(ticker, 'last')
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': self.safe_float(ticker, 'high'),
            'low': self.safe_float(ticker, 'low'),
            'bid': self.safe_float(ticker, 'bid'),
            'bidVolume': None,
            'ask': self.safe_float(ticker, 'ask'),
            'askVolume': None,
            'vwap': None,
            'open': self.safe_float(ticker, 'open'),
            'close': last,
            'last': last,
            'previousClose': None,
            'change': None,
            'percentage': None,
            'average': None,
            'baseVolume': self.safe_float(ticker, 'volume'),
            'quoteVolume': self.safe_float(ticker, 'volume_quote'),
            'info': ticker,
        }
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
tickers = self.publicGetTicker(params)
ids = list(tickers.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
market = self.markets_by_id[id]
symbol = market['symbol']
ticker = tickers[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.publicGetSymbolTicker(self.extend(request, params))
if 'message' in response:
raise ExchangeError(self.id + ' ' + response['message'])
return self.parse_ticker(response, market)
def parse_trade(self, trade, market=None):
if isinstance(trade, list):
return self.parse_public_trade(trade, market)
return self.parse_order_trade(trade, market)
    def parse_public_trade(self, trade, market=None):
        """Parses a public trade, delivered as a positional array:
        [id, price, amount, timestamp] with an optional fifth element 'side'.
        """
        symbol = None
        if market is not None:
            symbol = market['symbol']
        side = None
        tradeLength = len(trade)
        if tradeLength > 4:
            side = trade[4]
        price = float(trade[1])
        amount = float(trade[2])
        cost = price * amount
        return {
            'info': trade,
            'id': str(trade[0]),
            'timestamp': trade[3],
            'datetime': self.iso8601(trade[3]),
            'symbol': symbol,
            'type': None,
            'side': side,
            'order': None,
            'takerOrMaker': None,
            'price': price,
            'amount': amount,
            'cost': cost,
            'fee': None,
        }
    def parse_order_trade(self, trade, market=None):
        """Parses a private trade dict (e.g. from trades/by/order).

        'execQuantity' is denominated in lots and is scaled by the market's
        lot size to obtain the base-currency amount.

        NOTE(review): if the payload lacks execQuantity/execPrice, the
        arithmetic below would raise on None -- confirm those fields are
        always present.
        """
        symbol = None
        if market is not None:
            symbol = market['symbol']
        amount = self.safe_float(trade, 'execQuantity')
        if market:
            amount *= market['lot']
        price = self.safe_float(trade, 'execPrice')
        cost = price * amount
        fee = {
            'cost': self.safe_float(trade, 'fee'),
            'currency': None,
            'rate': None,
        }
        timestamp = self.safe_integer(trade, 'timestamp')
        id = self.safe_string(trade, 'tradeId')
        # we use clientOrderId as the order id with HitBTC intentionally
        # because most of their endpoints will require clientOrderId
        # explained here: https://github.com/ccxt/ccxt/issues/5674
        orderId = self.safe_string(trade, 'clientOrderId')
        side = self.safe_string(trade, 'side')
        return {
            'info': trade,
            'id': id,
            'order': orderId,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': symbol,
            'type': None,
            'side': side,
            'price': price,
            'amount': amount,
            'cost': cost,
            'fee': fee,
        }
    def fetch_trades(self, symbol, since=None, limit=None, params={}):
        """Fetches public trades for a symbol.

        'since' maps to the 'from' query parameter with by-timestamp
        sorting; 'limit' maps to 'max_results'.  The commented keys below
        document the endpoint's other accepted parameters.
        """
        self.load_markets()
        market = self.market(symbol)
        request = {
            'symbol': market['id'],
            # 'from': 0,
            # 'till': 100,
            # 'by': 'ts',  # or by trade_id
            # 'sort': 'desc',  # or asc
            # 'start_index': 0,
            # 'max_results': 1000,
            # 'format_item': 'object',
            # 'format_price': 'number',
            # 'format_amount': 'number',
            # 'format_tid': 'string',
            # 'format_timestamp': 'millisecond',
            # 'format_wrap': False,
            # 'side': 'true',
        }
        if since is not None:
            request['by'] = 'ts'
            request['from'] = since
        if limit is not None:
            request['max_results'] = limit
        response = self.publicGetSymbolTrades(self.extend(request, params))
        return self.parse_trades(response['trades'], market, since, limit)
    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Creates an order.

        The v1 API expects the quantity as an integer number of lots, so the
        amount is converted and must divide evenly into the market's lot
        size.  Non-limit orders get the configured default timeInForce.
        A 'rejected' ExecutionReport is surfaced as InvalidOrder.
        """
        self.load_markets()
        market = self.market(symbol)
        # check if amount can be evenly divided into lots
        # they want integer quantity in lot units
        quantity = float(amount) / market['lot']
        wholeLots = int(round(quantity))
        difference = quantity - wholeLots
        # NOTE(review): the remainder here is in lot units but is compared
        # against the price 'step' -- the units differ; confirm upstream.
        if abs(difference) > market['step']:
            raise ExchangeError(self.id + ' order amount should be evenly divisible by lot unit size of ' + str(market['lot']))
        clientOrderId = self.milliseconds()
        request = {
            'clientOrderId': str(clientOrderId),
            'symbol': market['id'],
            'side': side,
            'quantity': str(wholeLots),  # quantity in integer lot units
            'type': type,
        }
        if type == 'limit':
            request['price'] = self.price_to_precision(symbol, price)
        else:
            request['timeInForce'] = self.options['defaultTimeInForce']
        response = self.tradingPostNewOrder(self.extend(request, params))
        order = self.parse_order(response['ExecutionReport'], market)
        if order['status'] == 'rejected':
            raise InvalidOrder(self.id + ' order was rejected by the exchange ' + self.json(order))
        return order
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
# we use clientOrderId as the order id with HitBTC intentionally
# because most of their endpoints will require clientOrderId
# explained here: https://github.com/ccxt/ccxt/issues/5674
request = {
'clientOrderId': id,
}
return self.tradingPostCancelOrder(self.extend(request, params))
    def parse_order_status(self, status):
        """Maps HitBTC v1 order statuses onto unified ccxt statuses.

        Unknown statuses fall through to None (safe_string's default).
        """
        statuses = {
            'new': 'open',
            'partiallyFilled': 'open',
            'filled': 'closed',
            'canceled': 'canceled',
            'rejected': 'rejected',
            'expired': 'expired',
        }
        return self.safe_string(statuses, status)
    def parse_order(self, order, market=None):
        """Maps a raw order / ExecutionReport payload onto the unified order
        structure.

        Several field names are probed in order of preference because
        different endpoints use different keys.  Quantities are denominated
        in lots and are scaled by the market's lot size when the market is
        known.
        """
        timestamp = self.safe_integer(order, 'lastTimestamp')
        if timestamp is None:
            timestamp = self.safe_integer(order, 'timestamp')
        symbol = None
        if market is None:
            market = self.markets_by_id[order['symbol']]
        status = self.parse_order_status(self.safe_string(order, 'orderStatus'))
        # price preference: orderPrice, then price, then avgPrice
        price = self.safe_float(order, 'orderPrice')
        price = self.safe_float(order, 'price', price)
        price = self.safe_float(order, 'avgPrice', price)
        amount = self.safe_float(order, 'orderQuantity')
        amount = self.safe_float(order, 'quantity', amount)
        remaining = self.safe_float(order, 'quantityLeaves')
        remaining = self.safe_float(order, 'leavesQuantity', remaining)
        filled = None
        cost = None
        amountDefined = (amount is not None)
        remainingDefined = (remaining is not None)
        if market is not None:
            symbol = market['symbol']
            # scale lot-denominated quantities into base-currency units
            if amountDefined:
                amount *= market['lot']
            if remainingDefined:
                remaining *= market['lot']
        else:
            marketId = self.safe_string(order, 'symbol')
            if marketId in self.markets_by_id:
                market = self.markets_by_id[marketId]
        if amountDefined:
            if remainingDefined:
                filled = amount - remaining
                if price is not None:
                    cost = price * filled
        feeCost = self.safe_float(order, 'fee')
        feeCurrency = None
        if market is not None:
            symbol = market['symbol']
            feeCurrency = market['quote']
        fee = {
            'cost': feeCost,
            'currency': feeCurrency,
            'rate': None,
        }
        # we use clientOrderId as the order id with HitBTC intentionally
        # because most of their endpoints will require clientOrderId
        # explained here: https://github.com/ccxt/ccxt/issues/5674
        id = self.safe_string(order, 'clientOrderId')
        type = self.safe_string(order, 'type')
        side = self.safe_string(order, 'side')
        return {
            'id': id,
            'info': order,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'status': status,
            'symbol': symbol,
            'type': type,
            'side': side,
            'price': price,
            'cost': cost,
            'amount': amount,
            'filled': filled,
            'remaining': remaining,
            'fee': fee,
        }
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
# we use clientOrderId as the order id with HitBTC intentionally
# because most of their endpoints will require clientOrderId
# explained here: https://github.com/ccxt/ccxt/issues/5674
request = {
'clientOrderId': id,
}
response = self.tradingGetOrder(self.extend(request, params))
if response['orders'][0]:
return self.parse_order(response['orders'][0])
raise OrderNotFound(self.id + ' fetchOrder() error: ' + self.response)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
statuses = ['new', 'partiallyFiiled']
market = None
request = {
'sort': 'desc',
'statuses': ','.join(statuses),
}
if symbol is not None:
market = self.market(symbol)
request['symbols'] = market['id']
response = self.tradingGetOrdersActive(self.extend(request, params))
return self.parse_orders(response['orders'], market, since, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
statuses = ['filled', 'canceled', 'rejected', 'expired']
request = {
'sort': 'desc',
'statuses': ','.join(statuses),
'max_results': 1000,
}
if symbol is not None:
market = self.market(symbol)
request['symbols'] = market['id']
response = self.tradingGetOrdersRecent(self.extend(request, params))
return self.parse_orders(response['orders'], market, since, limit)
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
# we use clientOrderId as the order id with HitBTC intentionally
# because most of their endpoints will require clientOrderId
# explained here: https://github.com/ccxt/ccxt/issues/5674
request = {
'clientOrderId': id,
}
response = self.tradingGetTradesByOrder(self.extend(request, params))
return self.parse_trades(response['trades'], market, since, limit)
    def withdraw(self, code, amount, address, tag=None, params={}):
        """Requests a payout of 'amount' units of currency 'code' to an
        external 'address'.

        An optional 'tag' (memo / payment id) is forwarded as 'extra_id'.
        Returns a dict with the raw response and the transaction id.
        """
        self.check_address(address)
        self.load_markets()
        currency = self.currency(code)
        request = {
            'currency_code': currency['id'],
            'amount': amount,
            'address': address,
        }
        if tag is not None:
            request['extra_id'] = tag
        response = self.paymentPostPayout(self.extend(request, params))
        return {
            'info': response,
            'id': response['transaction'],
        }
    def nonce(self):
        # the current millisecond timestamp serves as the API request nonce
        return self.milliseconds()
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Builds the url, body and headers for a request.

        Private ('trading'/'payment') calls carry nonce and apikey in the
        query string and an X-Signature header: the lowercase hex
        HMAC-SHA512 of the url path+query (plus the POST body, when any),
        keyed with the API secret.
        """
        url = '/' + 'api' + '/' + self.version + '/' + api + '/' + self.implode_params(path, params)
        query = self.omit(params, self.extract_params(path))
        if api == 'public':
            if query:
                url += '?' + self.urlencode(query)
        else:
            self.check_required_credentials()
            nonce = self.nonce()
            payload = {'nonce': nonce, 'apikey': self.apiKey}
            query = self.extend(payload, query)
            if method == 'GET':
                url += '?' + self.urlencode(query)
            else:
                # POST: only nonce/apikey go into the url; the full query
                # (nonce/apikey included) is form-encoded into the body below
                url += '?' + self.urlencode(payload)
            auth = url
            if method == 'POST':
                if query:
                    body = self.urlencode(query)
                    auth += body
            headers = {
                'Content-Type': 'application/x-www-form-urlencoded',
                'X-Signature': self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha512).lower(),
            }
        url = self.urls['api'] + url
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = self.fetch2(path, api, method, params, headers, body)
if 'code' in response:
if 'ExecutionReport' in response:
if response['ExecutionReport']['orderRejectReason'] == 'orderExceedsLimit':
raise InsufficientFunds(self.id + ' ' + self.json(response))
raise ExchangeError(self.id + ' ' + self.json(response))
return response
| 37.008359 | 127 | 0.365926 |
ge import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
class hitbtc (Exchange):
def describe(self):
return self.deep_extend(super(hitbtc, self).describe(), {
'id': 'hitbtc',
'name': 'HitBTC',
'countries': ['HK'],
'rateLimit': 1500,
'version': '1',
'has': {
'CORS': False,
'fetchTrades': True,
'fetchTickers': True,
'fetchOrder': True,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchOrderTrades': True,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766555-8eaec20e-5edc-11e7-9c5b-6dc69fc42f5e.jpg',
'api': 'https://api.hitbtc.com',
'www': 'https://hitbtc.com',
'referral': 'https://hitbtc.com/?ref_id=5a5d39a65d466',
'doc': 'https://github.com/hitbtc-com/hitbtc-api/blob/master/APIv1.md',
'fees': [
'https://hitbtc.com/fees-and-limits',
'https://support.hitbtc.com/hc/en-us/articles/115005148605-Fees-and-limits',
],
},
'api': {
'public': {
'get': [
'{symbol}/orderbook',
'{symbol}/ticker',
'{symbol}/trades',
'{symbol}/trades/recent',
'symbols',
'ticker',
'time',
],
},
'trading': {
'get': [
'balance',
'orders/active',
'orders/recent',
'order',
'trades/by/order',
'trades',
],
'post': [
'new_order',
'cancel_order',
'cancel_orders',
],
},
'payment': {
'get': [
'balance',
'address/{currency}',
'transactions',
'transactions/{transaction}',
],
'post': [
'transfer_to_trading',
'transfer_to_main',
'address/{currency}',
'payout',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': -0.01 / 100,
'taker': 0.1 / 100,
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {
'BTC': 0.001,
'BCC': 0.0018,
'ETH': 0.00215,
'BCH': 0.0018,
'USDT': 100,
'DASH': 0.03,
'BTG': 0.0005,
'LTC': 0.003,
'ZEC': 0.0001,
'XMR': 0.09,
'1ST': 0.84,
'ADX': 5.7,
'AE': 6.7,
'AEON': 0.01006,
'AIR': 565,
'AMP': 9,
'ANT': 6.7,
'ARDR': 1,
'ARN': 18.5,
'ART': 26,
'ATB': 0.0004,
'ATL': 27,
'ATM': 504,
'ATS': 860,
'AVT': 1.9,
'BAS': 113,
'BCN': 0.1,
'DAO.Casino': 124, # id = 'BET'
'BKB': 46,
'BMC': 32,
'BMT': 100,
'BNT': 2.57,
'BQX': 4.7,
'BTM': 40,
'BTX': 0.04,
'BUS': 0.004,
'CCT': 115,
'CDT': 100,
'CDX': 30,
'CFI': 61,
'CLD': 0.88,
'CND': 574,
'CNX': 0.04,
'COSS': 65,
'CSNO': 16,
'CTR': 15,
'CTX': 146,
'CVC': 8.46,
'DBIX': 0.0168,
'DCN': 120000,
'DCT': 0.02,
'DDF': 342,
'DENT': 6240,
'DGB': 0.4,
'DGD': 0.01,
'DICE': 0.32,
'DLT': 0.26,
'DNT': 0.21,
'DOGE': 2,
'DOV': 34,
'DRPU': 24,
'DRT': 240,
'DSH': 0.017,
'EBET': 84,
'EBTC': 20,
'EBTCOLD': 6.6,
'ECAT': 14,
'EDG': 2,
'EDO': 2.9,
'ELE': 0.00172,
'ELM': 0.004,
'EMC': 0.03,
'EMGO': 14,
'ENJ': 163,
'EOS': 1.5,
'ERO': 34,
'ETBS': 15,
'ETC': 0.002,
'ETP': 0.004,
'EVX': 5.4,
'EXN': 456,
'FRD': 65,
'FUEL': 123.00105,
'FUN': 202.9598309,
'FYN': 1.849,
'FYP': 66.13,
'GNO': 0.0034,
'GUP': 4,
'GVT': 1.2,
'HAC': 144,
'HDG': 7,
'HGT': 1082,
'HPC': 0.4,
'HVN': 120,
'ICN': 0.55,
'ICO': 34,
'ICOS': 0.35,
'IND': 76,
'INDI': 5913,
'ITS': 15.0012,
'IXT': 11,
'KBR': 143,
'KICK': 112,
'LA': 41,
'LAT': 1.44,
'LIFE': 13000,
'LRC': 27,
'LSK': 0.3,
'LUN': 0.34,
'MAID': 5,
'MANA': 143,
'MCAP': 5.44,
'MIPS': 43,
'MNE': 1.33,
'MSP': 121,
'MTH': 92,
'MYB': 3.9,
'NDC': 165,
'NEBL': 0.04,
'NET': 3.96,
'NTO': 998,
'NXC': 13.39,
'NXT': 3,
'OAX': 15,
'ODN': 0.004,
'OMG': 2,
'OPT': 335,
'ORME': 2.8,
'OTN': 0.57,
'PAY': 3.1,
'PIX': 96,
'PLBT': 0.33,
'PLR': 114,
'PLU': 0.87,
'POE': 784,
'POLL': 3.5,
'PPT': 2,
'PRE': 32,
'PRG': 39,
'PRO': 41,
'PRS': 60,
'PTOY': 0.5,
'QAU': 63,
'QCN': 0.03,
'QTUM': 0.04,
'QVT': 64,
'REP': 0.02,
'RKC': 15,
'RVT': 14,
'SAN': 2.24,
'SBD': 0.03,
'SCL': 2.6,
'SISA': 1640,
'SKIN': 407,
'SMART': 0.4,
'SMS': 0.0375,
'SNC': 36,
'SNGLS': 4,
'SNM': 48,
'SNT': 233,
'STEEM': 0.01,
'STRAT': 0.01,
'STU': 14,
'STX': 11,
'SUB': 17,
'SUR': 3,
'SWT': 0.51,
'TAAS': 0.91,
'TBT': 2.37,
'TFL': 15,
'TIME': 0.03,
'TIX': 7.1,
'TKN': 1,
'TKR': 84,
'TNT': 90,
'TRST': 1.6,
'TRX': 1395,
'UET': 480,
'UGT': 15,
'VEN': 14,
'VERI': 0.037,
'VIB': 50,
'VIBE': 145,
'VOISE': 618,
'WEALTH': 0.0168,
'WINGS': 2.4,
'WTC': 0.75,
'XAUR': 3.23,
'XDN': 0.01,
'XEM': 15,
'XUC': 0.9,
'YOYOW': 140,
'ZAP': 24,
'ZRX': 23,
'ZSC': 191,
},
'deposit': {
'BTC': 0.0006,
'ETH': 0.003,
'BCH': 0,
'USDT': 0,
'BTG': 0,
'LTC': 0,
'ZEC': 0,
'XMR': 0,
'1ST': 0,
'ADX': 0,
'AE': 0,
'AEON': 0,
'AIR': 0,
'AMP': 0,
'ANT': 0,
'ARDR': 0,
'ARN': 0,
'ART': 0,
'ATB': 0,
'ATL': 0,
'ATM': 0,
'ATS': 0,
'AVT': 0,
'BAS': 0,
'BCN': 0,
'DAO.Casino': 0, # id = 'BET'
'BKB': 0,
'BMC': 0,
'BMT': 0,
'BNT': 0,
'BQX': 0,
'BTM': 0,
'BTX': 0,
'BUS': 0,
'CCT': 0,
'CDT': 0,
'CDX': 0,
'CFI': 0,
'CLD': 0,
'CND': 0,
'CNX': 0,
'COSS': 0,
'CSNO': 0,
'CTR': 0,
'CTX': 0,
'CVC': 0,
'DBIX': 0,
'DCN': 0,
'DCT': 0,
'DDF': 0,
'DENT': 0,
'DGB': 0,
'DGD': 0,
'DICE': 0,
'DLT': 0,
'DNT': 0,
'DOGE': 0,
'DOV': 0,
'DRPU': 0,
'DRT': 0,
'DSH': 0,
'EBET': 0,
'EBTC': 0,
'EBTCOLD': 0,
'ECAT': 0,
'EDG': 0,
'EDO': 0,
'ELE': 0,
'ELM': 0,
'EMC': 0,
'EMGO': 0,
'ENJ': 0,
'EOS': 0,
'ERO': 0,
'ETBS': 0,
'ETC': 0,
'ETP': 0,
'EVX': 0,
'EXN': 0,
'FRD': 0,
'FUEL': 0,
'FUN': 0,
'FYN': 0,
'FYP': 0,
'GNO': 0,
'GUP': 0,
'GVT': 0,
'HAC': 0,
'HDG': 0,
'HGT': 0,
'HPC': 0,
'HVN': 0,
'ICN': 0,
'ICO': 0,
'ICOS': 0,
'IND': 0,
'INDI': 0,
'ITS': 0,
'IXT': 0,
'KBR': 0,
'KICK': 0,
'LA': 0,
'LAT': 0,
'LIFE': 0,
'LRC': 0,
'LSK': 0,
'LUN': 0,
'MAID': 0,
'MANA': 0,
'MCAP': 0,
'MIPS': 0,
'MNE': 0,
'MSP': 0,
'MTH': 0,
'MYB': 0,
'NDC': 0,
'NEBL': 0,
'NET': 0,
'NTO': 0,
'NXC': 0,
'NXT': 0,
'OAX': 0,
'ODN': 0,
'OMG': 0,
'OPT': 0,
'ORME': 0,
'OTN': 0,
'PAY': 0,
'PIX': 0,
'PLBT': 0,
'PLR': 0,
'PLU': 0,
'POE': 0,
'POLL': 0,
'PPT': 0,
'PRE': 0,
'PRG': 0,
'PRO': 0,
'PRS': 0,
'PTOY': 0,
'QAU': 0,
'QCN': 0,
'QTUM': 0,
'QVT': 0,
'REP': 0,
'RKC': 0,
'RVT': 0,
'SAN': 0,
'SBD': 0,
'SCL': 0,
'SISA': 0,
'SKIN': 0,
'SMART': 0,
'SMS': 0,
'SNC': 0,
'SNGLS': 0,
'SNM': 0,
'SNT': 0,
'STEEM': 0,
'STRAT': 0,
'STU': 0,
'STX': 0,
'SUB': 0,
'SUR': 0,
'SWT': 0,
'TAAS': 0,
'TBT': 0,
'TFL': 0,
'TIME': 0,
'TIX': 0,
'TKN': 0,
'TKR': 0,
'TNT': 0,
'TRST': 0,
'TRX': 0,
'UET': 0,
'UGT': 0,
'VEN': 0,
'VERI': 0,
'VIB': 0,
'VIBE': 0,
'VOISE': 0,
'WEALTH': 0,
'WINGS': 0,
'WTC': 0,
'XAUR': 0,
'XDN': 0,
'XEM': 0,
'XUC': 0,
'YOYOW': 0,
'ZAP': 0,
'ZRX': 0,
'ZSC': 0,
},
},
},
'commonCurrencies': {
'BET': 'DAO.Casino',
'CAT': 'BitClave',
'DRK': 'DASH',
'EMGO': 'MGO',
'GET': 'Themis',
'HSR': 'HC',
'LNC': 'LinkerCoin',
'UNC': 'Unigame',
'USD': 'USDT',
'XBT': 'BTC',
},
'options': {
'defaultTimeInForce': 'FOK',
},
})
def fetch_markets(self, params={}):
response = self.publicGetSymbols(params)
markets = self.safe_value(response, 'symbols')
result = []
for i in range(0, len(markets)):
market = markets[i]
id = self.safe_string(market, 'symbol')
baseId = self.safe_string(market, 'commodity')
quoteId = self.safe_string(market, 'currency')
lot = self.safe_float(market, 'lot')
step = self.safe_float(market, 'step')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
result.append({
'info': market,
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'lot': lot,
'step': step,
'active': True,
'maker': self.safe_float(market, 'provideLiquidityRate'),
'taker': self.safe_float(market, 'takeLiquidityRate'),
'precision': {
'amount': self.precision_from_string(market['lot']),
'price': self.precision_from_string(market['step']),
},
'limits': {
'amount': {
'min': lot,
'max': None,
},
'price': {
'min': step,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
},
})
return result
def fetch_balance(self, params={}):
self.load_markets()
method = self.safe_string(params, 'type', 'trading')
method += 'GetBalance'
query = self.omit(params, 'type')
response = getattr(self, method)(query)
balances = self.safe_value(response, 'balance', [])
result = {'info': response}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'currency_code')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_float_2(balance, 'cash', 'balance')
account['used'] = self.safe_float(balance, 'reserved')
result[code] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
orderbook = self.publicGetSymbolOrderbook(self.extend({
'symbol': self.market_id(symbol),
}, params))
return self.parse_order_book(orderbook)
def parse_ticker(self, ticker, market=None):
timestamp = self.safe_integer(ticker, 'timestamp')
symbol = None
if market:
symbol = market['symbol']
last = self.safe_float(ticker, 'last')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'ask'),
'askVolume': None,
'vwap': None,
'open': self.safe_float(ticker, 'open'),
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'volume'),
'quoteVolume': self.safe_float(ticker, 'volume_quote'),
'info': ticker,
}
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
tickers = self.publicGetTicker(params)
ids = list(tickers.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
market = self.markets_by_id[id]
symbol = market['symbol']
ticker = tickers[id]
result[symbol] = self.parse_ticker(ticker, market)
return result
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.publicGetSymbolTicker(self.extend(request, params))
if 'message' in response:
raise ExchangeError(self.id + ' ' + response['message'])
return self.parse_ticker(response, market)
def parse_trade(self, trade, market=None):
if isinstance(trade, list):
return self.parse_public_trade(trade, market)
return self.parse_order_trade(trade, market)
def parse_public_trade(self, trade, market=None):
symbol = None
if market is not None:
symbol = market['symbol']
side = None
tradeLength = len(trade)
if tradeLength > 4:
side = trade[4]
price = float(trade[1])
amount = float(trade[2])
cost = price * amount
return {
'info': trade,
'id': str(trade[0]),
'timestamp': trade[3],
'datetime': self.iso8601(trade[3]),
'symbol': symbol,
'type': None,
'side': side,
'order': None,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
}
def parse_order_trade(self, trade, market=None):
symbol = None
if market is not None:
symbol = market['symbol']
amount = self.safe_float(trade, 'execQuantity')
if market:
amount *= market['lot']
price = self.safe_float(trade, 'execPrice')
cost = price * amount
fee = {
'cost': self.safe_float(trade, 'fee'),
'currency': None,
'rate': None,
}
timestamp = self.safe_integer(trade, 'timestamp')
id = self.safe_string(trade, 'tradeId')
# we use clientOrderId as the order id with HitBTC intentionally
# because most of their endpoints will require clientOrderId
# explained here: https://github.com/ccxt/ccxt/issues/5674
orderId = self.safe_string(trade, 'clientOrderId')
side = self.safe_string(trade, 'side')
return {
'info': trade,
'id': id,
'order': orderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': None,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
# 'from': 0,
# 'till': 100,
# 'by': 'ts', # or by trade_id
# 'sort': 'desc', # or asc
# 'start_index': 0,
# 'max_results': 1000,
# 'format_item': 'object',
# 'format_price': 'number',
# 'format_amount': 'number',
# 'format_tid': 'string',
# 'format_timestamp': 'millisecond',
# 'format_wrap': False,
# 'side': 'true',
}
if since is not None:
request['by'] = 'ts'
request['from'] = since
if limit is not None:
request['max_results'] = limit
response = self.publicGetSymbolTrades(self.extend(request, params))
return self.parse_trades(response['trades'], market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
# check if amount can be evenly divided into lots
# they want integer quantity in lot units
quantity = float(amount) / market['lot']
wholeLots = int(round(quantity))
difference = quantity - wholeLots
if abs(difference) > market['step']:
raise ExchangeError(self.id + ' order amount should be evenly divisible by lot unit size of ' + str(market['lot']))
clientOrderId = self.milliseconds()
request = {
'clientOrderId': str(clientOrderId),
'symbol': market['id'],
'side': side,
'quantity': str(wholeLots), # quantity in integer lot units
'type': type,
}
if type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
else:
request['timeInForce'] = self.options['defaultTimeInForce']
response = self.tradingPostNewOrder(self.extend(request, params))
order = self.parse_order(response['ExecutionReport'], market)
if order['status'] == 'rejected':
raise InvalidOrder(self.id + ' order was rejected by the exchange ' + self.json(order))
return order
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
# we use clientOrderId as the order id with HitBTC intentionally
# because most of their endpoints will require clientOrderId
# explained here: https://github.com/ccxt/ccxt/issues/5674
request = {
'clientOrderId': id,
}
return self.tradingPostCancelOrder(self.extend(request, params))
def parse_order_status(self, status):
statuses = {
'new': 'open',
'partiallyFilled': 'open',
'filled': 'closed',
'canceled': 'canceled',
'rejected': 'rejected',
'expired': 'expired',
}
return self.safe_string(statuses, status)
def parse_order(self, order, market=None):
timestamp = self.safe_integer(order, 'lastTimestamp')
if timestamp is None:
timestamp = self.safe_integer(order, 'timestamp')
symbol = None
if market is None:
market = self.markets_by_id[order['symbol']]
status = self.parse_order_status(self.safe_string(order, 'orderStatus'))
price = self.safe_float(order, 'orderPrice')
price = self.safe_float(order, 'price', price)
price = self.safe_float(order, 'avgPrice', price)
amount = self.safe_float(order, 'orderQuantity')
amount = self.safe_float(order, 'quantity', amount)
remaining = self.safe_float(order, 'quantityLeaves')
remaining = self.safe_float(order, 'leavesQuantity', remaining)
filled = None
cost = None
amountDefined = (amount is not None)
remainingDefined = (remaining is not None)
if market is not None:
symbol = market['symbol']
if amountDefined:
amount *= market['lot']
if remainingDefined:
remaining *= market['lot']
else:
marketId = self.safe_string(order, 'symbol')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
if amountDefined:
if remainingDefined:
filled = amount - remaining
if price is not None:
cost = price * filled
feeCost = self.safe_float(order, 'fee')
feeCurrency = None
if market is not None:
symbol = market['symbol']
feeCurrency = market['quote']
fee = {
'cost': feeCost,
'currency': feeCurrency,
'rate': None,
}
# we use clientOrderId as the order id with HitBTC intentionally
# because most of their endpoints will require clientOrderId
# explained here: https://github.com/ccxt/ccxt/issues/5674
id = self.safe_string(order, 'clientOrderId')
type = self.safe_string(order, 'type')
side = self.safe_string(order, 'side')
return {
'id': id,
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'fee': fee,
}
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
# we use clientOrderId as the order id with HitBTC intentionally
# because most of their endpoints will require clientOrderId
# explained here: https://github.com/ccxt/ccxt/issues/5674
request = {
'clientOrderId': id,
}
response = self.tradingGetOrder(self.extend(request, params))
if response['orders'][0]:
return self.parse_order(response['orders'][0])
raise OrderNotFound(self.id + ' fetchOrder() error: ' + self.response)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
statuses = ['new', 'partiallyFiiled']
market = None
request = {
'sort': 'desc',
'statuses': ','.join(statuses),
}
if symbol is not None:
market = self.market(symbol)
request['symbols'] = market['id']
response = self.tradingGetOrdersActive(self.extend(request, params))
return self.parse_orders(response['orders'], market, since, limit)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
statuses = ['filled', 'canceled', 'rejected', 'expired']
request = {
'sort': 'desc',
'statuses': ','.join(statuses),
'max_results': 1000,
}
if symbol is not None:
market = self.market(symbol)
request['symbols'] = market['id']
response = self.tradingGetOrdersRecent(self.extend(request, params))
return self.parse_orders(response['orders'], market, since, limit)
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
# we use clientOrderId as the order id with HitBTC intentionally
# because most of their endpoints will require clientOrderId
# explained here: https://github.com/ccxt/ccxt/issues/5674
request = {
'clientOrderId': id,
}
response = self.tradingGetTradesByOrder(self.extend(request, params))
return self.parse_trades(response['trades'], market, since, limit)
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
currency = self.currency(code)
request = {
'currency_code': currency['id'],
'amount': amount,
'address': address,
}
if tag is not None:
request['extra_id'] = tag
response = self.paymentPostPayout(self.extend(request, params))
return {
'info': response,
'id': response['transaction'],
}
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = '/' + 'api' + '/' + self.version + '/' + api + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
nonce = self.nonce()
payload = {'nonce': nonce, 'apikey': self.apiKey}
query = self.extend(payload, query)
if method == 'GET':
url += '?' + self.urlencode(query)
else:
url += '?' + self.urlencode(payload)
auth = url
if method == 'POST':
if query:
body = self.urlencode(query)
auth += body
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'X-Signature': self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha512).lower(),
}
url = self.urls['api'] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = self.fetch2(path, api, method, params, headers, body)
if 'code' in response:
if 'ExecutionReport' in response:
if response['ExecutionReport']['orderRejectReason'] == 'orderExceedsLimit':
raise InsufficientFunds(self.id + ' ' + self.json(response))
raise ExchangeError(self.id + ' ' + self.json(response))
return response
| true | true |
f735b3fb7909a46e6e9bc51be0a8832ba8df5746 | 554 | py | Python | esrt/engine/base_input_feed.py | PTYin/ESRT | 4d3e5c523cef7bd15ea8ce10e5cf8b7e05ad2d5c | [
"BSD-2-Clause"
] | null | null | null | esrt/engine/base_input_feed.py | PTYin/ESRT | 4d3e5c523cef7bd15ea8ce10e5cf8b7e05ad2d5c | [
"BSD-2-Clause"
] | null | null | null | esrt/engine/base_input_feed.py | PTYin/ESRT | 4d3e5c523cef7bd15ea8ce10e5cf8b7e05ad2d5c | [
"BSD-2-Clause"
] | null | null | null | import abc
import typing
class BaseInputFeed(abc.ABC):
"""
"""
def __init__(self, model, batch_size):
self.model = model
self.batch_size = batch_size
@abc.abstractmethod
def get_train_batch(self):
"""Defien a batch feed dictionary the model needs for training, each sub class should
implement this method."""
@abc.abstractmethod
def get_test_batch(self):
"""Defien a batch feed dictionary the model needs for testing, each sub class should
implement this method."""
| 25.181818 | 93 | 0.655235 | import abc
import typing
class BaseInputFeed(abc.ABC):
def __init__(self, model, batch_size):
self.model = model
self.batch_size = batch_size
@abc.abstractmethod
def get_train_batch(self):
@abc.abstractmethod
def get_test_batch(self):
| true | true |
f735b4d20caffd320fd7db03fafe6d0948d2456f | 4,370 | py | Python | conf.py | bcattoor/topohelper-docs | 63af3b365e5e3b7eac59d4c9bc574283c1d2177f | [
"Unlicense"
] | null | null | null | conf.py | bcattoor/topohelper-docs | 63af3b365e5e3b7eac59d4c9bc574283c1d2177f | [
"Unlicense"
] | null | null | null | conf.py | bcattoor/topohelper-docs | 63af3b365e5e3b7eac59d4c9bc574283c1d2177f | [
"Unlicense"
] | 1 | 2020-11-13T20:42:11.000Z | 2020-11-13T20:42:11.000Z | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = "TopoHelper"
copyright = "2020, Bjorn Cattoor"
author = "Bjorn Cattoor"
company = "Infrabel"
# The full version, including alpha/beta/rc tags
release = "1.0"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"sphinx.ext.mathjax",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"myst_parser",
"sphinx_inline_tabs",
"sphinxcontrib.yt",
"sphinx.ext.autosectionlabel",
]
source_suffix = {
".rst": "restructuredtext",
".txt": "markdown",
".md": "markdown",
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# Prefix document path to section labels, to use:
# `path/to/file:heading` instead of just `heading`
# see https://myst-parser.readthedocs.io/en/latest/using/howto.html#automatically-create-targets-for-section-headers
autosectionlabel_prefix_document = True
autosectionlabel_maxdepth = 3
#
# -- Options for extlinks ----------------------------------------------------
#
extlinks = {
"pypi": ("https://pypi.org/project/%s/", ""),
}
#
# -- Options for intersphinx -------------------------------------------------
#
intersphinx_mapping = {
# "python": ("https://docs.python.org/3", None),
# "sphinx": ("https://www.sphinx-doc.org/", None),
}
#
# -- Options for TODOs -------------------------------------------------------
#
todo_include_todos = True
#
# -- Options for Markdown files ----------------------------------------------
#
myst_admonition_enable = True
myst_deflist_enable = True
myst_heading_anchors = 3
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = "nl"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# Change prefered image order
# see: https://stackoverflow.com/a/45970146
# To customize the "best image" order for a given builder, edit your conf.py to
# override the StandaloneHTMLBuilder class with the supported_image_types
# order you prefer.
from sphinx.builders.html import StandaloneHTMLBuilder
StandaloneHTMLBuilder.supported_image_types = [
"image/svg+xml",
"image/gif",
"image/png",
"image/jpeg",
]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "furo"
html_title = "TopoHelper"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_extra_path = ["_html_extra"]
# These paths are either relative to html_static_path
# or fully qualified paths (eg. https://...)
html_css_files = [
"css/custom.css",
]
html_js_files = [
"js/_blank-links.js",
]
# This is a site wide anouncement!
html_theme_options = {
"announcement": (
"Topohelper is in voortdurende ontwikkeling, deze documentatie kan fouten bevatten!"
)
}
| 29.931507 | 116 | 0.653089 |
project = "TopoHelper"
copyright = "2020, Bjorn Cattoor"
author = "Bjorn Cattoor"
company = "Infrabel"
release = "1.0"
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"sphinx.ext.mathjax",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"myst_parser",
"sphinx_inline_tabs",
"sphinxcontrib.yt",
"sphinx.ext.autosectionlabel",
]
source_suffix = {
".rst": "restructuredtext",
".txt": "markdown",
".md": "markdown",
}
templates_path = ["_templates"]
ionlabel_maxdepth = 3
extlinks = {
"pypi": ("https://pypi.org/project/%s/", ""),
}
intersphinx_mapping = {
}
todo_include_todos = True
myst_admonition_enable = True
myst_deflist_enable = True
myst_heading_anchors = 3
language = "nl"
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
from sphinx.builders.html import StandaloneHTMLBuilder
StandaloneHTMLBuilder.supported_image_types = [
"image/svg+xml",
"image/gif",
"image/png",
"image/jpeg",
]
html_theme = "furo"
html_title = "TopoHelper"
html_static_path = ["_static"]
html_extra_path = ["_html_extra"]
html_css_files = [
"css/custom.css",
]
html_js_files = [
"js/_blank-links.js",
]
html_theme_options = {
"announcement": (
"Topohelper is in voortdurende ontwikkeling, deze documentatie kan fouten bevatten!"
)
}
| true | true |
f735b560281cfda6bec6bd167865c4a4301011a2 | 7,811 | py | Python | model/archival_gnns.py | kay-wong/DiscoBERT | 814c741e2a049de3afc489835e0df3ccf9fb4fe9 | [
"MIT"
] | 154 | 2020-04-22T07:14:36.000Z | 2022-03-20T06:16:39.000Z | model/archival_gnns.py | kay-wong/DiscoBERT | 814c741e2a049de3afc489835e0df3ccf9fb4fe9 | [
"MIT"
] | 13 | 2020-05-15T03:12:19.000Z | 2021-08-10T16:08:09.000Z | model/archival_gnns.py | kay-wong/DiscoBERT | 814c741e2a049de3afc489835e0df3ccf9fb4fe9 | [
"MIT"
] | 28 | 2020-04-23T12:54:56.000Z | 2022-02-09T11:48:12.000Z | # Graph Conv and Relational Graph Conv
import itertools
import torch
from typing import List, Union
import dgl
import torch.nn as nn
import torch.nn.functional as F
from allennlp.common import FromParams
from allennlp.common import Registrable
from allennlp.modules.encoder_base import _EncoderBase
from allennlp.modules.feedforward import FeedForward
from allennlp.modules.layer_norm import LayerNorm
from allennlp.modules.masked_layer_norm import MaskedLayerNorm
from overrides import overrides
class GraphEncoder(_EncoderBase, Registrable):
def get_input_dim(self) -> int:
raise NotImplementedError
def get_output_dim(self) -> int:
raise NotImplementedError
def is_bidirectional(self):
raise NotImplementedError
#
def convert_sent_tensors_to_graphs(self, sent, sent_mask, meta_field, key):
batch_size, max_sent_num, hdim = sent.shape
effective_length = torch.sum(sent_mask, dim=1).long().tolist()
graph_bag = []
for b in range(batch_size):
this_sent = sent[b] # max_sent, hdim
this_len = effective_length[b]
graph_seed = meta_field[b][key] # List of tuples
G = dgl.DGLGraph()
G.add_nodes(max_sent_num)
# fc_src = [i for i in range(this_len)] * this_len
# fc_tgt = [[i] * this_len for i in range(this_len)]
# fc_tgt = list(itertools.chain.from_iterable(fc_tgt))
fc_src = [x[0] for x in graph_seed]
fc_tgt = [x[1] for x in graph_seed]
G.add_edges(fc_src, fc_tgt)
G.ndata['h'] = this_sent # every node has the parameter
graph_bag.append(G)
return graph_bag
@GraphEncoder.register("easy_graph_encoder")
class EasyGraph(GraphEncoder, torch.nn.Module, FromParams):
def __init__(self,
input_dim: int,
num_layers: int,
hidden_dims: Union[int, List[int]],
dropout=0.1):
super().__init__()
if not isinstance(hidden_dims, list):
hidden_dims = [hidden_dims] * num_layers
if not isinstance(dropout, list):
dropout = [dropout] * num_layers # type: ignore
self._activations = [torch.nn.functional.relu] * num_layers
input_dims = [input_dim] + hidden_dims[:-1]
linear_layers = []
for layer_input_dim, layer_output_dim in zip(input_dims, hidden_dims):
linear_layers.append(torch.nn.Linear(layer_input_dim, layer_output_dim))
self._linear_layers = torch.nn.ModuleList(linear_layers)
dropout_layers = [torch.nn.Dropout(p=value) for value in dropout]
self._dropout = torch.nn.ModuleList(dropout_layers)
self._output_dim = hidden_dims[-1]
self.lin = torch.nn.Linear(self._output_dim, self._output_dim)
self.ln = MaskedLayerNorm(size=hidden_dims[0])
def transform_sent_rep(self, sent_rep, sent_mask, graphs):
# LayerNorm(x + Sublayer(x))
output = sent_rep
for layer, activation, dropout in zip(self._linear_layers, self._activations, self._dropout):
mid = layer(output) # output: batch, seq, feat
mid = mid.permute(0, 2, 1) # mid: batch, feat, seq
nex = torch.bmm(mid, graphs)
output = dropout(activation(nex))
output = output.permute(0, 2, 1) # mid: batch, seq, feat
middle = sent_rep + self.lin(output)
output = self.ln.forward(middle, sent_mask)
return output
@GraphEncoder.register("old_gcn")
class GCN_layers(GraphEncoder, torch.nn.Module, FromParams):
def __init__(self, input_dims: List[int],
num_layers: int,
hidden_dims: Union[int, List[int]],
activations='relu'):
super(GCN_layers, self).__init__()
if not isinstance(hidden_dims, list):
hidden_dims = [hidden_dims] * num_layers
# TODO remove hard code relu
activations = [torch.nn.functional.tanh] * num_layers
assert len(input_dims) == len(hidden_dims) == len(activations) == num_layers
gcn_layers = []
for layer_input_dim, layer_output_dim, activate in zip(input_dims, hidden_dims, activations):
gcn_layers.append(GCN(layer_input_dim, layer_output_dim, activate))
self.layers = nn.ModuleList(gcn_layers)
self._output_dim = hidden_dims[-1]
self.input_dim = input_dims[0]
self.ln = LayerNorm(hidden_dims[0])
self._mlp = FeedForward(hidden_dims[0], 1, hidden_dims[0], torch.nn.functional.sigmoid)
def transform_sent_rep(self, sent_rep, sent_mask, sent_graph):
init_graphs = self.convert_sent_tensors_to_graphs(sent_rep, sent_mask)
unpadated_graphs = []
for g in init_graphs:
updated_graph = self.forward(g)
unpadated_graphs.append(updated_graph)
recovered_sent = torch.stack(unpadated_graphs, dim=0)
assert recovered_sent.shape == sent_rep.shape
return recovered_sent
def convert_sent_tensors_to_graphs(self, sent, sent_mask):
batch_size, max_sent_num, hdim = sent.shape
effective_length = torch.sum(sent_mask, dim=1).long().tolist()
graph_bag = []
for b in range(batch_size):
this_sent = sent[b] # max_sent, hdim
# this_mask = sent_mask[b]
this_len = effective_length[b]
G = dgl.DGLGraph()
G.add_nodes(max_sent_num)
fc_src = [i for i in range(this_len)] * this_len
fc_tgt = [[i] * this_len for i in range(this_len)]
fc_tgt = list(itertools.chain.from_iterable(fc_tgt))
G.add_edges(fc_src, fc_tgt)
G.ndata['h'] = this_sent # every node has the parameter
graph_bag.append(G)
return graph_bag
@overrides
def forward(self, g):
# h = g.in_degrees().view(-1, 1).float()
h = g.ndata['h']
output = h
for conv in self.layers:
output = conv(g, output)
print(output)
norm_output = self.ln(h + output)
# print(norm_output)
# m = self._mlp(norm_output)
# h = self.ln(norm_output + m)
h = norm_output
g.ndata['h'] = h
hg = dgl.mean_nodes(g, 'h')
# return g, g.ndata['h'], hg # g is the raw graph, h is the node rep, and hg is the mean of all h
return g.ndata['h']
def get_input_dim(self) -> int:
return self.input_dim
def get_output_dim(self) -> int:
return self._output_dim
@overrides
def is_bidirectional(self):
return False
def discourse_oracle(disco_txt, ):
# oracle labels
docs = [disc.get_readable_words_as_list() for disc in disco_bag]
# rewrite the docs to accomodate the dependency
modified_docs_w_deps = []
oracle_inclusion = []
for idx, disco in enumerate(disco_bag):
# tmp_txt, tmp_oracle_inclusion = copy.deepcopy(docs[idx]),[idx]
tmp_txt, tmp_oracle_inclusion = [], []
if disco.dep != []:
for _d in disco.dep:
if _d < len(docs):
tmp_txt += docs[_d]
tmp_oracle_inclusion.append(_d)
tmp_txt += copy.deepcopy(docs[idx])
tmp_oracle_inclusion.append(idx)
modified_docs_w_deps.append(" ".join(tmp_txt))
oracle_inclusion.append(tmp_oracle_inclusion)
else:
modified_docs_w_deps.append(
" ".join(docs[idx])
)
oracle_inclusion.append([idx])
yangliu_label = original_greedy_selection([x.split(" ") for x in modified_docs_w_deps], summary, 5)
# oracle_ids = greedy_selection(modified_docs_w_deps, summary, oracle_size)
return yangliu_labelf
| 38.860697 | 106 | 0.630137 |
import itertools
import torch
from typing import List, Union
import dgl
import torch.nn as nn
import torch.nn.functional as F
from allennlp.common import FromParams
from allennlp.common import Registrable
from allennlp.modules.encoder_base import _EncoderBase
from allennlp.modules.feedforward import FeedForward
from allennlp.modules.layer_norm import LayerNorm
from allennlp.modules.masked_layer_norm import MaskedLayerNorm
from overrides import overrides
class GraphEncoder(_EncoderBase, Registrable):
def get_input_dim(self) -> int:
raise NotImplementedError
def get_output_dim(self) -> int:
raise NotImplementedError
def is_bidirectional(self):
raise NotImplementedError
def convert_sent_tensors_to_graphs(self, sent, sent_mask, meta_field, key):
batch_size, max_sent_num, hdim = sent.shape
effective_length = torch.sum(sent_mask, dim=1).long().tolist()
graph_bag = []
for b in range(batch_size):
this_sent = sent[b]
this_len = effective_length[b]
graph_seed = meta_field[b][key]
G = dgl.DGLGraph()
G.add_nodes(max_sent_num)
fc_src = [x[0] for x in graph_seed]
fc_tgt = [x[1] for x in graph_seed]
G.add_edges(fc_src, fc_tgt)
G.ndata['h'] = this_sent
graph_bag.append(G)
return graph_bag
@GraphEncoder.register("easy_graph_encoder")
class EasyGraph(GraphEncoder, torch.nn.Module, FromParams):
def __init__(self,
input_dim: int,
num_layers: int,
hidden_dims: Union[int, List[int]],
dropout=0.1):
super().__init__()
if not isinstance(hidden_dims, list):
hidden_dims = [hidden_dims] * num_layers
if not isinstance(dropout, list):
dropout = [dropout] * num_layers
self._activations = [torch.nn.functional.relu] * num_layers
input_dims = [input_dim] + hidden_dims[:-1]
linear_layers = []
for layer_input_dim, layer_output_dim in zip(input_dims, hidden_dims):
linear_layers.append(torch.nn.Linear(layer_input_dim, layer_output_dim))
self._linear_layers = torch.nn.ModuleList(linear_layers)
dropout_layers = [torch.nn.Dropout(p=value) for value in dropout]
self._dropout = torch.nn.ModuleList(dropout_layers)
self._output_dim = hidden_dims[-1]
self.lin = torch.nn.Linear(self._output_dim, self._output_dim)
self.ln = MaskedLayerNorm(size=hidden_dims[0])
def transform_sent_rep(self, sent_rep, sent_mask, graphs):
output = sent_rep
for layer, activation, dropout in zip(self._linear_layers, self._activations, self._dropout):
mid = layer(output)
mid = mid.permute(0, 2, 1)
nex = torch.bmm(mid, graphs)
output = dropout(activation(nex))
output = output.permute(0, 2, 1)
middle = sent_rep + self.lin(output)
output = self.ln.forward(middle, sent_mask)
return output
@GraphEncoder.register("old_gcn")
class GCN_layers(GraphEncoder, torch.nn.Module, FromParams):
def __init__(self, input_dims: List[int],
num_layers: int,
hidden_dims: Union[int, List[int]],
activations='relu'):
super(GCN_layers, self).__init__()
if not isinstance(hidden_dims, list):
hidden_dims = [hidden_dims] * num_layers
activations = [torch.nn.functional.tanh] * num_layers
assert len(input_dims) == len(hidden_dims) == len(activations) == num_layers
gcn_layers = []
for layer_input_dim, layer_output_dim, activate in zip(input_dims, hidden_dims, activations):
gcn_layers.append(GCN(layer_input_dim, layer_output_dim, activate))
self.layers = nn.ModuleList(gcn_layers)
self._output_dim = hidden_dims[-1]
self.input_dim = input_dims[0]
self.ln = LayerNorm(hidden_dims[0])
self._mlp = FeedForward(hidden_dims[0], 1, hidden_dims[0], torch.nn.functional.sigmoid)
def transform_sent_rep(self, sent_rep, sent_mask, sent_graph):
init_graphs = self.convert_sent_tensors_to_graphs(sent_rep, sent_mask)
unpadated_graphs = []
for g in init_graphs:
updated_graph = self.forward(g)
unpadated_graphs.append(updated_graph)
recovered_sent = torch.stack(unpadated_graphs, dim=0)
assert recovered_sent.shape == sent_rep.shape
return recovered_sent
def convert_sent_tensors_to_graphs(self, sent, sent_mask):
batch_size, max_sent_num, hdim = sent.shape
effective_length = torch.sum(sent_mask, dim=1).long().tolist()
graph_bag = []
for b in range(batch_size):
this_sent = sent[b]
this_len = effective_length[b]
G = dgl.DGLGraph()
G.add_nodes(max_sent_num)
fc_src = [i for i in range(this_len)] * this_len
fc_tgt = [[i] * this_len for i in range(this_len)]
fc_tgt = list(itertools.chain.from_iterable(fc_tgt))
G.add_edges(fc_src, fc_tgt)
G.ndata['h'] = this_sent
graph_bag.append(G)
return graph_bag
@overrides
def forward(self, g):
h = g.ndata['h']
output = h
for conv in self.layers:
output = conv(g, output)
print(output)
norm_output = self.ln(h + output)
h = norm_output
g.ndata['h'] = h
hg = dgl.mean_nodes(g, 'h')
return self.input_dim
def get_output_dim(self) -> int:
return self._output_dim
@overrides
def is_bidirectional(self):
return False
def discourse_oracle(disco_txt, ):
docs = [disc.get_readable_words_as_list() for disc in disco_bag]
modified_docs_w_deps = []
oracle_inclusion = []
for idx, disco in enumerate(disco_bag):
tmp_txt, tmp_oracle_inclusion = [], []
if disco.dep != []:
for _d in disco.dep:
if _d < len(docs):
tmp_txt += docs[_d]
tmp_oracle_inclusion.append(_d)
tmp_txt += copy.deepcopy(docs[idx])
tmp_oracle_inclusion.append(idx)
modified_docs_w_deps.append(" ".join(tmp_txt))
oracle_inclusion.append(tmp_oracle_inclusion)
else:
modified_docs_w_deps.append(
" ".join(docs[idx])
)
oracle_inclusion.append([idx])
yangliu_label = original_greedy_selection([x.split(" ") for x in modified_docs_w_deps], summary, 5)
return yangliu_labelf
| true | true |
f735b5aac888d7a0bb6d2eff2c7545d190c58ac9 | 377 | py | Python | Python/Syntax/Else.py | piovezan/SOpt | a5ec90796b7bdf98f0675457fc4bb99c8695bc40 | [
"MIT"
] | 148 | 2017-08-03T01:49:27.000Z | 2022-03-26T10:39:30.000Z | Python/Syntax/Else.py | piovezan/SOpt | a5ec90796b7bdf98f0675457fc4bb99c8695bc40 | [
"MIT"
] | 3 | 2017-11-23T19:52:05.000Z | 2020-04-01T00:44:40.000Z | Python/Syntax/Else.py | piovezan/SOpt | a5ec90796b7bdf98f0675457fc4bb99c8695bc40 | [
"MIT"
] | 59 | 2017-08-03T01:49:19.000Z | 2022-03-31T23:24:38.000Z | if carro == 'Peugeot':
c = 50
elif carro == 'C3':
c = 60
elif carro == 'Cruze':
c = 70
elif carro == 'CRV':
c = 75
else:
print('O carro digitado não está cadastrado no nosso sistema. Verifique se está digitado corretamente ou comunique a empresa.')
return #ou pode usar sys.exit() se quiser acabar de vez
#https://pt.stackoverflow.com/q/302129/101
| 26.928571 | 131 | 0.65252 | if carro == 'Peugeot':
c = 50
elif carro == 'C3':
c = 60
elif carro == 'Cruze':
c = 70
elif carro == 'CRV':
c = 75
else:
print('O carro digitado não está cadastrado no nosso sistema. Verifique se está digitado corretamente ou comunique a empresa.')
return
| true | true |
f735b6a365a72a09f1f4baa82e51348f1b5803fc | 1,929 | py | Python | openprescribing/dmd/gen_models/gen_models.py | annapowellsmith/openpresc | cfa9fb07d6fc2ee304159c04fcc132cefcf78745 | [
"MIT"
] | 91 | 2015-10-14T09:10:32.000Z | 2022-03-10T22:09:21.000Z | openprescribing/dmd/gen_models/gen_models.py | annapowellsmith/openpresc | cfa9fb07d6fc2ee304159c04fcc132cefcf78745 | [
"MIT"
] | 1,828 | 2015-12-04T14:52:27.000Z | 2022-03-31T08:51:14.000Z | openprescribing/dmd/gen_models/gen_models.py | HDRUK/openprescribing | 510e8c07e841cd42284c109774d1730b6463f376 | [
"MIT"
] | 27 | 2015-12-03T18:26:56.000Z | 2021-01-09T21:58:53.000Z | # flake8: noqa
import csv
def model_name(table_name):
if table_name in ["vtm", "vpi", "vmp", "vmpp", "amp", "ampp", "gtin"]:
return table_name.upper()
else:
return "".join(tok.title() for tok in table_name.split("_"))
def quote(s):
assert '"' not in s
return '"' + s + '"'
with open("schema.csv") as f:
lines = list(csv.DictReader(f))
print("from django.db import models")
table = None
for line in lines:
if line["table"] == "ccontent":
continue
if line["table"] != table:
table = line["table"]
print()
print()
print(f"class {model_name(table)}(models.Model):")
print("# class Meta:")
print('# verbose_name = "TODO"')
print()
if line["type"] == "retired":
continue
options = []
if line["primary_key"] == "True":
options.append(("primary_key", "True"))
if line["db_column"]:
options.append(("db_column", quote(line["db_column"])))
if line["type"] in ["ForeignKey", "OneToOneField"]:
options.append(("to", quote(model_name(line["to"]))))
options.append(("on_delete", "models.CASCADE"))
if "prevcd" in line["db_column"] or "uomcd" in line["db_column"]:
options.append(("related_name", quote("+")))
elif line["type"] == "CharField":
options.append(("max_length", line["max_length"]))
elif line["type"] == "DecimalField":
options.append(("max_digits", line["max_digits"]))
options.append(("decimal_places", line["decimal_places"]))
if line["optional"] == "Y":
if line["type"] != "BooleanField" and line["primary_key"] != "True":
options.append(("null", "True"))
options.append(("help_text", quote(line["descr"])))
print(f' {line["field"]} = models.{line["type"]}(')
for k, v in options:
print(f" {k}={v},")
print(" )")
| 25.72 | 76 | 0.553136 |
import csv
def model_name(table_name):
if table_name in ["vtm", "vpi", "vmp", "vmpp", "amp", "ampp", "gtin"]:
return table_name.upper()
else:
return "".join(tok.title() for tok in table_name.split("_"))
def quote(s):
assert '"' not in s
return '"' + s + '"'
with open("schema.csv") as f:
lines = list(csv.DictReader(f))
print("from django.db import models")
table = None
for line in lines:
if line["table"] == "ccontent":
continue
if line["table"] != table:
table = line["table"]
print()
print()
print(f"class {model_name(table)}(models.Model):")
print("
print('# verbose_name = "TODO"')
print()
if line["type"] == "retired":
continue
options = []
if line["primary_key"] == "True":
options.append(("primary_key", "True"))
if line["db_column"]:
options.append(("db_column", quote(line["db_column"])))
if line["type"] in ["ForeignKey", "OneToOneField"]:
options.append(("to", quote(model_name(line["to"]))))
options.append(("on_delete", "models.CASCADE"))
if "prevcd" in line["db_column"] or "uomcd" in line["db_column"]:
options.append(("related_name", quote("+")))
elif line["type"] == "CharField":
options.append(("max_length", line["max_length"]))
elif line["type"] == "DecimalField":
options.append(("max_digits", line["max_digits"]))
options.append(("decimal_places", line["decimal_places"]))
if line["optional"] == "Y":
if line["type"] != "BooleanField" and line["primary_key"] != "True":
options.append(("null", "True"))
options.append(("help_text", quote(line["descr"])))
print(f' {line["field"]} = models.{line["type"]}(')
for k, v in options:
print(f" {k}={v},")
print(" )")
| true | true |
f735b84c5f4f7ab12673110257de977b99be7f9f | 1,280 | py | Python | programmers/lv2_review/lv_2_19.py | mrbartrns/swacademy_structure | 778f0546030385237c383d81ec37d5bd9ed1272d | [
"MIT"
] | null | null | null | programmers/lv2_review/lv_2_19.py | mrbartrns/swacademy_structure | 778f0546030385237c383d81ec37d5bd9ed1272d | [
"MIT"
] | null | null | null | programmers/lv2_review/lv_2_19.py | mrbartrns/swacademy_structure | 778f0546030385237c383d81ec37d5bd9ed1272d | [
"MIT"
] | null | null | null | # 위장
"""
def solution(clothes):
def dfs_left(s, idx):
if idx == half:
left.append(s)
return
dfs_left(s, idx + 1)
dfs_left(s * values[idx], idx + 1)
def dfs_right(s, idx):
if idx == len(values):
right.append(s)
return
dfs_right(s, idx + 1)
dfs_right(s * values[idx], idx + 1)
clothes_dic = {}
for _, v in clothes:
clothes_dic[v] = clothes_dic.get(v, 0) + 1
values = list(clothes_dic.values())
half = len(values) // 2
left, right = [], []
dfs_left(1, 0)
dfs_right(1, half)
answer = sum(left) + sum(right) - 2
for y in range(1, len(left)):
s = 0
for x in range(1, len(right)):
s += left[y] * right[x]
answer += s
return answer
"""
def solution(clothes):
categories = {}
for _, c in clothes:
if c not in categories:
categories[c] = 2
else:
categories[c] += 1
cnt = 1
values = list(categories.values())
for i in range(len(values)):
cnt *= values[i]
cnt -= 1
return cnt
clothes = [
["yellow_hat", "headgear"],
["blue_sunglasses", "eyewear"],
["green_turban", "headgear"],
]
print(solution(clothes)) | 20.645161 | 50 | 0.508594 |
def solution(clothes):
categories = {}
for _, c in clothes:
if c not in categories:
categories[c] = 2
else:
categories[c] += 1
cnt = 1
values = list(categories.values())
for i in range(len(values)):
cnt *= values[i]
cnt -= 1
return cnt
clothes = [
["yellow_hat", "headgear"],
["blue_sunglasses", "eyewear"],
["green_turban", "headgear"],
]
print(solution(clothes)) | true | true |
f735b8c878d53733e89af5109438eaed85f247f8 | 3,668 | py | Python | pyFAI/utils/logging_utils.py | yugangzhang/pyFAI | e0453b279dac1f165f637e2a2ed1d4ddf57d31ba | [
"MIT"
] | 1 | 2021-04-28T20:09:13.000Z | 2021-04-28T20:09:13.000Z | pyFAI/utils/logging_utils.py | yugangzhang/pyFAI | e0453b279dac1f165f637e2a2ed1d4ddf57d31ba | [
"MIT"
] | null | null | null | pyFAI/utils/logging_utils.py | yugangzhang/pyFAI | e0453b279dac1f165f637e2a2ed1d4ddf57d31ba | [
"MIT"
] | null | null | null | # !/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Azimuthal integration
# https://github.com/silx-kit/pyFAI
#
# Copyright (C) 2019 European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""This modules contains helper function relative to logging system.
"""
from __future__ import division, print_function
__author__ = "Valentin Valls"
__contact__ = "valentin.valls@esrf.eu"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "25/02/2019"
import logging
import contextlib
class PrePostEmitStreamHandler(logging.Handler):
"""Handler to allow to hook a function before and after the emit function.
The main logging feature is delegated to a sub handler.
"""
def __init__(self, handler):
self._handler = handler
def emit(self, record):
"""
Call pre_emit function then delegate the emit to the sub handler.
:type record: logging.LogRecord
"""
self.pre_emit()
self._handler.emit(record)
self.post_emit()
def __getattr__(self, attr):
"""Reach the attribute from the sub handler and cache it to the current
object"""
value = getattr(self._handler, attr)
setattr(self, attr, value)
return value
def pre_emit(self):
pass
def post_emit(self):
pass
def set_prepost_emit_callback(logger, pre_callback, post_callback):
"""Patch the logging system to have a working progress bar without glitch.
pyFAI define a default handler then we have to rework it
:return: The new handler
"""
# assume there is a logger
assert(len(logger.handlers) == 1)
previous_handler = logger.handlers[0]
logger.removeHandler(previous_handler)
# use our custom handler
handler = PrePostEmitStreamHandler(previous_handler)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
if pre_callback:
handler.pre_emit = pre_callback
if post_callback:
handler.post_emit = post_callback
return handler
@contextlib.contextmanager
def prepost_emit_callback(logger, pre_callback, post_callback):
"""Context manager to add pre/post emit callback to a logger"""
patched_handler = set_prepost_emit_callback(logger, pre_callback, post_callback)
yield
previous_handler = patched_handler._handler
logger.removeHandler(patched_handler)
# use the previous handler
logger.addHandler(previous_handler)
logger.setLevel(logging.INFO)
| 33.651376 | 84 | 0.724918 |
from __future__ import division, print_function
__author__ = "Valentin Valls"
__contact__ = "valentin.valls@esrf.eu"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "25/02/2019"
import logging
import contextlib
class PrePostEmitStreamHandler(logging.Handler):
def __init__(self, handler):
self._handler = handler
def emit(self, record):
self.pre_emit()
self._handler.emit(record)
self.post_emit()
def __getattr__(self, attr):
value = getattr(self._handler, attr)
setattr(self, attr, value)
return value
def pre_emit(self):
pass
def post_emit(self):
pass
def set_prepost_emit_callback(logger, pre_callback, post_callback):
assert(len(logger.handlers) == 1)
previous_handler = logger.handlers[0]
logger.removeHandler(previous_handler)
handler = PrePostEmitStreamHandler(previous_handler)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
if pre_callback:
handler.pre_emit = pre_callback
if post_callback:
handler.post_emit = post_callback
return handler
@contextlib.contextmanager
def prepost_emit_callback(logger, pre_callback, post_callback):
patched_handler = set_prepost_emit_callback(logger, pre_callback, post_callback)
yield
previous_handler = patched_handler._handler
logger.removeHandler(patched_handler)
logger.addHandler(previous_handler)
logger.setLevel(logging.INFO)
| true | true |
f735b8da17d0e01f03e8d45e3b111c70242b92d1 | 89 | py | Python | drivers/apps.py | loribonna/EsameLDPython | 02f671d0813e4e3cfed5a977018ab295b8675d60 | [
"MIT"
] | 1 | 2021-03-11T04:48:21.000Z | 2021-03-11T04:48:21.000Z | drivers/apps.py | loribonna/EsameLDPython | 02f671d0813e4e3cfed5a977018ab295b8675d60 | [
"MIT"
] | 18 | 2019-08-29T12:42:11.000Z | 2022-02-12T13:04:58.000Z | drivers/apps.py | loribonna/EsameLDPython | 02f671d0813e4e3cfed5a977018ab295b8675d60 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class DriversConfig(AppConfig):
name = 'drivers'
| 14.833333 | 33 | 0.752809 | from django.apps import AppConfig
class DriversConfig(AppConfig):
name = 'drivers'
| true | true |
f735b9971416c7293b52ba2809611609a9c22da8 | 133 | py | Python | app/task/__init__.py | Darasimi-Ajewole/PdfConv | c19d84c549a0ff7183e0338fbe6fcdf1ff2420a9 | [
"MIT"
] | 3 | 2019-09-01T18:55:50.000Z | 2022-01-12T22:38:44.000Z | app/task/__init__.py | Darasimi-Ajewole/PdfConv | c19d84c549a0ff7183e0338fbe6fcdf1ff2420a9 | [
"MIT"
] | 9 | 2020-02-12T00:21:55.000Z | 2021-09-03T22:52:24.000Z | app/task/__init__.py | Darasimi-Ajewole/PdfConv | c19d84c549a0ff7183e0338fbe6fcdf1ff2420a9 | [
"MIT"
] | null | null | null | from flask_restx import Namespace
task_api = Namespace(
'task',
description='Internal APIs for background task processing')
| 22.166667 | 63 | 0.759398 | from flask_restx import Namespace
task_api = Namespace(
'task',
description='Internal APIs for background task processing')
| true | true |
f735b9e82a29b0bb1f4c955b4280cff1a8e38ecd | 1,386 | py | Python | neutron/extensions/subnetallocation.py | BobzhouCH/neutron-nfv-acc | 05ab9d2079f2b652a4df771c1d6d9ce3b822aaa0 | [
"Apache-2.0"
] | 1 | 2016-03-25T21:13:13.000Z | 2016-03-25T21:13:13.000Z | neutron/extensions/subnetallocation.py | BobzhouCH/neutron-nfv-acc | 05ab9d2079f2b652a4df771c1d6d9ce3b822aaa0 | [
"Apache-2.0"
] | 1 | 2021-03-21T11:39:22.000Z | 2021-03-21T11:39:22.000Z | neutron/extensions/subnetallocation.py | BobzhouCH/neutron-nfv-acc | 05ab9d2079f2b652a4df771c1d6d9ce3b822aaa0 | [
"Apache-2.0"
] | 1 | 2021-03-21T11:37:18.000Z | 2021-03-21T11:37:18.000Z | # Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.api import extensions
from neutron.common import constants
class Subnetallocation(extensions.ExtensionDescriptor):
"""Extension class supporting subnet allocation."""
@classmethod
def get_name(cls):
return "Subnet Allocation"
@classmethod
def get_alias(cls):
return constants.SUBNET_ALLOCATION_EXT_ALIAS
@classmethod
def get_description(cls):
return "Enables allocation of subnets from a subnet pool"
@classmethod
def get_updated(cls):
return "2015-03-30T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
return []
def get_extended_resources(self, version):
return {}
| 30.130435 | 78 | 0.704906 |
from neutron.api import extensions
from neutron.common import constants
class Subnetallocation(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "Subnet Allocation"
@classmethod
def get_alias(cls):
return constants.SUBNET_ALLOCATION_EXT_ALIAS
@classmethod
def get_description(cls):
return "Enables allocation of subnets from a subnet pool"
@classmethod
def get_updated(cls):
return "2015-03-30T10:00:00-00:00"
@classmethod
def get_resources(cls):
return []
def get_extended_resources(self, version):
return {}
| true | true |
f735b9ec063ea54e221a7beeb79cbfe540054145 | 640 | py | Python | project/adapter/models.py | vicinityh2020/vicinity-vas-energy-monitoring | 1ffb51868abcc1b5b52ef27a056ba278cd0f7f65 | [
"MIT"
] | null | null | null | project/adapter/models.py | vicinityh2020/vicinity-vas-energy-monitoring | 1ffb51868abcc1b5b52ef27a056ba278cd0f7f65 | [
"MIT"
] | 3 | 2020-06-05T19:06:48.000Z | 2021-06-10T20:58:13.000Z | project/adapter/models.py | vicinityh2020/vicinity-vas-energy-monitoring | 1ffb51868abcc1b5b52ef27a056ba278cd0f7f65 | [
"MIT"
] | null | null | null | from django.db import models
class Appliance(models.Model):
name = models.CharField(max_length=100, null=False)
oid = models.UUIDField(unique=True)
pid = models.CharField(max_length=50)
appliance_type = models.CharField(max_length=30)
def __str__(self):
return self.name
class Sensor(models.Model):
oid = models.UUIDField(unique=True)
unit = models.CharField(max_length=50)
element = models.CharField(max_length=50)
pid = models.CharField(max_length=50)
monitors = models.CharField(max_length=1, choices=(("W", "water"), ("P", "power")))
def __str__(self):
return self.unit
| 27.826087 | 87 | 0.692188 | from django.db import models
class Appliance(models.Model):
name = models.CharField(max_length=100, null=False)
oid = models.UUIDField(unique=True)
pid = models.CharField(max_length=50)
appliance_type = models.CharField(max_length=30)
def __str__(self):
return self.name
class Sensor(models.Model):
oid = models.UUIDField(unique=True)
unit = models.CharField(max_length=50)
element = models.CharField(max_length=50)
pid = models.CharField(max_length=50)
monitors = models.CharField(max_length=1, choices=(("W", "water"), ("P", "power")))
def __str__(self):
return self.unit
| true | true |
f735bb1bf8e966d1381c9f54b56aa8880934fa90 | 735 | py | Python | tests/test_battery.py | bexxmodd/vizex | c7be2bc7fcc98c43457248fb13748c43fc49392d | [
"MIT"
] | 195 | 2020-09-05T17:33:18.000Z | 2022-03-20T11:42:22.000Z | tests/test_battery.py | bexxmodd/vizex | c7be2bc7fcc98c43457248fb13748c43fc49392d | [
"MIT"
] | 23 | 2020-09-14T12:10:14.000Z | 2022-03-25T07:49:08.000Z | tests/test_battery.py | bexxmodd/vizex | c7be2bc7fcc98c43457248fb13748c43fc49392d | [
"MIT"
] | 16 | 2020-09-15T18:03:51.000Z | 2022-02-02T17:02:25.000Z | # add path to the main package and test battery.py
if __name__ == '__main__':
from __access import ADD_PATH
ADD_PATH()
import unittest
import psutil
from battery import Battery
class TestBattery(unittest.TestCase):
""" Test battry module """
def test_Battery_constructor(self):
if not (has_battery := psutil.sensors_battery()):
with self.assertRaises(Exception):
Battery()
else:
self.assertTrue(has_battery.percent > 0)
def test_create_details_text(self):
if not psutil.sensors_battery():
pass
else:
self.assertTrue(isinstance(Batter().create_details_text(), str))
if __name__ == '__main__':
unittest.main() | 24.5 | 76 | 0.64898 |
if __name__ == '__main__':
from __access import ADD_PATH
ADD_PATH()
import unittest
import psutil
from battery import Battery
class TestBattery(unittest.TestCase):
def test_Battery_constructor(self):
if not (has_battery := psutil.sensors_battery()):
with self.assertRaises(Exception):
Battery()
else:
self.assertTrue(has_battery.percent > 0)
def test_create_details_text(self):
if not psutil.sensors_battery():
pass
else:
self.assertTrue(isinstance(Batter().create_details_text(), str))
if __name__ == '__main__':
unittest.main() | true | true |
f735bb779c324b1d4bece56bb0a8b5f13cd9cd30 | 2,311 | py | Python | epytope/Data/pssms/smmpmbec/mat/A_01_01_9.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 7 | 2021-02-01T18:11:28.000Z | 2022-01-31T19:14:07.000Z | epytope/Data/pssms/smmpmbec/mat/A_01_01_9.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 22 | 2021-01-02T15:25:23.000Z | 2022-03-14T11:32:53.000Z | epytope/Data/pssms/smmpmbec/mat/A_01_01_9.py | christopher-mohr/epytope | 8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd | [
"BSD-3-Clause"
] | 4 | 2021-05-28T08:50:38.000Z | 2022-03-14T11:45:32.000Z | A_01_01_9 = {0: {'A': 0.202, 'C': -0.151, 'E': 0.177, 'D': 0.207, 'G': 0.239, 'F': -0.334, 'I': 0.083, 'H': -0.029, 'K': 0.205, 'M': -0.211, 'L': -0.218, 'N': -0.065, 'Q': 0.071, 'P': 0.066, 'S': 0.162, 'R': 0.23, 'T': 0.071, 'W': -0.317, 'V': -0.036, 'Y': -0.353}, 1: {'A': -0.302, 'C': 0.137, 'E': 0.387, 'D': 0.126, 'G': 0.091, 'F': -0.015, 'I': -0.254, 'H': 0.389, 'K': 0.666, 'M': -0.249, 'L': -0.151, 'N': 0.26, 'Q': 0.153, 'P': 0.373, 'S': -0.927, 'R': 0.3, 'T': -0.995, 'W': 0.078, 'V': -0.269, 'Y': 0.203}, 2: {'A': -0.12, 'C': -0.166, 'E': -0.413, 'D': -0.981, 'G': 0.091, 'F': 0.172, 'I': 0.035, 'H': 0.053, 'K': 0.435, 'M': -0.048, 'L': 0.265, 'N': -0.158, 'Q': -0.011, 'P': 0.307, 'S': -0.222, 'R': 0.279, 'T': 0.047, 'W': 0.149, 'V': 0.237, 'Y': 0.048}, 3: {'A': -0.074, 'C': -0.035, 'E': -0.01, 'D': -0.088, 'G': 0.143, 'F': -0.026, 'I': 0.001, 'H': 0.052, 'K': 0.164, 'M': -0.165, 'L': -0.048, 'N': -0.038, 'Q': 0.045, 'P': 0.082, 'S': -0.111, 'R': 0.071, 'T': -0.064, 'W': -0.006, 'V': 0.018, 'Y': 0.087}, 4: {'A': 0.218, 'C': 0.028, 'E': 0.083, 'D': 0.176, 'G': 0.071, 'F': -0.147, 'I': -0.06, 'H': -0.049, 'K': 0.068, 'M': -0.079, 'L': -0.132, 'N': -0.024, 'Q': -0.022, 'P': 0.281, 'S': -0.021, 'R': 0.11, 'T': -0.023, 'W': -0.208, 'V': -0.034, 'Y': -0.235}, 5: {'A': 0.065, 'C': 0.035, 'E': 0.101, 'D': -0.012, 'G': 0.009, 'F': -0.083, 'I': 0.096, 'H': -0.059, 'K': 0.254, 'M': -0.049, 'L': -0.007, 'N': -0.125, 'Q': -0.048, 'P': 0.008, 'S': -0.021, 'R': 0.126, 'T': -0.164, 'W': -0.106, 'V': 0.002, 'Y': -0.021}, 6: {'A': 0.13, 'C': -0.156, 'E': -0.014, 'D': 0.046, 'G': 0.095, 'F': -0.134, 'I': -0.096, 'H': -0.124, 'K': 0.371, 'M': -0.157, 'L': -0.35, 'N': -0.073, 'Q': -0.042, 'P': 0.268, 'S': -0.018, 'R': 0.451, 'T': -0.026, 'W': -0.099, 'V': -0.01, 'Y': -0.062}, 7: {'A': -0.127, 'C': 0.016, 'E': 0.037, 'D': 0.083, 'G': 0.074, 'F': -0.226, 'I': -0.026, 'H': 0.07, 'K': 0.149, 'M': -0.136, 'L': -0.3, 'N': 
0.223, 'Q': 0.248, 'P': 0.028, 'S': 0.07, 'R': -0.009, 'T': 0.068, 'W': 0.051, 'V': 0.008, 'Y': -0.301}, 8: {'A': -0.341, 'C': -0.012, 'E': 0.355, 'D': 0.478, 'G': 0.21, 'F': -0.504, 'I': -0.276, 'H': 0.158, 'K': 0.028, 'M': -0.247, 'L': -0.256, 'N': 0.28, 'Q': 0.652, 'P': 0.775, 'S': 0.245, 'R': 0.188, 'T': 0.123, 'W': -0.089, 'V': -0.316, 'Y': -1.451}, -1: {'con': 5.08242}} | 2,311 | 2,311 | 0.394202 | A_01_01_9 = {0: {'A': 0.202, 'C': -0.151, 'E': 0.177, 'D': 0.207, 'G': 0.239, 'F': -0.334, 'I': 0.083, 'H': -0.029, 'K': 0.205, 'M': -0.211, 'L': -0.218, 'N': -0.065, 'Q': 0.071, 'P': 0.066, 'S': 0.162, 'R': 0.23, 'T': 0.071, 'W': -0.317, 'V': -0.036, 'Y': -0.353}, 1: {'A': -0.302, 'C': 0.137, 'E': 0.387, 'D': 0.126, 'G': 0.091, 'F': -0.015, 'I': -0.254, 'H': 0.389, 'K': 0.666, 'M': -0.249, 'L': -0.151, 'N': 0.26, 'Q': 0.153, 'P': 0.373, 'S': -0.927, 'R': 0.3, 'T': -0.995, 'W': 0.078, 'V': -0.269, 'Y': 0.203}, 2: {'A': -0.12, 'C': -0.166, 'E': -0.413, 'D': -0.981, 'G': 0.091, 'F': 0.172, 'I': 0.035, 'H': 0.053, 'K': 0.435, 'M': -0.048, 'L': 0.265, 'N': -0.158, 'Q': -0.011, 'P': 0.307, 'S': -0.222, 'R': 0.279, 'T': 0.047, 'W': 0.149, 'V': 0.237, 'Y': 0.048}, 3: {'A': -0.074, 'C': -0.035, 'E': -0.01, 'D': -0.088, 'G': 0.143, 'F': -0.026, 'I': 0.001, 'H': 0.052, 'K': 0.164, 'M': -0.165, 'L': -0.048, 'N': -0.038, 'Q': 0.045, 'P': 0.082, 'S': -0.111, 'R': 0.071, 'T': -0.064, 'W': -0.006, 'V': 0.018, 'Y': 0.087}, 4: {'A': 0.218, 'C': 0.028, 'E': 0.083, 'D': 0.176, 'G': 0.071, 'F': -0.147, 'I': -0.06, 'H': -0.049, 'K': 0.068, 'M': -0.079, 'L': -0.132, 'N': -0.024, 'Q': -0.022, 'P': 0.281, 'S': -0.021, 'R': 0.11, 'T': -0.023, 'W': -0.208, 'V': -0.034, 'Y': -0.235}, 5: {'A': 0.065, 'C': 0.035, 'E': 0.101, 'D': -0.012, 'G': 0.009, 'F': -0.083, 'I': 0.096, 'H': -0.059, 'K': 0.254, 'M': -0.049, 'L': -0.007, 'N': -0.125, 'Q': -0.048, 'P': 0.008, 'S': -0.021, 'R': 0.126, 'T': -0.164, 'W': -0.106, 'V': 0.002, 'Y': -0.021}, 6: {'A': 0.13, 'C': -0.156, 'E': -0.014, 'D': 0.046, 'G': 
0.095, 'F': -0.134, 'I': -0.096, 'H': -0.124, 'K': 0.371, 'M': -0.157, 'L': -0.35, 'N': -0.073, 'Q': -0.042, 'P': 0.268, 'S': -0.018, 'R': 0.451, 'T': -0.026, 'W': -0.099, 'V': -0.01, 'Y': -0.062}, 7: {'A': -0.127, 'C': 0.016, 'E': 0.037, 'D': 0.083, 'G': 0.074, 'F': -0.226, 'I': -0.026, 'H': 0.07, 'K': 0.149, 'M': -0.136, 'L': -0.3, 'N': 0.223, 'Q': 0.248, 'P': 0.028, 'S': 0.07, 'R': -0.009, 'T': 0.068, 'W': 0.051, 'V': 0.008, 'Y': -0.301}, 8: {'A': -0.341, 'C': -0.012, 'E': 0.355, 'D': 0.478, 'G': 0.21, 'F': -0.504, 'I': -0.276, 'H': 0.158, 'K': 0.028, 'M': -0.247, 'L': -0.256, 'N': 0.28, 'Q': 0.652, 'P': 0.775, 'S': 0.245, 'R': 0.188, 'T': 0.123, 'W': -0.089, 'V': -0.316, 'Y': -1.451}, -1: {'con': 5.08242}} | true | true |
f735bc19fe5b97a9a42d0e2c6be1f0fdfde91374 | 2,204 | py | Python | clue-nrf52/apds9960-testing/v2/clue-testing/config_regs_apds.py | fivesixzero/circuitpython-experiments | d1ddb79357aa7c8ed06c783403dafa59b3e4fe58 | [
"MIT"
] | 3 | 2021-11-12T01:37:54.000Z | 2021-12-19T23:43:44.000Z | clue-nrf52/apds9960-testing/v2/clue-testing/config_regs_apds.py | fivesixzero/circuitpython-experiments | d1ddb79357aa7c8ed06c783403dafa59b3e4fe58 | [
"MIT"
] | null | null | null | clue-nrf52/apds9960-testing/v2/clue-testing/config_regs_apds.py | fivesixzero/circuitpython-experiments | d1ddb79357aa7c8ed06c783403dafa59b3e4fe58 | [
"MIT"
] | null | null | null | from adafruit_bus_device.i2c_device import I2CDevice
from adafruit_apds9960.apds9960 import APDS9960
try:
# Only used for typing
from typing import Dict
except ImportError:
pass
class ConfigRegsAPDS:
def __init__(self, *, apds: APDS9960=None, i2c_bus=None):
if not apds:
if not i2c_bus:
import board
i2c = board.I2C()
self.i2c_device = I2CDevice(i2c, 0x39)
else:
self.i2c_device = apds.i2c_device
config_regs = {
"_APDS9960_ENABLE": 0x80,
"_APDS9960_ATIME": 0x81,
"_APDS9960_WTIME": 0x83,
"_APDS9960_AILTIL": 0x84,
"_APDS9960_AILTH": 0x85,
"_APDS9960_AIHTL": 0x86,
"_APDS9960_AIHTH": 0x87,
"_APDS9960_PILT": 0x89,
"_APDS9960_PIHT": 0x8B,
"_APDS9960_PERS": 0x8C,
"_APDS9960_CONFIG1": 0x8D,
"_APDS9960_PPULSE": 0x8E,
"_APDS9960_CONTROL": 0x8F,
"_APDS9960_CONFIG2": 0x90,
"_APDS9960_STATUS": 0x93,
"_APDS9960_POFFSET_UR": 0x9D,
"_APDS9960_POFFSET_DL": 0x9E,
"_APDS9960_CONFIG3": 0x9F,
"_APDS9960_GPENTH": 0xA0,
"_APDS9960_GEXTH": 0xA1,
"_APDS9960_GCONF1": 0xA2,
"_APDS9960_GCONF2": 0xA3,
"_APDS9960_GOFFSET_U": 0xA4,
"_APDS9960_GOFFSET_D": 0xA5,
"_APDS9960_GOFFSET_L": 0xA7,
"_APDS9960_GOFFSET_R": 0xA9,
"_APDS9960_GPULSE": 0xA6,
"_APDS9960_GCONF3": 0xAA,
"_APDS9960_GCONF4": 0xAB,
"_APDS9960_GFLVL": 0xAE,
"_APDS9960_GSTATUS": 0xAF,
}
def sorted_reg_dict(self) -> Dict[str, int]:
return sorted(self.config_regs, key=self.config_regs.get)
def print_reg_states(self) -> None:
buf2 = bytearray(2)
for key in self.sorted_reg_dict():
reg_val = self._read8(buf2, self.config_regs[key])
print(" {0:22} 0x{1:02X} | 0x{2:02X} | b{2:08b} | {2:3d}".format(key, self.config_regs[key], reg_val))
def _read8(self, buf: bytearray, addr: int) -> int:
buf[0] = addr
with self.i2c_device as i2c:
i2c.write_then_readinto(buf, buf, out_end=1, in_end=1)
return buf[0] | 32.411765 | 114 | 0.602995 | from adafruit_bus_device.i2c_device import I2CDevice
from adafruit_apds9960.apds9960 import APDS9960
try:
from typing import Dict
except ImportError:
pass
class ConfigRegsAPDS:
def __init__(self, *, apds: APDS9960=None, i2c_bus=None):
if not apds:
if not i2c_bus:
import board
i2c = board.I2C()
self.i2c_device = I2CDevice(i2c, 0x39)
else:
self.i2c_device = apds.i2c_device
config_regs = {
"_APDS9960_ENABLE": 0x80,
"_APDS9960_ATIME": 0x81,
"_APDS9960_WTIME": 0x83,
"_APDS9960_AILTIL": 0x84,
"_APDS9960_AILTH": 0x85,
"_APDS9960_AIHTL": 0x86,
"_APDS9960_AIHTH": 0x87,
"_APDS9960_PILT": 0x89,
"_APDS9960_PIHT": 0x8B,
"_APDS9960_PERS": 0x8C,
"_APDS9960_CONFIG1": 0x8D,
"_APDS9960_PPULSE": 0x8E,
"_APDS9960_CONTROL": 0x8F,
"_APDS9960_CONFIG2": 0x90,
"_APDS9960_STATUS": 0x93,
"_APDS9960_POFFSET_UR": 0x9D,
"_APDS9960_POFFSET_DL": 0x9E,
"_APDS9960_CONFIG3": 0x9F,
"_APDS9960_GPENTH": 0xA0,
"_APDS9960_GEXTH": 0xA1,
"_APDS9960_GCONF1": 0xA2,
"_APDS9960_GCONF2": 0xA3,
"_APDS9960_GOFFSET_U": 0xA4,
"_APDS9960_GOFFSET_D": 0xA5,
"_APDS9960_GOFFSET_L": 0xA7,
"_APDS9960_GOFFSET_R": 0xA9,
"_APDS9960_GPULSE": 0xA6,
"_APDS9960_GCONF3": 0xAA,
"_APDS9960_GCONF4": 0xAB,
"_APDS9960_GFLVL": 0xAE,
"_APDS9960_GSTATUS": 0xAF,
}
def sorted_reg_dict(self) -> Dict[str, int]:
return sorted(self.config_regs, key=self.config_regs.get)
def print_reg_states(self) -> None:
buf2 = bytearray(2)
for key in self.sorted_reg_dict():
reg_val = self._read8(buf2, self.config_regs[key])
print(" {0:22} 0x{1:02X} | 0x{2:02X} | b{2:08b} | {2:3d}".format(key, self.config_regs[key], reg_val))
def _read8(self, buf: bytearray, addr: int) -> int:
buf[0] = addr
with self.i2c_device as i2c:
i2c.write_then_readinto(buf, buf, out_end=1, in_end=1)
return buf[0] | true | true |
f735bc49387622b0b3605fd0ea721fdc2eb10832 | 9,127 | py | Python | src/main.py | thuongton999/flappy-peter-teo | 0b4c91670a98bef5128400f98cbf1e06a10e0b02 | [
"MIT"
] | 4 | 2021-09-12T07:22:16.000Z | 2022-02-14T10:03:46.000Z | src/main.py | thuongton999/flappy-peter-teo | 0b4c91670a98bef5128400f98cbf1e06a10e0b02 | [
"MIT"
] | null | null | null | src/main.py | thuongton999/flappy-peter-teo | 0b4c91670a98bef5128400f98cbf1e06a10e0b02 | [
"MIT"
] | null | null | null | # Flappy Bird made by Thuongton999
# Ez-ist mode
from settings import *
from objects import *
def birdCollision(bird, column):
return (
bird.positionX < column.positionX + column.WIDTH and
bird.positionX + bird.WIDTH > column.positionX and
bird.positionY < column.positionY + column.HEIGHT and
bird.positionY + bird.HEIGHT > column.positionY
)
window = Window()
bird = Bird()
environment = Environment()
columns = Columns(interface=window)
score = Score(interface=window)
def gameQuit():
os.sys.exit("You dont want to play this game? Fvck you!")
pygame.quit()
def gameStartScreen():
startGame = False
startButton = Button(
position_x=window.WIDTH//2,
position_y=window.HEIGHT//2,
button_width=150, button_height=90
)
startButton.positionX -= startButton.WIDTH//2
startButton.positionY -= startButton.HEIGHT//2
while not startGame:
window.interface.blit(window.background, (window.backgroundPosX, 0))
window.interface.blit(window.background, (window.backgroundPosX+window.WIDTH, 0))
window.backgroundPosX -= window.speed if window.backgroundPosX + window.WIDTH > 0 else -window.WIDTH
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameQuit()
marginTop = 20
marginBottom = 10
titleRendered = bordered(
"Flappy Peter Teo",
gameDefaultSettings["DEFAULT_TITLE"],
gfcolor=colors["white"],
ocolor=colors["clotting"],
opx=5
)
header2Rendered = bordered(
"thuongton999 code this, ya :))",
gameDefaultSettings["DEFAULT_TEXT"],
gfcolor=colors["sun"],
ocolor=colors["white"],
opx=3
)
copyrightRendered = bordered(
"Copyright by thuongton999",
gameDefaultSettings["COPYRIGHT"],
gfcolor=colors["sun"],
ocolor=colors["white"],
opx=3
)
window.interface.blit(titleRendered, (window.WIDTH//2-titleRendered.get_width()//2, marginTop))
window.interface.blit(header2Rendered, (window.WIDTH//2-header2Rendered.get_width()//2, marginTop*2+titleRendered.get_height()))
window.interface.blit(
copyrightRendered,
(window.WIDTH//2-copyrightRendered.get_width()//2, window.HEIGHT-marginBottom-copyrightRendered.get_height())
)
window.interface.blit(startButton.buttonImage, (startButton.positionX, startButton.positionY))
mousePosX, mousePosY = pygame.mouse.get_pos()
mouseButtonPressed = pygame.mouse.get_pressed(3)
if startButton.onClick(mousePosX=mousePosX, mousePosY=mousePosY, clicked=mouseButtonPressed[0]):
startGame = True
break
pygame.display.update()
window.frame.tick(window.FPS)
while startGame:
bird.__init__()
columns.__init__(interface=window)
score.__init__(interface=window)
getReady()
gamePlay()
startGame = gameOver()
return startGame
def getReady():
ready = False
while not ready:
window.interface.blit(window.background, (window.backgroundPosX, 0))
window.interface.blit(window.background, (window.backgroundPosX+window.WIDTH, 0))
window.backgroundPosX -= window.speed if window.backgroundPosX + window.WIDTH > 0 else -window.WIDTH
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameQuit()
elif event.type == pygame.MOUSEBUTTONDOWN or event.type == pygame.KEYDOWN:
return
marginLeft = 30
getReadyTextRendered = bordered(
"Get ready? Tap or press any key",
gameDefaultSettings["DEFAULT_TEXT"],
gfcolor=colors["grey"],
ocolor=colors["white"],
opx=3
)
window.interface.blit(bird.birdRotatedImage, (bird.positionX, bird.positionY))
window.interface.blit(
getReadyTextRendered,
(
bird.positionX+bird.WIDTH+marginLeft,
bird.positionY+getReadyTextRendered.get_height()//2
)
)
pygame.display.update()
window.frame.tick(window.FPS)
def gamePlay():
while not bird.dead:
window.interface.blit(window.background, (window.backgroundPosX, 0))
window.interface.blit(window.background, (window.backgroundPosX+window.WIDTH, 0))
window.backgroundPosX -= window.speed if window.backgroundPosX + window.WIDTH > 0 else -window.WIDTH
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameQuit()
elif event.type == pygame.KEYDOWN or event.type == pygame.MOUSEBUTTONDOWN:
environment.tapSound.play()
bird.positionY -= bird.speed if bird.positionY >= 0 else 0
bird.speed = bird.defaultSpeed
for topColumn, bottomColumn, passed in columns.columns:
topColumn.positionX -= window.speed
bottomColumn.positionX -= window.speed
window.interface.blit(topColumn.columnImage, (topColumn.positionX, -(topColumn.imageHeight - topColumn.HEIGHT)))
window.interface.blit(bottomColumn.columnImage, (bottomColumn.positionX, bottomColumn.positionY))
if birdCollision(bird, topColumn) or birdCollision(bird, bottomColumn):
bird.dead = True
break
if columns.columns[0][0].positionX + columns.columns[0][0].WIDTH < bird.positionX and not columns.columns[0][2]:
columns.columns[0][2] = True
score.points += 1
if columns.columns[0][0].positionX + columns.columns[0][0].WIDTH < 0:
columns.columns.pop(0)
columns.addNewColumn()
bird.positionY += bird.speed + 0.5*environment.gravity
bird.speed += environment.gravity
bird.birdRotatedImage = pygame.transform.rotate(bird.birdDefaultImage, -bird.speed*2)
bird.updateBirdSize()
window.interface.blit(bird.birdRotatedImage, (bird.positionX, bird.positionY))
score.render()
if not (0 <= bird.positionY <= window.HEIGHT - bird.HEIGHT):
bird.dead = True
pygame.display.update()
window.frame.tick(window.FPS)
def gameOver():
environment.gameOverSound.play()
scoreBoard = ScoreBoard(points=score.points, interface=window)
titleRendered = bordered(
"GAME OVER",
gameDefaultSettings["DEFAULT_TITLE"],
gfcolor=colors["white"],
ocolor=colors["clotting"],
opx=5
)
cakhiaRendered = bordered(
"You have been addicted xD",
gameDefaultSettings["DEFAULT_TEXT"],
gfcolor=colors["white"],
ocolor=colors["clotting"],
opx=3
)
notificationRendered = bordered(
"Press SPACE to play again or ESC to go back to Menu",
gameDefaultSettings["DEFAULT_TEXT"],
gfcolor=colors["lemon_chiffon"],
ocolor=colors["sun"],
opx=3
)
titleDropDownSpeed = 6
titlePositionX = window.WIDTH//2-titleRendered.get_width()//2
titlePositionY = -titleRendered.get_height()
titleHeight = titleRendered.get_height()
marginBottom = 10
marginTop = 20
notificationPositionX = window.WIDTH//2-notificationRendered.get_width()//2
notificationPositionY = scoreBoard.positionY+scoreBoard.HEIGHT+marginTop
cakhiaPositionX = window.WIDTH//2-cakhiaRendered.get_width()//2
cakhiaPositionY = scoreBoard.positionY-marginBottom-cakhiaRendered.get_height()
playAgain = False
while not playAgain:
window.interface.blit(window.background, (window.backgroundPosX, 0))
window.interface.blit(window.background, (window.backgroundPosX+window.WIDTH, 0))
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameQuit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
return True
elif event.key == pygame.K_ESCAPE:
return False
titlePositionY += titleDropDownSpeed if titlePositionY+titleHeight+marginBottom < cakhiaPositionY else 0
window.interface.blit(cakhiaRendered, (cakhiaPositionX, cakhiaPositionY))
window.interface.blit(notificationRendered, (notificationPositionX,notificationPositionY))
window.interface.blit(titleRendered, (titlePositionX, titlePositionY))
scoreBoard.renderScoreBoard()
pygame.display.update()
window.frame.tick(window.FPS)
return playAgain
if __name__ == "__main__":
os.system("cls")
home = True
while home:
gameStartScreen() | 39.855895 | 137 | 0.617728 |
from settings import *
from objects import *
def birdCollision(bird, column):
return (
bird.positionX < column.positionX + column.WIDTH and
bird.positionX + bird.WIDTH > column.positionX and
bird.positionY < column.positionY + column.HEIGHT and
bird.positionY + bird.HEIGHT > column.positionY
)
window = Window()
bird = Bird()
environment = Environment()
columns = Columns(interface=window)
score = Score(interface=window)
def gameQuit():
os.sys.exit("You dont want to play this game? Fvck you!")
pygame.quit()
def gameStartScreen():
startGame = False
startButton = Button(
position_x=window.WIDTH//2,
position_y=window.HEIGHT//2,
button_width=150, button_height=90
)
startButton.positionX -= startButton.WIDTH//2
startButton.positionY -= startButton.HEIGHT//2
while not startGame:
window.interface.blit(window.background, (window.backgroundPosX, 0))
window.interface.blit(window.background, (window.backgroundPosX+window.WIDTH, 0))
window.backgroundPosX -= window.speed if window.backgroundPosX + window.WIDTH > 0 else -window.WIDTH
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameQuit()
marginTop = 20
marginBottom = 10
titleRendered = bordered(
"Flappy Peter Teo",
gameDefaultSettings["DEFAULT_TITLE"],
gfcolor=colors["white"],
ocolor=colors["clotting"],
opx=5
)
header2Rendered = bordered(
"thuongton999 code this, ya :))",
gameDefaultSettings["DEFAULT_TEXT"],
gfcolor=colors["sun"],
ocolor=colors["white"],
opx=3
)
copyrightRendered = bordered(
"Copyright by thuongton999",
gameDefaultSettings["COPYRIGHT"],
gfcolor=colors["sun"],
ocolor=colors["white"],
opx=3
)
window.interface.blit(titleRendered, (window.WIDTH//2-titleRendered.get_width()//2, marginTop))
window.interface.blit(header2Rendered, (window.WIDTH//2-header2Rendered.get_width()//2, marginTop*2+titleRendered.get_height()))
window.interface.blit(
copyrightRendered,
(window.WIDTH//2-copyrightRendered.get_width()//2, window.HEIGHT-marginBottom-copyrightRendered.get_height())
)
window.interface.blit(startButton.buttonImage, (startButton.positionX, startButton.positionY))
mousePosX, mousePosY = pygame.mouse.get_pos()
mouseButtonPressed = pygame.mouse.get_pressed(3)
if startButton.onClick(mousePosX=mousePosX, mousePosY=mousePosY, clicked=mouseButtonPressed[0]):
startGame = True
break
pygame.display.update()
window.frame.tick(window.FPS)
while startGame:
bird.__init__()
columns.__init__(interface=window)
score.__init__(interface=window)
getReady()
gamePlay()
startGame = gameOver()
return startGame
def getReady():
ready = False
while not ready:
window.interface.blit(window.background, (window.backgroundPosX, 0))
window.interface.blit(window.background, (window.backgroundPosX+window.WIDTH, 0))
window.backgroundPosX -= window.speed if window.backgroundPosX + window.WIDTH > 0 else -window.WIDTH
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameQuit()
elif event.type == pygame.MOUSEBUTTONDOWN or event.type == pygame.KEYDOWN:
return
marginLeft = 30
getReadyTextRendered = bordered(
"Get ready? Tap or press any key",
gameDefaultSettings["DEFAULT_TEXT"],
gfcolor=colors["grey"],
ocolor=colors["white"],
opx=3
)
window.interface.blit(bird.birdRotatedImage, (bird.positionX, bird.positionY))
window.interface.blit(
getReadyTextRendered,
(
bird.positionX+bird.WIDTH+marginLeft,
bird.positionY+getReadyTextRendered.get_height()//2
)
)
pygame.display.update()
window.frame.tick(window.FPS)
def gamePlay():
while not bird.dead:
window.interface.blit(window.background, (window.backgroundPosX, 0))
window.interface.blit(window.background, (window.backgroundPosX+window.WIDTH, 0))
window.backgroundPosX -= window.speed if window.backgroundPosX + window.WIDTH > 0 else -window.WIDTH
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameQuit()
elif event.type == pygame.KEYDOWN or event.type == pygame.MOUSEBUTTONDOWN:
environment.tapSound.play()
bird.positionY -= bird.speed if bird.positionY >= 0 else 0
bird.speed = bird.defaultSpeed
for topColumn, bottomColumn, passed in columns.columns:
topColumn.positionX -= window.speed
bottomColumn.positionX -= window.speed
window.interface.blit(topColumn.columnImage, (topColumn.positionX, -(topColumn.imageHeight - topColumn.HEIGHT)))
window.interface.blit(bottomColumn.columnImage, (bottomColumn.positionX, bottomColumn.positionY))
if birdCollision(bird, topColumn) or birdCollision(bird, bottomColumn):
bird.dead = True
break
if columns.columns[0][0].positionX + columns.columns[0][0].WIDTH < bird.positionX and not columns.columns[0][2]:
columns.columns[0][2] = True
score.points += 1
if columns.columns[0][0].positionX + columns.columns[0][0].WIDTH < 0:
columns.columns.pop(0)
columns.addNewColumn()
bird.positionY += bird.speed + 0.5*environment.gravity
bird.speed += environment.gravity
bird.birdRotatedImage = pygame.transform.rotate(bird.birdDefaultImage, -bird.speed*2)
bird.updateBirdSize()
window.interface.blit(bird.birdRotatedImage, (bird.positionX, bird.positionY))
score.render()
if not (0 <= bird.positionY <= window.HEIGHT - bird.HEIGHT):
bird.dead = True
pygame.display.update()
window.frame.tick(window.FPS)
def gameOver():
environment.gameOverSound.play()
scoreBoard = ScoreBoard(points=score.points, interface=window)
titleRendered = bordered(
"GAME OVER",
gameDefaultSettings["DEFAULT_TITLE"],
gfcolor=colors["white"],
ocolor=colors["clotting"],
opx=5
)
cakhiaRendered = bordered(
"You have been addicted xD",
gameDefaultSettings["DEFAULT_TEXT"],
gfcolor=colors["white"],
ocolor=colors["clotting"],
opx=3
)
notificationRendered = bordered(
"Press SPACE to play again or ESC to go back to Menu",
gameDefaultSettings["DEFAULT_TEXT"],
gfcolor=colors["lemon_chiffon"],
ocolor=colors["sun"],
opx=3
)
titleDropDownSpeed = 6
titlePositionX = window.WIDTH//2-titleRendered.get_width()//2
titlePositionY = -titleRendered.get_height()
titleHeight = titleRendered.get_height()
marginBottom = 10
marginTop = 20
notificationPositionX = window.WIDTH//2-notificationRendered.get_width()//2
notificationPositionY = scoreBoard.positionY+scoreBoard.HEIGHT+marginTop
cakhiaPositionX = window.WIDTH//2-cakhiaRendered.get_width()//2
cakhiaPositionY = scoreBoard.positionY-marginBottom-cakhiaRendered.get_height()
playAgain = False
while not playAgain:
window.interface.blit(window.background, (window.backgroundPosX, 0))
window.interface.blit(window.background, (window.backgroundPosX+window.WIDTH, 0))
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameQuit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
return True
elif event.key == pygame.K_ESCAPE:
return False
titlePositionY += titleDropDownSpeed if titlePositionY+titleHeight+marginBottom < cakhiaPositionY else 0
window.interface.blit(cakhiaRendered, (cakhiaPositionX, cakhiaPositionY))
window.interface.blit(notificationRendered, (notificationPositionX,notificationPositionY))
window.interface.blit(titleRendered, (titlePositionX, titlePositionY))
scoreBoard.renderScoreBoard()
pygame.display.update()
window.frame.tick(window.FPS)
return playAgain
if __name__ == "__main__":
os.system("cls")
home = True
while home:
gameStartScreen() | true | true |
f735bd31e6bc4abf4872889f80575cdf0932bcac | 2,043 | py | Python | src/AngleMeasurement/RP1DClustering.py | paigeco/VirtualGoniometer | 536e7e77fbb036ad8d777b42e751a0f3e80b8242 | [
"CC0-1.0"
] | 1 | 2021-02-22T02:53:26.000Z | 2021-02-22T02:53:26.000Z | src/AngleMeasurement/RP1DClustering.py | paigeco/VirtualGoniometer | 536e7e77fbb036ad8d777b42e751a0f3e80b8242 | [
"CC0-1.0"
] | 5 | 2021-03-26T15:15:34.000Z | 2021-06-11T20:16:00.000Z | src/AngleMeasurement/RP1DClustering.py | paigeco/VirtualGoniometer | 536e7e77fbb036ad8d777b42e751a0f3e80b8242 | [
"CC0-1.0"
] | null | null | null | import numpy as np
from .PCASmallestEig import pca_smallest_eig, pca_smallest_eig_powermethod
from .Withness import withness
from .CalculateAngle import get_angle
#RP1D clustering from
#Han, Sangchun, and Mireille Boutin. "The hidden structure of image datasets." 2015 IEEE International Conference on Image Processing (ICIP). IEEE, 2015.
############################################
def ClusteringMeanRP1D(P,N,T,A=0,UsePCA=True,UsePower=False):
n = N.shape[0]
d = N.shape[1]
v = np.random.rand(T,d)
#u = np.mean(N,axis=0)
if UsePower:
N1 = pca_smallest_eig_powermethod(N, center=False)
N1 = np.reshape(N1,(3,))
else:
N1 = pca_smallest_eig(N, center=False)
N2 = np.sum(N,axis=0)
v = np.cross(N1,N2)
v = v/np.linalg.norm(v)
m = np.mean(P,axis=0)
dist = np.sqrt(np.sum((P - m)**2,axis=1))
i = np.argmin(dist)
radius = np.max(dist)
D = (P - P[i,:])/radius
#The A=2 is just hand tuned. Larger A encourages the clustering to split the patch in half
#A=0 is the previous version of the virtual goniometer
x = np.sum(v*N,axis=1) + A*np.sum(v*D,axis=1)
#Clustering
_, m = withness(x)
C = np.zeros(n,)
C[x>m] = 1
C[x<=m] = 2
P1 = P[C==1,:]
P2 = P[C==2,:]
N1 = N[C==1,:]
N2 = N[C==2,:]
theta, n1, n2 = get_angle(P1,P2,N1,N2,UsePCA = UsePCA, UsePower = UsePower)
return C,n1,n2,theta
def ClusteringRandomRP1D(X,T):
n = X.shape[0]
d = X.shape[1]
v = np.random.rand(T,d)
u = np.mean(X,axis=0)
wmin = float("inf")
imin = 0
#w_list = []
#m_list = []
for i in range(T):
x = np.sum((v[i,:]-(np.dot(v[i,:],u)/np.dot(v[i,:],v[i,:]))*u)*X,axis=1)
w,m = withness(x)
if w < wmin:
wmin = w
imin = i
x = np.sum((v[imin,:]-(np.dot(v[imin,:],u)/np.dot(v[imin,:],v[imin,:]))*u)*X,axis=1)
_,m = withness(x)
C = np.zeros(n,)
C[x>m] = 1
C[x<=m] = 2
return C | 25.860759 | 153 | 0.541361 | import numpy as np
from .PCASmallestEig import pca_smallest_eig, pca_smallest_eig_powermethod
from .Withness import withness
from .CalculateAngle import get_angle
d = X.shape[1]
v = np.random.rand(T,d)
u = np.mean(X,axis=0)
wmin = float("inf")
imin = 0
for i in range(T):
x = np.sum((v[i,:]-(np.dot(v[i,:],u)/np.dot(v[i,:],v[i,:]))*u)*X,axis=1)
w,m = withness(x)
if w < wmin:
wmin = w
imin = i
x = np.sum((v[imin,:]-(np.dot(v[imin,:],u)/np.dot(v[imin,:],v[imin,:]))*u)*X,axis=1)
_,m = withness(x)
C = np.zeros(n,)
C[x>m] = 1
C[x<=m] = 2
return C | true | true |
f735bd7fdd31df3cdede5e7ae4babf9182954552 | 17,836 | py | Python | mriqc/interfaces/anatomical.py | nipreps/mriqc | e021008da0a2ef1c48e882baf932139a673349f9 | [
"Apache-2.0"
] | 15 | 2021-09-25T17:37:17.000Z | 2022-03-31T05:29:38.000Z | mriqc/interfaces/anatomical.py | oesteban/mriqc | e021008da0a2ef1c48e882baf932139a673349f9 | [
"Apache-2.0"
] | 87 | 2021-09-24T09:48:41.000Z | 2022-03-28T11:00:28.000Z | mriqc/interfaces/anatomical.py | nipreps/mriqc | e021008da0a2ef1c48e882baf932139a673349f9 | [
"Apache-2.0"
] | 9 | 2021-09-29T17:03:57.000Z | 2022-03-28T11:57:25.000Z | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Nipype interfaces to support anatomical workflow."""
import os.path as op
from builtins import zip
import nibabel as nb
import numpy as np
import scipy.ndimage as nd
from mriqc.qc.anatomical import (
art_qi1,
art_qi2,
cjv,
cnr,
efc,
fber,
rpve,
snr,
snr_dietrich,
summary_stats,
volume_fraction,
wm2max,
)
from mriqc.utils.misc import _flatten_dict
from nipype.interfaces.base import (
BaseInterfaceInputSpec,
File,
InputMultiPath,
SimpleInterface,
TraitedSpec,
isdefined,
traits,
)
from nipype.utils.filemanip import fname_presuffix
class StructuralQCInputSpec(BaseInterfaceInputSpec):
    """Inputs consumed by :py:class:`StructuralQC`."""

    in_file = File(exists=True, mandatory=True, desc="file to be plotted")
    in_noinu = File(exists=True, mandatory=True, desc="image after INU correction")
    in_segm = File(exists=True, mandatory=True, desc="segmentation file from FSL FAST")
    in_bias = File(exists=True, mandatory=True, desc="bias file")
    head_msk = File(exists=True, mandatory=True, desc="head mask")
    air_msk = File(exists=True, mandatory=True, desc="air mask")
    rot_msk = File(exists=True, mandatory=True, desc="rotation mask")
    # FIX: description previously read "air mask" (copy-paste of air_msk);
    # this trait receives the artifacts mask (see ArtifactMask.out_art_msk).
    artifact_msk = File(exists=True, mandatory=True, desc="artifacts mask")
    in_pvms = InputMultiPath(
        File(exists=True),
        mandatory=True,
        desc="partial volume maps from FSL FAST",
    )
    in_tpms = InputMultiPath(File(), desc="tissue probability maps from FSL FAST")
    # FIX: description previously duplicated in_tpms' ("from FSL FAST");
    # these are the template-space tissue probability maps (cf. the
    # ``mni_tpms`` name and their use against ``in_pvms`` in fuzzy_jaccard).
    mni_tpms = InputMultiPath(File(), desc="tissue probability maps from the template")
    in_fwhm = traits.List(
        traits.Float, mandatory=True, desc="smoothness estimated with AFNI"
    )
    human = traits.Bool(True, usedefault=True, desc="human workflow")
class StructuralQCOutputSpec(TraitedSpec):
    # Outputs of StructuralQC. Each trait below holds one image-quality
    # metric (IQM) or a group of related IQMs; the flattened union of all
    # of them is also exposed through ``out_qc``.
    summary = traits.Dict(desc="summary statistics per tissue")
    icvs = traits.Dict(desc="intracranial volume (ICV) fractions")
    rpve = traits.Dict(desc="partial volume fractions")
    size = traits.Dict(desc="image sizes")
    spacing = traits.Dict(desc="image sizes")
    fwhm = traits.Dict(desc="full width half-maximum measure")
    inu = traits.Dict(desc="summary statistics of the bias field")
    # NOTE(review): snr/snrd (and the Float traits below) are assigned the
    # trait *class* rather than an instance; traits instantiates these
    # automatically, so this is equivalent to e.g. ``traits.Dict()``.
    snr = traits.Dict
    snrd = traits.Dict
    cnr = traits.Float
    fber = traits.Float
    efc = traits.Float
    qi_1 = traits.Float
    wm2max = traits.Float
    cjv = traits.Float
    out_qc = traits.Dict(desc="output flattened dictionary with all measures")
    out_noisefit = File(exists=True, desc="plot of background noise and chi fitting")
    tpm_overlap = traits.Dict
class StructuralQC(SimpleInterface):
    """
    Computes anatomical :abbr:`QC (Quality Control)` measures on the
    structural image given as input

    All metrics are derived from the INU-corrected image, the FSL FAST
    segmentation and partial-volume maps, and the air/head/artifacts/rotation
    masks supplied as inputs. Results are accumulated in ``self._results``
    and also exported as one flat dictionary through ``out_qc``.
    """

    input_spec = StructuralQCInputSpec
    output_spec = StructuralQCOutputSpec

    def _run_interface(self, runtime):  # pylint: disable=R0914,E1101
        imnii = nb.load(self.inputs.in_noinu)
        # For human data, erode tissue masks before computing summary stats
        # only when the voxel size is (near-)isotropic high resolution
        # (all zooms < 1.9mm); never erode for non-human workflows.
        erode = (
            np.all(np.array(imnii.header.get_zooms()[:3], dtype=np.float32) < 1.9)
            if self.inputs.human
            else False
        )
        # Load image corrected for INU; clip NaNs and negative values to zero
        inudata = np.nan_to_num(imnii.get_fdata())
        inudata[inudata < 0] = 0
        # Sanity check: an (almost) all-zero image cannot be processed
        if np.all(inudata < 1e-5):
            raise RuntimeError(
                "Input inhomogeneity-corrected data seem empty. "
                "MRIQC failed to process this dataset."
            )
        # Load binary segmentation from FSL FAST (integer tissue labels)
        segnii = nb.load(self.inputs.in_segm)
        segdata = np.asanyarray(segnii.dataobj).astype(np.uint8)
        # Sanity check: require at least ~1000 segmented voxels
        if np.sum(segdata > 0) < 1e3:
            raise RuntimeError(
                "Input segmentation data is likely corrupt. "
                "MRIQC failed to process this dataset."
            )
        # Load air, artifacts and head masks
        airdata = np.asanyarray(nb.load(self.inputs.air_msk).dataobj).astype(np.uint8)
        artdata = np.asanyarray(nb.load(self.inputs.artifact_msk).dataobj).astype(
            np.uint8
        )
        headdata = np.asanyarray(nb.load(self.inputs.head_msk).dataobj).astype(np.uint8)
        if np.sum(headdata > 0) < 100:
            raise RuntimeError(
                "Detected less than 100 voxels belonging to the head mask. "
                "MRIQC failed to process this dataset."
            )
        rotdata = np.asanyarray(nb.load(self.inputs.rot_msk).dataobj).astype(np.uint8)
        # Load Partial Volume Maps (pvms) from FSL FAST
        pvmdata = []
        for fname in self.inputs.in_pvms:
            pvmdata.append(nb.load(fname).get_fdata(dtype="float32"))
            # Each tissue probability map must have some support
            if np.sum(pvmdata[-1] > 1e-4) < 10:
                raise RuntimeError(
                    "Detected less than 10 voxels belonging to one tissue prob. map. "
                    "MRIQC failed to process this dataset."
                )
        # Summary stats (per-tissue median/stdv/mad/n, plus background "bg")
        stats = summary_stats(inudata, pvmdata, airdata, erode=erode)
        self._results["summary"] = stats
        # SNR: per-tissue signal-to-noise ratio plus the mean over tissues
        snrvals = []
        self._results["snr"] = {}
        for tlabel in ["csf", "wm", "gm"]:
            snrvals.append(
                snr(
                    stats[tlabel]["median"],
                    stats[tlabel]["stdv"],
                    stats[tlabel]["n"],
                )
            )
            self._results["snr"][tlabel] = snrvals[-1]
        self._results["snr"]["total"] = float(np.mean(snrvals))
        # Dietrich SNR: uses the air (background) noise estimate instead of
        # the within-tissue standard deviation
        snrvals = []
        self._results["snrd"] = {
            tlabel: snr_dietrich(
                stats[tlabel]["median"],
                mad_air=stats["bg"]["mad"],
                sigma_air=stats["bg"]["stdv"],
            )
            for tlabel in ["csf", "wm", "gm"]
        }
        self._results["snrd"]["total"] = float(
            np.mean([val for _, val in list(self._results["snrd"].items())])
        )
        # CNR: contrast-to-noise ratio between WM and GM
        self._results["cnr"] = cnr(
            stats["wm"]["median"],
            stats["gm"]["median"],
            stats["bg"]["stdv"],
            stats["wm"]["stdv"],
            stats["gm"]["stdv"],
        )
        # FBER: foreground-background energy ratio
        self._results["fber"] = fber(inudata, headdata, rotdata)
        # EFC: entropy-focus criterion
        self._results["efc"] = efc(inudata, rotdata)
        # M2WM: ratio of full-intensity range to median WM intensity
        self._results["wm2max"] = wm2max(inudata, stats["wm"]["median"])
        # Artifacts: QI1 = fraction of artifactual voxels in the air mask
        self._results["qi_1"] = art_qi1(airdata, artdata)
        # CJV: coefficient of joint variation between WM and GM
        self._results["cjv"] = cjv(
            # mu_wm, mu_gm, sigma_wm, sigma_gm
            stats["wm"]["median"],
            stats["gm"]["median"],
            stats["wm"]["mad"],
            stats["gm"]["mad"],
        )
        # FWHM: normalize AFNI's smoothness estimate (mm) by voxel size
        fwhm = np.array(self.inputs.in_fwhm[:3]) / np.array(
            imnii.header.get_zooms()[:3]
        )
        self._results["fwhm"] = {
            "x": float(fwhm[0]),
            "y": float(fwhm[1]),
            "z": float(fwhm[2]),
            "avg": float(np.average(fwhm)),
        }
        # ICVs: intracranial volume fraction per tissue class
        self._results["icvs"] = volume_fraction(pvmdata)
        # RPVE: residual partial volume effect per tissue class
        self._results["rpve"] = rpve(pvmdata, segdata)
        # Image specs: matrix size and voxel spacing
        self._results["size"] = {
            "x": int(inudata.shape[0]),
            "y": int(inudata.shape[1]),
            "z": int(inudata.shape[2]),
        }
        self._results["spacing"] = {
            i: float(v) for i, v in zip(["x", "y", "z"], imnii.header.get_zooms()[:3])
        }
        # A 4th dimension (and TR), if present, is recorded too; indexing
        # errors just mean the image is 3D
        try:
            self._results["size"]["t"] = int(inudata.shape[3])
        except IndexError:
            pass
        try:
            self._results["spacing"]["tr"] = float(imnii.header.get_zooms()[3])
        except IndexError:
            pass
        # Bias: spread (5-95 percentile range) and median of the estimated
        # bias field, sampled only within segmented voxels
        bias = nb.load(self.inputs.in_bias).get_fdata()[segdata > 0]
        self._results["inu"] = {
            "range": float(
                np.abs(np.percentile(bias, 95.0) - np.percentile(bias, 5.0))
            ),
            "med": float(np.median(bias)),
        }  # pylint: disable=E1101
        # Overlap of subject tissue maps with the template tissue maps,
        # assuming both lists are ordered CSF, GM, WM (FSL FAST convention)
        mni_tpms = [nb.load(tpm).get_fdata() for tpm in self.inputs.mni_tpms]
        in_tpms = [nb.load(tpm).get_fdata() for tpm in self.inputs.in_pvms]
        overlap = fuzzy_jaccard(in_tpms, mni_tpms)
        self._results["tpm_overlap"] = {
            "csf": overlap[0],
            "gm": overlap[1],
            "wm": overlap[2],
        }
        # Flatten the dictionary of all results into out_qc
        self._results["out_qc"] = _flatten_dict(self._results)
        return runtime
class ArtifactMaskInputSpec(BaseInterfaceInputSpec):
    # Inputs for ArtifactMask: the anatomical image plus the masks needed to
    # delimit the background ("hat") region where artifacts are searched.
    in_file = File(exists=True, mandatory=True, desc="File to be plotted")
    head_mask = File(exists=True, mandatory=True, desc="head mask")
    # Optional: voxels flagged by this mask are excluded from the air region
    rot_mask = File(exists=True, desc="a rotation mask")
    nasion_post_mask = File(
        exists=True,
        mandatory=True,
        desc="nasion to posterior of cerebellum mask",
    )
class ArtifactMaskOutputSpec(TraitedSpec):
    # Three binary masks written next to the working directory by
    # ArtifactMask._run_interface (suffixes _hat, _art, _air respectively).
    out_hat_msk = File(exists=True, desc='output "hat" mask')
    out_art_msk = File(exists=True, desc="output artifacts mask")
    out_air_msk = File(exists=True, desc='output "hat" mask, without artifacts')
class ArtifactMask(SimpleInterface):
    """
    Computes the artifact mask using the method described in [Mortamet2009]_.

    The "hat" mask is the air region outside the head, restricted by the
    nasion-to-posterior-of-cerebellum mask (and the rotation mask, when
    given). Voxels inside it flagged as artifactual by
    :py:func:`artifact_mask` are written to ``out_art_msk`` and removed
    from the air mask written to ``out_air_msk``.
    """

    input_spec = ArtifactMaskInputSpec
    output_spec = ArtifactMaskOutputSpec

    def _run_interface(self, runtime):
        imnii = nb.load(self.inputs.in_file)
        imdata = np.nan_to_num(imnii.get_fdata().astype(np.float32))
        # Remove negative values
        imdata[imdata < 0] = 0
        hmdata = np.asanyarray(nb.load(self.inputs.head_mask).dataobj)
        npdata = np.asanyarray(nb.load(self.inputs.nasion_post_mask).dataobj)
        # Invert head mask
        airdata = np.ones_like(hmdata, dtype=np.uint8)
        airdata[hmdata == 1] = 0
        # Calculate distance to border.
        # FIX: call the function from the top-level scipy.ndimage namespace;
        # the scipy.ndimage.morphology submodule is deprecated.
        dist = nd.distance_transform_edt(airdata)
        # Apply nasion-to-posterior mask
        airdata[npdata == 1] = 0
        dist[npdata == 1] = 0
        # Normalize distances to [0, 1]
        dist /= dist.max()
        # Apply rotation mask (if supplied)
        if isdefined(self.inputs.rot_mask):
            rotmskdata = np.asanyarray(nb.load(self.inputs.rot_mask).dataobj)
            airdata[rotmskdata == 1] = 0
        # Run the artifact detection
        qi1_img = artifact_mask(imdata, airdata, dist)
        # Derive output filenames, handling double extensions (e.g. .nii.gz)
        fname, ext = op.splitext(op.basename(self.inputs.in_file))
        if ext == ".gz":
            fname, ext2 = op.splitext(fname)
            ext = ext2 + ext
        self._results["out_hat_msk"] = op.abspath("{}_hat{}".format(fname, ext))
        self._results["out_art_msk"] = op.abspath("{}_art{}".format(fname, ext))
        self._results["out_air_msk"] = op.abspath("{}_air{}".format(fname, ext))
        # Write all masks as uint8 volumes sharing the input affine
        hdr = imnii.header.copy()
        hdr.set_data_dtype(np.uint8)
        nb.Nifti1Image(qi1_img, imnii.affine, hdr).to_filename(
            self._results["out_art_msk"]
        )
        nb.Nifti1Image(airdata, imnii.affine, hdr).to_filename(
            self._results["out_hat_msk"]
        )
        # The air mask is the "hat" mask with artifactual voxels removed
        airdata[qi1_img > 0] = 0
        nb.Nifti1Image(airdata, imnii.affine, hdr).to_filename(
            self._results["out_air_msk"]
        )
        return runtime
class ComputeQI2InputSpec(BaseInterfaceInputSpec):
    # Inputs for ComputeQI2: the anatomical image and the artifact-free
    # air mask (ArtifactMask.out_air_msk).
    in_file = File(exists=True, mandatory=True, desc="File to be plotted")
    air_msk = File(exists=True, mandatory=True, desc="air (without artifacts) mask")
class ComputeQI2OutputSpec(TraitedSpec):
    qi2 = traits.Float(desc="computed QI2 value")
    # Plot file produced by art_qi2 alongside the scalar value
    out_file = File(desc="output plot: noise fit")
class ComputeQI2(SimpleInterface):
    r"""
    Computes the :abbr:`QI2 (quality index 2)` described in [Mortamet2009]_.

    FIX: the docstring previously read "Computes the artifact mask"
    (copy-pasted from :py:class:`ArtifactMask`); this interface actually
    delegates to :py:func:`~mriqc.qc.anatomical.art_qi2`, which fits a noise
    distribution to the background intensities and returns both the scalar
    goodness-of-fit value and a plot of the fit.
    """

    input_spec = ComputeQI2InputSpec
    output_spec = ComputeQI2OutputSpec

    def _run_interface(self, runtime):
        imdata = nb.load(self.inputs.in_file).get_fdata()
        airdata = nb.load(self.inputs.air_msk).get_fdata()
        # art_qi2 returns the scalar QI2 value and the path of the
        # noise-fit plot it generates
        qi2, out_file = art_qi2(imdata, airdata)
        self._results["qi2"] = qi2
        self._results["out_file"] = out_file
        return runtime
class HarmonizeInputSpec(BaseInterfaceInputSpec):
    """Input specification for :class:`Harmonize`."""

    in_file = File(
        exists=True, mandatory=True, desc="input data (after bias correction)"
    )
    wm_mask = File(exists=True, mandatory=True, desc="white-matter mask")
    erodemsk = traits.Bool(True, usedefault=True, desc="erode mask")
    thresh = traits.Float(0.9, usedefault=True, desc="WM probability threshold")
class HarmonizeOutputSpec(TraitedSpec):
    """Output specification for :class:`Harmonize`."""

    out_file = File(exists=True, desc="input data (after intensity harmonization)")
class Harmonize(SimpleInterface):
    """
    Harmonize the image intensities.

    Rescales the voxel values so that the median intensity of the
    white matter (WM) is 1000, making intensities comparable across
    inputs.
    """

    input_spec = HarmonizeInputSpec
    output_spec = HarmonizeOutputSpec

    def _run_interface(self, runtime):
        in_file = nb.load(self.inputs.in_file)

        # Binarize the WM probability map at the user-selected threshold.
        # BUG FIX: the declared ``thresh`` input was previously ignored and
        # the 0.9 cutoff was hard-coded (same default, so behavior is
        # unchanged unless the caller overrides ``thresh``).
        wm_mask = nb.load(self.inputs.wm_mask).get_fdata()
        wm_mask[wm_mask < self.inputs.thresh] = 0
        wm_mask[wm_mask > 0] = 1
        wm_mask = wm_mask.astype(np.uint8)

        if self.inputs.erodemsk:
            # Erode the WM mask so partial-volume voxels at the tissue
            # boundary do not bias the median estimate.
            struc = nd.generate_binary_structure(3, 2)
            wm_mask = nd.binary_erosion(wm_mask, structure=struc).astype(np.uint8)

        # Scale so that median(WM) == 1000.
        data = in_file.get_fdata()
        data *= 1000.0 / np.median(data[wm_mask > 0])

        out_file = fname_presuffix(
            self.inputs.in_file, suffix="_harmonized", newpath="."
        )
        in_file.__class__(data, in_file.affine, in_file.header).to_filename(out_file)

        self._results["out_file"] = out_file
        return runtime
class RotationMaskInputSpec(BaseInterfaceInputSpec):
    """Input specification for :class:`RotationMask`."""

    in_file = File(exists=True, mandatory=True, desc="input data")
class RotationMaskOutputSpec(TraitedSpec):
    """Output specification for :class:`RotationMask`."""

    out_file = File(exists=True, desc="rotation mask (if any)")
class RotationMask(SimpleInterface):
    """
    Compute a binary mask of the non-positive (background) regions of the
    image -- the "rotation mask", i.e. areas presumably zero-filled when the
    image was rotated during preprocessing. An empty mask is returned when
    the detected region is tiny (< 500 voxels).
    """

    input_spec = RotationMaskInputSpec
    output_spec = RotationMaskOutputSpec
    def _run_interface(self, runtime):
        in_file = nb.load(self.inputs.in_file)
        data = in_file.get_fdata()
        # Candidate mask: every non-positive voxel.
        mask = data <= 0
        # Pad one pixel to control behavior on borders of binary_opening
        mask = np.pad(mask, pad_width=(1,), mode="constant", constant_values=1)
        # Remove noise
        struc = nd.generate_binary_structure(3, 2)
        mask = nd.binary_opening(mask, structure=struc).astype(np.uint8)
        # Remove small objects
        label_im, nb_labels = nd.label(mask)
        if nb_labels > 2:
            # Keep only the two largest connected components.
            sizes = nd.sum(mask, label_im, list(range(nb_labels + 1)))
            ordered = list(reversed(sorted(zip(sizes, list(range(nb_labels + 1))))))
            for _, label in ordered[2:]:
                mask[label_im == label] = 0
        # Un-pad
        mask = mask[1:-1, 1:-1, 1:-1]
        # If mask is small, clean-up
        if mask.sum() < 500:
            mask = np.zeros_like(mask, dtype=np.uint8)
        out_img = in_file.__class__(mask, in_file.affine, in_file.header)
        out_img.header.set_data_dtype(np.uint8)
        out_file = fname_presuffix(self.inputs.in_file, suffix="_rotmask", newpath=".")
        out_img.to_filename(out_file)
        self._results["out_file"] = out_file
        return runtime
def artifact_mask(imdata, airdata, distance, zscore=10.0):
    """Compute a mask of the artifacts found in the air region.

    Parameters
    ----------
    imdata : numpy.ndarray
        image data
    airdata : numpy.ndarray
        mask of the "air" background; probabilistic (non-integer) masks are
        binarized *in-place* at 0.95
    distance : numpy.ndarray
        normalized distance-to-head map; voxels closer than 0.10 are discarded
    zscore : float
        robust z-score above which a background voxel is flagged as artifact

    Returns
    -------
    numpy.ndarray
        binary (uint8) mask of artifactual voxels within the air region
    """
    # scipy's normal-consistent MAD is equivalent to the previously used
    # statsmodels.robust.scale.mad (both divide the raw MAD by the 0.75
    # normal quantile, ~0.6745), dropping the statsmodels dependency.
    from scipy.stats import median_abs_deviation

    if not np.issubdtype(airdata.dtype, np.integer):
        airdata[airdata < 0.95] = 0
        airdata[airdata > 0.0] = 1

    bg_img = imdata * airdata
    # Not enough background voxels to estimate the noise distribution.
    if np.sum((bg_img > 0).astype(np.uint8)) < 100:
        return np.zeros_like(airdata)

    # Robust location (median) and spread (normalized MAD) of the background.
    bg_location = np.median(bg_img[bg_img > 0])
    bg_spread = median_abs_deviation(bg_img[bg_img > 0], scale="normal")
    bg_img[bg_img > 0] -= bg_location
    bg_img[bg_img > 0] /= bg_spread

    # Flag voxels with a robust z-score above threshold, ignoring voxels
    # too close to the head.
    qi1_img = np.zeros_like(bg_img)
    qi1_img[bg_img > zscore] = 1
    qi1_img[distance < 0.10] = 0

    # Binary opening removes isolated (salt) voxels.
    struc = nd.generate_binary_structure(3, 1)
    qi1_img = nd.binary_opening(qi1_img, struc).astype(np.uint8)
    qi1_img[airdata <= 0] = 0

    return qi1_img
def fuzzy_jaccard(in_tpms, in_mni_tpms):
    """Return the fuzzy Jaccard overlap of each pair of probability maps.

    For every pair taken pairwise from *in_tpms* and *in_mni_tpms*, the
    overlap is the sum of the element-wise minima divided by the sum of the
    element-wise maxima.
    """

    def _overlap(map_a, map_b):
        flat_a = map_a.reshape(-1)
        flat_b = map_b.reshape(-1)
        intersection = np.minimum(flat_a, flat_b).sum()
        union = np.maximum(flat_a, flat_b).sum()
        return float(intersection / union)

    return [_overlap(a, b) for a, b in zip(in_tpms, in_mni_tpms)]
| 33.338318 | 88 | 0.617403 |
import os.path as op
from builtins import zip
import nibabel as nb
import numpy as np
import scipy.ndimage as nd
from mriqc.qc.anatomical import (
art_qi1,
art_qi2,
cjv,
cnr,
efc,
fber,
rpve,
snr,
snr_dietrich,
summary_stats,
volume_fraction,
wm2max,
)
from mriqc.utils.misc import _flatten_dict
from nipype.interfaces.base import (
BaseInterfaceInputSpec,
File,
InputMultiPath,
SimpleInterface,
TraitedSpec,
isdefined,
traits,
)
from nipype.utils.filemanip import fname_presuffix
class StructuralQCInputSpec(BaseInterfaceInputSpec):
    """Input specification for :class:`StructuralQC`."""

    in_file = File(exists=True, mandatory=True, desc="file to be plotted")
    in_noinu = File(exists=True, mandatory=True, desc="image after INU correction")
    in_segm = File(exists=True, mandatory=True, desc="segmentation file from FSL FAST")
    in_bias = File(exists=True, mandatory=True, desc="bias file")
    head_msk = File(exists=True, mandatory=True, desc="head mask")
    air_msk = File(exists=True, mandatory=True, desc="air mask")
    rot_msk = File(exists=True, mandatory=True, desc="rotation mask")
    artifact_msk = File(exists=True, mandatory=True, desc="air mask")
    in_pvms = InputMultiPath(
        File(exists=True),
        mandatory=True,
        desc="partial volume maps from FSL FAST",
    )
    in_tpms = InputMultiPath(File(), desc="tissue probability maps from FSL FAST")
    mni_tpms = InputMultiPath(File(), desc="tissue probability maps from FSL FAST")
    in_fwhm = traits.List(
        traits.Float, mandatory=True, desc="smoothness estimated with AFNI"
    )
    human = traits.Bool(True, usedefault=True, desc="human workflow")
class StructuralQCOutputSpec(TraitedSpec):
    """Output specification for :class:`StructuralQC`."""

    summary = traits.Dict(desc="summary statistics per tissue")
    icvs = traits.Dict(desc="intracranial volume (ICV) fractions")
    rpve = traits.Dict(desc="partial volume fractions")
    size = traits.Dict(desc="image sizes")
    spacing = traits.Dict(desc="image sizes")
    fwhm = traits.Dict(desc="full width half-maximum measure")
    inu = traits.Dict(desc="summary statistics of the bias field")
    snr = traits.Dict
    snrd = traits.Dict
    cnr = traits.Float
    fber = traits.Float
    efc = traits.Float
    qi_1 = traits.Float
    wm2max = traits.Float
    cjv = traits.Float
    out_qc = traits.Dict(desc="output flattened dictionary with all measures")
    out_noisefit = File(exists=True, desc="plot of background noise and chi fitting")
    tpm_overlap = traits.Dict
class StructuralQC(SimpleInterface):
    """Compute anatomical QC measures on a structural image.

    Aggregates noise (SNR, SNRd, CNR, CJV), information-theory (EFC, FBER),
    artifact (QI1), bias (INU), and spatial (FWHM, ICV, rPVE, TPM overlap)
    metrics into a flat dictionary (``out_qc``).
    """

    input_spec = StructuralQCInputSpec
    output_spec = StructuralQCOutputSpec
    def _run_interface(self, runtime):
        imnii = nb.load(self.inputs.in_noinu)
        # Erode tissue masks only for human data whose voxel dimensions are
        # all below 1.9 (presumably mm -- TODO confirm units).
        erode = (
            np.all(np.array(imnii.header.get_zooms()[:3], dtype=np.float32) < 1.9)
            if self.inputs.human
            else False
        )
        # Sanitize the INU-corrected image: drop NaNs and negative values.
        inudata = np.nan_to_num(imnii.get_fdata())
        inudata[inudata < 0] = 0
        if np.all(inudata < 1e-5):
            raise RuntimeError(
                "Input inhomogeneity-corrected data seem empty. "
                "MRIQC failed to process this dataset."
            )
        segnii = nb.load(self.inputs.in_segm)
        segdata = np.asanyarray(segnii.dataobj).astype(np.uint8)
        if np.sum(segdata > 0) < 1e3:
            raise RuntimeError(
                "Input segmentation data is likely corrupt. "
                "MRIQC failed to process this dataset."
            )
        # Load the binary masks (air, artifacts, head, rotation).
        airdata = np.asanyarray(nb.load(self.inputs.air_msk).dataobj).astype(np.uint8)
        artdata = np.asanyarray(nb.load(self.inputs.artifact_msk).dataobj).astype(
            np.uint8
        )
        headdata = np.asanyarray(nb.load(self.inputs.head_msk).dataobj).astype(np.uint8)
        if np.sum(headdata > 0) < 100:
            raise RuntimeError(
                "Detected less than 100 voxels belonging to the head mask. "
                "MRIQC failed to process this dataset."
            )
        rotdata = np.asanyarray(nb.load(self.inputs.rot_msk).dataobj).astype(np.uint8)
        # Partial volume maps; fail early on (near-)empty maps.
        pvmdata = []
        for fname in self.inputs.in_pvms:
            pvmdata.append(nb.load(fname).get_fdata(dtype="float32"))
            if np.sum(pvmdata[-1] > 1e-4) < 10:
                raise RuntimeError(
                    "Detected less than 10 voxels belonging to one tissue prob. map. "
                    "MRIQC failed to process this dataset."
                )
        # Per-tissue summary statistics (median, stdv, mad, n, ...).
        stats = summary_stats(inudata, pvmdata, airdata, erode=erode)
        self._results["summary"] = stats
        # SNR per tissue and average across tissues.
        snrvals = []
        self._results["snr"] = {}
        for tlabel in ["csf", "wm", "gm"]:
            snrvals.append(
                snr(
                    stats[tlabel]["median"],
                    stats[tlabel]["stdv"],
                    stats[tlabel]["n"],
                )
            )
            self._results["snr"][tlabel] = snrvals[-1]
        self._results["snr"]["total"] = float(np.mean(snrvals))
        # Dietrich SNR variant, based on the air-background MAD/stdv.
        snrvals = []
        self._results["snrd"] = {
            tlabel: snr_dietrich(
                stats[tlabel]["median"],
                mad_air=stats["bg"]["mad"],
                sigma_air=stats["bg"]["stdv"],
            )
            for tlabel in ["csf", "wm", "gm"]
        }
        self._results["snrd"]["total"] = float(
            np.mean([val for _, val in list(self._results["snrd"].items())])
        )
        self._results["cnr"] = cnr(
            stats["wm"]["median"],
            stats["gm"]["median"],
            stats["bg"]["stdv"],
            stats["wm"]["stdv"],
            stats["gm"]["stdv"],
        )
        self._results["fber"] = fber(inudata, headdata, rotdata)
        self._results["efc"] = efc(inudata, rotdata)
        self._results["wm2max"] = wm2max(inudata, stats["wm"]["median"])
        self._results["qi_1"] = art_qi1(airdata, artdata)
        self._results["cjv"] = cjv(
            stats["wm"]["median"],
            stats["gm"]["median"],
            stats["wm"]["mad"],
            stats["gm"]["mad"],
        )
        # FWHM expressed in voxel units (AFNI estimate / voxel size).
        fwhm = np.array(self.inputs.in_fwhm[:3]) / np.array(
            imnii.header.get_zooms()[:3]
        )
        self._results["fwhm"] = {
            "x": float(fwhm[0]),
            "y": float(fwhm[1]),
            "z": float(fwhm[2]),
            "avg": float(np.average(fwhm)),
        }
        self._results["icvs"] = volume_fraction(pvmdata)
        self._results["rpve"] = rpve(pvmdata, segdata)
        self._results["size"] = {
            "x": int(inudata.shape[0]),
            "y": int(inudata.shape[1]),
            "z": int(inudata.shape[2]),
        }
        self._results["spacing"] = {
            i: float(v) for i, v in zip(["x", "y", "z"], imnii.header.get_zooms()[:3])
        }
        # Record the 4th (time) dimension when present.
        try:
            self._results["size"]["t"] = int(inudata.shape[3])
        except IndexError:
            pass
        try:
            self._results["spacing"]["tr"] = float(imnii.header.get_zooms()[3])
        except IndexError:
            pass
        # Bias-field summary within the segmented region
        # (range between the 5th and 95th percentiles, and median).
        bias = nb.load(self.inputs.in_bias).get_fdata()[segdata > 0]
        self._results["inu"] = {
            "range": float(
                np.abs(np.percentile(bias, 95.0) - np.percentile(bias, 5.0))
            ),
            "med": float(np.median(bias)),
        }
        # Fuzzy Jaccard overlap between subject and MNI tissue maps
        # (assumes in_pvms/mni_tpms ordered csf, gm, wm -- TODO confirm).
        mni_tpms = [nb.load(tpm).get_fdata() for tpm in self.inputs.mni_tpms]
        in_tpms = [nb.load(tpm).get_fdata() for tpm in self.inputs.in_pvms]
        overlap = fuzzy_jaccard(in_tpms, mni_tpms)
        self._results["tpm_overlap"] = {
            "csf": overlap[0],
            "gm": overlap[1],
            "wm": overlap[2],
        }
        # Flatten the nested result dictionaries into out_qc.
        self._results["out_qc"] = _flatten_dict(self._results)
        return runtime
class ArtifactMaskInputSpec(BaseInterfaceInputSpec):
    """Input specification for :class:`ArtifactMask`."""

    in_file = File(exists=True, mandatory=True, desc="File to be plotted")
    head_mask = File(exists=True, mandatory=True, desc="head mask")
    rot_mask = File(exists=True, desc="a rotation mask")
    nasion_post_mask = File(
        exists=True,
        mandatory=True,
        desc="nasion to posterior of cerebellum mask",
    )
class ArtifactMaskOutputSpec(TraitedSpec):
    """Output specification for :class:`ArtifactMask`."""

    out_hat_msk = File(exists=True, desc='output "hat" mask')
    out_art_msk = File(exists=True, desc="output artifacts mask")
    out_air_msk = File(exists=True, desc='output "hat" mask, without artifacts')
class ArtifactMask(SimpleInterface):
    """
    Compute the air ("hat") mask and the mask of artifacts within it,
    using :func:`artifact_mask`, and write both (plus the artifact-free
    air mask) as NIfTI files.
    """

    input_spec = ArtifactMaskInputSpec
    output_spec = ArtifactMaskOutputSpec
    def _run_interface(self, runtime):
        imnii = nb.load(self.inputs.in_file)
        imdata = np.nan_to_num(imnii.get_fdata().astype(np.float32))
        # Remove negative values
        imdata[imdata < 0] = 0
        hmdata = np.asanyarray(nb.load(self.inputs.head_mask).dataobj)
        npdata = np.asanyarray(nb.load(self.inputs.nasion_post_mask).dataobj)
        # Invert the head mask to obtain the air mask
        airdata = np.ones_like(hmdata, dtype=np.uint8)
        airdata[hmdata == 1] = 0
        # Calculate distance to border
        dist = nd.morphology.distance_transform_edt(airdata)
        # Apply nasion-to-posterior mask
        airdata[npdata == 1] = 0
        dist[npdata == 1] = 0
        dist /= dist.max()
        # Apply rotation mask (if supplied)
        if isdefined(self.inputs.rot_mask):
            rotmskdata = np.asanyarray(nb.load(self.inputs.rot_mask).dataobj)
            airdata[rotmskdata == 1] = 0
        # Run the artifact detection
        qi1_img = artifact_mask(imdata, airdata, dist)
        # Derive output filenames from the input name (handle .nii.gz).
        fname, ext = op.splitext(op.basename(self.inputs.in_file))
        if ext == ".gz":
            fname, ext2 = op.splitext(fname)
            ext = ext2 + ext
        self._results["out_hat_msk"] = op.abspath("{}_hat{}".format(fname, ext))
        self._results["out_art_msk"] = op.abspath("{}_art{}".format(fname, ext))
        self._results["out_air_msk"] = op.abspath("{}_air{}".format(fname, ext))
        hdr = imnii.header.copy()
        hdr.set_data_dtype(np.uint8)
        nb.Nifti1Image(qi1_img, imnii.affine, hdr).to_filename(
            self._results["out_art_msk"]
        )
        nb.Nifti1Image(airdata, imnii.affine, hdr).to_filename(
            self._results["out_hat_msk"]
        )
        # Air mask with the artifactual voxels removed.
        airdata[qi1_img > 0] = 0
        nb.Nifti1Image(airdata, imnii.affine, hdr).to_filename(
            self._results["out_air_msk"]
        )
        return runtime
class ComputeQI2InputSpec(BaseInterfaceInputSpec):
    """Input specification for :class:`ComputeQI2`."""

    in_file = File(exists=True, mandatory=True, desc="File to be plotted")
    air_msk = File(exists=True, mandatory=True, desc="air (without artifacts) mask")
class ComputeQI2OutputSpec(TraitedSpec):
    """Output specification for :class:`ComputeQI2`."""

    qi2 = traits.Float(desc="computed QI2 value")
    out_file = File(desc="output plot: noise fit")
class ComputeQI2(SimpleInterface):
    """
    Compute the QI2 quality index, based on the fit of the noise
    distribution within the (artifact-free) air mask, and generate
    the corresponding noise-fit plot (both produced by ``art_qi2``).
    """

    input_spec = ComputeQI2InputSpec
    output_spec = ComputeQI2OutputSpec
    def _run_interface(self, runtime):
        # Delegate the computation to the QC helper; it returns the metric
        # value and the path of the noise-fit plot.
        imdata = nb.load(self.inputs.in_file).get_fdata()
        airdata = nb.load(self.inputs.air_msk).get_fdata()
        qi2, out_file = art_qi2(imdata, airdata)
        self._results["qi2"] = qi2
        self._results["out_file"] = out_file
        return runtime
class HarmonizeInputSpec(BaseInterfaceInputSpec):
    """Input specification for :class:`Harmonize`."""

    in_file = File(
        exists=True, mandatory=True, desc="input data (after bias correction)"
    )
    wm_mask = File(exists=True, mandatory=True, desc="white-matter mask")
    erodemsk = traits.Bool(True, usedefault=True, desc="erode mask")
    thresh = traits.Float(0.9, usedefault=True, desc="WM probability threshold")
class HarmonizeOutputSpec(TraitedSpec):
    """Output specification for :class:`Harmonize`."""

    out_file = File(exists=True, desc="input data (after intensity harmonization)")
class Harmonize(SimpleInterface):
    """
    Harmonize the image intensities.

    Rescales the voxel values so that the median intensity of the
    white matter (WM) is 1000, making intensities comparable across
    inputs.
    """

    input_spec = HarmonizeInputSpec
    output_spec = HarmonizeOutputSpec

    def _run_interface(self, runtime):
        in_file = nb.load(self.inputs.in_file)

        # Binarize the WM probability map at the user-selected threshold.
        # BUG FIX: the declared ``thresh`` input was previously ignored and
        # the 0.9 cutoff was hard-coded (same default, so behavior is
        # unchanged unless the caller overrides ``thresh``).
        wm_mask = nb.load(self.inputs.wm_mask).get_fdata()
        wm_mask[wm_mask < self.inputs.thresh] = 0
        wm_mask[wm_mask > 0] = 1
        wm_mask = wm_mask.astype(np.uint8)

        if self.inputs.erodemsk:
            # Erode the WM mask so partial-volume voxels at the tissue
            # boundary do not bias the median estimate.
            struc = nd.generate_binary_structure(3, 2)
            wm_mask = nd.binary_erosion(wm_mask, structure=struc).astype(np.uint8)

        # Scale so that median(WM) == 1000.
        data = in_file.get_fdata()
        data *= 1000.0 / np.median(data[wm_mask > 0])

        out_file = fname_presuffix(
            self.inputs.in_file, suffix="_harmonized", newpath="."
        )
        in_file.__class__(data, in_file.affine, in_file.header).to_filename(out_file)

        self._results["out_file"] = out_file
        return runtime
class RotationMaskInputSpec(BaseInterfaceInputSpec):
    """Input specification for :class:`RotationMask`."""

    in_file = File(exists=True, mandatory=True, desc="input data")
class RotationMaskOutputSpec(TraitedSpec):
    """Output specification for :class:`RotationMask`."""

    out_file = File(exists=True, desc="rotation mask (if any)")
class RotationMask(SimpleInterface):
    """
    Compute a binary mask of the non-positive (background) regions of the
    image -- the "rotation mask", i.e. areas presumably zero-filled when the
    image was rotated during preprocessing. An empty mask is returned when
    the detected region is tiny (< 500 voxels).
    """

    input_spec = RotationMaskInputSpec
    output_spec = RotationMaskOutputSpec
    def _run_interface(self, runtime):
        in_file = nb.load(self.inputs.in_file)
        data = in_file.get_fdata()
        # Candidate mask: every non-positive voxel.
        mask = data <= 0
        # Pad one pixel to control behavior on borders of binary_opening.
        mask = np.pad(mask, pad_width=(1,), mode="constant", constant_values=1)
        # Opening removes noise.
        struc = nd.generate_binary_structure(3, 2)
        mask = nd.binary_opening(mask, structure=struc).astype(np.uint8)
        # Keep only the two largest connected components.
        label_im, nb_labels = nd.label(mask)
        if nb_labels > 2:
            sizes = nd.sum(mask, label_im, list(range(nb_labels + 1)))
            ordered = list(reversed(sorted(zip(sizes, list(range(nb_labels + 1))))))
            for _, label in ordered[2:]:
                mask[label_im == label] = 0
        # Un-pad.
        mask = mask[1:-1, 1:-1, 1:-1]
        # If the mask is small, return an empty mask instead.
        if mask.sum() < 500:
            mask = np.zeros_like(mask, dtype=np.uint8)
        out_img = in_file.__class__(mask, in_file.affine, in_file.header)
        out_img.header.set_data_dtype(np.uint8)
        out_file = fname_presuffix(self.inputs.in_file, suffix="_rotmask", newpath=".")
        out_img.to_filename(out_file)
        self._results["out_file"] = out_file
        return runtime
def artifact_mask(imdata, airdata, distance, zscore=10.0):
    """Compute a mask of the artifacts found in the air region.

    Parameters
    ----------
    imdata : numpy.ndarray
        image data
    airdata : numpy.ndarray
        mask of the "air" background; probabilistic (non-integer) masks are
        binarized *in-place* at 0.95
    distance : numpy.ndarray
        normalized distance-to-head map; voxels closer than 0.10 are discarded
    zscore : float
        robust z-score above which a background voxel is flagged as artifact

    Returns
    -------
    numpy.ndarray
        binary (uint8) mask of artifactual voxels within the air region
    """
    # scipy's normal-consistent MAD is equivalent to the previously used
    # statsmodels.robust.scale.mad (both divide the raw MAD by the 0.75
    # normal quantile, ~0.6745), dropping the statsmodels dependency.
    from scipy.stats import median_abs_deviation

    if not np.issubdtype(airdata.dtype, np.integer):
        airdata[airdata < 0.95] = 0
        airdata[airdata > 0.0] = 1

    bg_img = imdata * airdata
    # Not enough background voxels to estimate the noise distribution.
    if np.sum((bg_img > 0).astype(np.uint8)) < 100:
        return np.zeros_like(airdata)

    # Robust location (median) and spread (normalized MAD) of the background.
    bg_location = np.median(bg_img[bg_img > 0])
    bg_spread = median_abs_deviation(bg_img[bg_img > 0], scale="normal")
    bg_img[bg_img > 0] -= bg_location
    bg_img[bg_img > 0] /= bg_spread

    # Flag voxels with a robust z-score above threshold, ignoring voxels
    # too close to the head.
    qi1_img = np.zeros_like(bg_img)
    qi1_img[bg_img > zscore] = 1
    qi1_img[distance < 0.10] = 0

    # Binary opening removes isolated (salt) voxels.
    struc = nd.generate_binary_structure(3, 1)
    qi1_img = nd.binary_opening(qi1_img, struc).astype(np.uint8)
    qi1_img[airdata <= 0] = 0

    return qi1_img
def fuzzy_jaccard(in_tpms, in_mni_tpms):
    """Return the fuzzy Jaccard overlap of each pair of probability maps.

    For every pair taken pairwise from *in_tpms* and *in_mni_tpms*, the
    overlap is the sum of the element-wise minima divided by the sum of the
    element-wise maxima.
    """

    def _overlap(map_a, map_b):
        flat_a = map_a.reshape(-1)
        flat_b = map_b.reshape(-1)
        intersection = np.minimum(flat_a, flat_b).sum()
        union = np.maximum(flat_a, flat_b).sum()
        return float(intersection / union)

    return [_overlap(a, b) for a, b in zip(in_tpms, in_mni_tpms)]
| true | true |
f735be27f6eb25d8ee16a99aaab2b9b696fe22b1 | 1,800 | py | Python | api/charts.py | carrier-io/backend_performance | a6f9ff682dabe8103733b51e23a01c2553e3aacc | [
"Apache-2.0"
] | null | null | null | api/charts.py | carrier-io/backend_performance | a6f9ff682dabe8103733b51e23a01c2553e3aacc | [
"Apache-2.0"
] | null | null | null | api/charts.py | carrier-io/backend_performance | a6f9ff682dabe8103733b51e23a01c2553e3aacc | [
"Apache-2.0"
] | 1 | 2021-06-16T12:14:42.000Z | 2021-06-16T12:14:42.000Z | from ...shared.utils.restApi import RestResource
from ...shared.utils.api_utils import build_req_parser
from ..utils.charts_utils import (requests_summary, requests_hits, avg_responses, summary_table, get_issues,
get_data_from_influx)
class ReportChartsAPI(RestResource):
get_rules = (
dict(name="low_value", type=float, default=0, location="args"),
dict(name="high_value", type=float, default=100, location="args"),
dict(name="start_time", type=str, default="", location="args"),
dict(name="end_time", type=str, default="", location="args"),
dict(name="aggregator", type=str, default="auto", location="args"),
dict(name="sampler", type=str, default="REQUEST", location="args"),
dict(name="metric", type=str, default="", location="args"),
dict(name="scope", type=str, default="", location="args"),
dict(name="build_id", type=str, location="args"),
dict(name="test_name", type=str, location="args"),
dict(name="lg_type", type=str, location="args"),
dict(name='status', type=str, default='all', location="args")
)
mapping = {
"requests": {
"summary": requests_summary,
"hits": requests_hits,
"average": avg_responses,
"table": summary_table,
"data": get_data_from_influx
},
"errors": {
"table": get_issues
}
}
def __init__(self):
super().__init__()
self.__init_req_parsers()
def __init_req_parsers(self):
self._parser_get = build_req_parser(rules=self.get_rules)
def get(self, source: str, target: str):
args = self._parser_get.parse_args(strict=False)
return self.mapping[source][target](args)
| 40 | 108 | 0.614444 | from ...shared.utils.restApi import RestResource
from ...shared.utils.api_utils import build_req_parser
from ..utils.charts_utils import (requests_summary, requests_hits, avg_responses, summary_table, get_issues,
get_data_from_influx)
class ReportChartsAPI(RestResource):
get_rules = (
dict(name="low_value", type=float, default=0, location="args"),
dict(name="high_value", type=float, default=100, location="args"),
dict(name="start_time", type=str, default="", location="args"),
dict(name="end_time", type=str, default="", location="args"),
dict(name="aggregator", type=str, default="auto", location="args"),
dict(name="sampler", type=str, default="REQUEST", location="args"),
dict(name="metric", type=str, default="", location="args"),
dict(name="scope", type=str, default="", location="args"),
dict(name="build_id", type=str, location="args"),
dict(name="test_name", type=str, location="args"),
dict(name="lg_type", type=str, location="args"),
dict(name='status', type=str, default='all', location="args")
)
mapping = {
"requests": {
"summary": requests_summary,
"hits": requests_hits,
"average": avg_responses,
"table": summary_table,
"data": get_data_from_influx
},
"errors": {
"table": get_issues
}
}
def __init__(self):
super().__init__()
self.__init_req_parsers()
def __init_req_parsers(self):
self._parser_get = build_req_parser(rules=self.get_rules)
def get(self, source: str, target: str):
args = self._parser_get.parse_args(strict=False)
return self.mapping[source][target](args)
| true | true |
f735beed301629b3bd7382dd9c3ee57eb053f65d | 5,491 | py | Python | unit_tests/add_land_charge/test_charge_description.py | LandRegistry/maintain-frontend | d92446a9972ebbcd9a43a7a7444a528aa2f30bf7 | [
"MIT"
] | 1 | 2019-10-03T13:58:29.000Z | 2019-10-03T13:58:29.000Z | unit_tests/add_land_charge/test_charge_description.py | LandRegistry/maintain-frontend | d92446a9972ebbcd9a43a7a7444a528aa2f30bf7 | [
"MIT"
] | null | null | null | unit_tests/add_land_charge/test_charge_description.py | LandRegistry/maintain-frontend | d92446a9972ebbcd9a43a7a7444a528aa2f30bf7 | [
"MIT"
] | 1 | 2021-04-11T05:24:57.000Z | 2021-04-11T05:24:57.000Z | from maintain_frontend import main
from flask_testing import TestCase
from unit_tests.utilities import Utilities
from maintain_frontend.dependencies.session_api.session import Session
from maintain_frontend.models import LocalLandChargeItem
from maintain_frontend.constants.permissions import Permissions
from flask import url_for
from unittest.mock import patch, PropertyMock
CHARGE_DESCRIPTION_PATH = 'maintain_frontend.add_land_charge.charge_description'
class TestChargeDescription(TestCase):
def create_app(self):
main.app.testing = True
Utilities.mock_session_cookie_flask_test(self)
return main.app
def setUp(self):
main.app.config['Testing'] = True
main.app.config['WTF_CSRF_ENABLED'] = False
def test_get_charge_description_success(self):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
state = LocalLandChargeItem()
self.mock_session.return_value.add_charge_state = state
self.mock_session.return_value.user.permissions = [Permissions.add_llc]
response = self.client.get(url_for('add_land_charge.get_charge_description'))
self.assert_status(response, 200)
self.assert_template_used('charge_description.html')
def test_get_charge_description_add_charge_state_none(self):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
self.mock_session.return_value.add_charge_state = None
self.mock_session.return_value.user.permissions = [Permissions.add_llc]
response = self.client.get(url_for('add_land_charge.get_charge_description'))
self.assert_status(response, 302)
self.assertRedirects(response, url_for('add_land_charge.new'))
@patch('{}.g'.format(CHARGE_DESCRIPTION_PATH))
def test_get_charge_description_exception(self, mock_g):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
raise_exception = PropertyMock(side_effect=Exception('test exception'))
type(mock_g).session = raise_exception
try:
response = self.client.get(url_for('add_land_charge.get_charge_description'))
except Exception:
self.assertStatus(response, 302)
self.assertRedirects(response, url_for("add_land_charge.new"))
def test_get_charge_description_no_permission(self):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
self.mock_session.return_value.user.permissions = []
response = self.client.get(url_for('add_land_charge.get_charge_description'))
self.assert_status(response, 302)
self.assertRedirects(response, '/not-authorised')
@patch('{}.ReviewRouter'.format(CHARGE_DESCRIPTION_PATH))
def test_post_charge_description_success(self, mock_review_router):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
mock_review_router.get_redirect_url.return_value = url_for('add_land_charge.get_charge_date')
state = LocalLandChargeItem()
self.mock_session.return_value.add_charge_state = state
self.mock_session.return_value.user.permissions = [Permissions.add_llc]
response = self.client.post(url_for('add_land_charge.post_charge_description'),
data={'charge-description': 'description'})
mock_review_router.update_edited_field.assert_called_with('supplementary_information', 'description')
self.assert_status(response, 302)
self.assertRedirects(response, url_for('add_land_charge.get_charge_date'))
def test_post_charge_description_add_charge_state_none(self):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
self.mock_session.return_value.add_charge_state = None
self.mock_session.return_value.user.permissions = [Permissions.add_llc]
response = self.client.post(url_for('add_land_charge.post_charge_description'),
data={'charge-description': 'description'})
self.assert_status(response, 302)
self.assertRedirects(response, url_for('add_land_charge.new'))
def test_post_charge_description_max_length_validation(self):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
state = LocalLandChargeItem()
self.mock_session.return_value.add_charge_state = state
self.mock_session.return_value.user.permissions = [Permissions.add_llc]
response = self.client.post(url_for('add_land_charge.post_charge_description'),
data={'charge-description': 'a' * 1501})
self.assert_status(response, 400)
self.assert_template_used('charge_description.html')
self.assertIn('Answer too long', response.data.decode())
self.assertIn('Reduce your answer to 1500 characters or fewer', response.data.decode())
def test_post_charge_description_no_permission(self):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
self.mock_session.return_value.user.permissions = []
response = self.client.get(url_for('add_land_charge.post_charge_description'),
data={'charge-description': 'description'})
self.assert_status(response, 302)
self.assertRedirects(response, '/not-authorised')
| 44.642276 | 109 | 0.729557 | from maintain_frontend import main
from flask_testing import TestCase
from unit_tests.utilities import Utilities
from maintain_frontend.dependencies.session_api.session import Session
from maintain_frontend.models import LocalLandChargeItem
from maintain_frontend.constants.permissions import Permissions
from flask import url_for
from unittest.mock import patch, PropertyMock
CHARGE_DESCRIPTION_PATH = 'maintain_frontend.add_land_charge.charge_description'
class TestChargeDescription(TestCase):
def create_app(self):
main.app.testing = True
Utilities.mock_session_cookie_flask_test(self)
return main.app
def setUp(self):
main.app.config['Testing'] = True
main.app.config['WTF_CSRF_ENABLED'] = False
def test_get_charge_description_success(self):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
state = LocalLandChargeItem()
self.mock_session.return_value.add_charge_state = state
self.mock_session.return_value.user.permissions = [Permissions.add_llc]
response = self.client.get(url_for('add_land_charge.get_charge_description'))
self.assert_status(response, 200)
self.assert_template_used('charge_description.html')
def test_get_charge_description_add_charge_state_none(self):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
self.mock_session.return_value.add_charge_state = None
self.mock_session.return_value.user.permissions = [Permissions.add_llc]
response = self.client.get(url_for('add_land_charge.get_charge_description'))
self.assert_status(response, 302)
self.assertRedirects(response, url_for('add_land_charge.new'))
@patch('{}.g'.format(CHARGE_DESCRIPTION_PATH))
def test_get_charge_description_exception(self, mock_g):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
raise_exception = PropertyMock(side_effect=Exception('test exception'))
type(mock_g).session = raise_exception
try:
response = self.client.get(url_for('add_land_charge.get_charge_description'))
except Exception:
self.assertStatus(response, 302)
self.assertRedirects(response, url_for("add_land_charge.new"))
def test_get_charge_description_no_permission(self):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
self.mock_session.return_value.user.permissions = []
response = self.client.get(url_for('add_land_charge.get_charge_description'))
self.assert_status(response, 302)
self.assertRedirects(response, '/not-authorised')
@patch('{}.ReviewRouter'.format(CHARGE_DESCRIPTION_PATH))
def test_post_charge_description_success(self, mock_review_router):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
mock_review_router.get_redirect_url.return_value = url_for('add_land_charge.get_charge_date')
state = LocalLandChargeItem()
self.mock_session.return_value.add_charge_state = state
self.mock_session.return_value.user.permissions = [Permissions.add_llc]
response = self.client.post(url_for('add_land_charge.post_charge_description'),
data={'charge-description': 'description'})
mock_review_router.update_edited_field.assert_called_with('supplementary_information', 'description')
self.assert_status(response, 302)
self.assertRedirects(response, url_for('add_land_charge.get_charge_date'))
def test_post_charge_description_add_charge_state_none(self):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
self.mock_session.return_value.add_charge_state = None
self.mock_session.return_value.user.permissions = [Permissions.add_llc]
response = self.client.post(url_for('add_land_charge.post_charge_description'),
data={'charge-description': 'description'})
self.assert_status(response, 302)
self.assertRedirects(response, url_for('add_land_charge.new'))
def test_post_charge_description_max_length_validation(self):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
state = LocalLandChargeItem()
self.mock_session.return_value.add_charge_state = state
self.mock_session.return_value.user.permissions = [Permissions.add_llc]
response = self.client.post(url_for('add_land_charge.post_charge_description'),
data={'charge-description': 'a' * 1501})
self.assert_status(response, 400)
self.assert_template_used('charge_description.html')
self.assertIn('Answer too long', response.data.decode())
self.assertIn('Reduce your answer to 1500 characters or fewer', response.data.decode())
def test_post_charge_description_no_permission(self):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
self.mock_session.return_value.user.permissions = []
response = self.client.get(url_for('add_land_charge.post_charge_description'),
data={'charge-description': 'description'})
self.assert_status(response, 302)
self.assertRedirects(response, '/not-authorised')
| true | true |
f735bef05d8b537cc5a95a3cbaf0b56e519999db | 5,006 | py | Python | api/util.py | GrumpyMeow/ownphotos-backend | 98d8e9136e9188009afe08657f943dba3df80ccb | [
"MIT"
] | null | null | null | api/util.py | GrumpyMeow/ownphotos-backend | 98d8e9136e9188009afe08657f943dba3df80ccb | [
"MIT"
] | null | null | null | api/util.py | GrumpyMeow/ownphotos-backend | 98d8e9136e9188009afe08657f943dba3df80ccb | [
"MIT"
] | null | null | null | import base64
import pickle
import itertools
from scipy import linalg
from sklearn.decomposition import PCA
import numpy as np
from sklearn import cluster
from sklearn import mixture
from scipy.spatial import distance
from sklearn.preprocessing import StandardScaler
import requests
from config import mapzen_api_key, mapbox_api_key
import logging
import logging.handlers
import spacy
nlp = spacy.load('en_core_web_sm')
# Module-level application logger writing to a size-rotated log file.
logger = logging.getLogger('ownphotos')
fomatter = logging.Formatter(  # (sic "fomatter") -- kept as-is to avoid changing module attributes
    '%(asctime)s : %(filename)s : %(funcName)s : %(lineno)s : %(levelname)s : %(message)s')
fileMaxByte = 256 * 1024 * 200  # 256 KiB * 200 = 52,428,800 bytes (~50 MiB) per file
fileHandler = logging.handlers.RotatingFileHandler(
    './logs/ownphotos.log', maxBytes=fileMaxByte, backupCount=10)
fileHandler.setFormatter(fomatter)
logger.addHandler(fileHandler)
logger.setLevel(logging.INFO)
def convert_to_degrees(values):
    """Convert EXIF GPS coordinates to decimal degrees.

    :param values: degrees, minutes and seconds as three rational numbers,
        each exposing ``num``/``den`` attributes (e.g. ``exifread.utils.Ratio``)
    :rtype: float
    """
    deg, minutes, sec = (
        float(values[i].num) / float(values[i].den) for i in range(3)
    )
    return deg + (minutes / 60.0) + (sec / 3600.0)
# Weekday number (1=Monday ... 7=Sunday, matching datetime.isoweekday()) -> English name.
weekdays = {1:'Monday',2:'Tuesday',3:'Wednesday',4:'Thursday',5:'Friday',6:'Saturday',7:'Sunday'}
def compute_bic(kmeans, X):
    """Compute the Bayesian information criterion for a fitted clustering.

    Parameters
    ----------
    kmeans : object
        Fitted scikit-learn clustering object exposing
        ``cluster_centers_``, ``labels_`` and ``n_clusters``.
    X : numpy.ndarray
        2-D array of the clustered data points.

    Returns
    -------
    The BIC value.
    """
    centers = kmeans.cluster_centers_
    labels = kmeans.labels_
    n_clusters = kmeans.n_clusters
    cluster_sizes = np.bincount(labels)
    n_points, n_dims = X.shape
    # pooled within-cluster variance: total squared distance of every point
    # to its assigned center, normalized by the degrees of freedom
    total_sq_dist = sum(
        sum(distance.cdist(
            X[np.where(labels == i)], [centers[i]], 'euclidean') ** 2)
        for i in range(n_clusters))
    cl_var = (1.0 / (n_points - n_clusters) / n_dims) * total_sq_dist
    const_term = 0.5 * n_clusters * np.log(n_points) * (n_dims + 1)
    log_likelihood = np.sum([
        cluster_sizes[i] * np.log(cluster_sizes[i])
        - cluster_sizes[i] * np.log(n_points)
        - ((cluster_sizes[i] * n_dims) / 2) * np.log(2 * np.pi * cl_var)
        - ((cluster_sizes[i] - 1) * n_dims / 2)
        for i in range(n_clusters)])
    return log_likelihood - const_term
def mapzen_reverse_geocode(lat, lon):
    """Reverse geocode ``lat``/``lon`` with the Mapzen search API.

    Returns the decoded JSON response with an extra ``'search_text'`` key
    containing the de-duplicated descriptive tokens of the first feature,
    or an empty dict when the request does not return HTTP 200.
    """
    url = "https://search.mapzen.com/v1/reverse?point.lat=%f&point.lon=%f&size=1&lang=en&api_key=%s"%(lat,lon,mapzen_api_key)
    resp = requests.get(url)
    if resp.status_code != 200:
        return {}
    resp_json = resp.json()
    search_text = []
    if len(resp_json['features']) > 0:
        properties = resp_json['features'][0]['properties']
        # collect the descriptive fields in a fixed order (was 8 copy-pasted
        # `if key in ...` blocks)
        for key in ('country', 'county', 'macrocounty', 'locality',
                    'region', 'neighbourhood', 'name', 'label'):
            if key in properties:
                search_text.append(properties[key])
    # normalize into unique whitespace-separated tokens
    search_text = ' '.join(search_text)
    search_text = search_text.replace(',', ' ')
    search_text_tokens = list(set(search_text.split()))
    # BUG FIX: previously this assembly/return was nested inside the
    # `if len(features) > 0:` block, so a 200 response with no features fell
    # through and implicitly returned None. Now such responses return the
    # JSON with an empty search_text.
    resp_json['search_text'] = ' '.join(search_text_tokens)
    return resp_json
def mapbox_reverse_geocode(lat, lon):
    """Reverse geocode ``lat``/``lon`` with the Mapbox places API.

    Returns the decoded JSON response with an extra ``'search_text'`` key
    containing the ``text`` of every returned feature joined by spaces, or
    an empty dict when the request does not return HTTP 200.
    """
    url = "https://api.mapbox.com/geocoding/v5/mapbox.places/%f,%f.json?access_token=%s"%(lon,lat,mapbox_api_key)
    resp = requests.get(url)
    # BUG FIX: removed stray debug `print(resp)` left over from development;
    # all diagnostics go through the module logger.
    if resp.status_code != 200:
        logger.info('mapbox returned non 200 response.')
        return {}
    resp_json = resp.json()
    search_terms = [
        feature['text'] for feature in resp_json.get('features', [])]
    logger.info('location search terms: %s'%(' '.join(search_terms)))
    resp_json['search_text'] = ' '.join(search_terms)
    return resp_json
| 35.006993 | 125 | 0.620655 | import base64
import pickle
import itertools
from scipy import linalg
from sklearn.decomposition import PCA
import numpy as np
from sklearn import cluster
from sklearn import mixture
from scipy.spatial import distance
from sklearn.preprocessing import StandardScaler
import requests
from config import mapzen_api_key, mapbox_api_key
import logging
import logging.handlers
import spacy
nlp = spacy.load('en_core_web_sm')
logger = logging.getLogger('ownphotos')
fomatter = logging.Formatter(
'%(asctime)s : %(filename)s : %(funcName)s : %(lineno)s : %(levelname)s : %(message)s')
fileMaxByte = 256 * 1024 * 200
fileHandler = logging.handlers.RotatingFileHandler(
'./logs/ownphotos.log', maxBytes=fileMaxByte, backupCount=10)
fileHandler.setFormatter(fomatter)
logger.addHandler(fileHandler)
logger.setLevel(logging.INFO)
def convert_to_degrees(values):
d = float(values[0].num) / float(values[0].den)
m = float(values[1].num) / float(values[1].den)
s = float(values[2].num) / float(values[2].den)
return d + (m / 60.0) + (s / 3600.0)
weekdays = {1:'Monday',2:'Tuesday',3:'Wednesday',4:'Thursday',5:'Friday',6:'Saturday',7:'Sunday'}
def compute_bic(kmeans,X):
centers = [kmeans.cluster_centers_]
labels = kmeans.labels_
m = kmeans.n_clusters
n = np.bincount(labels)
N, d = X.shape
cl_var = (1.0 / (N - m) / d) * sum([sum(distance.cdist(X[np.where(labels == i)], [centers[0][i]],
'euclidean')**2) for i in range(m)])
const_term = 0.5 * m * np.log(N) * (d+1)
BIC = np.sum([n[i] * np.log(n[i]) -
n[i] * np.log(N) -
((n[i] * d) / 2) * np.log(2*np.pi*cl_var) -
((n[i] - 1) * d/ 2) for i in range(m)]) - const_term
return(BIC)
def mapzen_reverse_geocode(lat,lon):
url = "https://search.mapzen.com/v1/reverse?point.lat=%f&point.lon=%f&size=1&lang=en&api_key=%s"%(lat,lon,mapzen_api_key)
resp = requests.get(url)
if resp.status_code == 200:
resp_json = resp.json()
search_text = []
if len(resp_json['features']) > 0:
if 'country' in resp_json['features'][0]['properties'].keys():
search_text.append(resp_json['features'][0]['properties']['country'])
if 'county' in resp_json['features'][0]['properties'].keys():
search_text.append(resp_json['features'][0]['properties']['county'])
if 'macrocounty' in resp_json['features'][0]['properties'].keys():
search_text.append(resp_json['features'][0]['properties']['macrocounty'])
if 'locality' in resp_json['features'][0]['properties'].keys():
search_text.append(resp_json['features'][0]['properties']['locality'])
if 'region' in resp_json['features'][0]['properties'].keys():
search_text.append(resp_json['features'][0]['properties']['region'])
if 'neighbourhood' in resp_json['features'][0]['properties'].keys():
search_text.append(resp_json['features'][0]['properties']['neighbourhood'])
if 'name' in resp_json['features'][0]['properties'].keys():
search_text.append(resp_json['features'][0]['properties']['name'])
if 'label' in resp_json['features'][0]['properties'].keys():
search_text.append(resp_json['features'][0]['properties']['label'])
search_text = ' '.join(search_text)
search_text = search_text.replace(',',' ')
search_text_tokens = list(set(search_text.split()))
search_text = ' '.join(search_text_tokens)
resp_json['search_text'] = search_text
return resp_json
else:
return {}
def mapbox_reverse_geocode(lat,lon):
url = "https://api.mapbox.com/geocoding/v5/mapbox.places/%f,%f.json?access_token=%s"%(lon,lat,mapbox_api_key)
resp = requests.get(url)
print(resp)
if resp.status_code == 200:
resp_json = resp.json()
search_terms = []
if 'features' in resp_json.keys():
for feature in resp_json['features']:
search_terms.append(feature['text'])
logger.info('location search terms: %s'%(' '.join(search_terms)))
resp_json['search_text'] = ' '.join(search_terms)
return resp_json
else:
logger.info('mapbox returned non 200 response.')
return {}
| true | true |
f735bfb68d5e14f28769863f789bd090ff80a21a | 49,230 | py | Python | ceed/tests/test_app/test_stage.py | matham/Ceed | b81a14a6b8211e5f4582418ddea34c951ab2667e | [
"MIT"
] | null | null | null | ceed/tests/test_app/test_stage.py | matham/Ceed | b81a14a6b8211e5f4582418ddea34c951ab2667e | [
"MIT"
] | null | null | null | ceed/tests/test_app/test_stage.py | matham/Ceed | b81a14a6b8211e5f4582418ddea34c951ab2667e | [
"MIT"
] | null | null | null | import os
import sys
import math
from contextlib import contextmanager
from math import isclose
import numpy as np
import pytest
import ceed
from .examples.stages import create_test_stages, make_stage, StageWrapper, \
stage_classes, assert_stages_same
from typing import Type, List, Union
from ceed.tests.ceed_app import CeedTestApp
from ceed.tests.test_app import replace_text, touch_widget, escape, \
run_plugin_experiment
from ceed.stage import CeedStage, CeedStageRef, last_experiment_stage_name
from ceed.function import CeedFuncRef, FuncBase, FuncGroup
from ceed.shape import CeedShape, CeedShapeGroup
from .examples.shapes import assert_add_three_groups, CircleShapeP1
from .examples.funcs import create_funcs, GroupFunction
from .examples.stages import fake_plugin_stage, SerialAllStage
from .examples.experiment import wait_stage_experiment_started, \
wait_experiment_done, measure_fps, wait_experiment_stopped
from .test_func import assert_func_params_in_gui, \
replace_last_ref_with_original_func, assert_funcs_same
pytestmark = pytest.mark.ceed_app
async def assert_set_params_in_gui(
        stage_app: CeedTestApp, stage: StageWrapper, settings=None,
        check_name=False):
    """Drives the stage-settings GUI so it matches the wrapper's desired
    values, asserting at every step that the underlying ``CeedStage`` stays
    in sync with the widgets.

    If ``settings`` is None the settings popup is opened (and closed again
    at the end); otherwise the given settings widget is reused and left open.
    Returns the settings widget.
    """
    opened_settings = settings is None
    if opened_settings:
        settings = await open_stage_settings(stage_app, stage.stage)
    if check_name:
        # GUI should still show the stage's current name, not the wrapper's
        name = stage_app.resolve_widget(settings).down(
            test_name='stage name')()
        assert name.text != stage.name
        assert name.text == stage.stage.name
        # type the wrapper's name into the box; GUI and stage must both update
        await replace_text(stage_app, name, stage.name)
        assert name.text == stage.name
        assert name.text == stage.stage.name
    # verify colors
    for color in ('r', 'g', 'b'):
        widget = stage_app.resolve_widget(settings).down(
            test_name='stage color {}'.format(color))()
        prop = 'color_{}'.format(color)
        # the stage values should always match the GUI values
        assert getattr(stage.stage, prop) == (widget.state == 'down')
        # if the wrapper need to change the value, do it
        if getattr(stage, prop) != getattr(stage.stage, prop):
            await touch_widget(stage_app, widget)
        # make sure it was changed
        assert getattr(stage.stage, prop) == (widget.state == 'down')
        assert getattr(stage, prop) == getattr(stage.stage, prop)
    # parallel vs serial
    serial = stage_app.resolve_widget(settings).down(
        test_name='stage serial')()
    parallel = stage_app.resolve_widget(settings).down(
        test_name='stage parallel')()
    assert (stage.stage.order == 'serial') == (serial.state == 'down') and \
        (stage.stage.order == 'parallel') == (parallel.state == 'down')
    # set the GUI to the correct value
    if stage.order == 'parallel' and parallel.state != 'down':
        await touch_widget(stage_app, parallel)
    elif stage.order == 'serial' and serial.state != 'down':
        await touch_widget(stage_app, serial)
    assert (stage.stage.order == 'serial') == (serial.state == 'down') and \
        (stage.stage.order == 'parallel') == (parallel.state == 'down')
    assert (stage.order == 'serial') == (serial.state == 'down') and \
        (stage.order == 'parallel') == (parallel.state == 'down')
    # complete_on all vs any
    all_w = stage_app.resolve_widget(settings).down(
        test_name='stage finish all')()
    any_w = stage_app.resolve_widget(settings).down(
        test_name='stage finish any')()
    assert (stage.stage.complete_on == 'all') == (all_w.state == 'down') and \
        (stage.stage.complete_on == 'any') == (any_w.state == 'down')
    # set the GUI to the correct value
    if stage.complete_on == 'all' and all_w.state != 'down':
        await touch_widget(stage_app, all_w)
    elif stage.complete_on == 'any' and any_w.state != 'down':
        await touch_widget(stage_app, any_w)
    assert (stage.stage.complete_on == 'all') == (all_w.state == 'down') and \
        (stage.stage.complete_on == 'any') == (any_w.state == 'down')
    assert (stage.complete_on == 'all') == (all_w.state == 'down') and \
        (stage.complete_on == 'any') == (any_w.state == 'down')
    if opened_settings:
        # only close the settings if we opened them ourselves
        await escape(stage_app)
    return settings
async def assert_stage_params_in_gui(
        stage_app: CeedTestApp, stage: StageWrapper, settings=None,
        check_name=False):
    """Read-only counterpart of :func:`assert_set_params_in_gui`: asserts
    the settings GUI already shows the wrapper's values without changing
    anything.

    If ``settings`` is None the settings popup is opened (and closed again
    at the end); otherwise the given widget is reused. Returns the settings
    widget.
    """
    opened_settings = settings is None
    if opened_settings:
        settings = await open_stage_settings(stage_app, stage.stage)
    if check_name:
        name = stage_app.resolve_widget(settings).down(
            test_name='stage name')()
        name_label = stage_app.resolve_widget(stage.stage.display).down(
            test_name='stage label')()
        assert name.text == stage.name
        assert name_label.text == stage.name
        assert name.text == stage.stage.name
    # verify colors
    for color in ('r', 'g', 'b'):
        widget = stage_app.resolve_widget(settings).down(
            test_name='stage color {}'.format(color))()
        prop = 'color_{}'.format(color)
        assert getattr(stage.stage, prop) == (widget.state == 'down')
        assert getattr(stage, prop) == getattr(stage.stage, prop)
    # parallel vs serial
    serial = stage_app.resolve_widget(settings).down(
        test_name='stage serial')()
    parallel = stage_app.resolve_widget(settings).down(
        test_name='stage parallel')()
    assert (stage.stage.order == 'serial') == (serial.state == 'down') and \
        (stage.stage.order == 'parallel') == (parallel.state == 'down')
    # complete_on all vs any
    all_w = stage_app.resolve_widget(settings).down(
        test_name='stage finish all')()
    any_w = stage_app.resolve_widget(settings).down(
        test_name='stage finish any')()
    assert (stage.stage.complete_on == 'all') == (all_w.state == 'down') and \
        (stage.stage.complete_on == 'any') == (any_w.state == 'down')
    if opened_settings:
        await escape(stage_app)
    return settings
async def replace_last_ref_with_original_stage(
        stage_app: CeedTestApp,
        stages: List[Union[CeedStageRef, CeedStage]], name: str):
    """Replaces the last stage in ``stages`` — which must be a
    :class:`CeedStageRef` to the stage named ``name`` — with a copy of the
    referenced stage by pressing its settings button, and asserts the swap
    happened in place. Returns the new (non-ref) stage.
    """
    start_stages = stages[:]
    ref_stage = stages[-1]
    # it should be a ref to start with
    assert isinstance(ref_stage, CeedStageRef)
    # make sure the class name matches - we added the right class
    assert ref_stage.stage.name == name
    # the label of the new sub-stage
    sub_stage_widget = ref_stage.display
    name_w = stage_app.resolve_widget(sub_stage_widget).down(
        test_name='stage label')()
    assert name_w.text == name
    # replace the ref with a copy of the stage
    ref_btn = stage_app.resolve_widget(sub_stage_widget).down(
        test_name='stage settings open')()
    await touch_widget(stage_app, ref_btn)
    # should now have replaced the ref with a copy of the original
    assert ref_stage not in stages
    assert len(stages) == len(start_stages)
    new_stage = stages[-1]
    assert ref_stage is not new_stage
    # only the last element may have changed
    assert stages[:-1] == start_stages[:-1]
    # it should not be a ref anymore
    assert not isinstance(new_stage, CeedStageRef)
    assert_stages_same(ref_stage.stage, new_stage)
    return new_stage
async def open_stage_settings(app: CeedTestApp, stage: CeedStage):
    """Presses the given stage's settings button and returns the settings
    widget that opens as a result.
    """
    open_btn = app.resolve_widget(stage.display).down(
        test_name='stage settings open')()
    await touch_widget(app, open_btn)
    return app.resolve_widget().down(test_name='stage settings')()
async def test_stage_find_shape_in_all_stages(stage_app: CeedTestApp):
    """Removing a shape from the shape factory must also remove it from
    every stage and shape group that referenced it.
    """
    (s1, s2, s3), (group, shape1, shape2, shape3) = create_test_stages(
        stage_app=stage_app, show_in_gui=True)
    await stage_app.wait_clock_frames(2)
    # initially all three shapes are present in every stage and in the group
    for shape in (shape1, shape2, shape3):
        for stage in (s1, s2, s3):
            assert shape.shape in [s.shape for s in stage.stage.shapes]
        assert shape.shape in group.shapes
    # remove shape2: it must disappear everywhere, the rest must remain
    stage_app.app.shape_factory.remove_shape(shape2.shape)
    await stage_app.wait_clock_frames(2)
    for shape in (shape1, shape3):
        for stage in (s1, s2, s3):
            assert shape.shape in [s.shape for s in stage.stage.shapes]
        assert shape.shape in group.shapes
    for shape in (shape2, ):
        for stage in (s1, s2, s3):
            assert shape.shape not in [s.shape for s in stage.stage.shapes]
        assert shape.shape not in group.shapes
    # remove shape1 as well
    stage_app.app.shape_factory.remove_shape(shape1.shape)
    await stage_app.wait_clock_frames(2)
    for shape in (shape3, ):
        for stage in (s1, s2, s3):
            assert shape.shape in [s.shape for s in stage.stage.shapes]
        assert shape.shape in group.shapes
    for shape in (shape2, shape1):
        for stage in (s1, s2, s3):
            assert shape.shape not in [s.shape for s in stage.stage.shapes]
        assert shape.shape not in group.shapes
    # remove the last shape: nothing may remain anywhere
    stage_app.app.shape_factory.remove_shape(shape3.shape)
    await stage_app.wait_clock_frames(2)
    for shape in (shape2, shape1, shape3):
        for stage in (s1, s2, s3):
            assert shape.shape not in [s.shape for s in stage.stage.shapes]
        assert shape.shape not in group.shapes
async def test_add_empty_stage(stage_app: CeedTestApp):
    """The "add stage" button adds a root stage when nothing is selected,
    and a sub-stage when a stage is selected.
    """
    stage_factory = stage_app.app.stage_factory
    assert not stage_factory.stages
    n = len(stage_factory.stage_names)
    # add first empty stage
    add_stage = stage_app.resolve_widget().down(test_name='stage add')()
    await touch_widget(stage_app, add_stage)
    assert stage_factory.stages
    stage = stage_factory.stages[0]
    assert stage in list(stage_factory.stage_names.values())
    assert len(stage_factory.stage_names) == n + 1
    assert stage.display.show_more
    # select the stage and add stage to it
    name_label = stage_app.resolve_widget(stage.display).down(
        test_name='stage label')()
    assert not stage.display.selected
    await touch_widget(stage_app, name_label)
    assert stage.display.selected
    await touch_widget(stage_app, add_stage)
    # the new stage became a child, so the root list is unchanged
    assert stage_factory.stages == [stage]
    # deselect the stage and add stage globally
    assert stage.display.selected
    await touch_widget(stage_app, name_label)
    await touch_widget(stage_app, add_stage)
    assert len(stage_factory.stages) == 2
    assert stage_factory.stages[0] is stage
async def test_gui_add_stages(stage_app: CeedTestApp):
    """For every example stage class: adds it as a root stage through the
    GUI, renames it, and verifies all its settings can be driven from the
    settings popup.
    """
    stages = []
    add_stage = stage_app.resolve_widget().down(test_name='stage add')()
    for i, stage_cls in enumerate(stage_classes):
        stage = stage_cls(app=stage_app, show_in_gui=False)
        stages.append(stage)
        # don't keep more than two stages so the list is not too long
        if i >= 2:
            oldest_stage = stages.pop(0)
            assert oldest_stage.stage in stage_app.app.stage_factory.stages
            remove_btn = stage_app.resolve_widget(
                oldest_stage.stage.display).down(test_name='del btn stage')()
            await touch_widget(stage_app, remove_btn)
            assert oldest_stage.stage not in stage_app.app.stage_factory.stages
        # add the stage
        await touch_widget(stage_app, add_stage)
        assert len(stage_app.app.stage_factory.stages) == min(2, i + 1)
        stage.stage = stage_app.app.stage_factory.stages[-1]
        # show the settings for the stage
        widget = stage.stage.display
        settings = await open_stage_settings(stage_app, stage.stage)
        # check default name
        name = stage_app.resolve_widget(settings).down(
            test_name='stage name')()
        assert not name.disabled, "root stages can be renamed"
        name_label = stage_app.resolve_widget(widget).down(
            test_name='stage label')()
        original_name = name.text
        assert stage.name != original_name
        assert original_name == name_label.text
        assert original_name in stage_app.app.stage_factory.stage_names
        assert stage.name not in stage_app.app.stage_factory.stage_names
        # change the stage name
        await replace_text(stage_app, name, stage.name)
        assert name.text == stage.name
        assert name_label.text == stage.name
        assert original_name not in stage_app.app.stage_factory.stage_names
        assert stage.name in stage_app.app.stage_factory.stage_names
        await assert_set_params_in_gui(stage_app, stage, settings)
        # close the settings widget
        await escape(stage_app)
async def test_gui_add_sub_stages(stage_app: CeedTestApp):
    """Adds example stages as sub-stages of a selected root stage through
    the GUI and verifies their settings.
    """
    add_stage = stage_app.resolve_widget().down(test_name='stage add')()
    await touch_widget(stage_app, add_stage)
    base_stage: CeedStage = stage_app.app.stage_factory.stages[0]
    # select the root stage so new stages are added as its children
    name_label = stage_app.resolve_widget(base_stage.display).down(
        test_name='stage label')()
    await touch_widget(stage_app, name_label)
    assert base_stage.display.selected
    assert not base_stage.stages
    stages = []
    for i, stage_cls in enumerate(stage_classes[:4]):
        stage = stage_cls(app=stage_app, show_in_gui=False)
        stages.append(stage)
        # don't keep more than two stages so the list is not too long
        if i >= 2:
            oldest_stage = stages.pop(0)
            assert oldest_stage.stage in base_stage.stages
            remove_btn = stage_app.resolve_widget(
                oldest_stage.stage.display).down(test_name='del btn stage')()
            await touch_widget(stage_app, remove_btn)
            assert oldest_stage.stage not in base_stage.stages
        # re-select the root in case removal deselected it
        if not base_stage.display.selected:
            await touch_widget(stage_app, name_label)
            assert base_stage.display.selected
        # add the stage
        await touch_widget(stage_app, add_stage)
        assert len(base_stage.stages) == min(2, i + 1)
        stage.stage = base_stage.stages[-1]
        # replace the ref stage
        settings_btn = stage_app.resolve_widget(stage.stage.display).down(
            test_name='stage settings open')()
        await touch_widget(stage_app, settings_btn)
        stage.stage = base_stage.stages[-1]
        await assert_set_params_in_gui(stage_app, stage, check_name=False)
async def test_gui_drag_shape_to_stage(stage_app: CeedTestApp):
    """Drags shapes and a shape group from the shape list onto stages and
    verifies they get added to the stage's shapes.
    """
    (group, group2, group3), (shape1, shape2, shape3) = \
        assert_add_three_groups(
            shape_factory=stage_app.app.shape_factory, app=stage_app,
            show_in_gui=True)
    await stage_app.wait_clock_frames(2)
    (s1, s2, s3), _ = create_test_stages(
        stage_app=stage_app, add_func=False, add_shapes=False)
    await stage_app.wait_clock_frames(2)
    # multiple stages
    for stage in (s2, s3):
        container = stage.stage.display.shape_widget
        shapes = stage.stage.shapes
        assert not shapes
        # drag each shape to the stage
        added_shapes = []
        for i, shape in enumerate((shape1, group2, shape3, shape2)):
            # groups and plain shapes expose different drag handles
            if isinstance(shape, CeedShapeGroup):
                src = stage_app.resolve_widget(shape.widget).down(
                    test_name='group drag button')()
            else:
                shape = shape.shape
                src = stage_app.resolve_widget(shape.widget).down(
                    test_name='shape drag')()
            offset = (0, 5) if container.height else (0, 0)
            async for _ in stage_app.do_touch_drag_follow(
                    widget=src, target_widget=container,
                    target_widget_loc=('center_x', 'y'),
                    target_widget_offset=offset, drag_n=15):
                pass
            # check that shape was added
            assert len(shapes) == min(3, i + 1)
            assert shape is shapes[-1].shape
            # make sure label matches
            name_label = stage_app.resolve_widget(shapes[-1].display).down(
                test_name='stage shape name')()
            assert name_label.text == shape.name
            added_shapes.append(shapes[-1])
            # don't keep more than two stages so the list is not too long
            if i >= 2:
                oldest_shape = added_shapes.pop(0)
                assert oldest_shape in shapes
                remove_btn = stage_app.resolve_widget(
                    oldest_shape.display).down(
                    test_name='stage shape del')()
                await touch_widget(stage_app, remove_btn)
                assert oldest_shape not in shapes
        await stage_app.wait_clock_frames(2)
async def test_gui_drag_func_to_stage(stage_app: CeedTestApp):
    """Drags functions from the global function list onto stages. Global
    functions are added as refs (then replaced with copies); sub-functions
    of a group are added as copies directly.
    """
    global_funcs = create_funcs(func_app=stage_app, show_in_gui=True)
    group_func: GroupFunction = global_funcs[-1]
    ff1 = group_func.wrapper_funcs[0]
    ff2 = group_func.wrapper_funcs[1]
    # second tuple element flags whether the dragged func is a sub-function
    global_funcs = [
        (ff1, True)] + [(f, False) for f in global_funcs] + [(ff2, True)]
    await stage_app.wait_clock_frames(2)
    (s1, s2, s3), _ = create_test_stages(
        stage_app=stage_app, add_func=False, add_shapes=False)
    await stage_app.wait_clock_frames(2)
    # multiple funcs
    for stage in (s2, s3):
        container = stage.stage.display.func_widget
        functions = stage.stage.functions
        assert not functions
        # drag each func to the stage
        added_funcs = []
        for i, (func, is_sub_func) in enumerate(global_funcs):
            src = stage_app.resolve_widget(func.func.display).down(
                test_name='func drag btn')()
            async for _ in stage_app.do_touch_drag_follow(
                    widget=src, target_widget=container,
                    target_widget_loc=('center_x', 'y'),
                    target_widget_offset=(0, 5)):
                pass
            # check that shape was added
            assert len(functions) == min(3, i + 1)
            assert functions[-1] is not func.func
            if is_sub_func:
                # sub-functions are copied, not referenced
                assert isinstance(functions[-1], (FuncBase, FuncGroup))
                assert_funcs_same(functions[-1], func.func)
            else:
                assert isinstance(functions[-1], CeedFuncRef)
                assert func.func is functions[-1].func
                await replace_last_ref_with_original_func(
                    stage_app, functions, func.func.name)
            added_funcs.append(functions[-1])
            # don't keep more than two funcs so the list is not too long
            if i >= 2:
                oldest_func = added_funcs.pop(0)
                assert oldest_func in functions
                remove_btn = stage_app.resolve_widget(
                    oldest_func.display).down(
                    test_name='del_btn_func')()
                await touch_widget(stage_app, remove_btn)
                assert oldest_func not in functions
        await stage_app.wait_clock_frames(2)
async def test_gui_drag_stage_to_stage(stage_app: CeedTestApp):
    """Drags stages onto other stages. Root stages are added as refs (then
    replaced with copies); sub-stages are added as copies directly.
    """
    (s1, s2, s21), _ = create_test_stages(
        stage_app=stage_app, show_in_gui=True, add_func=False,
        add_shapes=False)
    (s3, s4, s41), _ = create_test_stages(
        stage_app=stage_app, show_in_gui=True, add_func=False,
        add_shapes=False)
    await stage_app.wait_clock_frames(2)
    # collapse stages to not take up space
    for stage in (s1, s21, s3):
        stage.stage.display.show_more = False
    await stage_app.wait_clock_frames(2)
    # multiple funcs
    for stage in (s4, s41):
        container = stage.stage.display.stage_widget
        stages = stage.stage.stages
        # s4 starts with one sub-stage (s41); s41 starts empty
        n_start = 0 if stage is s41 else 1
        assert len(stages) == n_start
        # drag each func to the stage
        added_stages = []
        for i, src_stage in enumerate((s1, s2, s21, s3)):
            src = stage_app.resolve_widget(src_stage.stage.display).down(
                test_name='stage drag btn')()
            async for _ in stage_app.do_touch_drag_follow(
                    widget=src, target_widget=container,
                    target_widget_loc=('center_x', 'y'),
                    target_widget_offset=(0, 5)):
                pass
            # check that shape was added
            assert len(stages) == min(3, i + 1) + n_start
            assert stages[-1] is not src_stage.stage
            if src_stage is s21:
                # s21 is itself a sub-stage, so it's copied, not referenced
                assert isinstance(stages[-1], CeedStage)
                assert_stages_same(stages[-1], src_stage.stage)
            else:
                assert isinstance(stages[-1], CeedStageRef)
                assert src_stage.stage is stages[-1].stage
                await replace_last_ref_with_original_stage(
                    stage_app, stages, src_stage.stage.name)
            added_stages.append(stages[-1])
            # don't keep more than two stages so the list is not too long
            if i >= 2:
                oldest_stage = added_stages.pop(0)
                assert oldest_stage in stages
                remove_btn = stage_app.resolve_widget(
                    oldest_stage.display).down(
                    test_name='del btn stage')()
                await touch_widget(stage_app, remove_btn)
                assert oldest_stage not in stages
        await stage_app.wait_clock_frames(2)
def verify_color(
        stage_app, shape_color, shape2_color, frame, centers, flip, video_mode):
    """Samples rendered pixels at the two shape centers and asserts they
    match the expected per-frame colors, advancing and returning the frame
    counter (one step per sub-frame in QUAD modes).

    ``shape_color``/``shape2_color`` are per-frame ``(r, g, b, value)``
    tuples where the booleans select active channels and ``value`` is the
    expected intensity.
    """
    (cx1, cy1), (cx2, cy2) = centers
    if flip:
        # NOTE(review): assumes a 1920-pixel-wide projector output — confirm
        cx1 = 1920 - cx1
        cx2 = 1920 - cx2
    centers = [[(cx1, cy1), (cx2, cy2)]]
    if 'QUAD' in video_mode:
        # in quad modes each quadrant renders at half resolution, so sample
        # the scaled center in each of the four quadrants
        cx1, cx2, cy1, cy2 = cx1 // 2, cx2 // 2, cy1 // 2, cy2 // 2
        corners = ((0, 540), (960, 540), (0, 0), (960, 0))
        centers = [
            [(cx + x, cy + y) for cx, cy in [(cx1, cy1), (cx2, cy2)]]
            for x, y in corners]
    if video_mode == 'QUAD12X':
        # first get all 4 centers values, one for each quadrant
        rgb_values = []
        for i in range(4):
            rgb = stage_app.get_widget_pos_pixel(
                stage_app.app.shape_factory, centers[i])
            rgb = [[c / 255 for c in p] for p in rgb]
            rgb_values.append(rgb)
        # r, g, b
        for plane in [0, 1, 2]:
            # 4 quads
            for color1, color2 in rgb_values:
                # each color plane of each quadrant is its own sub-frame
                assert isclose(
                    color1[plane], shape_color[frame][3], abs_tol=2 / 255)
                assert isclose(
                    color2[plane], shape2_color[frame][3], abs_tol=2 / 255)
                frame += 1
    else:
        n_sub_frames = 1
        if video_mode == 'QUAD4X':
            n_sub_frames = 4
        for i in range(n_sub_frames):
            points = stage_app.get_widget_pos_pixel(
                stage_app.app.shape_factory, centers[i])
            points = [[c / 255 for c in p] for p in points]
            (r1, g1, b1, _), (r2, g2, b2, _) = points
            # active channels must be close to the expected value; inactive
            # channels must be exactly zero
            val = shape_color[frame]
            assert isclose(r1, val[3], abs_tol=2 / 255) if val[0] else r1 == 0
            assert isclose(g1, val[3], abs_tol=2 / 255) if val[1] else g1 == 0
            assert isclose(b1, val[3], abs_tol=2 / 255) if val[2] else b1 == 0
            val = shape2_color[frame]
            assert isclose(r2, val[3], abs_tol=2 / 255) if val[0] else r2 == 0
            assert isclose(g2, val[3], abs_tol=2 / 255) if val[1] else g2 == 0
            assert isclose(b2, val[3], abs_tol=2 / 255) if val[2] else b2 == 0
            frame += 1
    return frame
@pytest.mark.parametrize('video_mode', ['RGB', 'QUAD4X', 'QUAD12X'])
@pytest.mark.parametrize(
    'flip,skip', [(True, False), (False, True), (False, False)])
async def test_recursive_play_stage_intensity(
        stage_app: CeedTestApp, tmp_path, flip, skip, video_mode):
    """Checks that proper frame rendering happens in all these modes.

    Runs a recursive stage tree with linear-ramp intensities on two shapes,
    verifies the on-screen pixel values frame by frame while the experiment
    plays, then saves the experiment to an h5 file and verifies the logged
    intensity arrays match the expected ramps.

    In skip mode, some frames are skipped if GPU/CPU is too slow.
    """
    from ..test_stages import create_recursive_stages
    from .examples.shapes import CircleShapeP1, CircleShapeP2
    from kivy.clock import Clock
    from ceed.analysis import CeedDataReader
    root, g1, g2, s1, s2, s3, s4, s5, s6 = create_recursive_stages(
        stage_app.app.stage_factory, app=stage_app)
    from ceed.function.plugin import LinearFunc
    # alternate 1s / 2s linear ramps (slope .5) on the six leaf stages
    for i, stage in enumerate((s1, s2, s3, s4, s5, s6)):
        stage.stage.add_func(LinearFunc(
            function_factory=stage_app.app.function_factory, b=0, m=.5,
            duration=(i % 2 + 1) * 1))
    shape = CircleShapeP1(
        app=None, painter=stage_app.app.shape_factory, show_in_gui=True)
    shape2 = CircleShapeP2(
        app=None, painter=stage_app.app.shape_factory, show_in_gui=True)
    s1.stage.add_shape(shape.shape)
    s4.stage.add_shape(shape.shape)
    s5.stage.add_shape(shape.shape)
    s2.stage.add_shape(shape2.shape)
    s3.stage.add_shape(shape2.shape)
    s6.stage.add_shape(shape2.shape)
    root.show_in_gui()
    await stage_app.wait_clock_frames(2)
    frame = 0
    event = None
    # make GPU too slow to force skipping frames, when enabled
    fps = await measure_fps(stage_app) + 10
    rate = stage_app.app.view_controller.frame_rate = fps
    stage_app.app.view_controller.use_software_frame_rate = False
    stage_app.app.view_controller.flip_projector = flip
    stage_app.app.view_controller.skip_estimated_missed_frames = skip
    stage_app.app.view_controller.video_mode = video_mode
    stage_app.app.view_controller.pad_to_stage_handshake = False
    n_sub_frames = 1
    if video_mode == 'QUAD4X':
        n_sub_frames = 4
    elif video_mode == 'QUAD12X':
        n_sub_frames = 12
    centers = shape.center, shape2.center
    # total stage duration is 6 seconds of sub-frames
    num_frames = rate * n_sub_frames * (2 + 1 + 2 + 1)
    shape_color = [(False, False, False, 0.), ] * num_frames
    shape2_color = [(False, False, False, 0.), ] * num_frames
    skipped_frame_indices = set()
    n_missed_frames = 0
    # build the expected per-frame (r, g, b, intensity) ramps for each shape
    # from the stage schedule (start/end times in seconds)
    for s, start, e in [(s1, 0, 1), (s4, 3, 5), (s5, 5, 6)]:
        for i in range(start * rate * n_sub_frames, e * rate * n_sub_frames):
            val = (i - start * rate * n_sub_frames) / (rate * n_sub_frames) * .5
            shape_color[i] = s.color_r, s.color_g, s.color_b, val
    for s, start, e in [(s2, 0, 2), (s3, 2, 3), (s6, 5, 6)]:
        for i in range(start * rate * n_sub_frames, e * rate * n_sub_frames):
            val = (i - start * rate * n_sub_frames) / (rate * n_sub_frames) * .5
            shape2_color[i] = s.color_r, s.color_g, s.color_b, val
    def verify_intensity(*largs):
        # per-clock-frame callback verifying the rendered pixels
        nonlocal frame, n_missed_frames
        # total frames is a multiple of n_sub_frames
        if not stage_app.app.view_controller.stage_active:
            assert stage_app.app.view_controller.count - 1 == num_frames
            if skip:
                # last frame could be passed actual frames
                assert frame - n_missed_frames * n_sub_frames <= num_frames
            else:
                assert frame == num_frames
            event.cancel()
            return
        # not yet started
        if not stage_app.app.view_controller.count:
            return
        # some frame may have been skipped, but num_frames is max frames
        # This callback happens after frame callback and after the frame flip.
        # This also means we record even the last skipped frames (if skipped)
        assert frame < num_frames
        frame = verify_color(
            stage_app, shape_color, shape2_color, frame, centers, flip,
            video_mode)
        assert stage_app.app.view_controller.count == frame
        if skip:
            # some frames may have been dropped for next frame
            n_missed_frames = stage_app.app.view_controller._n_missed_frames
            for k in range(n_missed_frames * n_sub_frames):
                # frame is next frame index, next frame is skipped
                skipped_frame_indices.add(frame)
                frame += 1
        else:
            assert not stage_app.app.view_controller._n_missed_frames
    event = Clock.create_trigger(verify_intensity, timeout=0, interval=True)
    event()
    stage_app.app.view_controller.request_stage_start(root.name)
    await wait_experiment_done(stage_app, timeout=num_frames / rate * 50)
    await wait_experiment_stopped(stage_app)
    # save the experiment and re-read it to verify the logged intensities
    filename = str(tmp_path / 'recursive_play_stage_intensity.h5')
    stage_app.app.ceed_data.save(filename=filename)
    f = CeedDataReader(filename)
    f.open_h5()
    assert f.experiments_in_file == ['0']
    assert not f.num_images_in_file
    f.load_experiment(0)
    shape_data = f.shapes_intensity[shape.name]
    shape_data_rendered = f.shapes_intensity_rendered[shape.name]
    shape2_data = f.shapes_intensity[shape2.name]
    shape2_data_rendered = f.shapes_intensity_rendered[shape2.name]
    recorded_rendered_frames = f.rendered_frames
    # even when skipping, skipped frames are still logged but they are removed
    # in xxx_rendered arrays
    if skip:
        # because frame rate is high, we'll definitely drop frames
        assert skipped_frame_indices
    else:
        assert not skipped_frame_indices
    assert shape_data.shape[0] == num_frames
    assert shape2_data.shape[0] == num_frames
    n_skipped = len(skipped_frame_indices)
    if skip:
        # last frame may be recorded as skipped, but if stage is done frame is
        # not real. n_missed_frames is the n_missed_frames from last frame
        assert num_frames - n_skipped <= shape_data_rendered.shape[0] \
            <= num_frames - n_skipped + n_sub_frames * n_missed_frames
        assert num_frames - n_skipped <= shape2_data_rendered.shape[0] \
            <= num_frames - n_skipped + n_sub_frames * n_missed_frames
    else:
        assert shape_data_rendered.shape[0] == num_frames
        assert shape2_data_rendered.shape[0] == num_frames
    # in QUAD12X mode, all 3 channels have same value in the data (because we
    # show gray). But the projector outputs different values for each channel,
    # for each sub-frame
    gray = video_mode == 'QUAD12X'
    # i indexes the rendered (non-skipped) arrays, k the full logged arrays
    i = 0
    k = 0
    for (r, g, b, val), (r1, g1, b1, _) in zip(shape_color, shape_data):
        assert isclose(val, r1, abs_tol=2 / 255) if r or gray else r1 == 0
        assert isclose(val, g1, abs_tol=2 / 255) if g or gray else g1 == 0
        assert isclose(val, b1, abs_tol=2 / 255) if b or gray else b1 == 0
        if skip:
            assert recorded_rendered_frames[k] \
                == (k not in skipped_frame_indices)
        else:
            assert recorded_rendered_frames[k]
        if k not in skipped_frame_indices:
            r1, g1, b1, _ = shape_data_rendered[i, :]
            assert isclose(val, r1, abs_tol=2 / 255) if r or gray else r1 == 0
            assert isclose(val, g1, abs_tol=2 / 255) if g or gray else g1 == 0
            assert isclose(val, b1, abs_tol=2 / 255) if b or gray else b1 == 0
            i += 1
        k += 1
    i = 0
    k = 0
    for (r, g, b, val), (r1, g1, b1, _) in zip(shape2_color, shape2_data):
        assert isclose(val, r1, abs_tol=2 / 255) if r or gray else r1 == 0
        assert isclose(val, g1, abs_tol=2 / 255) if g or gray else g1 == 0
        assert isclose(val, b1, abs_tol=2 / 255) if b or gray else b1 == 0
        if skip:
            assert recorded_rendered_frames[k] \
                == (k not in skipped_frame_indices)
        else:
            assert recorded_rendered_frames[k]
        if k not in skipped_frame_indices:
            r1, g1, b1, _ = shape2_data_rendered[i, :]
            assert isclose(val, r1, abs_tol=2 / 255) if r or gray else r1 == 0
            assert isclose(val, g1, abs_tol=2 / 255) if g or gray else g1 == 0
            assert isclose(val, b1, abs_tol=2 / 255) if b or gray else b1 == 0
            i += 1
        k += 1
    f.close_h5()
async def test_moat_stage_shapes(stage_app: CeedTestApp, tmp_path):
    """A small circle inside a larger circle ("moat"), each driven by its own
    stage color: on screen the shape drawn on top wins, but the recorded h5
    intensity always logs each shape's own stage color.
    """
    from ..test_stages import create_recursive_stages
    from .examples.shapes import CircleShapeP1, CircleShapeP1Internal
    from ceed.function.plugin import ConstFunc
    from ceed.analysis import CeedDataReader
    root, g1, g2, s1, s2, s3, s4, s5, s6 = create_recursive_stages(
        stage_app.app.stage_factory, app=stage_app)
    # internal shape: blue only
    s1.stage.color_r = False
    s1.stage.color_g = False
    s1.stage.color_b = True
    # surrounding shape: red + blue
    s2.stage.color_r = True
    s2.stage.color_g = False
    s2.stage.color_b = True
    shape = CircleShapeP1(
        app=None, painter=stage_app.app.shape_factory, show_in_gui=True)
    internal_shape = CircleShapeP1Internal(
        app=None, painter=stage_app.app.shape_factory, show_in_gui=True)
    s1.stage.add_func(ConstFunc(
        function_factory=stage_app.app.function_factory, a=1, duration=5))
    s1.stage.add_shape(internal_shape.shape)
    s2.stage.add_func(ConstFunc(
        function_factory=stage_app.app.function_factory, a=1, duration=5))
    s2.stage.add_shape(shape.shape)
    root.show_in_gui()
    await stage_app.wait_clock_frames(2)
    stage_app.app.view_controller.frame_rate = 10
    stage_app.app.view_controller.use_software_frame_rate = False
    stage_app.app.view_controller.flip_projector = False
    stage_app.app.view_controller.request_stage_start(root.name)
    await wait_stage_experiment_started(stage_app)
    assert stage_app.app.view_controller.stage_active
    # internal circle is drawn above, so its center shows blue only while the
    # surrounding circle's center shows red + blue
    points = stage_app.get_widget_pos_pixel(
        stage_app.app.shape_factory, [internal_shape.center, shape.center])
    (r1, g1, b1, _), (r2, g2, b2, _) = points
    assert r1 == 0
    assert g1 == 0
    assert b1 == 255
    assert r2 == 255
    assert g2 == 0
    assert b2 == 255
    stage_app.app.view_controller.request_stage_end()
    await stage_app.wait_clock_frames(2)
    assert not stage_app.app.view_controller.stage_active
    # now hide internal shape behind larger circle
    stage_app.app.shape_factory.move_shape_upwards(shape.shape)
    await stage_app.wait_clock_frames(2)
    stage_app.app.view_controller.request_stage_start(root.name)
    await wait_stage_experiment_started(stage_app)
    assert stage_app.app.view_controller.stage_active
    # internal circle is now occluded, so both centers show the outer color
    points = stage_app.get_widget_pos_pixel(
        stage_app.app.shape_factory, [internal_shape.center, shape.center])
    (r1, g1, b1, _), (r2, g2, b2, _) = points
    assert r1 == 255
    assert g1 == 0
    assert b1 == 255
    assert r2 == 255
    assert g2 == 0
    assert b2 == 255
    stage_app.app.view_controller.request_stage_end()
    await stage_app.wait_clock_frames(2)
    await wait_experiment_stopped(stage_app)
    filename = str(tmp_path / 'moat_stage_shapes.h5')
    stage_app.app.ceed_data.save(filename=filename)
    f = CeedDataReader(filename)
    f.open_h5()
    assert f.experiments_in_file == ['0', '1']
    assert not f.num_images_in_file
    # recorded per-shape intensity is the shape's own stage color in both
    # experiments, regardless of the on-screen draw order
    f.load_experiment(0)
    assert tuple(np.array(f.shapes_intensity[shape.name])[0, :3]) == (1, 0, 1)
    assert tuple(
        np.array(f.shapes_intensity[internal_shape.name])[0, :3]) == (0, 0, 1)
    f.load_experiment(1)
    assert tuple(np.array(f.shapes_intensity[shape.name])[0, :3]) == (1, 0, 1)
    assert tuple(
        np.array(f.shapes_intensity[internal_shape.name])[0, :3]) == (0, 0, 1)
    f.close_h5()
async def test_moat_single_stage_shapes(stage_app: CeedTestApp, tmp_path):
    """Both circles in a single blue stage, with ``keep_dark`` set on the
    internal circle: its center renders black on screen and its recorded
    intensity is (0, 0, 0, 1) while the surrounding circle stays blue.
    """
    from ..test_stages import create_recursive_stages
    from .examples.shapes import CircleShapeP1, CircleShapeP1Internal
    from ceed.function.plugin import ConstFunc
    from ceed.analysis import CeedDataReader
    root, g1, g2, s1, s2, s3, s4, s5, s6 = create_recursive_stages(
        stage_app.app.stage_factory, app=stage_app)
    s1.stage.color_r = False
    s1.stage.color_g = False
    s1.stage.color_b = True
    shape = CircleShapeP1(
        app=None, painter=stage_app.app.shape_factory, show_in_gui=True)
    internal_shape = CircleShapeP1Internal(
        app=None, painter=stage_app.app.shape_factory, show_in_gui=True)
    s1.stage.add_func(ConstFunc(
        function_factory=stage_app.app.function_factory, a=1, duration=5))
    stage_shape = s1.stage.add_shape(internal_shape.shape)
    s1.stage.add_shape(shape.shape)
    # the internal circle should be forced black even though its stage is blue
    stage_shape.keep_dark = True
    root.show_in_gui()
    await stage_app.wait_clock_frames(2)
    stage_app.app.view_controller.frame_rate = 10
    stage_app.app.view_controller.use_software_frame_rate = False
    stage_app.app.view_controller.flip_projector = False
    stage_app.app.view_controller.request_stage_start(root.name)
    await wait_stage_experiment_started(stage_app)
    assert stage_app.app.view_controller.stage_active
    points = stage_app.get_widget_pos_pixel(
        stage_app.app.shape_factory, [internal_shape.center, shape.center])
    (r1, g1, b1, _), (r2, g2, b2, _) = points
    # keep_dark shape center is black; outer shape center is blue
    assert r1 == 0
    assert g1 == 0
    assert b1 == 0
    assert r2 == 0
    assert g2 == 0
    assert b2 == 255
    stage_app.app.view_controller.request_stage_end()
    await stage_app.wait_clock_frames(2)
    assert not stage_app.app.view_controller.stage_active
    await wait_experiment_stopped(stage_app)
    filename = str(tmp_path / 'moat_single_stage_shapes.h5')
    stage_app.app.ceed_data.save(filename=filename)
    f = CeedDataReader(filename)
    f.open_h5()
    assert f.experiments_in_file == ['0', ]
    assert not f.num_images_in_file
    f.load_experiment(0)
    # recorded data also reflects keep_dark: internal shape is logged as black
    assert tuple(np.array(f.shapes_intensity[shape.name])[0]) == (0, 0, 1, 1)
    assert tuple(
        np.array(f.shapes_intensity[internal_shape.name])[0]) == (0, 0, 0, 1)
    f.close_h5()
@pytest.mark.parametrize('func', [True, False])
async def test_event_data_empty(stage_app: CeedTestApp, tmp_path, func):
    """Runs an experiment that produces no frames (optionally with a
    zero-duration function) and verifies the logged event data: the
    start/start_loop/end_loop/end events in the expected ceed-id order, and
    ``format_event_data`` lookups by id, name and object.
    """
    from ..test_stages import create_2_shape_stage
    from ceed.function.plugin import ConstFunc
    from ceed.analysis import CeedDataReader
    root, s1, s2, shape1, shape2 = create_2_shape_stage(
        stage_app.app.stage_factory, show_in_gui=True, app=stage_app)
    s1.stage.name = 'test stage'
    if func:
        # zero duration so the function contributes events but no frames
        s1.stage.add_func(ConstFunc(
            function_factory=stage_app.app.function_factory, duration=0))
    stage_app.app.view_controller.use_software_frame_rate = False
    stage_app.app.view_controller.skip_estimated_missed_frames = False
    stage_app.app.view_controller.frame_rate = 10
    stage_app.app.view_controller.request_stage_start(root.name)
    await wait_experiment_done(stage_app, timeout=180)
    await wait_experiment_stopped(stage_app)
    filename = str(tmp_path / 'event_data_empty.h5')
    stage_app.app.ceed_data.save(filename=filename)
    # order in which the stage/func id start/finish
    if func:
        order = (0, 1, 3, 2), (2, 1, 3, 0)
    else:
        order = (0, 1, 2), (1, 2, 0)
    # expected rows: [frame, ceed_id, event-name, [loop counters]]
    loops = [
        [0, i, 'start' + s, [0, ] * 2] for i in order[0] for s in ('', '_loop')
    ]
    loops += [
        [0, i, 'end' + s, [0, ] * 2] for i in order[1] for s in ('_loop', '')
    ]
    with CeedDataReader(filename) as f:
        f.load_experiment(0)
        # drop the last element of each event's trailing list before comparing
        events = [d[:-1] + [d[-1][:-1], ] for d in f.event_data]
        assert loops == events
        s = f.experiment_stage.stages[0]
        # the same stage can be looked up by ceed_id, name or object
        for kw in [{'ceed_id': s.ceed_id}, {'ceed_name': s1.stage.name},
                   {'ceed_obj': s}]:
            items = f.format_event_data(event='start_loop', **kw)
            assert len(items) == 1
            assert items[0][:5] == [0, s, 'start_loop', 0, 0]
            items = f.format_event_data(**kw)
            assert len(items) == 4
            for item, val in zip(
                    items, ['start', 'start_loop', 'end_loop', 'end']):
                assert item[:5] == [0, s, val, 0, 0]
@pytest.mark.parametrize(
    'quad,sub_frames', [('RGB', 1), ('QUAD4X', 4), ('QUAD12X', 12)])
@pytest.mark.parametrize('skip', [False, True])
async def test_pad_stage_ticks(
        stage_app: CeedTestApp, tmp_path, quad, sub_frames, skip):
    """With ``pad_to_stage_handshake`` off, a stage without functions produces
    no samples; with it on, the experiment is padded to the serializer
    handshake length. Also checks the frame counter/time-counter bookkeeping
    when frames are dropped (frame rate above what the GPU can render).
    """
    from ceed.analysis import CeedDataReader
    root = SerialAllStage(
        stage_factory=stage_app.app.stage_factory, show_in_gui=False,
        app=stage_app, create_add_to_parent=True)
    shape = CircleShapeP1(
        app=None, painter=stage_app.app.shape_factory, show_in_gui=True)
    root.stage.add_shape(shape.shape)
    root.show_in_gui()
    await stage_app.wait_clock_frames(2)
    # use a larger frame rate so we have to drop frames
    stage_app.app.view_controller.frame_rate = await measure_fps(stage_app) + 10
    stage_app.app.view_controller.skip_estimated_missed_frames = skip
    stage_app.app.view_controller.use_software_frame_rate = False
    stage_app.app.view_controller.video_mode = quad
    # experiment 0: no padding
    stage_app.app.view_controller.pad_to_stage_handshake = False
    stage_app.app.view_controller.request_stage_start(root.name)
    await wait_experiment_done(stage_app)
    # experiment 1: padded to the handshake length
    stage_app.app.view_controller.pad_to_stage_handshake = True
    stage_app.app.view_controller.request_stage_start(root.name)
    await wait_experiment_done(stage_app, 300)
    await wait_experiment_stopped(stage_app)
    filename = str(tmp_path / 'pad_stage_ticks.h5')
    stage_app.app.ceed_data.save(filename=filename)
    f = CeedDataReader(filename)
    f.open_h5()
    assert f.experiments_in_file == ['0', '1']
    assert not f.num_images_in_file
    f.load_experiment('0')
    # stage had no functions, so without padding nothing was sampled
    assert f.shapes_intensity[shape.name].shape == (0, 4)
    f.load_experiment('1')
    # sub_frames scales up the handshake since IO is same for each sub-frame
    # Even skipped frames are logged so size matches
    assert f.shapes_intensity[shape.name].shape == (
        stage_app.app.data_serializer.num_ticks_handshake(16, sub_frames), 4)
    assert f.shapes_intensity[shape.name].shape == (
        stage_app.app.data_serializer.num_ticks_handshake(16, 1) * sub_frames,
        4)
    frame_time_counter = np.asarray(f._block.data_arrays['frame_time_counter'])
    frame_time = np.asarray(f._block.data_arrays['frame_time'])
    rendered_frames_bool = f.rendered_frames
    assert len(frame_time_counter) == len(frame_time)
    # one time-counter entry per rendered main frame (sub_frames per render)
    assert np.sum(rendered_frames_bool) == len(frame_time_counter) * sub_frames
    frame_counter = np.asarray(f._block.data_arrays['frame_counter'])
    n = f.shapes_intensity[shape.name].shape[0]
    # some frames will have been skipped because of higher frame rate than GPU
    if skip:
        assert sub_frames * len(frame_time_counter) < n
    else:
        assert sub_frames * len(frame_time_counter) == n
    # we didn't stop early so all frames are rendered
    rendered_indices = np.arange(0, n, sub_frames)
    if skip:
        assert len(frame_time_counter) < len(frame_counter) // sub_frames
        assert len(rendered_indices) > len(frame_time_counter)
    else:
        assert len(frame_time_counter) == len(frame_counter) // sub_frames
        assert len(rendered_indices) == len(frame_time_counter)
    # frame counter is a contiguous 1-based sequence, skipped frames included
    assert np.all(np.arange(1, n + 1) == frame_counter)
    # count recorded is last sub-frame
    if skip:
        assert np.all(
            np.isin(frame_time_counter, rendered_indices + sub_frames))
        # counters are strictly increasing even when frames were skipped
        assert np.all(frame_time_counter[1:] - frame_time_counter[:-1] > 0)
        assert np.all(np.isin(
            frame_time_counter,
            frame_counter[rendered_indices + sub_frames - 1]))
    else:
        assert np.all(frame_time_counter == rendered_indices + sub_frames)
        assert np.all(
            frame_counter[rendered_indices + sub_frames - 1]
            == frame_time_counter)
    f.close_h5()
@contextmanager
def add_to_path(tmp_path, *args):
    """Temporarily put *tmp_path* on ``sys.path`` with a fake
    ``my_gui_stage_plugin`` package written into it.

    The package's ``__init__.py`` contains :data:`fake_plugin_stage` so the
    app can import it as an external stage plugin. On exit the path entry is
    removed and the cached module (if imported) is dropped from
    ``sys.modules`` so later tests start clean. Extra positional args are
    accepted (and ignored) so it can be used as an app-context factory.
    """
    sys.path.append(str(tmp_path))
    mod = tmp_path / 'my_gui_stage_plugin' / '__init__.py'
    try:
        # exist_ok so entering twice with the same tmp_path doesn't raise
        # FileExistsError; the file is simply rewritten
        mod.parent.mkdir(exist_ok=True)
        mod.write_text(fake_plugin_stage)
        yield None
    finally:
        sys.path.remove(str(tmp_path))
        # drop the cached module, if it was ever imported
        sys.modules.pop('my_gui_stage_plugin', None)
@pytest.mark.parametrize(
    "ceed_app",
    [{'yaml_config': {
        'external_stage_plugin_package': 'my_gui_stage_plugin',
        'view': {'teensy_frame_estimation': {'use_teensy': False}}},
        'app_context': add_to_path}, ],
    indirect=True
)
@pytest.mark.parametrize('external', [False, True])
async def test_external_plugin_named_package(
        stage_app: CeedTestApp, tmp_path, external):
    """The app is configured (via yaml_config + add_to_path) to load the fake
    ``my_gui_stage_plugin`` package; verifies its ``FakeStage`` class is
    registered and that a custom property (``val``) survives an experiment
    round trip.
    """
    stage_factory = stage_app.app.stage_factory
    # the plugin's stage class must have been registered at app startup
    assert 'FakeStage' in stage_factory.stages_cls
    stage = SerialAllStage(
        stage_factory=stage_factory, show_in_gui=True, app=stage_app,
        create_add_to_parent=False, stage_cls=stage_factory.get('FakeStage'))
    stage.stage.val = 13
    await run_plugin_experiment(stage_app, tmp_path, external, stage=stage)
    # the stage used for the experiment kept the plugin-defined property
    assert stage_factory.stage_names[last_experiment_stage_name].val == 13
@pytest.mark.parametrize(
    'quad,sub_frames', [('RGB', 1), ('QUAD4X', 4), ('QUAD12X', 12)])
@pytest.mark.parametrize('main_frames', [1, 1.5, 2])
async def test_short_stage(
        stage_app: CeedTestApp, tmp_path, quad, sub_frames, main_frames):
    """Runs a 1s linear-ramp stage at a (possibly fractional) frame rate and
    checks every displayed frame's intensity against the expected ramp value,
    including the blank padding sub-frames at the end in quad modes, then
    verifies the recorded h5 data matches what was observed on screen.
    """
    from ceed.analysis import CeedDataReader
    from ceed.function.plugin import LinearFunc
    from kivy.clock import Clock
    # total stage frames including partial last main frame, in sub-frames
    num_frames = int(math.ceil(main_frames * sub_frames))
    rate = main_frames
    root = SerialAllStage(
        stage_factory=stage_app.app.stage_factory, show_in_gui=False,
        app=stage_app, create_add_to_parent=True)
    shape = CircleShapeP1(
        app=None, painter=stage_app.app.shape_factory, show_in_gui=True)
    root.stage.add_shape(shape.shape)
    root.stage.add_func(LinearFunc(
        function_factory=stage_app.app.function_factory, b=0, m=1,
        duration=1))
    root.show_in_gui()
    await stage_app.wait_clock_frames(2)
    # use a larger frame rate so we have to drop frames
    stage_app.app.view_controller.frame_rate = rate
    stage_app.app.view_controller.use_software_frame_rate = False
    stage_app.app.view_controller.video_mode = quad
    stage_app.app.view_controller.pad_to_stage_handshake = False
    stage_app.app.view_controller.flip_projector = False
    frame = 0
    event = None
    cx, cy = shape.shape.centroid
    # sample point(s) to read back: shape center, or its copy in each of the
    # four quadrants in quad modes
    if sub_frames == 1:
        centers = [(cx, cy)]
    else:
        cx1, cy1 = cx // 2, cy // 2
        corners = ((0, 540), (960, 540), (0, 0), (960, 0))
        centers = [(cx1 + x, cy1 + y) for x, y in corners]
    intensity = []
    total_rounded_frames = math.ceil(main_frames) * sub_frames
    def verify_intensity(*largs):
        # Clock callback: samples the rendered pixels once per GUI frame
        # while the stage is active, accumulating into `intensity`
        nonlocal frame
        if not stage_app.app.view_controller.stage_active:
            event.cancel()
            return
        # not yet started
        if not stage_app.app.view_controller.count:
            return
        assert frame < num_frames
        rgb = stage_app.get_widget_pos_pixel(
            stage_app.app.shape_factory, centers)
        rgb = [[c / 255 for c in p] for p in rgb]
        if sub_frames == 12:
            # QUAD12X packs a gray sub-frame into each color plane of each
            # quadrant, so expand plane-by-plane
            for plane in range(3):
                for point in rgb:
                    value = point[plane]
                    intensity.append((value, value, value, 1))
        else:
            intensity.extend(rgb)
        frame += sub_frames
        assert frame in (
            stage_app.app.view_controller.count, total_rounded_frames)
        assert not stage_app.app.view_controller._n_missed_frames
    event = Clock.create_trigger(verify_intensity, timeout=0, interval=True)
    event()
    stage_app.app.view_controller.request_stage_start(root.name)
    await wait_experiment_done(stage_app, timeout=50)
    await wait_experiment_stopped(stage_app)
    assert stage_app.app.view_controller.count == num_frames + 1
    # only counts whole frames
    assert frame == total_rounded_frames
    # have data for blank frames at end
    assert len(intensity) == total_rounded_frames
    assert total_rounded_frames >= num_frames
    filename = str(tmp_path / 'short_stage.h5')
    stage_app.app.ceed_data.save(filename=filename)
    with CeedDataReader(filename) as f:
        f.load_experiment(0)
        shape_data = f.shapes_intensity[shape.name]
        shape_data_rendered = f.shapes_intensity_rendered[shape.name]
        recorded_rendered_frames = f.rendered_frames
        assert shape_data.shape[0] == num_frames
        assert shape_data_rendered.shape[0] == num_frames
        assert len(recorded_rendered_frames) == num_frames
        # for each sub-frame
        gray = quad == 'QUAD12X'
        r, g, b = root.color_r, root.color_g, root.color_b
        for i, ((v1, v2, v3, _), (r1, g1, b1, _)) in enumerate(
                zip(intensity[:num_frames], shape_data)):
            # we saw the intensity we expect
            val = i / (main_frames * sub_frames)
            assert isclose(val, v1, abs_tol=2 / 255) if r or gray else v1 == 0
            assert isclose(val, v2, abs_tol=2 / 255) if g or gray else v2 == 0
            assert isclose(val, v3, abs_tol=2 / 255) if b or gray else v3 == 0
            # what we saw is what is recorded
            assert isclose(v1, r1, abs_tol=2 / 255)
            assert isclose(v2, g1, abs_tol=2 / 255)
            assert isclose(v3, b1, abs_tol=2 / 255)
            assert recorded_rendered_frames[i]
            assert shape_data_rendered[i, 0] == r1
            assert shape_data_rendered[i, 1] == g1
            assert shape_data_rendered[i, 2] == b1
        # remaining frames are blank in quad mode
        for r, g, b, _ in intensity[num_frames:]:
            assert not r
            assert not g
            assert not b
| 38.70283 | 80 | 0.653971 | import os
import sys
import math
from contextlib import contextmanager
from math import isclose
import numpy as np
import pytest
import ceed
from .examples.stages import create_test_stages, make_stage, StageWrapper, \
stage_classes, assert_stages_same
from typing import Type, List, Union
from ceed.tests.ceed_app import CeedTestApp
from ceed.tests.test_app import replace_text, touch_widget, escape, \
run_plugin_experiment
from ceed.stage import CeedStage, CeedStageRef, last_experiment_stage_name
from ceed.function import CeedFuncRef, FuncBase, FuncGroup
from ceed.shape import CeedShape, CeedShapeGroup
from .examples.shapes import assert_add_three_groups, CircleShapeP1
from .examples.funcs import create_funcs, GroupFunction
from .examples.stages import fake_plugin_stage, SerialAllStage
from .examples.experiment import wait_stage_experiment_started, \
wait_experiment_done, measure_fps, wait_experiment_stopped
from .test_func import assert_func_params_in_gui, \
replace_last_ref_with_original_func, assert_funcs_same
pytestmark = pytest.mark.ceed_app
async def assert_set_params_in_gui(
        stage_app: CeedTestApp, stage: StageWrapper, settings=None,
        check_name=False):
    """Drives the stage settings UI so the underlying :class:`CeedStage`
    matches the wrapper's desired name, colors, order and completion mode,
    asserting widget state and stage state stay in sync at every step.

    If *settings* is None the settings panel is opened (and closed again on
    exit); otherwise the provided, already-open panel is used. Returns the
    settings widget.
    """
    opened_settings = settings is None
    if opened_settings:
        settings = await open_stage_settings(stage_app, stage.stage)
    if check_name:
        # rename the stage through the text input and verify it propagates
        name = stage_app.resolve_widget(settings).down(
            test_name='stage name')()
        assert name.text != stage.name
        assert name.text == stage.stage.name
        await replace_text(stage_app, name, stage.name)
        assert name.text == stage.name
        assert name.text == stage.stage.name
    # toggle each color button that differs from the wrapper's desired value
    for color in ('r', 'g', 'b'):
        widget = stage_app.resolve_widget(settings).down(
            test_name='stage color {}'.format(color))()
        prop = 'color_{}'.format(color)
        assert getattr(stage.stage, prop) == (widget.state == 'down')
        if getattr(stage, prop) != getattr(stage.stage, prop):
            await touch_widget(stage_app, widget)
        assert getattr(stage.stage, prop) == (widget.state == 'down')
        assert getattr(stage, prop) == getattr(stage.stage, prop)
    # serial/parallel order toggle
    serial = stage_app.resolve_widget(settings).down(
        test_name='stage serial')()
    parallel = stage_app.resolve_widget(settings).down(
        test_name='stage parallel')()
    assert (stage.stage.order == 'serial') == (serial.state == 'down') and \
        (stage.stage.order == 'parallel') == (parallel.state == 'down')
    if stage.order == 'parallel' and parallel.state != 'down':
        await touch_widget(stage_app, parallel)
    elif stage.order == 'serial' and serial.state != 'down':
        await touch_widget(stage_app, serial)
    assert (stage.stage.order == 'serial') == (serial.state == 'down') and \
        (stage.stage.order == 'parallel') == (parallel.state == 'down')
    assert (stage.order == 'serial') == (serial.state == 'down') and \
        (stage.order == 'parallel') == (parallel.state == 'down')
    # complete-on all/any toggle
    all_w = stage_app.resolve_widget(settings).down(
        test_name='stage finish all')()
    any_w = stage_app.resolve_widget(settings).down(
        test_name='stage finish any')()
    assert (stage.stage.complete_on == 'all') == (all_w.state == 'down') and \
        (stage.stage.complete_on == 'any') == (any_w.state == 'down')
    if stage.complete_on == 'all' and all_w.state != 'down':
        await touch_widget(stage_app, all_w)
    elif stage.complete_on == 'any' and any_w.state != 'down':
        await touch_widget(stage_app, any_w)
    assert (stage.stage.complete_on == 'all') == (all_w.state == 'down') and \
        (stage.stage.complete_on == 'any') == (any_w.state == 'down')
    assert (stage.complete_on == 'all') == (all_w.state == 'down') and \
        (stage.complete_on == 'any') == (any_w.state == 'down')
    if opened_settings:
        await escape(stage_app)
    return settings
async def assert_stage_params_in_gui(
        stage_app: CeedTestApp, stage: StageWrapper, settings=None,
        check_name=False):
    """Read-only counterpart of :func:`assert_set_params_in_gui`: verifies the
    settings widgets already reflect the stage's name, colors, order and
    completion mode without changing anything. Returns the settings widget.
    """
    opened_settings = settings is None
    if opened_settings:
        settings = await open_stage_settings(stage_app, stage.stage)
    if check_name:
        name = stage_app.resolve_widget(settings).down(
            test_name='stage name')()
        name_label = stage_app.resolve_widget(stage.stage.display).down(
            test_name='stage label')()
        assert name.text == stage.name
        assert name_label.text == stage.name
        assert name.text == stage.stage.name
    for color in ('r', 'g', 'b'):
        widget = stage_app.resolve_widget(settings).down(
            test_name='stage color {}'.format(color))()
        prop = 'color_{}'.format(color)
        assert getattr(stage.stage, prop) == (widget.state == 'down')
        assert getattr(stage, prop) == getattr(stage.stage, prop)
    serial = stage_app.resolve_widget(settings).down(
        test_name='stage serial')()
    parallel = stage_app.resolve_widget(settings).down(
        test_name='stage parallel')()
    assert (stage.stage.order == 'serial') == (serial.state == 'down') and \
        (stage.stage.order == 'parallel') == (parallel.state == 'down')
    all_w = stage_app.resolve_widget(settings).down(
        test_name='stage finish all')()
    any_w = stage_app.resolve_widget(settings).down(
        test_name='stage finish any')()
    assert (stage.stage.complete_on == 'all') == (all_w.state == 'down') and \
        (stage.stage.complete_on == 'any') == (any_w.state == 'down')
    if opened_settings:
        # only close the panel if we opened it ourselves
        await escape(stage_app)
    return settings
async def replace_last_ref_with_original_stage(
        stage_app: CeedTestApp,
        stages: List[Union[CeedStageRef, CeedStage]], name: str):
    """The last item of *stages* must be a :class:`CeedStageRef` named *name*;
    pressing its settings button replaces it in place with a concrete
    :class:`CeedStage` equal to the referenced stage. Returns the new stage.
    """
    start_stages = stages[:]
    ref_stage = stages[-1]
    assert isinstance(ref_stage, CeedStageRef)
    assert ref_stage.stage.name == name
    sub_stage_widget = ref_stage.display
    name_w = stage_app.resolve_widget(sub_stage_widget).down(
        test_name='stage label')()
    assert name_w.text == name
    # pressing the settings button on a ref converts it into a real stage
    ref_btn = stage_app.resolve_widget(sub_stage_widget).down(
        test_name='stage settings open')()
    await touch_widget(stage_app, ref_btn)
    # the ref was swapped out in place: same list length, same predecessors
    assert ref_stage not in stages
    assert len(stages) == len(start_stages)
    new_stage = stages[-1]
    assert ref_stage is not new_stage
    assert stages[:-1] == start_stages[:-1]
    assert not isinstance(new_stage, CeedStageRef)
    # the replacement is an equal copy of the referenced stage
    assert_stages_same(ref_stage.stage, new_stage)
    return new_stage
async def open_stage_settings(app: CeedTestApp, stage: CeedStage):
    """Press the settings button of *stage* and return the opened settings
    widget.
    """
    open_btn = app.resolve_widget(stage.display).down(
        test_name='stage settings open')()
    await touch_widget(app, open_btn)
    settings = app.resolve_widget().down(test_name='stage settings')()
    return settings
async def test_stage_find_shape_in_all_stages(stage_app: CeedTestApp):
    """Removing a shape from the shape factory removes it from every stage
    that referenced it and from its shape group, leaving other shapes intact.
    """
    (s1, s2, s3), (group, shape1, shape2, shape3) = create_test_stages(
        stage_app=stage_app, show_in_gui=True)
    await stage_app.wait_clock_frames(2)
    # initially every shape is in every stage and in the group
    for shape in (shape1, shape2, shape3):
        for stage in (s1, s2, s3):
            assert shape.shape in [s.shape for s in stage.stage.shapes]
        assert shape.shape in group.shapes
    # remove shapes one at a time and check only the removed ones disappear
    stage_app.app.shape_factory.remove_shape(shape2.shape)
    await stage_app.wait_clock_frames(2)
    for shape in (shape1, shape3):
        for stage in (s1, s2, s3):
            assert shape.shape in [s.shape for s in stage.stage.shapes]
        assert shape.shape in group.shapes
    for shape in (shape2, ):
        for stage in (s1, s2, s3):
            assert shape.shape not in [s.shape for s in stage.stage.shapes]
        assert shape.shape not in group.shapes
    stage_app.app.shape_factory.remove_shape(shape1.shape)
    await stage_app.wait_clock_frames(2)
    for shape in (shape3, ):
        for stage in (s1, s2, s3):
            assert shape.shape in [s.shape for s in stage.stage.shapes]
        assert shape.shape in group.shapes
    for shape in (shape2, shape1):
        for stage in (s1, s2, s3):
            assert shape.shape not in [s.shape for s in stage.stage.shapes]
        assert shape.shape not in group.shapes
    stage_app.app.shape_factory.remove_shape(shape3.shape)
    await stage_app.wait_clock_frames(2)
    for shape in (shape2, shape1, shape3):
        for stage in (s1, s2, s3):
            assert shape.shape not in [s.shape for s in stage.stage.shapes]
        assert shape.shape not in group.shapes
async def test_add_empty_stage(stage_app: CeedTestApp):
    """The "add stage" button creates a root stage; while a stage is selected
    the button does not add to the root list (the root list is unchanged),
    and after deselecting it adds a second root stage.
    """
    stage_factory = stage_app.app.stage_factory
    assert not stage_factory.stages
    n = len(stage_factory.stage_names)
    add_stage = stage_app.resolve_widget().down(test_name='stage add')()
    await touch_widget(stage_app, add_stage)
    # a new root stage was created and registered under a generated name
    assert stage_factory.stages
    stage = stage_factory.stages[0]
    assert stage in list(stage_factory.stage_names.values())
    assert len(stage_factory.stage_names) == n + 1
    assert stage.display.show_more
    name_label = stage_app.resolve_widget(stage.display).down(
        test_name='stage label')()
    assert not stage.display.selected
    await touch_widget(stage_app, name_label)
    assert stage.display.selected
    # with the stage selected, "add" does not grow the root stage list
    await touch_widget(stage_app, add_stage)
    assert stage_factory.stages == [stage]
    assert stage.display.selected
    # deselect, then "add" creates a second root stage
    await touch_widget(stage_app, name_label)
    await touch_widget(stage_app, add_stage)
    assert len(stage_factory.stages) == 2
    assert stage_factory.stages[0] is stage
async def test_gui_add_stages(stage_app: CeedTestApp):
    """For every stage class, adds a root stage through the GUI, renames it,
    and sets its parameters via the settings panel, keeping at most two
    stages alive at a time so the list stays short.
    """
    stages = []
    add_stage = stage_app.resolve_widget().down(test_name='stage add')()
    for i, stage_cls in enumerate(stage_classes):
        stage = stage_cls(app=stage_app, show_in_gui=False)
        stages.append(stage)
        # keep at most two stages: delete the oldest via its del button
        if i >= 2:
            oldest_stage = stages.pop(0)
            assert oldest_stage.stage in stage_app.app.stage_factory.stages
            remove_btn = stage_app.resolve_widget(
                oldest_stage.stage.display).down(test_name='del btn stage')()
            await touch_widget(stage_app, remove_btn)
            assert oldest_stage.stage not in stage_app.app.stage_factory.stages
        # add the stage
        await touch_widget(stage_app, add_stage)
        assert len(stage_app.app.stage_factory.stages) == min(2, i + 1)
        stage.stage = stage_app.app.stage_factory.stages[-1]
        # show the settings for the stage
        widget = stage.stage.display
        settings = await open_stage_settings(stage_app, stage.stage)
        # check default name
        name = stage_app.resolve_widget(settings).down(
            test_name='stage name')()
        assert not name.disabled, "root stages can be renamed"
        name_label = stage_app.resolve_widget(widget).down(
            test_name='stage label')()
        original_name = name.text
        assert stage.name != original_name
        assert original_name == name_label.text
        assert original_name in stage_app.app.stage_factory.stage_names
        assert stage.name not in stage_app.app.stage_factory.stage_names
        # change the stage name
        await replace_text(stage_app, name, stage.name)
        assert name.text == stage.name
        assert name_label.text == stage.name
        assert original_name not in stage_app.app.stage_factory.stage_names
        assert stage.name in stage_app.app.stage_factory.stage_names
        await assert_set_params_in_gui(stage_app, stage, settings)
        # close the settings widget
        await escape(stage_app)
async def test_gui_add_sub_stages(stage_app: CeedTestApp):
    """With a root stage selected, the "add stage" button creates child
    stages inside it; each child's parameters are then set via its settings
    panel. At most two children are kept at a time.
    """
    add_stage = stage_app.resolve_widget().down(test_name='stage add')()
    await touch_widget(stage_app, add_stage)
    base_stage: CeedStage = stage_app.app.stage_factory.stages[0]
    name_label = stage_app.resolve_widget(base_stage.display).down(
        test_name='stage label')()
    await touch_widget(stage_app, name_label)
    assert base_stage.display.selected
    assert not base_stage.stages
    stages = []
    for i, stage_cls in enumerate(stage_classes[:4]):
        stage = stage_cls(app=stage_app, show_in_gui=False)
        stages.append(stage)
        # don't keep more than two stages so the list is not too long
        if i >= 2:
            oldest_stage = stages.pop(0)
            assert oldest_stage.stage in base_stage.stages
            remove_btn = stage_app.resolve_widget(
                oldest_stage.stage.display).down(test_name='del btn stage')()
            await touch_widget(stage_app, remove_btn)
            assert oldest_stage.stage not in base_stage.stages
        # deleting may deselect the root stage; re-select before adding
        if not base_stage.display.selected:
            await touch_widget(stage_app, name_label)
            assert base_stage.display.selected
        await touch_widget(stage_app, add_stage)
        # the new stage went into the selected root, not the root list
        assert len(base_stage.stages) == min(2, i + 1)
        stage.stage = base_stage.stages[-1]
        settings_btn = stage_app.resolve_widget(stage.stage.display).down(
            test_name='stage settings open')()
        await touch_widget(stage_app, settings_btn)
        stage.stage = base_stage.stages[-1]
        await assert_set_params_in_gui(stage_app, stage, check_name=False)
async def test_gui_drag_shape_to_stage(stage_app: CeedTestApp):
    """Drags shapes and shape groups from the painter list onto a stage's
    shape area and verifies they are appended to the stage's shapes, keeping
    at most three per stage.
    """
    (group, group2, group3), (shape1, shape2, shape3) = \
        assert_add_three_groups(
            shape_factory=stage_app.app.shape_factory, app=stage_app,
            show_in_gui=True)
    await stage_app.wait_clock_frames(2)
    (s1, s2, s3), _ = create_test_stages(
        stage_app=stage_app, add_func=False, add_shapes=False)
    await stage_app.wait_clock_frames(2)
    for stage in (s2, s3):
        container = stage.stage.display.shape_widget
        shapes = stage.stage.shapes
        assert not shapes
        added_shapes = []
        for i, shape in enumerate((shape1, group2, shape3, shape2)):
            # groups and plain shapes expose different drag handles
            if isinstance(shape, CeedShapeGroup):
                src = stage_app.resolve_widget(shape.widget).down(
                    test_name='group drag button')()
            else:
                shape = shape.shape
                src = stage_app.resolve_widget(shape.widget).down(
                    test_name='shape drag')()
            # drop just above the container's bottom edge when it has height
            offset = (0, 5) if container.height else (0, 0)
            async for _ in stage_app.do_touch_drag_follow(
                    widget=src, target_widget=container,
                    target_widget_loc=('center_x', 'y'),
                    target_widget_offset=offset, drag_n=15):
                pass
            # the dropped shape/group was appended to the stage
            assert len(shapes) == min(3, i + 1)
            assert shape is shapes[-1].shape
            name_label = stage_app.resolve_widget(shapes[-1].display).down(
                test_name='stage shape name')()
            assert name_label.text == shape.name
            added_shapes.append(shapes[-1])
            # keep at most three entries per stage
            if i >= 2:
                oldest_shape = added_shapes.pop(0)
                assert oldest_shape in shapes
                remove_btn = stage_app.resolve_widget(
                    oldest_shape.display).down(
                    test_name='stage shape del')()
                await touch_widget(stage_app, remove_btn)
                assert oldest_shape not in shapes
            await stage_app.wait_clock_frames(2)
async def test_gui_drag_func_to_stage(stage_app: CeedTestApp):
    """Drags functions from the global function list onto a stage: top-level
    functions arrive as :class:`CeedFuncRef` (then replaced with copies),
    while functions nested inside a group arrive as equal copies directly.
    """
    global_funcs = create_funcs(func_app=stage_app, show_in_gui=True)
    group_func: GroupFunction = global_funcs[-1]
    ff1 = group_func.wrapper_funcs[0]
    ff2 = group_func.wrapper_funcs[1]
    # (func, is_sub_func) pairs: sub-funcs of the group are dragged as copies
    global_funcs = [
        (ff1, True)] + [(f, False) for f in global_funcs] + [(ff2, True)]
    await stage_app.wait_clock_frames(2)
    (s1, s2, s3), _ = create_test_stages(
        stage_app=stage_app, add_func=False, add_shapes=False)
    await stage_app.wait_clock_frames(2)
    # multiple funcs
    for stage in (s2, s3):
        container = stage.stage.display.func_widget
        functions = stage.stage.functions
        assert not functions
        # drag each func to the stage
        added_funcs = []
        for i, (func, is_sub_func) in enumerate(global_funcs):
            src = stage_app.resolve_widget(func.func.display).down(
                test_name='func drag btn')()
            async for _ in stage_app.do_touch_drag_follow(
                    widget=src, target_widget=container,
                    target_widget_loc=('center_x', 'y'),
                    target_widget_offset=(0, 5)):
                pass
            # check that shape was added
            assert len(functions) == min(3, i + 1)
            assert functions[-1] is not func.func
            if is_sub_func:
                # sub-funcs are dropped as concrete equal copies
                assert isinstance(functions[-1], (FuncBase, FuncGroup))
                assert_funcs_same(functions[-1], func.func)
            else:
                # top-level funcs are dropped as refs; convert to a copy
                assert isinstance(functions[-1], CeedFuncRef)
                assert func.func is functions[-1].func
                await replace_last_ref_with_original_func(
                    stage_app, functions, func.func.name)
            added_funcs.append(functions[-1])
            # don't keep more than two funcs so the list is not too long
            if i >= 2:
                oldest_func = added_funcs.pop(0)
                assert oldest_func in functions
                remove_btn = stage_app.resolve_widget(
                    oldest_func.display).down(
                    test_name='del_btn_func')()
                await touch_widget(stage_app, remove_btn)
                assert oldest_func not in functions
            await stage_app.wait_clock_frames(2)
async def test_gui_drag_stage_to_stage(stage_app: CeedTestApp):
    """Drags stages onto other stages: top-level stages arrive as
    :class:`CeedStageRef` (then replaced with copies), while an already-nested
    stage (s21) arrives as an equal concrete copy directly.
    """
    (s1, s2, s21), _ = create_test_stages(
        stage_app=stage_app, show_in_gui=True, add_func=False,
        add_shapes=False)
    (s3, s4, s41), _ = create_test_stages(
        stage_app=stage_app, show_in_gui=True, add_func=False,
        add_shapes=False)
    await stage_app.wait_clock_frames(2)
    # collapse some stage widgets so drag targets stay on screen
    for stage in (s1, s21, s3):
        stage.stage.display.show_more = False
    await stage_app.wait_clock_frames(2)
    for stage in (s4, s41):
        container = stage.stage.display.stage_widget
        stages = stage.stage.stages
        # s4 already has one child (s41); s41 starts empty
        n_start = 0 if stage is s41 else 1
        assert len(stages) == n_start
        added_stages = []
        for i, src_stage in enumerate((s1, s2, s21, s3)):
            src = stage_app.resolve_widget(src_stage.stage.display).down(
                test_name='stage drag btn')()
            async for _ in stage_app.do_touch_drag_follow(
                    widget=src, target_widget=container,
                    target_widget_loc=('center_x', 'y'),
                    target_widget_offset=(0, 5)):
                pass
            assert len(stages) == min(3, i + 1) + n_start
            assert stages[-1] is not src_stage.stage
            if src_stage is s21:
                # nested source stage is dropped as a concrete equal copy
                assert isinstance(stages[-1], CeedStage)
                assert_stages_same(stages[-1], src_stage.stage)
            else:
                # top-level source stages are dropped as refs; convert to copy
                assert isinstance(stages[-1], CeedStageRef)
                assert src_stage.stage is stages[-1].stage
                await replace_last_ref_with_original_stage(
                    stage_app, stages, src_stage.stage.name)
            added_stages.append(stages[-1])
            # keep at most three dropped stages per target
            if i >= 2:
                oldest_stage = added_stages.pop(0)
                assert oldest_stage in stages
                remove_btn = stage_app.resolve_widget(
                    oldest_stage.display).down(
                    test_name='del btn stage')()
                await touch_widget(stage_app, remove_btn)
                assert oldest_stage not in stages
            await stage_app.wait_clock_frames(2)
def verify_color(
        stage_app, shape_color, shape2_color, frame, centers, flip, video_mode):
    """Samples the rendered pixel colors at the two shape centers and asserts
    they match the expected per-frame intensities.

    ``shape_color``/``shape2_color`` are per-logical-frame ``(r, g, b, val)``
    tuples; ``frame`` is the next logical frame index to check. Returns the
    updated frame index: 1 frame consumed in RGB mode, 4 in QUAD4X and 12 in
    QUAD12X (4 quadrants x 3 color planes).
    """
    (cx1, cy1), (cx2, cy2) = centers
    # when the projector output is mirrored, sample the mirrored x positions
    # (1920 is the full projector width)
    if flip:
        cx1 = 1920 - cx1
        cx2 = 1920 - cx2
    centers = [[(cx1, cy1), (cx2, cy2)]]
    if 'QUAD' in video_mode:
        # in quad modes each logical frame occupies one quadrant at half
        # resolution, so halve the coordinates and offset into each quadrant
        cx1, cx2, cy1, cy2 = cx1 // 2, cx2 // 2, cy1 // 2, cy2 // 2
        corners = ((0, 540), (960, 540), (0, 0), (960, 0))
        centers = [
            [(cx + x, cy + y) for cx, cy in [(cx1, cy1), (cx2, cy2)]]
            for x, y in corners]
    if video_mode == 'QUAD12X':
        # first get all 4 centers values, one for each quadrant
        rgb_values = []
        for i in range(4):
            rgb = stage_app.get_widget_pos_pixel(
                stage_app.app.shape_factory, centers[i])
            # normalize 0-255 pixel values to 0-1
            rgb = [[c / 255 for c in p] for p in rgb]
            rgb_values.append(rgb)
        # each color plane of each quadrant is one grayscale logical frame:
        # r, g, b
        for plane in [0, 1, 2]:
            # 4 quads
            for color1, color2 in rgb_values:
                assert isclose(
                    color1[plane], shape_color[frame][3], abs_tol=2 / 255)
                assert isclose(
                    color2[plane], shape2_color[frame][3], abs_tol=2 / 255)
                frame += 1
    else:
        n_sub_frames = 1
        if video_mode == 'QUAD4X':
            n_sub_frames = 4
        for i in range(n_sub_frames):
            points = stage_app.get_widget_pos_pixel(
                stage_app.app.shape_factory, centers[i])
            points = [[c / 255 for c in p] for p in points]
            (r1, g1, b1, _), (r2, g2, b2, _) = points
            # each channel must match expected intensity if that channel is
            # active for the stage, otherwise it must be exactly 0
            val = shape_color[frame]
            assert isclose(r1, val[3], abs_tol=2 / 255) if val[0] else r1 == 0
            assert isclose(g1, val[3], abs_tol=2 / 255) if val[1] else g1 == 0
            assert isclose(b1, val[3], abs_tol=2 / 255) if val[2] else b1 == 0
            val = shape2_color[frame]
            assert isclose(r2, val[3], abs_tol=2 / 255) if val[0] else r2 == 0
            assert isclose(g2, val[3], abs_tol=2 / 255) if val[1] else g2 == 0
            assert isclose(b2, val[3], abs_tol=2 / 255) if val[2] else b2 == 0
            frame += 1
    return frame
@pytest.mark.parametrize('video_mode', ['RGB', 'QUAD4X', 'QUAD12X'])
@pytest.mark.parametrize(
    'flip,skip', [(True, False), (False, True), (False, False)])
async def test_recursive_play_stage_intensity(
        stage_app: CeedTestApp, tmp_path, flip, skip, video_mode):
    """Plays a recursive stage tree with linear-ramp functions, verifies the
    on-screen pixel intensity every frame while it runs, then saves the data
    and verifies the logged intensity arrays (including skipped-frame
    bookkeeping when frame skipping is enabled).
    """
    from ..test_stages import create_recursive_stages
    from .examples.shapes import CircleShapeP1, CircleShapeP2
    from kivy.clock import Clock
    from ceed.analysis import CeedDataReader
    root, g1, g2, s1, s2, s3, s4, s5, s6 = create_recursive_stages(
        stage_app.app.stage_factory, app=stage_app)
    from ceed.function.plugin import LinearFunc
    # odd-indexed stages last 2s, even-indexed 1s; all ramp at slope .5
    for i, stage in enumerate((s1, s2, s3, s4, s5, s6)):
        stage.stage.add_func(LinearFunc(
            function_factory=stage_app.app.function_factory, b=0, m=.5,
            duration=(i % 2 + 1) * 1))
    shape = CircleShapeP1(
        app=None, painter=stage_app.app.shape_factory, show_in_gui=True)
    shape2 = CircleShapeP2(
        app=None, painter=stage_app.app.shape_factory, show_in_gui=True)
    # shape is driven by s1/s4/s5, shape2 by s2/s3/s6
    s1.stage.add_shape(shape.shape)
    s4.stage.add_shape(shape.shape)
    s5.stage.add_shape(shape.shape)
    s2.stage.add_shape(shape2.shape)
    s3.stage.add_shape(shape2.shape)
    s6.stage.add_shape(shape2.shape)
    root.show_in_gui()
    await stage_app.wait_clock_frames(2)
    frame = 0
    event = None
    # make GPU too slow to force skipping frames, when enabled: request a
    # frame rate 10 fps above what the app can actually render
    fps = await measure_fps(stage_app) + 10
    rate = stage_app.app.view_controller.frame_rate = fps
    stage_app.app.view_controller.use_software_frame_rate = False
    stage_app.app.view_controller.flip_projector = flip
    stage_app.app.view_controller.skip_estimated_missed_frames = skip
    stage_app.app.view_controller.video_mode = video_mode
    stage_app.app.view_controller.pad_to_stage_handshake = False
    # logical sub-frames per video frame for each quad mode
    n_sub_frames = 1
    if video_mode == 'QUAD4X':
        n_sub_frames = 4
    elif video_mode == 'QUAD12X':
        n_sub_frames = 12
    centers = shape.center, shape2.center
    # total experiment duration is 6s (presumably from the serial stage
    # structure: 2 + 1 + 2 + 1 — TODO confirm against create_recursive_stages)
    num_frames = rate * n_sub_frames * (2 + 1 + 2 + 1)
    # expected (r_active, g_active, b_active, intensity) per logical frame
    shape_color = [(False, False, False, 0.), ] * num_frames
    shape2_color = [(False, False, False, 0.), ] * num_frames
    skipped_frame_indices = set()
    n_missed_frames = 0
    # fill in the expected ramp for the windows where each stage is active
    for s, start, e in [(s1, 0, 1), (s4, 3, 5), (s5, 5, 6)]:
        for i in range(start * rate * n_sub_frames, e * rate * n_sub_frames):
            val = (i - start * rate * n_sub_frames) / (rate * n_sub_frames) * .5
            shape_color[i] = s.color_r, s.color_g, s.color_b, val
    for s, start, e in [(s2, 0, 2), (s3, 2, 3), (s6, 5, 6)]:
        for i in range(start * rate * n_sub_frames, e * rate * n_sub_frames):
            val = (i - start * rate * n_sub_frames) / (rate * n_sub_frames) * .5
            shape2_color[i] = s.color_r, s.color_g, s.color_b, val
    def verify_intensity(*largs):
        # per-clock-frame callback that checks the rendered pixels against
        # the expected colors and tracks skipped frames
        nonlocal frame, n_missed_frames
        # total frames is a multiple of n_sub_frames
        if not stage_app.app.view_controller.stage_active:
            assert stage_app.app.view_controller.count - 1 == num_frames
            if skip:
                # last frame could be passed actual frames
                assert frame - n_missed_frames * n_sub_frames <= num_frames
            else:
                assert frame == num_frames
            event.cancel()
            return
        # not yet started
        if not stage_app.app.view_controller.count:
            return
        # some frame may have been skipped, but num_frames is max frames
        # This callback happens after frame callback and after the frame flip.
        # This also means we record even the last skipped frames (if skipped)
        assert frame < num_frames
        frame = verify_color(
            stage_app, shape_color, shape2_color, frame, centers, flip,
            video_mode)
        assert stage_app.app.view_controller.count == frame
        if skip:
            # some frames may have been dropped for next frame
            n_missed_frames = stage_app.app.view_controller._n_missed_frames
            for k in range(n_missed_frames * n_sub_frames):
                # frame is next frame index, next frame is skipped
                skipped_frame_indices.add(frame)
                frame += 1
        else:
            assert not stage_app.app.view_controller._n_missed_frames
    event = Clock.create_trigger(verify_intensity, timeout=0, interval=True)
    event()
    stage_app.app.view_controller.request_stage_start(root.name)
    await wait_experiment_done(stage_app, timeout=num_frames / rate * 50)
    await wait_experiment_stopped(stage_app)
    # save the experiment and re-read it through the analysis API
    filename = str(tmp_path / 'recursive_play_stage_intensity.h5')
    stage_app.app.ceed_data.save(filename=filename)
    f = CeedDataReader(filename)
    f.open_h5()
    assert f.experiments_in_file == ['0']
    assert not f.num_images_in_file
    f.load_experiment(0)
    shape_data = f.shapes_intensity[shape.name]
    shape_data_rendered = f.shapes_intensity_rendered[shape.name]
    shape2_data = f.shapes_intensity[shape2.name]
    shape2_data_rendered = f.shapes_intensity_rendered[shape2.name]
    recorded_rendered_frames = f.rendered_frames
    # even when skipping, skipped frames are still logged but they are removed
    # in xxx_rendered arrays
    if skip:
        # because frame rate is high, we'll definitely drop frames
        assert skipped_frame_indices
    else:
        assert not skipped_frame_indices
    assert shape_data.shape[0] == num_frames
    assert shape2_data.shape[0] == num_frames
    n_skipped = len(skipped_frame_indices)
    if skip:
        # rendered arrays may include up to the final batch of missed frames
        # (n_missed_frames holds the last value seen by verify_intensity)
        assert num_frames - n_skipped <= shape_data_rendered.shape[0] \
            <= num_frames - n_skipped + n_sub_frames * n_missed_frames
        assert num_frames - n_skipped <= shape2_data_rendered.shape[0] \
            <= num_frames - n_skipped + n_sub_frames * n_missed_frames
    else:
        assert shape_data_rendered.shape[0] == num_frames
        assert shape2_data_rendered.shape[0] == num_frames
    # QUAD12X renders grayscale, so all channels carry the intensity
    gray = video_mode == 'QUAD12X'
    # i indexes the rendered-only arrays, k the full logged arrays
    i = 0
    k = 0
    for (r, g, b, val), (r1, g1, b1, _) in zip(shape_color, shape_data):
        assert isclose(val, r1, abs_tol=2 / 255) if r or gray else r1 == 0
        assert isclose(val, g1, abs_tol=2 / 255) if g or gray else g1 == 0
        assert isclose(val, b1, abs_tol=2 / 255) if b or gray else b1 == 0
        if skip:
            assert recorded_rendered_frames[k] \
                == (k not in skipped_frame_indices)
        else:
            assert recorded_rendered_frames[k]
        if k not in skipped_frame_indices:
            r1, g1, b1, _ = shape_data_rendered[i, :]
            assert isclose(val, r1, abs_tol=2 / 255) if r or gray else r1 == 0
            assert isclose(val, g1, abs_tol=2 / 255) if g or gray else g1 == 0
            assert isclose(val, b1, abs_tol=2 / 255) if b or gray else b1 == 0
            i += 1
        k += 1
    i = 0
    k = 0
    for (r, g, b, val), (r1, g1, b1, _) in zip(shape2_color, shape2_data):
        assert isclose(val, r1, abs_tol=2 / 255) if r or gray else r1 == 0
        assert isclose(val, g1, abs_tol=2 / 255) if g or gray else g1 == 0
        assert isclose(val, b1, abs_tol=2 / 255) if b or gray else b1 == 0
        if skip:
            assert recorded_rendered_frames[k] \
                == (k not in skipped_frame_indices)
        else:
            assert recorded_rendered_frames[k]
        if k not in skipped_frame_indices:
            r1, g1, b1, _ = shape2_data_rendered[i, :]
            assert isclose(val, r1, abs_tol=2 / 255) if r or gray else r1 == 0
            assert isclose(val, g1, abs_tol=2 / 255) if g or gray else g1 == 0
            assert isclose(val, b1, abs_tol=2 / 255) if b or gray else b1 == 0
            i += 1
        k += 1
    f.close_h5()
async def test_moat_stage_shapes(stage_app: CeedTestApp, tmp_path):
    """Two overlapping shapes (one drawn inside the other, a "moat") are
    driven by different stages; verifies the drawing order determines which
    color shows at the inner shape, both before and after reordering the
    shapes, and that the saved intensity data is unaffected by draw order.
    """
    from ..test_stages import create_recursive_stages
    from .examples.shapes import CircleShapeP1, CircleShapeP1Internal
    from ceed.function.plugin import ConstFunc
    from ceed.analysis import CeedDataReader
    root, g1, g2, s1, s2, s3, s4, s5, s6 = create_recursive_stages(
        stage_app.app.stage_factory, app=stage_app)
    # s1 paints blue only, s2 paints magenta (red + blue)
    s1.stage.color_r = False
    s1.stage.color_g = False
    s1.stage.color_b = True
    s2.stage.color_r = True
    s2.stage.color_g = False
    s2.stage.color_b = True
    shape = CircleShapeP1(
        app=None, painter=stage_app.app.shape_factory, show_in_gui=True)
    # a smaller shape located inside `shape`
    internal_shape = CircleShapeP1Internal(
        app=None, painter=stage_app.app.shape_factory, show_in_gui=True)
    # constant full intensity for 5s on each stage
    s1.stage.add_func(ConstFunc(
        function_factory=stage_app.app.function_factory, a=1, duration=5))
    s1.stage.add_shape(internal_shape.shape)
    s2.stage.add_func(ConstFunc(
        function_factory=stage_app.app.function_factory, a=1, duration=5))
    s2.stage.add_shape(shape.shape)
    root.show_in_gui()
    await stage_app.wait_clock_frames(2)
    stage_app.app.view_controller.frame_rate = 10
    stage_app.app.view_controller.use_software_frame_rate = False
    stage_app.app.view_controller.flip_projector = False
    stage_app.app.view_controller.request_stage_start(root.name)
    await wait_stage_experiment_started(stage_app)
    assert stage_app.app.view_controller.stage_active
    # inner shape drawn on top: its center shows s1's blue, outer shows
    # s2's magenta
    points = stage_app.get_widget_pos_pixel(
        stage_app.app.shape_factory, [internal_shape.center, shape.center])
    (r1, g1, b1, _), (r2, g2, b2, _) = points
    assert r1 == 0
    assert g1 == 0
    assert b1 == 255
    assert r2 == 255
    assert g2 == 0
    assert b2 == 255
    stage_app.app.view_controller.request_stage_end()
    await stage_app.wait_clock_frames(2)
    assert not stage_app.app.view_controller.stage_active
    # move the outer shape above the inner one so it now covers it
    stage_app.app.shape_factory.move_shape_upwards(shape.shape)
    await stage_app.wait_clock_frames(2)
    stage_app.app.view_controller.request_stage_start(root.name)
    await wait_stage_experiment_started(stage_app)
    assert stage_app.app.view_controller.stage_active
    # now both sampled points show the outer shape's magenta
    points = stage_app.get_widget_pos_pixel(
        stage_app.app.shape_factory, [internal_shape.center, shape.center])
    (r1, g1, b1, _), (r2, g2, b2, _) = points
    assert r1 == 255
    assert g1 == 0
    assert b1 == 255
    assert r2 == 255
    assert g2 == 0
    assert b2 == 255
    stage_app.app.view_controller.request_stage_end()
    await stage_app.wait_clock_frames(2)
    await wait_experiment_stopped(stage_app)
    filename = str(tmp_path / 'moat_stage_shapes.h5')
    stage_app.app.ceed_data.save(filename=filename)
    f = CeedDataReader(filename)
    f.open_h5()
    assert f.experiments_in_file == ['0', '1']
    assert not f.num_images_in_file
    # the logged per-shape intensity is the stage's value and is the same in
    # both experiments, regardless of draw order
    f.load_experiment(0)
    assert tuple(np.array(f.shapes_intensity[shape.name])[0, :3]) == (1, 0, 1)
    assert tuple(
        np.array(f.shapes_intensity[internal_shape.name])[0, :3]) == (0, 0, 1)
    f.load_experiment(1)
    assert tuple(np.array(f.shapes_intensity[shape.name])[0, :3]) == (1, 0, 1)
    assert tuple(
        np.array(f.shapes_intensity[internal_shape.name])[0, :3]) == (0, 0, 1)
    f.close_h5()
async def test_moat_single_stage_shapes(stage_app: CeedTestApp, tmp_path):
    """Like the moat test but with both shapes on a single stage, where the
    inner shape has ``keep_dark`` set: its pixels must stay black on screen
    and its logged intensity must be zero, while the outer shape shows the
    stage's blue.
    """
    from ..test_stages import create_recursive_stages
    from .examples.shapes import CircleShapeP1, CircleShapeP1Internal
    from ceed.function.plugin import ConstFunc
    from ceed.analysis import CeedDataReader
    root, g1, g2, s1, s2, s3, s4, s5, s6 = create_recursive_stages(
        stage_app.app.stage_factory, app=stage_app)
    # s1 paints blue only
    s1.stage.color_r = False
    s1.stage.color_g = False
    s1.stage.color_b = True
    shape = CircleShapeP1(
        app=None, painter=stage_app.app.shape_factory, show_in_gui=True)
    # smaller shape located inside `shape`
    internal_shape = CircleShapeP1Internal(
        app=None, painter=stage_app.app.shape_factory, show_in_gui=True)
    s1.stage.add_func(ConstFunc(
        function_factory=stage_app.app.function_factory, a=1, duration=5))
    stage_shape = s1.stage.add_shape(internal_shape.shape)
    s1.stage.add_shape(shape.shape)
    # the inner shape is forced black regardless of the stage intensity
    stage_shape.keep_dark = True
    root.show_in_gui()
    await stage_app.wait_clock_frames(2)
    stage_app.app.view_controller.frame_rate = 10
    stage_app.app.view_controller.use_software_frame_rate = False
    stage_app.app.view_controller.flip_projector = False
    stage_app.app.view_controller.request_stage_start(root.name)
    await wait_stage_experiment_started(stage_app)
    assert stage_app.app.view_controller.stage_active
    # inner center is black (keep_dark), outer center is full blue
    points = stage_app.get_widget_pos_pixel(
        stage_app.app.shape_factory, [internal_shape.center, shape.center])
    (r1, g1, b1, _), (r2, g2, b2, _) = points
    assert r1 == 0
    assert g1 == 0
    assert b1 == 0
    assert r2 == 0
    assert g2 == 0
    assert b2 == 255
    stage_app.app.view_controller.request_stage_end()
    await stage_app.wait_clock_frames(2)
    assert not stage_app.app.view_controller.stage_active
    await wait_experiment_stopped(stage_app)
    filename = str(tmp_path / 'moat_single_stage_shapes.h5')
    stage_app.app.ceed_data.save(filename=filename)
    f = CeedDataReader(filename)
    f.open_h5()
    assert f.experiments_in_file == ['0', ]
    assert not f.num_images_in_file
    f.load_experiment(0)
    # outer shape logs blue at full intensity; keep_dark shape logs black
    assert tuple(np.array(f.shapes_intensity[shape.name])[0]) == (0, 0, 1, 1)
    assert tuple(
        np.array(f.shapes_intensity[internal_shape.name])[0]) == (0, 0, 0, 1)
    f.close_h5()
@pytest.mark.parametrize('func', [True, False])
async def test_event_data_empty(stage_app: CeedTestApp, tmp_path, func):
    """Runs a stage that produces no frames (optionally with a zero-duration
    function attached) and verifies the logged start/end loop events and the
    :meth:`CeedDataReader.format_event_data` lookups by id, name and object.
    """
    from ..test_stages import create_2_shape_stage
    from ceed.function.plugin import ConstFunc
    from ceed.analysis import CeedDataReader
    root, s1, s2, shape1, shape2 = create_2_shape_stage(
        stage_app.app.stage_factory, show_in_gui=True, app=stage_app)
    s1.stage.name = 'test stage'
    if func:
        # zero-duration function: the stage still emits start/end events
        s1.stage.add_func(ConstFunc(
            function_factory=stage_app.app.function_factory, duration=0))
    stage_app.app.view_controller.use_software_frame_rate = False
    stage_app.app.view_controller.skip_estimated_missed_frames = False
    stage_app.app.view_controller.frame_rate = 10
    stage_app.app.view_controller.request_stage_start(root.name)
    await wait_experiment_done(stage_app, timeout=180)
    await wait_experiment_stopped(stage_app)
    filename = str(tmp_path / 'event_data_empty.h5')
    stage_app.app.ceed_data.save(filename=filename)
    # expected ceed_id order of (start events, end events); with a function
    # attached an extra object (id 3) participates and the order changes
    if func:
        order = (0, 1, 3, 2), (2, 1, 3, 0)
    else:
        order = (0, 1, 2), (1, 2, 0)
    # each object emits 'start' then 'start_loop', and later 'end_loop' then
    # 'end', all at frame 0 with loop indices [0, 0]
    loops = [
        [0, i, 'start' + s, [0, ] * 2] for i in order[0] for s in ('', '_loop')
    ]
    loops += [
        [0, i, 'end' + s, [0, ] * 2] for i in order[1] for s in ('_loop', '')
    ]
    with CeedDataReader(filename) as f:
        f.load_experiment(0)
        # drop the last element of each event's trailing list before
        # comparing (its meaning isn't checked here)
        events = [d[:-1] + [d[-1][:-1], ] for d in f.event_data]
        assert loops == events
        s = f.experiment_stage.stages[0]
        # the same stage can be looked up by ceed_id, by name, or by object
        for kw in [{'ceed_id': s.ceed_id}, {'ceed_name': s1.stage.name},
                   {'ceed_obj': s}]:
            items = f.format_event_data(event='start_loop', **kw)
            assert len(items) == 1
            assert items[0][:5] == [0, s, 'start_loop', 0, 0]
            items = f.format_event_data(**kw)
            assert len(items) == 4
            for item, val in zip(
                    items, ['start', 'start_loop', 'end_loop', 'end']):
                assert item[:5] == [0, s, val, 0, 0]
@pytest.mark.parametrize(
    'quad,sub_frames', [('RGB', 1), ('QUAD4X', 4), ('QUAD12X', 12)])
@pytest.mark.parametrize('skip', [False, True])
async def test_pad_stage_ticks(
        stage_app: CeedTestApp, tmp_path, quad, sub_frames, skip):
    """Runs an (otherwise empty) stage with and without
    ``pad_to_stage_handshake`` and verifies the stage is padded to exactly
    the handshake length, and that the frame_counter/frame_time_counter
    bookkeeping in the saved file is consistent, including when frames are
    skipped.
    """
    from ceed.analysis import CeedDataReader
    root = SerialAllStage(
        stage_factory=stage_app.app.stage_factory, show_in_gui=False,
        app=stage_app, create_add_to_parent=True)
    shape = CircleShapeP1(
        app=None, painter=stage_app.app.shape_factory, show_in_gui=True)
    root.stage.add_shape(shape.shape)
    root.show_in_gui()
    await stage_app.wait_clock_frames(2)
    # request a rate 10 fps above what's achievable so frames can be missed
    stage_app.app.view_controller.frame_rate = await measure_fps(stage_app) + 10
    stage_app.app.view_controller.skip_estimated_missed_frames = skip
    stage_app.app.view_controller.use_software_frame_rate = False
    stage_app.app.view_controller.video_mode = quad
    # experiment 0: no padding -> zero frames recorded
    stage_app.app.view_controller.pad_to_stage_handshake = False
    stage_app.app.view_controller.request_stage_start(root.name)
    await wait_experiment_done(stage_app)
    # experiment 1: padded to the handshake length
    stage_app.app.view_controller.pad_to_stage_handshake = True
    stage_app.app.view_controller.request_stage_start(root.name)
    await wait_experiment_done(stage_app, 300)
    await wait_experiment_stopped(stage_app)
    filename = str(tmp_path / 'pad_stage_ticks.h5')
    stage_app.app.ceed_data.save(filename=filename)
    f = CeedDataReader(filename)
    f.open_h5()
    assert f.experiments_in_file == ['0', '1']
    assert not f.num_images_in_file
    f.load_experiment('0')
    assert f.shapes_intensity[shape.name].shape == (0, 4)
    f.load_experiment('1')
    # padded length equals the handshake tick count for this sub-frame mode
    assert f.shapes_intensity[shape.name].shape == (
        stage_app.app.data_serializer.num_ticks_handshake(16, sub_frames), 4)
    # which scales linearly with the number of sub-frames
    assert f.shapes_intensity[shape.name].shape == (
        stage_app.app.data_serializer.num_ticks_handshake(16, 1) * sub_frames,
        4)
    frame_time_counter = np.asarray(f._block.data_arrays['frame_time_counter'])
    frame_time = np.asarray(f._block.data_arrays['frame_time'])
    rendered_frames_bool = f.rendered_frames
    # one timestamp per rendered video frame; each covers sub_frames
    # logical frames
    assert len(frame_time_counter) == len(frame_time)
    assert np.sum(rendered_frames_bool) == len(frame_time_counter) * sub_frames
    frame_counter = np.asarray(f._block.data_arrays['frame_counter'])
    n = f.shapes_intensity[shape.name].shape[0]
    if skip:
        # some logical frames were skipped, so fewer were actually timed
        assert sub_frames * len(frame_time_counter) < n
    else:
        assert sub_frames * len(frame_time_counter) == n
    # indices of the first sub-frame of each video frame
    rendered_indices = np.arange(0, n, sub_frames)
    if skip:
        assert len(frame_time_counter) < len(frame_counter) // sub_frames
        assert len(rendered_indices) > len(frame_time_counter)
    else:
        assert len(frame_time_counter) == len(frame_counter) // sub_frames
        assert len(rendered_indices) == len(frame_time_counter)
    # frame_counter simply counts every logical frame from 1
    assert np.all(np.arange(1, n + 1) == frame_counter)
    # count recorded is last sub-frame
    if skip:
        assert np.all(
            np.isin(frame_time_counter, rendered_indices + sub_frames))
        # timestamps' counters are strictly increasing
        assert np.all(frame_time_counter[1:] - frame_time_counter[:-1] > 0)
        assert np.all(np.isin(
            frame_time_counter,
            frame_counter[rendered_indices + sub_frames - 1]))
    else:
        assert np.all(frame_time_counter == rendered_indices + sub_frames)
        assert np.all(
            frame_counter[rendered_indices + sub_frames - 1]
            == frame_time_counter)
    f.close_h5()
@contextmanager
def add_to_path(tmp_path, *args):
    """Context manager that materializes a fake ``my_gui_stage_plugin``
    package under ``tmp_path`` and makes it importable.

    While active, ``tmp_path`` is on ``sys.path`` and the package's
    ``__init__.py`` contains :data:`fake_plugin_stage`. On exit the path
    entry is removed and any cached import of the package is evicted from
    ``sys.modules`` so later tests re-import a fresh copy.

    :param tmp_path: a :class:`pathlib.Path`-like temp directory.
    :param args: ignored; accepted so this matches the ``app_context``
        fixture signature.
    """
    sys.path.append(str(tmp_path))
    mod = tmp_path / 'my_gui_stage_plugin' / '__init__.py'
    try:
        # exist_ok: the original plain mkdir() raised FileExistsError when
        # the context was entered more than once for the same tmp_path
        mod.parent.mkdir(exist_ok=True)
        mod.write_text(fake_plugin_stage)
        yield None
    finally:
        sys.path.remove(str(tmp_path))
        if 'my_gui_stage_plugin' in sys.modules:
            del sys.modules['my_gui_stage_plugin']
@pytest.mark.parametrize(
    "ceed_app",
    [{'yaml_config': {
        'external_stage_plugin_package': 'my_gui_stage_plugin',
        'view': {'teensy_frame_estimation': {'use_teensy': False}}},
      'app_context': add_to_path}, ],
    indirect=True
)
@pytest.mark.parametrize('external', [False, True])
async def test_external_plugin_named_package(
        stage_app: CeedTestApp, tmp_path, external):
    """The app is configured (via the indirect ``ceed_app`` fixture above) to
    load the fake external stage plugin package created by
    :func:`add_to_path`; verifies the plugin's ``FakeStage`` is registered,
    an experiment runs with it, and its custom ``val`` property survives the
    experiment round-trip.
    """
    stage_factory = stage_app.app.stage_factory
    # the external plugin must have registered its stage class
    assert 'FakeStage' in stage_factory.stages_cls
    stage = SerialAllStage(
        stage_factory=stage_factory, show_in_gui=True, app=stage_app,
        create_add_to_parent=False, stage_cls=stage_factory.get('FakeStage'))
    # plugin-defined property; checked again after the experiment below
    stage.stage.val = 13
    await run_plugin_experiment(stage_app, tmp_path, external, stage=stage)
    assert stage_factory.stage_names[last_experiment_stage_name].val == 13
@pytest.mark.parametrize(
    'quad,sub_frames', [('RGB', 1), ('QUAD4X', 4), ('QUAD12X', 12)])
@pytest.mark.parametrize('main_frames', [1, 1.5, 2])
async def test_short_stage(
        stage_app: CeedTestApp, tmp_path, quad, sub_frames, main_frames):
    """Runs a very short stage (1-2 video frames; possibly a fractional
    number of frames) at a low frame rate and verifies the displayed
    intensities, the saved intensity arrays, and that trailing sub-frames of
    a partially-used quad frame are rendered blank.
    """
    from ceed.analysis import CeedDataReader
    from ceed.function.plugin import LinearFunc
    from kivy.clock import Clock
    # number of logical frames the 1s stage occupies; fractional
    # main_frames round up to a whole logical frame
    num_frames = int(math.ceil(main_frames * sub_frames))
    rate = main_frames
    root = SerialAllStage(
        stage_factory=stage_app.app.stage_factory, show_in_gui=False,
        app=stage_app, create_add_to_parent=True)
    shape = CircleShapeP1(
        app=None, painter=stage_app.app.shape_factory, show_in_gui=True)
    root.stage.add_shape(shape.shape)
    # 1s unit ramp driving the shape intensity
    root.stage.add_func(LinearFunc(
        function_factory=stage_app.app.function_factory, b=0, m=1,
        duration=1))
    root.show_in_gui()
    await stage_app.wait_clock_frames(2)
    # run at a very low frame rate (1-2 fps) so the whole stage spans only
    # one or two video frames
    stage_app.app.view_controller.frame_rate = rate
    stage_app.app.view_controller.use_software_frame_rate = False
    stage_app.app.view_controller.video_mode = quad
    stage_app.app.view_controller.pad_to_stage_handshake = False
    stage_app.app.view_controller.flip_projector = False
    frame = 0
    event = None
    cx, cy = shape.shape.centroid
    if sub_frames == 1:
        centers = [(cx, cy)]
    else:
        # quad modes: the shape appears at half resolution in each quadrant
        cx1, cy1 = cx // 2, cy // 2
        corners = ((0, 540), (960, 540), (0, 0), (960, 0))
        centers = [(cx1 + x, cy1 + y) for x, y in corners]
    intensity = []
    # total logical frames rendered, including blank trailing sub-frames of
    # the last (partially used) video frame
    total_rounded_frames = math.ceil(main_frames) * sub_frames
    def verify_intensity(*largs):
        # per-clock-frame callback recording the displayed pixel values
        nonlocal frame
        if not stage_app.app.view_controller.stage_active:
            event.cancel()
            return
        # not yet started
        if not stage_app.app.view_controller.count:
            return
        assert frame < num_frames
        rgb = stage_app.get_widget_pos_pixel(
            stage_app.app.shape_factory, centers)
        # normalize 0-255 pixel values to 0-1
        rgb = [[c / 255 for c in p] for p in rgb]
        if sub_frames == 12:
            # QUAD12X: each color plane of each quadrant is a grayscale
            # logical frame; record it as an (v, v, v, 1) tuple
            for plane in range(3):
                for point in rgb:
                    value = point[plane]
                    intensity.append((value, value, value, 1))
        else:
            intensity.extend(rgb)
        frame += sub_frames
        assert frame in (
            stage_app.app.view_controller.count, total_rounded_frames)
        assert not stage_app.app.view_controller._n_missed_frames
    event = Clock.create_trigger(verify_intensity, timeout=0, interval=True)
    event()
    stage_app.app.view_controller.request_stage_start(root.name)
    await wait_experiment_done(stage_app, timeout=50)
    await wait_experiment_stopped(stage_app)
    assert stage_app.app.view_controller.count == num_frames + 1
    # only counts whole frames
    assert frame == total_rounded_frames
    # have data for blank frames at end
    assert len(intensity) == total_rounded_frames
    assert total_rounded_frames >= num_frames
    filename = str(tmp_path / 'short_stage.h5')
    stage_app.app.ceed_data.save(filename=filename)
    with CeedDataReader(filename) as f:
        f.load_experiment(0)
        shape_data = f.shapes_intensity[shape.name]
        shape_data_rendered = f.shapes_intensity_rendered[shape.name]
        recorded_rendered_frames = f.rendered_frames
        assert shape_data.shape[0] == num_frames
        assert shape_data_rendered.shape[0] == num_frames
        assert len(recorded_rendered_frames) == num_frames
        # for each sub-frame
        gray = quad == 'QUAD12X'
        r, g, b = root.color_r, root.color_g, root.color_b
        for i, ((v1, v2, v3, _), (r1, g1, b1, _)) in enumerate(
                zip(intensity[:num_frames], shape_data)):
            # we saw the intensity we expect
            val = i / (main_frames * sub_frames)
            assert isclose(val, v1, abs_tol=2 / 255) if r or gray else v1 == 0
            assert isclose(val, v2, abs_tol=2 / 255) if g or gray else v2 == 0
            assert isclose(val, v3, abs_tol=2 / 255) if b or gray else v3 == 0
            # what we saw is what is recorded
            assert isclose(v1, r1, abs_tol=2 / 255)
            assert isclose(v2, g1, abs_tol=2 / 255)
            assert isclose(v3, b1, abs_tol=2 / 255)
            assert recorded_rendered_frames[i]
            assert shape_data_rendered[i, 0] == r1
            assert shape_data_rendered[i, 1] == g1
            assert shape_data_rendered[i, 2] == b1
        # remaining frames are blank in quad mode
        for r, g, b, _ in intensity[num_frames:]:
            assert not r
            assert not g
            assert not b
| true | true |
f735c01bc52db529a6823dbff4e72eb236525344 | 1,495 | py | Python | python/examples/detection/ttfnet_darknet53_1x_coco/test_client.py | BeyondYourself/Serving | 5114f5ce513d8d37890ef3678cef08fb2ca861af | [
"Apache-2.0"
] | null | null | null | python/examples/detection/ttfnet_darknet53_1x_coco/test_client.py | BeyondYourself/Serving | 5114f5ce513d8d37890ef3678cef08fb2ca861af | [
"Apache-2.0"
] | null | null | null | python/examples/detection/ttfnet_darknet53_1x_coco/test_client.py | BeyondYourself/Serving | 5114f5ce513d8d37890ef3678cef08fb2ca861af | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import numpy as np
from paddle_serving_client import Client
from paddle_serving_app.reader import *
import cv2
# Preprocessing pipeline: load the image file, resize to the model's 512x512
# input with bilinear interpolation, normalize with ImageNet channel
# mean/std (expressed on the 0-255 scale), and convert HWC -> CHW.
preprocess = DetectionSequential([
    DetectionFile2Image(),
    DetectionResize(
        (512, 512), False, interpolation=cv2.INTER_LINEAR),
    DetectionNormalize([123.675, 116.28, 103.53], [58.395, 57.12, 57.375], False),
    DetectionTranspose((2,0,1))
])
# Draws predicted boxes using the class names in label_list.txt.
# NOTE(review): postprocess is constructed but never applied in this script.
postprocess = RCNNPostprocess("label_list.txt", "output")
# Connect to the local Paddle Serving detection endpoint.
client = Client()
client.load_client_config("serving_client/serving_client_conf.prototxt")
client.connect(['127.0.0.1:9494'])
# The image path is taken from the first command-line argument.
im, im_info = preprocess(sys.argv[1])
fetch_map = client.predict(
    feed={
        "image": im,
        # model expects the spatial H, W of the preprocessed CHW image
        "im_shape": np.array(list(im.shape[1:])).reshape(-1),
        "scale_factor": im_info['scale_factor'],
    },
    fetch=["save_infer_model/scale_0.tmp_1"],
    batch=False)
print(fetch_map)
| 32.5 | 86 | 0.718395 |
import sys
import numpy as np
from paddle_serving_client import Client
from paddle_serving_app.reader import *
import cv2
preprocess = DetectionSequential([
DetectionFile2Image(),
DetectionResize(
(512, 512), False, interpolation=cv2.INTER_LINEAR),
DetectionNormalize([123.675, 116.28, 103.53], [58.395, 57.12, 57.375], False),
DetectionTranspose((2,0,1))
])
postprocess = RCNNPostprocess("label_list.txt", "output")
client = Client()
client.load_client_config("serving_client/serving_client_conf.prototxt")
client.connect(['127.0.0.1:9494'])
im, im_info = preprocess(sys.argv[1])
fetch_map = client.predict(
feed={
"image": im,
"im_shape": np.array(list(im.shape[1:])).reshape(-1),
"scale_factor": im_info['scale_factor'],
},
fetch=["save_infer_model/scale_0.tmp_1"],
batch=False)
print(fetch_map)
| true | true |
f735c02fc48a16ad8263f2a86d856d6bf5e86980 | 58 | py | Python | zad4_2.py | kamilhabrych/python-semestr5-lista4 | 8370b187130cd9cfef51d36459a9f3b49cefa949 | [
"MIT"
] | null | null | null | zad4_2.py | kamilhabrych/python-semestr5-lista4 | 8370b187130cd9cfef51d36459a9f3b49cefa949 | [
"MIT"
] | null | null | null | zad4_2.py | kamilhabrych/python-semestr5-lista4 | 8370b187130cd9cfef51d36459a9f3b49cefa949 | [
"MIT"
] | null | null | null | napis = str(input("Podaj napis:"))
print(napis.count('a')) | 29 | 34 | 0.672414 | napis = str(input("Podaj napis:"))
print(napis.count('a')) | true | true |
f735c16494901b088e2dd2495a483bc7fb16638c | 2,202 | py | Python | src/mail.py | ishritam/Company-info-auto-mail-sender | 27427f8e126386f1a202dda2addba70f74c322a1 | [
"MIT"
] | null | null | null | src/mail.py | ishritam/Company-info-auto-mail-sender | 27427f8e126386f1a202dda2addba70f74c322a1 | [
"MIT"
] | null | null | null | src/mail.py | ishritam/Company-info-auto-mail-sender | 27427f8e126386f1a202dda2addba70f74c322a1 | [
"MIT"
] | null | null | null | import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
from datetime import datetime
import logging
import json
# Load the global configuration once at import time; config.json must live
# in the working directory and contain a top-level "params" object.
with open('config.json', 'r') as config:
    params = json.load(config)["params"]
# directory where the daily log files are written
log_dir = params["log_dir"]
def send_mail(Mail_sender_id, Mail_sender_password, receiver_mail_id, message_to_send, company, file):
    """Email ``message_to_send`` with ``file`` attached via Gmail SMTP.

    :param Mail_sender_id: Gmail address used as the sender/login.
    :param Mail_sender_password: password (or app password) for the sender.
    :param receiver_mail_id: destination email address.
    :param message_to_send: plain-text body of the email.
    :param company: company name, interpolated into the subject line.
    :param file: path of the file to attach.

    Errors are not raised: any failure is printed and logged.
    """
    try:
        # the mail addresses and password
        sender_address = Mail_sender_id
        sender_pass = Mail_sender_password
        receiver_address = receiver_mail_id
        mail_content = message_to_send
        # set up the MIME envelope
        message = MIMEMultipart()
        message['From'] = sender_address
        message['To'] = receiver_address
        # the subject line
        message['Subject'] = f'Information regarding {company}'
        # the body and the attachment for the mail
        message.attach(MIMEText(mail_content, 'plain'))
        attach_file_name = file
        # BUGFIX: the standard binary MIME subtype is 'octet-stream' (the
        # original used the misspelled 'octate-stream'); also read the file
        # with `with` so the handle is closed (the original leaked it)
        payload = MIMEBase('application', 'octet-stream')
        with open(attach_file_name, 'rb') as attach_file:
            payload.set_payload(attach_file.read())
        encoders.encode_base64(payload)  # encode the attachment
        # BUGFIX: the header is 'Content-Disposition'; the original's
        # misspelled 'Content-Decomposition' header meant mail clients would
        # not recognize the part as a named attachment
        payload.add_header('Content-Disposition', 'attachment', filename=attach_file_name)
        message.attach(payload)
        # create SMTP session for sending the mail
        session = smtplib.SMTP('smtp.gmail.com', 587)  # use gmail with port
        session.starttls()  # enable security
        session.login(sender_address, sender_pass)  # login with mail_id and password
        text = message.as_string()
        session.sendmail(sender_address, receiver_address, text)
        session.quit()
        # NOTE: basicConfig only takes effect on the first call per process;
        # later calls (e.g. a different day's filename) are no-ops
        logging.basicConfig(filename=log_dir + 'clean_log_' + datetime.today().strftime("%d_%m_%Y"), filemode='w', format='%(asctime)s - %(message)s', level=logging.INFO)
        logging.warning('Mail Sent')
        print('Mail Sent')
    except Exception as e:
        # best-effort: report the failure without propagating it
        print(f'Could not send mail : {e}')
        logging.warning(f'Mail could not Sent {e}')
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
from datetime import datetime
import logging
import json
with open('config.json', 'r') as config:
params = json.load(config)["params"]
log_dir = params["log_dir"]
def send_mail(Mail_sender_id, Mail_sender_password, receiver_mail_id, message_to_send, company, file):
try:
sender_address = Mail_sender_id
sender_pass = Mail_sender_password
receiver_address = receiver_mail_id
mail_content = message_to_send
message = MIMEMultipart()
message['From'] = sender_address
message['To'] = receiver_address
message['Subject'] = f'Information regarding {company}'
message.attach(MIMEText(mail_content, 'plain'))
attach_file_name = file
attach_file = open(attach_file_name, 'rb')
payload = MIMEBase('application', 'octate-stream')
payload.set_payload((attach_file).read())
encoders.encode_base64(payload)
payload.add_header('Content-Decomposition', 'attachment', filename=attach_file_name)
message.attach(payload)
session = smtplib.SMTP('smtp.gmail.com', 587)
session.starttls()
session.login(sender_address, sender_pass)
text = message.as_string()
session.sendmail(sender_address, receiver_address, text)
session.quit()
logging.basicConfig(filename=log_dir + 'clean_log_' + datetime.today().strftime("%d_%m_%Y"), filemode='w', format='%(asctime)s - %(message)s', level=logging.INFO)
logging.warning('Mail Sent')
print('Mail Sent')
except Exception as e:
print(f'Could not send mail : {e}')
logging.warning(f'Mail could not Sent {e}') | true | true |
f735c18a5eec3c534465aea56ee3b6ce5c668fe6 | 475 | py | Python | lib/extensions/__init__.py | renebentes/JoomlaProjects | 30c07041b90fc51e4de3fb544b378854f71db2f1 | [
"MIT"
] | 1 | 2015-04-19T10:36:18.000Z | 2015-04-19T10:36:18.000Z | lib/extensions/__init__.py | renebentes/JoomlaProjects | 30c07041b90fc51e4de3fb544b378854f71db2f1 | [
"MIT"
] | null | null | null | lib/extensions/__init__.py | renebentes/JoomlaProjects | 30c07041b90fc51e4de3fb544b378854f71db2f1 | [
"MIT"
] | 1 | 2015-01-14T02:10:55.000Z | 2015-01-14T02:10:55.000Z | # coding: utf-8
import sublime
st_version = int(sublime.version())
if st_version > 3000:
from JoomlaPack.lib.extensions.component import Component
from JoomlaPack.lib.extensions.package import Package
from JoomlaPack.lib.extensions.plugin import Plugin
else:
from lib.extensions.component import Component
from lib.extensions.package import Package
from lib.extensions.plugin import Plugin
__all__ = [
'Component',
'Package',
'Plugin'
]
| 25 | 61 | 0.747368 |
import sublime
st_version = int(sublime.version())
if st_version > 3000:
from JoomlaPack.lib.extensions.component import Component
from JoomlaPack.lib.extensions.package import Package
from JoomlaPack.lib.extensions.plugin import Plugin
else:
from lib.extensions.component import Component
from lib.extensions.package import Package
from lib.extensions.plugin import Plugin
__all__ = [
'Component',
'Package',
'Plugin'
]
| true | true |
f735c3ab5d7a3453ace6640fe7bb19ef260fc045 | 145 | py | Python | kiri_search/__init__.py | kiri-ai/kiri-search | 78a0f78b11b73cca8934054498d5713773d3e93a | [
"Apache-2.0"
] | null | null | null | kiri_search/__init__.py | kiri-ai/kiri-search | 78a0f78b11b73cca8934054498d5713773d3e93a | [
"Apache-2.0"
] | null | null | null | kiri_search/__init__.py | kiri-ai/kiri-search | 78a0f78b11b73cca8934054498d5713773d3e93a | [
"Apache-2.0"
] | null | null | null | from .core import Kiri
from .search import ElasticDocStore, Document, ChunkedDocument, ElasticDocument, ElasticChunkedDocument, InMemoryDocStore
| 48.333333 | 121 | 0.862069 | from .core import Kiri
from .search import ElasticDocStore, Document, ChunkedDocument, ElasticDocument, ElasticChunkedDocument, InMemoryDocStore
| true | true |
f735c414bba1419088affd6eca51ac71316e0dd2 | 2,542 | py | Python | tests/python/gaia-ui-tests/gaiatest/apps/email/regions/new_email.py | dcoloma/gaia | e542540cfa64c31ac8bb71b1a9cf16879fc9d58e | [
"Apache-2.0"
] | 1 | 2017-09-09T12:55:56.000Z | 2017-09-09T12:55:56.000Z | tests/python/gaia-ui-tests/gaiatest/apps/email/regions/new_email.py | dcoloma/gaia | e542540cfa64c31ac8bb71b1a9cf16879fc9d58e | [
"Apache-2.0"
] | null | null | null | tests/python/gaia-ui-tests/gaiatest/apps/email/regions/new_email.py | dcoloma/gaia | e542540cfa64c31ac8bb71b1a9cf16879fc9d58e | [
"Apache-2.0"
] | null | null | null | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from marionette.by import By
from gaiatest.apps.base import Base
class NewEmail(Base):
# Write new email
_view_locator = (By.CSS_SELECTOR, '#cardContainer .card-compose')
_to_locator = (By.CSS_SELECTOR, '#cardContainer .card.center .cmp-to-text.cmp-addr-text')
_cc_locator = (By.CSS_SELECTOR, '#cardContainer .card.center .cmp-cc-text.cmp-addr-text')
_bcc_locator = (By.CSS_SELECTOR, '#cardContainer .card.center .cmp-bcc-text.cmp-addr-text')
_subject_locator = (By.CSS_SELECTOR, '#cardContainer .card.center .cmp-subject-text')
_body_locator = (By.CSS_SELECTOR, '#cardContainer .card.center .cmp-body-text')
_send_locator = (By.CSS_SELECTOR, '#cardContainer .card.center .icon.icon-send')
def __init__(self, marionette):
Base.__init__(self, marionette)
view = self.marionette.find_element(*self._view_locator)
self.wait_for_condition(lambda m: view.location['x'] == 0)
def type_to(self, value):
self.marionette.find_element(*self._to_locator).tap()
self.wait_for_condition(lambda m: self.keyboard.is_displayed())
self.keyboard.send(value)
self.keyboard.dismiss()
def type_cc(self, value):
self.marionette.find_element(*self._cc_locator).tap()
self.wait_for_condition(lambda m: self.keyboard.is_displayed())
self.keyboard.send(value)
self.keyboard.dismiss()
def type_bcc(self, value):
self.marionette.find_element(*self._bcc_locator).tap()
self.wait_for_condition(lambda m: self.keyboard.is_displayed())
self.keyboard.send(value)
self.keyboard.dismiss()
def type_subject(self, value):
self.marionette.find_element(*self._subject_locator).tap()
self.wait_for_condition(lambda m: self.keyboard.is_displayed())
self.keyboard.send(value)
self.keyboard.dismiss()
def type_body(self, value):
self.marionette.find_element(*self._body_locator).tap()
self.wait_for_condition(lambda m: self.keyboard.is_displayed())
self.keyboard.send(value)
self.keyboard.dismiss()
def tap_send(self):
self.marionette.find_element(*self._send_locator).tap()
from gaiatest.apps.email.app import Email
email = Email(self.marionette)
email.wait_for_message_list()
return email
| 41.672131 | 95 | 0.696302 |
from marionette.by import By
from gaiatest.apps.base import Base
class NewEmail(Base):
_view_locator = (By.CSS_SELECTOR, '#cardContainer .card-compose')
_to_locator = (By.CSS_SELECTOR, '#cardContainer .card.center .cmp-to-text.cmp-addr-text')
_cc_locator = (By.CSS_SELECTOR, '#cardContainer .card.center .cmp-cc-text.cmp-addr-text')
_bcc_locator = (By.CSS_SELECTOR, '#cardContainer .card.center .cmp-bcc-text.cmp-addr-text')
_subject_locator = (By.CSS_SELECTOR, '#cardContainer .card.center .cmp-subject-text')
_body_locator = (By.CSS_SELECTOR, '#cardContainer .card.center .cmp-body-text')
_send_locator = (By.CSS_SELECTOR, '#cardContainer .card.center .icon.icon-send')
def __init__(self, marionette):
Base.__init__(self, marionette)
view = self.marionette.find_element(*self._view_locator)
self.wait_for_condition(lambda m: view.location['x'] == 0)
def type_to(self, value):
self.marionette.find_element(*self._to_locator).tap()
self.wait_for_condition(lambda m: self.keyboard.is_displayed())
self.keyboard.send(value)
self.keyboard.dismiss()
def type_cc(self, value):
self.marionette.find_element(*self._cc_locator).tap()
self.wait_for_condition(lambda m: self.keyboard.is_displayed())
self.keyboard.send(value)
self.keyboard.dismiss()
def type_bcc(self, value):
self.marionette.find_element(*self._bcc_locator).tap()
self.wait_for_condition(lambda m: self.keyboard.is_displayed())
self.keyboard.send(value)
self.keyboard.dismiss()
def type_subject(self, value):
self.marionette.find_element(*self._subject_locator).tap()
self.wait_for_condition(lambda m: self.keyboard.is_displayed())
self.keyboard.send(value)
self.keyboard.dismiss()
def type_body(self, value):
self.marionette.find_element(*self._body_locator).tap()
self.wait_for_condition(lambda m: self.keyboard.is_displayed())
self.keyboard.send(value)
self.keyboard.dismiss()
def tap_send(self):
self.marionette.find_element(*self._send_locator).tap()
from gaiatest.apps.email.app import Email
email = Email(self.marionette)
email.wait_for_message_list()
return email
| true | true |
f735c423a5ff8820d689878cd70ec7e4bd7e308f | 386 | py | Python | saltdash/wsgi.py | lincolnloop/saltdash | 4b0268cf9ce0320272f0182f33b046d4ab7766a2 | [
"MIT"
] | 19 | 2018-01-30T00:10:14.000Z | 2021-11-11T10:26:56.000Z | saltdash/wsgi.py | lincolnloop/saltdash | 4b0268cf9ce0320272f0182f33b046d4ab7766a2 | [
"MIT"
] | 118 | 2018-01-26T18:27:47.000Z | 2022-02-10T07:31:20.000Z | saltdash/wsgi.py | lincolnloop/saltdash | 4b0268cf9ce0320272f0182f33b046d4ab7766a2 | [
"MIT"
] | 2 | 2021-07-13T00:32:02.000Z | 2021-11-11T10:27:03.000Z | import sys
from django.conf import settings
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Warm-up wsgi app on import
warmup = application(
{
"REQUEST_METHOD": "GET",
"SERVER_NAME": "127.0.0.1",
"SERVER_PORT": 80,
"PATH_INFO": "/-/alive/",
"wsgi.input": sys.stdin,
},
lambda x, y: None,
)
| 20.315789 | 49 | 0.621762 | import sys
from django.conf import settings
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
warmup = application(
{
"REQUEST_METHOD": "GET",
"SERVER_NAME": "127.0.0.1",
"SERVER_PORT": 80,
"PATH_INFO": "/-/alive/",
"wsgi.input": sys.stdin,
},
lambda x, y: None,
)
| true | true |
f735c693706d55cb0fb635532de5320536fb224e | 2,281 | py | Python | test/unit/tool_util/toolbox/test_watcher.py | beatrizserrano/galaxy | e149d9d32e1bca6c07c38b1a9cdabfee60323610 | [
"CC-BY-3.0"
] | null | null | null | test/unit/tool_util/toolbox/test_watcher.py | beatrizserrano/galaxy | e149d9d32e1bca6c07c38b1a9cdabfee60323610 | [
"CC-BY-3.0"
] | 6 | 2021-11-11T20:57:49.000Z | 2021-12-10T15:30:33.000Z | test/unit/tool_util/toolbox/test_watcher.py | beatrizserrano/galaxy | e149d9d32e1bca6c07c38b1a9cdabfee60323610 | [
"CC-BY-3.0"
] | null | null | null | import tempfile
import time
from contextlib import contextmanager
from os import path
from shutil import rmtree
import pytest
from galaxy.tool_util.toolbox import watcher
from galaxy.util import bunch
@pytest.mark.skipif(not watcher.can_watch, reason="watchdog not available")
def test_watcher():
with __test_directory() as t:
tool_path = path.join(t, "test.xml")
toolbox = Toolbox()
with open(tool_path, "w") as f:
f.write("a")
tool_watcher = watcher.get_tool_watcher(toolbox, bunch.Bunch(watch_tools=True))
tool_watcher.start()
tool_watcher.watch_file(tool_path, "cool_tool")
time.sleep(2)
assert not toolbox.was_reloaded("cool_tool")
with open(tool_path, "w") as f:
f.write("b")
wait_for_reload(lambda: toolbox.was_reloaded("cool_tool"))
tool_watcher.shutdown()
assert tool_watcher.observer is None
@pytest.mark.skipif(not watcher.can_watch, reason="watchdog not available")
def test_tool_conf_watcher():
callback = CallbackRecorder()
conf_watcher = watcher.get_tool_conf_watcher(callback.call)
conf_watcher.start()
with __test_directory() as t:
tool_conf_path = path.join(t, "test_conf.xml")
with open(tool_conf_path, "w") as f:
f.write("a")
conf_watcher.watch_file(tool_conf_path)
time.sleep(2)
with open(tool_conf_path, "w") as f:
f.write("b")
wait_for_reload(lambda: callback.called)
conf_watcher.shutdown()
assert conf_watcher.thread is None
def wait_for_reload(check):
reloaded = False
for _ in range(10):
reloaded = check()
if reloaded:
break
time.sleep(0.2)
assert reloaded
class Toolbox:
def __init__(self):
self.reloaded = {}
def reload_tool_by_id(self, tool_id):
self.reloaded[tool_id] = True
def was_reloaded(self, tool_id):
return self.reloaded.get(tool_id, False)
class CallbackRecorder:
def __init__(self):
self.called = False
def call(self):
self.called = True
@contextmanager
def __test_directory():
base_path = tempfile.mkdtemp()
try:
yield base_path
finally:
rmtree(base_path)
| 26.218391 | 87 | 0.65673 | import tempfile
import time
from contextlib import contextmanager
from os import path
from shutil import rmtree
import pytest
from galaxy.tool_util.toolbox import watcher
from galaxy.util import bunch
@pytest.mark.skipif(not watcher.can_watch, reason="watchdog not available")
def test_watcher():
with __test_directory() as t:
tool_path = path.join(t, "test.xml")
toolbox = Toolbox()
with open(tool_path, "w") as f:
f.write("a")
tool_watcher = watcher.get_tool_watcher(toolbox, bunch.Bunch(watch_tools=True))
tool_watcher.start()
tool_watcher.watch_file(tool_path, "cool_tool")
time.sleep(2)
assert not toolbox.was_reloaded("cool_tool")
with open(tool_path, "w") as f:
f.write("b")
wait_for_reload(lambda: toolbox.was_reloaded("cool_tool"))
tool_watcher.shutdown()
assert tool_watcher.observer is None
@pytest.mark.skipif(not watcher.can_watch, reason="watchdog not available")
def test_tool_conf_watcher():
callback = CallbackRecorder()
conf_watcher = watcher.get_tool_conf_watcher(callback.call)
conf_watcher.start()
with __test_directory() as t:
tool_conf_path = path.join(t, "test_conf.xml")
with open(tool_conf_path, "w") as f:
f.write("a")
conf_watcher.watch_file(tool_conf_path)
time.sleep(2)
with open(tool_conf_path, "w") as f:
f.write("b")
wait_for_reload(lambda: callback.called)
conf_watcher.shutdown()
assert conf_watcher.thread is None
def wait_for_reload(check):
reloaded = False
for _ in range(10):
reloaded = check()
if reloaded:
break
time.sleep(0.2)
assert reloaded
class Toolbox:
def __init__(self):
self.reloaded = {}
def reload_tool_by_id(self, tool_id):
self.reloaded[tool_id] = True
def was_reloaded(self, tool_id):
return self.reloaded.get(tool_id, False)
class CallbackRecorder:
def __init__(self):
self.called = False
def call(self):
self.called = True
@contextmanager
def __test_directory():
base_path = tempfile.mkdtemp()
try:
yield base_path
finally:
rmtree(base_path)
| true | true |
f735c714304c3b5527411ae0ba5ef0803ae3852d | 196 | py | Python | sbsearch/__init__.py | mkelley/sbsearch | 7569e473cf0b8f5df3cfc7332ae4287a780d28d2 | [
"BSD-3-Clause"
] | null | null | null | sbsearch/__init__.py | mkelley/sbsearch | 7569e473cf0b8f5df3cfc7332ae4287a780d28d2 | [
"BSD-3-Clause"
] | null | null | null | sbsearch/__init__.py | mkelley/sbsearch | 7569e473cf0b8f5df3cfc7332ae4287a780d28d2 | [
"BSD-3-Clause"
] | null | null | null | # Licensed with the 3-clause BSD license. See LICENSE for details.
try:
from .version import version as __version__
except ImportError:
__version__ = ""
from .sbsearch import * # noqa
| 21.777778 | 67 | 0.72449 |
try:
from .version import version as __version__
except ImportError:
__version__ = ""
from .sbsearch import *
| true | true |
f735c73fa8542b9c0faec59fb6dc99f1bca85d50 | 304 | py | Python | doc/conf.py | tombaker/mklists_old | cf3ca814cf2cfc785a8cdbddd33162b9ee658570 | [
"MIT"
] | 1 | 2021-07-02T03:41:57.000Z | 2021-07-02T03:41:57.000Z | doc/conf.py | tombaker/mklists_old | cf3ca814cf2cfc785a8cdbddd33162b9ee658570 | [
"MIT"
] | null | null | null | doc/conf.py | tombaker/mklists_old | cf3ca814cf2cfc785a8cdbddd33162b9ee658570 | [
"MIT"
] | null | null | null | project = "mklists"
copyright = "2020, Tom Baker"
author = "Tom Baker"
release = "0.2"
extensions = []
templates_path = ["_templates"]
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
html_theme = "default"
github_project_url = "https://github.com/tombaker/mklists"
html_static_path = ["_static"]
| 27.636364 | 58 | 0.713816 | project = "mklists"
copyright = "2020, Tom Baker"
author = "Tom Baker"
release = "0.2"
extensions = []
templates_path = ["_templates"]
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
html_theme = "default"
github_project_url = "https://github.com/tombaker/mklists"
html_static_path = ["_static"]
| true | true |
f735c80dafefca62fbe4588f2fa52f2ceed09235 | 57,744 | py | Python | databricks/koalas/base.py | igorlucci/koalas | 8803344d620261981003175bd1edc3c4120b84e2 | [
"Apache-2.0"
] | 1 | 2021-07-16T20:32:32.000Z | 2021-07-16T20:32:32.000Z | databricks/koalas/base.py | igorlucci/koalas | 8803344d620261981003175bd1edc3c4120b84e2 | [
"Apache-2.0"
] | null | null | null | databricks/koalas/base.py | igorlucci/koalas | 8803344d620261981003175bd1edc3c4120b84e2 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Base and utility classes for Koalas objects.
"""
from abc import ABCMeta, abstractmethod
import datetime
from functools import wraps, partial
from typing import Any, Callable, Tuple, Union, cast, TYPE_CHECKING
import warnings
import numpy as np
import pandas as pd # noqa: F401
from pandas.api.types import is_list_like
from pyspark import sql as spark
from pyspark.sql import functions as F, Window, Column
from pyspark.sql.types import (
BooleanType,
DateType,
DoubleType,
FloatType,
IntegralType,
LongType,
StringType,
TimestampType,
)
from databricks import koalas as ks # For running doctests and reference resolution in PyCharm.
from databricks.koalas import numpy_compat
from databricks.koalas.config import get_option, option_context
from databricks.koalas.internal import (
InternalFrame,
DEFAULT_SERIES_NAME,
NATURAL_ORDER_COLUMN_NAME,
SPARK_DEFAULT_INDEX_NAME,
)
from databricks.koalas.spark import functions as SF
from databricks.koalas.spark.accessors import SparkIndexOpsMethods
from databricks.koalas.typedef import as_spark_type, spark_type_to_pandas_dtype
from databricks.koalas.utils import (
combine_frames,
same_anchor,
scol_for,
validate_axis,
ERROR_MESSAGE_CANNOT_COMBINE,
)
from databricks.koalas.frame import DataFrame
if TYPE_CHECKING:
from databricks.koalas.indexes import Index
from databricks.koalas.series import Series
def should_alignment_for_column_op(self: "IndexOpsMixin", other: "IndexOpsMixin") -> bool:
from databricks.koalas.series import Series
if isinstance(self, Series) and isinstance(other, Series):
return not same_anchor(self, other)
else:
return self._internal.spark_frame is not other._internal.spark_frame
def align_diff_index_ops(func, this_index_ops: "IndexOpsMixin", *args) -> "IndexOpsMixin":
"""
Align the `IndexOpsMixin` objects and apply the function.
Parameters
----------
func : The function to apply
this_index_ops : IndexOpsMixin
A base `IndexOpsMixin` object
args : list of other arguments including other `IndexOpsMixin` objects
Returns
-------
`Index` if all `this_index_ops` and arguments are `Index`; otherwise `Series`
"""
from databricks.koalas.indexes import Index
from databricks.koalas.series import Series, first_series
cols = [arg for arg in args if isinstance(arg, IndexOpsMixin)]
if isinstance(this_index_ops, Series) and all(isinstance(col, Series) for col in cols):
combined = combine_frames(this_index_ops.to_frame(), *cols, how="full")
return column_op(func)(
combined["this"]._kser_for(combined["this"]._internal.column_labels[0]),
*[
combined["that"]._kser_for(label)
for label in combined["that"]._internal.column_labels
]
)
else:
# This could cause as many counts, reset_index calls, joins for combining
# as the number of `Index`s in `args`. So far it's fine since we can assume the ops
# only work between at most two `Index`s. We might need to fix it in the future.
self_len = len(this_index_ops)
if any(len(col) != self_len for col in args if isinstance(col, IndexOpsMixin)):
raise ValueError("operands could not be broadcast together with shapes")
with option_context("compute.default_index_type", "distributed-sequence"):
if isinstance(this_index_ops, Index) and all(isinstance(col, Index) for col in cols):
return (
cast(
Series,
column_op(func)(
this_index_ops.to_series().reset_index(drop=True),
*[
arg.to_series().reset_index(drop=True)
if isinstance(arg, Index)
else arg
for arg in args
]
),
)
.sort_index()
.to_frame(DEFAULT_SERIES_NAME)
.set_index(DEFAULT_SERIES_NAME)
.index.rename(this_index_ops.name)
)
elif isinstance(this_index_ops, Series):
this = this_index_ops.reset_index()
that = [
cast(Series, col.to_series() if isinstance(col, Index) else col).reset_index(
drop=True
)
for col in cols
]
combined = combine_frames(this, *that, how="full").sort_index()
combined = combined.set_index(
combined._internal.column_labels[: this_index_ops._internal.index_level]
)
combined.index.names = this_index_ops._internal.index_names
return column_op(func)(
first_series(combined["this"]),
*[
combined["that"]._kser_for(label)
for label in combined["that"]._internal.column_labels
]
)
else:
this = cast(Index, this_index_ops).to_frame().reset_index(drop=True)
that_series = next(col for col in cols if isinstance(col, Series))
that_frame = that_series._kdf[
[col.to_series() if isinstance(col, Index) else col for col in cols]
]
combined = combine_frames(this, that_frame.reset_index()).sort_index()
self_index = (
combined["this"].set_index(combined["this"]._internal.column_labels).index
)
other = combined["that"].set_index(
combined["that"]._internal.column_labels[: that_series._internal.index_level]
)
other.index.names = that_series._internal.index_names
return column_op(func)(
self_index, *[other._kser_for(label) for label in other._internal.column_labels]
)
def booleanize_null(left_scol, scol, f) -> Column:
"""
Booleanize Null in Spark Column
"""
comp_ops = [
getattr(Column, "__{}__".format(comp_op))
for comp_op in ["eq", "ne", "lt", "le", "ge", "gt"]
]
if f in comp_ops:
# if `f` is "!=", fill null with True otherwise False
filler = f == Column.__ne__
scol = F.when(scol.isNull(), filler).otherwise(scol)
elif f == Column.__or__:
scol = F.when(left_scol.isNull() | scol.isNull(), False).otherwise(scol)
elif f == Column.__and__:
scol = F.when(scol.isNull(), False).otherwise(scol)
return scol
def column_op(f):
"""
A decorator that wraps APIs taking/returning Spark Column so that Koalas Series can be
supported too. If this decorator is used for the `f` function that takes Spark Column and
returns Spark Column, decorated `f` takes Koalas Series as well and returns Koalas
Series.
:param f: a function that takes Spark Column and returns Spark Column.
:param self: Koalas Series
:param args: arguments that the function `f` takes.
"""
@wraps(f)
def wrapper(self, *args):
from databricks.koalas.series import Series
# It is possible for the function `f` takes other arguments than Spark Column.
# To cover this case, explicitly check if the argument is Koalas Series and
# extract Spark Column. For other arguments, they are used as are.
cols = [arg for arg in args if isinstance(arg, IndexOpsMixin)]
if all(not should_alignment_for_column_op(self, col) for col in cols):
# Same DataFrame anchors
args = [arg.spark.column if isinstance(arg, IndexOpsMixin) else arg for arg in args]
scol = f(self.spark.column, *args)
scol = booleanize_null(self.spark.column, scol, f)
if isinstance(self, Series) or not any(isinstance(col, Series) for col in cols):
index_ops = self._with_new_scol(scol)
else:
kser = next(col for col in cols if isinstance(col, Series))
index_ops = kser._with_new_scol(scol)
elif get_option("compute.ops_on_diff_frames"):
index_ops = align_diff_index_ops(f, self, *args)
else:
raise ValueError(ERROR_MESSAGE_CANNOT_COMBINE)
if not all(self.name == col.name for col in cols):
index_ops = index_ops.rename(None)
return index_ops
return wrapper
def numpy_column_op(f):
@wraps(f)
def wrapper(self, *args):
# PySpark does not support NumPy type out of the box. For now, we convert NumPy types
# into some primitive types understandable in PySpark.
new_args = []
for arg in args:
# TODO: This is a quick hack to support NumPy type. We should revisit this.
if isinstance(self.spark.data_type, LongType) and isinstance(arg, np.timedelta64):
new_args.append(float(arg / np.timedelta64(1, "s")))
else:
new_args.append(arg)
return column_op(f)(self, *new_args)
return wrapper
class IndexOpsMixin(object, metaclass=ABCMeta):
"""common ops mixin to support a unified interface / docs for Series / Index
Assuming there are following attributes or properties and function.
"""
@property
@abstractmethod
def _internal(self) -> InternalFrame:
pass
@property
@abstractmethod
def _kdf(self) -> DataFrame:
pass
@abstractmethod
def _with_new_scol(self, scol: spark.Column):
pass
@property
@abstractmethod
def _column_label(self) -> Tuple:
pass
@property
@abstractmethod
def spark(self) -> SparkIndexOpsMethods:
pass
@property
def spark_column(self) -> Column:
warnings.warn(
"Series.spark_column is deprecated as of Series.spark.column. "
"Please use the API instead.",
FutureWarning,
)
return self.spark.column
spark_column.__doc__ = SparkIndexOpsMethods.column.__doc__
# arithmetic operators
__neg__ = column_op(Column.__neg__)
def __add__(self, other) -> Union["Series", "Index"]:
if not isinstance(self.spark.data_type, StringType) and (
(isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))
or isinstance(other, str)
):
raise TypeError("string addition can only be applied to string series or literals.")
if isinstance(self.spark.data_type, StringType):
# Concatenate string columns
if isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType):
return column_op(F.concat)(self, other)
# Handle df['col'] + 'literal'
elif isinstance(other, str):
return column_op(F.concat)(self, F.lit(other))
else:
raise TypeError("string addition can only be applied to string series or literals.")
else:
return column_op(Column.__add__)(self, other)
def __sub__(self, other) -> Union["Series", "Index"]:
if (
isinstance(self.spark.data_type, StringType)
or (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))
or isinstance(other, str)
):
raise TypeError("substraction can not be applied to string series or literals.")
if isinstance(self.spark.data_type, TimestampType):
# Note that timestamp subtraction casts arguments to integer. This is to mimic pandas's
# behaviors. pandas returns 'timedelta64[ns]' from 'datetime64[ns]'s subtraction.
msg = (
"Note that there is a behavior difference of timestamp subtraction. "
"The timestamp subtraction returns an integer in seconds, "
"whereas pandas returns 'timedelta64[ns]'."
)
if isinstance(other, IndexOpsMixin) and isinstance(
other.spark.data_type, TimestampType
):
warnings.warn(msg, UserWarning)
return self.astype("long") - other.astype("long")
elif isinstance(other, datetime.datetime):
warnings.warn(msg, UserWarning)
return self.astype("long") - F.lit(other).cast(as_spark_type("long"))
else:
raise TypeError("datetime subtraction can only be applied to datetime series.")
elif isinstance(self.spark.data_type, DateType):
# Note that date subtraction casts arguments to integer. This is to mimic pandas's
# behaviors. pandas returns 'timedelta64[ns]' in days from date's subtraction.
msg = (
"Note that there is a behavior difference of date subtraction. "
"The date subtraction returns an integer in days, "
"whereas pandas returns 'timedelta64[ns]'."
)
if isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, DateType):
warnings.warn(msg, UserWarning)
return column_op(F.datediff)(self, other).astype("long")
elif isinstance(other, datetime.date) and not isinstance(other, datetime.datetime):
warnings.warn(msg, UserWarning)
return column_op(F.datediff)(self, F.lit(other)).astype("long")
else:
raise TypeError("date subtraction can only be applied to date series.")
return column_op(Column.__sub__)(self, other)
def __mul__(self, other) -> Union["Series", "Index"]:
if isinstance(other, str):
raise TypeError("multiplication can not be applied to a string literal.")
if (
isinstance(self.spark.data_type, IntegralType)
and isinstance(other, IndexOpsMixin)
and isinstance(other.spark.data_type, StringType)
):
return column_op(SF.repeat)(other, self)
if isinstance(self.spark.data_type, StringType):
if (
isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, IntegralType)
) or isinstance(other, int):
return column_op(SF.repeat)(self, other)
else:
raise TypeError(
"a string series can only be multiplied to an int series or literal"
)
return column_op(Column.__mul__)(self, other)
def __truediv__(self, other) -> Union["Series", "Index"]:
"""
__truediv__ has different behaviour between pandas and PySpark for several cases.
1. When divide np.inf by zero, PySpark returns null whereas pandas returns np.inf
2. When divide positive number by zero, PySpark returns null whereas pandas returns np.inf
3. When divide -np.inf by zero, PySpark returns null whereas pandas returns -np.inf
4. When divide negative number by zero, PySpark returns null whereas pandas returns -np.inf
+-------------------------------------------+
| dividend (divisor: 0) | PySpark | pandas |
|-----------------------|---------|---------|
| np.inf | null | np.inf |
| -np.inf | null | -np.inf |
| 10 | null | np.inf |
| -10 | null | -np.inf |
+-----------------------|---------|---------+
"""
if (
isinstance(self.spark.data_type, StringType)
or (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))
or isinstance(other, str)
):
raise TypeError("division can not be applied on string series or literals.")
def truediv(left, right):
return F.when(F.lit(right != 0) | F.lit(right).isNull(), left.__div__(right)).otherwise(
F.when(F.lit(left == np.inf) | F.lit(left == -np.inf), left).otherwise(
F.lit(np.inf).__div__(left)
)
)
return numpy_column_op(truediv)(self, other)
def __mod__(self, other) -> Union["Series", "Index"]:
if (
isinstance(self.spark.data_type, StringType)
or (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))
or isinstance(other, str)
):
raise TypeError("modulo can not be applied on string series or literals.")
def mod(left, right):
return ((left % right) + right) % right
return column_op(mod)(self, other)
def __radd__(self, other) -> Union["Series", "Index"]:
# Handle 'literal' + df['col']
if not isinstance(self.spark.data_type, StringType) and isinstance(other, str):
raise TypeError("string addition can only be applied to string series or literals.")
if isinstance(self.spark.data_type, StringType):
if isinstance(other, str):
return self._with_new_scol(F.concat(F.lit(other), self.spark.column))
else:
raise TypeError("string addition can only be applied to string series or literals.")
else:
return column_op(Column.__radd__)(self, other)
def __rsub__(self, other) -> Union["Series", "Index"]:
if isinstance(self.spark.data_type, StringType) or isinstance(other, str):
raise TypeError("substraction can not be applied to string series or literals.")
if isinstance(self.spark.data_type, TimestampType):
# Note that timestamp subtraction casts arguments to integer. This is to mimic pandas's
# behaviors. pandas returns 'timedelta64[ns]' from 'datetime64[ns]'s subtraction.
msg = (
"Note that there is a behavior difference of timestamp subtraction. "
"The timestamp subtraction returns an integer in seconds, "
"whereas pandas returns 'timedelta64[ns]'."
)
if isinstance(other, datetime.datetime):
warnings.warn(msg, UserWarning)
return -(self.astype("long") - F.lit(other).cast(as_spark_type("long")))
else:
raise TypeError("datetime subtraction can only be applied to datetime series.")
elif isinstance(self.spark.data_type, DateType):
# Note that date subtraction casts arguments to integer. This is to mimic pandas's
# behaviors. pandas returns 'timedelta64[ns]' in days from date's subtraction.
msg = (
"Note that there is a behavior difference of date subtraction. "
"The date subtraction returns an integer in days, "
"whereas pandas returns 'timedelta64[ns]'."
)
if isinstance(other, datetime.date) and not isinstance(other, datetime.datetime):
warnings.warn(msg, UserWarning)
return -column_op(F.datediff)(self, F.lit(other)).astype("long")
else:
raise TypeError("date subtraction can only be applied to date series.")
return column_op(Column.__rsub__)(self, other)
def __rmul__(self, other) -> Union["Series", "Index"]:
if isinstance(other, str):
raise TypeError("multiplication can not be applied to a string literal.")
if isinstance(self.spark.data_type, StringType):
if isinstance(other, int):
return column_op(SF.repeat)(self, other)
else:
raise TypeError(
"a string series can only be multiplied to an int series or literal"
)
return column_op(Column.__rmul__)(self, other)
def __rtruediv__(self, other) -> Union["Series", "Index"]:
if isinstance(self.spark.data_type, StringType) or isinstance(other, str):
raise TypeError("division can not be applied on string series or literals.")
def rtruediv(left, right):
return F.when(left == 0, F.lit(np.inf).__div__(right)).otherwise(
F.lit(right).__truediv__(left)
)
return numpy_column_op(rtruediv)(self, other)
def __floordiv__(self, other) -> Union["Series", "Index"]:
"""
__floordiv__ has different behaviour between pandas and PySpark for several cases.
1. When divide np.inf by zero, PySpark returns null whereas pandas returns np.inf
2. When divide positive number by zero, PySpark returns null whereas pandas returns np.inf
3. When divide -np.inf by zero, PySpark returns null whereas pandas returns -np.inf
4. When divide negative number by zero, PySpark returns null whereas pandas returns -np.inf
+-------------------------------------------+
| dividend (divisor: 0) | PySpark | pandas |
|-----------------------|---------|---------|
| np.inf | null | np.inf |
| -np.inf | null | -np.inf |
| 10 | null | np.inf |
| -10 | null | -np.inf |
+-----------------------|---------|---------+
"""
if (
isinstance(self.spark.data_type, StringType)
or (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))
or isinstance(other, str)
):
raise TypeError("division can not be applied on string series or literals.")
def floordiv(left, right):
return F.when(F.lit(right is np.nan), np.nan).otherwise(
F.when(
F.lit(right != 0) | F.lit(right).isNull(), F.floor(left.__div__(right))
).otherwise(
F.when(F.lit(left == np.inf) | F.lit(left == -np.inf), left).otherwise(
F.lit(np.inf).__div__(left)
)
)
)
return numpy_column_op(floordiv)(self, other)
def __rfloordiv__(self, other) -> Union["Series", "Index"]:
    """Reflected floor division (``other // self``), mimicking pandas semantics."""
    if isinstance(self.spark.data_type, StringType) or isinstance(other, str):
        raise TypeError("division can not be applied on string series or literals.")

    def rfloordiv(left, right):
        # A zero divisor yields +inf as in pandas, not Spark's null.
        # NOTE(review): `F.lit(left) == np.nan` relies on Spark's comparison
        # semantics for NaN literals — presumably intended as a NaN check;
        # confirm it behaves as expected before restructuring.
        return F.when(F.lit(left == 0), F.lit(np.inf).__div__(right)).otherwise(
            F.when(F.lit(left) == np.nan, np.nan).otherwise(F.floor(F.lit(right).__div__(left)))
        )

    return numpy_column_op(rfloordiv)(self, other)
def __rmod__(self, other) -> Union["Series", "Index"]:
    """Reflected modulo (``other % self``) with pandas' sign convention."""
    if isinstance(self.spark.data_type, StringType) or isinstance(other, str):
        raise TypeError("modulo can not be applied on string series or literals.")

    def _rmod(divisor, dividend):
        # ((a % n) + n) % n makes the result's sign follow the divisor,
        # matching Python/pandas rather than C-style modulo.
        shifted = (dividend % divisor) + divisor
        return shifted % divisor

    return column_op(_rmod)(self, other)
# Remaining arithmetic operators delegate straight to the Spark Column.
__pow__ = column_op(Column.__pow__)
__rpow__ = column_op(Column.__rpow__)
__abs__ = column_op(F.abs)

# comparison operators
# (null comparison results are coerced to booleans by column_op via
# booleanize_null, matching pandas where comparisons to NA are False)
__eq__ = column_op(Column.__eq__)
__ne__ = column_op(Column.__ne__)
__lt__ = column_op(Column.__lt__)
__le__ = column_op(Column.__le__)
__ge__ = column_op(Column.__ge__)
__gt__ = column_op(Column.__gt__)

# `and`, `or`, `not` cannot be overloaded in Python,
# so use bitwise operators as boolean operators
__and__ = column_op(Column.__and__)
__or__ = column_op(Column.__or__)
__invert__ = column_op(Column.__invert__)
__rand__ = column_op(Column.__rand__)
__ror__ = column_op(Column.__ror__)
def __len__(self):
    """Length is the row count of the anchoring DataFrame."""
    return self._kdf.__len__()
# NDArray Compat
def __array_ufunc__(self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any):
    """NumPy ufunc protocol hook so NumPy ufuncs (e.g. ``np.abs``) can be
    applied to Koalas objects; raises NotImplementedError when neither the
    dunder-op nor the PySpark-function dispatch can handle the ufunc."""
    # Try dunder methods first.
    result = numpy_compat.maybe_dispatch_ufunc_to_dunder_op(
        self, ufunc, method, *inputs, **kwargs
    )

    # After that, we try with PySpark APIs.
    if result is NotImplemented:
        result = numpy_compat.maybe_dispatch_ufunc_to_spark_func(
            self, ufunc, method, *inputs, **kwargs
        )

    if result is not NotImplemented:
        return result
    else:
        # TODO: support more APIs?
        raise NotImplementedError("Koalas objects currently do not support %s." % ufunc)
@property
def dtype(self) -> np.dtype:
    """Return the dtype object of the underlying data.

    Examples
    --------
    >>> s = ks.Series([1, 2, 3])
    >>> s.dtype
    dtype('int64')

    >>> s = ks.Series(list('abc'))
    >>> s.dtype
    dtype('O')

    >>> s = ks.Series(pd.date_range('20130101', periods=3))
    >>> s.dtype
    dtype('<M8[ns]')

    >>> s.rename("a").to_frame().set_index("a").index.dtype
    dtype('<M8[ns]')
    """
    # Map the column's Spark SQL type onto the equivalent NumPy dtype.
    return spark_type_to_pandas_dtype(self.spark.data_type)
@property
def empty(self) -> bool:
    """
    Returns true if the current object is empty. Otherwise, returns false.

    >>> ks.range(10).id.empty
    False

    >>> ks.range(0).id.empty
    True

    >>> ks.DataFrame({}, index=list('abc')).index.empty
    False
    """
    # Resolve any pending operations first, then test the underlying RDD.
    resolved = self._internal.resolved_copy
    return resolved.spark_frame.rdd.isEmpty()
@property
def hasnans(self) -> bool:
    """
    Return True if it has any missing values. Otherwise, it returns False.

    >>> ks.DataFrame({}, index=list('abc')).index.hasnans
    False

    >>> ks.Series(['a', None]).hasnans
    True

    >>> ks.Series([1.0, 2.0, np.nan]).hasnans
    True

    >>> ks.Series([1, 2, 3]).hasnans
    False

    >>> (ks.Series([1.0, 2.0, np.nan]) + 1).hasnans
    True

    >>> ks.Series([1, 2, 3]).rename("a").to_frame().set_index("a").index.hasnans
    False
    """
    sdf = self._internal.spark_frame
    scol = self.spark.column

    # Floating-point columns can hold NaN in addition to null; all other
    # types only need the null check.
    missing = scol.isNull()
    if isinstance(self.spark.data_type, (DoubleType, FloatType)):
        missing = missing | F.isnan(scol)

    return sdf.select(F.max(missing)).collect()[0][0]
@property
def is_monotonic(self) -> bool:
    """
    Return boolean if values in the object are monotonically increasing.

    .. note:: the current implementation of is_monotonic requires to shuffle
        and aggregate multiple times to check the order locally and globally,
        which is potentially expensive. In case of multi-index, all data are
        transferred to single node which can easily cause out-of-memory error currently.

    Returns
    -------
    is_monotonic : bool

    Examples
    --------
    >>> ser = ks.Series(['1/1/2018', '3/1/2018', '4/1/2018'])
    >>> ser.is_monotonic
    True

    >>> df = ks.DataFrame({'dates': [None, '1/1/2018', '2/1/2018', '3/1/2018']})
    >>> df.dates.is_monotonic
    False

    >>> df.index.is_monotonic
    True

    >>> ser = ks.Series([1])
    >>> ser.is_monotonic
    True

    >>> ser = ks.Series([])
    >>> ser.is_monotonic
    True

    >>> ser.rename("a").to_frame().set_index("a").index.is_monotonic
    True

    >>> ser = ks.Series([5, 4, 3, 2, 1], index=[1, 2, 3, 4, 5])
    >>> ser.is_monotonic
    False

    >>> ser.index.is_monotonic
    True

    Support for MultiIndex

    >>> midx = ks.MultiIndex.from_tuples(
    ... [('x', 'a'), ('x', 'b'), ('y', 'c'), ('y', 'd'), ('z', 'e')])
    >>> midx  # doctest: +SKIP
    MultiIndex([('x', 'a'),
                ('x', 'b'),
                ('y', 'c'),
                ('y', 'd'),
                ('z', 'e')],
               )
    >>> midx.is_monotonic
    True

    >>> midx = ks.MultiIndex.from_tuples(
    ... [('z', 'a'), ('z', 'b'), ('y', 'c'), ('y', 'd'), ('x', 'e')])
    >>> midx  # doctest: +SKIP
    MultiIndex([('z', 'a'),
                ('z', 'b'),
                ('y', 'c'),
                ('y', 'd'),
                ('x', 'e')],
               )
    >>> midx.is_monotonic
    False
    """
    return self._is_monotonic("increasing")

# pandas-compatible alias: is_monotonic_increasing is the same check.
is_monotonic_increasing = is_monotonic
@property
def is_monotonic_decreasing(self) -> bool:
    """
    Return boolean if values in the object are monotonically decreasing.

    .. note:: the current implementation of is_monotonic_decreasing requires to shuffle
        and aggregate multiple times to check the order locally and globally,
        which is potentially expensive. In case of multi-index, all data are transferred
        to single node which can easily cause out-of-memory error currently.

    Returns
    -------
    is_monotonic : bool

    Examples
    --------
    >>> ser = ks.Series(['4/1/2018', '3/1/2018', '1/1/2018'])
    >>> ser.is_monotonic_decreasing
    True

    >>> df = ks.DataFrame({'dates': [None, '3/1/2018', '2/1/2018', '1/1/2018']})
    >>> df.dates.is_monotonic_decreasing
    False

    >>> df.index.is_monotonic_decreasing
    False

    >>> ser = ks.Series([1])
    >>> ser.is_monotonic_decreasing
    True

    >>> ser = ks.Series([])
    >>> ser.is_monotonic_decreasing
    True

    >>> ser.rename("a").to_frame().set_index("a").index.is_monotonic_decreasing
    True

    >>> ser = ks.Series([5, 4, 3, 2, 1], index=[1, 2, 3, 4, 5])
    >>> ser.is_monotonic_decreasing
    True

    >>> ser.index.is_monotonic_decreasing
    False

    Support for MultiIndex

    >>> midx = ks.MultiIndex.from_tuples(
    ... [('x', 'a'), ('x', 'b'), ('y', 'c'), ('y', 'd'), ('z', 'e')])
    >>> midx  # doctest: +SKIP
    MultiIndex([('x', 'a'),
                ('x', 'b'),
                ('y', 'c'),
                ('y', 'd'),
                ('z', 'e')],
               )
    >>> midx.is_monotonic_decreasing
    False

    >>> midx = ks.MultiIndex.from_tuples(
    ... [('z', 'e'), ('z', 'd'), ('y', 'c'), ('y', 'b'), ('x', 'a')])
    >>> midx  # doctest: +SKIP
    MultiIndex([('z', 'e'),
                ('z', 'd'),
                ('y', 'c'),
                ('y', 'b'),
                ('x', 'a')],
               )
    >>> midx.is_monotonic_decreasing
    True
    """
    return self._is_monotonic("decreasing")
def _is_locally_monotonic_spark_column(self, order):
    """Boolean Spark column: is each row ordered w.r.t. the previous row
    within the same partition. A null current value breaks monotonicity."""
    window = (
        Window.partitionBy(F.col("__partition_id"))
        .orderBy(NATURAL_ORDER_COLUMN_NAME)
        .rowsBetween(-1, -1)
    )

    current = F.col("__origin")
    previous = F.lag(F.col("__origin"), 1).over(window)
    if order == "increasing":
        in_order = current >= previous
    else:
        in_order = current <= previous
    return in_order & current.isNotNull()
def _is_monotonic(self, order):
    """Check global monotonicity in two stages: verify each Spark partition
    locally, then verify the boundaries between partitions using each
    partition's min/max. `order` is "increasing" or "decreasing".
    Returns True for data that is all-null/empty (vacuously monotonic)."""
    assert order in ("increasing", "decreasing")

    sdf = self._internal.spark_frame

    sdf = (
        sdf.select(
            F.spark_partition_id().alias(
                "__partition_id"
            ),  # Make sure we use the same partition id in the whole job.
            F.col(NATURAL_ORDER_COLUMN_NAME),
            self.spark.column.alias("__origin"),
        )
        .select(
            F.col("__partition_id"),
            F.col("__origin"),
            self._is_locally_monotonic_spark_column(order).alias(
                "__comparison_within_partition"
            ),
        )
        .groupby(F.col("__partition_id"))
        .agg(
            F.min(F.col("__origin")).alias("__partition_min"),
            F.max(F.col("__origin")).alias("__partition_max"),
            F.min(F.coalesce(F.col("__comparison_within_partition"), F.lit(True))).alias(
                "__comparison_within_partition"
            ),
        )
    )

    # Now we're windowing the aggregation results without partition specification.
    # The number of rows here will be as the same of partitions, which is expected
    # to be small.
    window = Window.orderBy(F.col("__partition_id")).rowsBetween(-1, -1)
    if order == "increasing":
        comparison_col = F.col("__partition_min") >= F.lag(F.col("__partition_max"), 1).over(
            window
        )
    else:
        comparison_col = F.col("__partition_min") <= F.lag(F.col("__partition_max"), 1).over(
            window
        )

    sdf = sdf.select(
        comparison_col.alias("__comparison_between_partitions"),
        F.col("__comparison_within_partition"),
    )

    # Both the within-partition and between-partition checks must hold;
    # nulls (e.g. first partition has no predecessor) default to True.
    ret = sdf.select(
        F.min(F.coalesce(F.col("__comparison_between_partitions"), F.lit(True)))
        & F.min(F.coalesce(F.col("__comparison_within_partition"), F.lit(True)))
    ).collect()[0][0]
    if ret is None:
        return True
    else:
        return ret
@property
def ndim(self) -> int:
    """
    Return an int representing the number of array dimensions.

    Return 1 for Series / Index / MultiIndex.

    Examples
    --------

    For Series

    >>> s = ks.Series([None, 1, 2, 3, 4], index=[4, 5, 2, 1, 8])
    >>> s.ndim
    1

    For Index

    >>> s.index.ndim
    1

    For MultiIndex

    >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
    ...                       ['speed', 'weight', 'length']],
    ...                      [[0, 0, 0, 1, 1, 1, 2, 2, 2],
    ...                       [1, 1, 1, 1, 1, 2, 1, 2, 2]])
    >>> s = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
    >>> s.index.ndim
    1
    """
    # Series and (Multi)Index are always one-dimensional.
    return 1
def astype(self, dtype) -> Union["Index", "Series"]:
    """
    Cast a Koalas object to a specified dtype ``dtype``.

    Parameters
    ----------
    dtype : data type
        Use a numpy.dtype or Python type to cast entire pandas object to
        the same type.

    Returns
    -------
    casted : same type as caller

    See Also
    --------
    to_datetime : Convert argument to datetime.

    Examples
    --------
    >>> ser = ks.Series([1, 2], dtype='int32')
    >>> ser
    0    1
    1    2
    dtype: int32

    >>> ser.astype('int64')
    0    1
    1    2
    dtype: int64

    >>> ser.rename("a").to_frame().set_index("a").index.astype('int64')
    Int64Index([1, 2], dtype='int64', name='a')
    """
    spark_type = as_spark_type(dtype)
    if not spark_type:
        raise ValueError("Type {} not understood".format(dtype))
    if isinstance(spark_type, BooleanType):
        if isinstance(self.spark.data_type, StringType):
            # Strings: null -> False, otherwise truthiness of non-empty string.
            scol = F.when(self.spark.column.isNull(), F.lit(False)).otherwise(
                F.length(self.spark.column) > 0
            )
        elif isinstance(self.spark.data_type, (FloatType, DoubleType)):
            # Floats: null/NaN -> True (NaN is truthy in pandas' bool cast).
            scol = F.when(
                self.spark.column.isNull() | F.isnan(self.spark.column), F.lit(True)
            ).otherwise(self.spark.column.cast(spark_type))
        else:
            scol = F.when(self.spark.column.isNull(), F.lit(False)).otherwise(
                self.spark.column.cast(spark_type)
            )
    elif isinstance(spark_type, StringType):
        # Nulls cast to string become the literal "None" (via str(None)).
        scol = F.when(self.spark.column.isNull(), str(None)).otherwise(
            self.spark.column.cast(spark_type)
        )
    else:
        scol = self.spark.column.cast(spark_type)
    return self._with_new_scol(scol)
def isin(self, values) -> Union["Series", "Index"]:
    """
    Check whether `values` are contained in Series or Index.

    Return a boolean Series or Index showing whether each element in the Series
    matches an element in the passed sequence of `values` exactly.

    Parameters
    ----------
    values : list or set
        The sequence of values to test.

    Returns
    -------
    isin : Series (bool dtype) or Index (bool dtype)

    Examples
    --------
    >>> s = ks.Series(['lama', 'cow', 'lama', 'beetle', 'lama',
    ...                'hippo'], name='animal')
    >>> s.isin(['cow', 'lama'])
    0     True
    1     True
    2     True
    3    False
    4     True
    5    False
    Name: animal, dtype: bool

    Passing a single string as ``s.isin('lama')`` will raise an error. Use
    a list of one element instead:

    >>> s.isin(['lama'])
    0     True
    1    False
    2     True
    3    False
    4     True
    5    False
    Name: animal, dtype: bool

    >>> s.rename("a").to_frame().set_index("a").index.isin(['lama'])
    Index([True, False, True, False, True, False], dtype='object', name='a')
    """
    if not is_list_like(values):
        raise TypeError(
            "only list-like objects are allowed to be passed"
            " to isin(), you passed a [{values_type}]".format(values_type=type(values).__name__)
        )

    # Delegate membership testing to Spark's Column.isin.
    return self._with_new_scol(self.spark.column.isin(list(values)))
def isnull(self) -> Union["Series", "Index"]:
    """
    Detect missing values.

    Return a boolean same-sized object indicating if the values are NA.
    NA values, such as None or numpy.NaN, gets mapped to True values.
    Everything else gets mapped to False values. Characters such as empty strings '' or
    numpy.inf are not considered NA values
    (unless you set pandas.options.mode.use_inf_as_na = True).

    Returns
    -------
    Series or Index : Mask of bool values for each element in Series
        that indicates whether an element is not an NA value.

    Examples
    --------
    >>> ser = ks.Series([5, 6, np.NaN])
    >>> ser.isna()  # doctest: +NORMALIZE_WHITESPACE
    0    False
    1    False
    2     True
    dtype: bool

    >>> ser.rename("a").to_frame().set_index("a").index.isna()
    Index([False, False, True], dtype='object', name='a')
    """
    from databricks.koalas.indexes import MultiIndex

    if isinstance(self, MultiIndex):
        raise NotImplementedError("isna is not defined for MultiIndex")
    if isinstance(self.spark.data_type, (FloatType, DoubleType)):
        # Floating-point columns can also hold NaN, which counts as missing.
        return self._with_new_scol(self.spark.column.isNull() | F.isnan(self.spark.column))
    else:
        return self._with_new_scol(self.spark.column.isNull())

# pandas-compatible alias.
isna = isnull
def notnull(self) -> Union["Series", "Index"]:
    """
    Detect existing (non-missing) values.

    Return a boolean same-sized object indicating if the values are not NA.
    Non-missing values get mapped to True.
    Characters such as empty strings '' or numpy.inf are not considered NA values
    (unless you set pandas.options.mode.use_inf_as_na = True).
    NA values, such as None or numpy.NaN, get mapped to False values.

    Returns
    -------
    Series or Index : Mask of bool values for each element in Series
        that indicates whether an element is not an NA value.

    Examples
    --------
    Show which entries in a Series are not NA.

    >>> ser = ks.Series([5, 6, np.NaN])
    >>> ser
    0    5.0
    1    6.0
    2    NaN
    dtype: float64

    >>> ser.notna()
    0     True
    1     True
    2    False
    dtype: bool

    >>> ser.rename("a").to_frame().set_index("a").index.notna()
    Index([True, True, False], dtype='object', name='a')
    """
    from databricks.koalas.indexes import MultiIndex

    if isinstance(self, MultiIndex):
        raise NotImplementedError("notna is not defined for MultiIndex")

    # The complement of isnull(); re-apply the name that inversion dropped.
    inverted = ~self.isnull()
    return inverted.rename(self.name)  # type: ignore

# pandas-compatible alias.
notna = notnull
# TODO: axis, skipna, and many arguments should be implemented.
def all(self, axis: Union[int, str] = 0) -> bool:
    """
    Return whether all elements are True.

    Returns True unless there at least one element within a series that is
    False or equivalent (e.g. zero or empty)

    Parameters
    ----------
    axis : {0 or 'index'}, default 0
        Indicate which axis or axes should be reduced.

        * 0 / 'index' : reduce the index, return a Series whose index is the
          original column labels.

    Examples
    --------
    >>> ks.Series([True, True]).all()
    True

    >>> ks.Series([True, False]).all()
    False

    >>> ks.Series([0, 1]).all()
    False

    >>> ks.Series([1, 2, 3]).all()
    True

    >>> ks.Series([True, True, None]).all()
    True

    >>> ks.Series([True, False, None]).all()
    False

    >>> ks.Series([]).all()
    True

    >>> ks.Series([np.nan]).all()
    True

    >>> df = ks.Series([True, False, None]).rename("a").to_frame()
    >>> df.set_index("a").index.all()
    False
    """
    axis = validate_axis(axis)
    if axis != 0:
        raise NotImplementedError('axis should be either 0 or "index" currently.')

    sdf = self._internal.spark_frame.select(self.spark.column)
    col = scol_for(sdf, sdf.columns[0])

    # Note that we're ignoring `None`s here for now.
    # any and every was added as of Spark 3.0
    # ret = sdf.select(F.expr("every(CAST(`%s` AS BOOLEAN))" % sdf.columns[0])).collect()[0][0]
    # Here we use min as its alternative:
    ret = sdf.select(F.min(F.coalesce(col.cast("boolean"), F.lit(True)))).collect()[0][0]
    if ret is None:
        # min over zero rows yields null; an empty series is vacuously all-True.
        return True
    else:
        return ret
# TODO: axis, skipna, and many arguments should be implemented.
def any(self, axis: Union[int, str] = 0) -> bool:
    """
    Return whether any element is True.

    Returns False unless there at least one element within a series that is
    True or equivalent (e.g. non-zero or non-empty).

    Parameters
    ----------
    axis : {0 or 'index'}, default 0
        Indicate which axis or axes should be reduced.

        * 0 / 'index' : reduce the index, return a Series whose index is the
          original column labels.

    Examples
    --------
    >>> ks.Series([False, False]).any()
    False

    >>> ks.Series([True, False]).any()
    True

    >>> ks.Series([0, 0]).any()
    False

    >>> ks.Series([0, 1, 2]).any()
    True

    >>> ks.Series([False, False, None]).any()
    False

    >>> ks.Series([True, False, None]).any()
    True

    >>> ks.Series([]).any()
    False

    >>> ks.Series([np.nan]).any()
    False

    >>> df = ks.Series([True, False, None]).rename("a").to_frame()
    >>> df.set_index("a").index.any()
    True
    """
    axis = validate_axis(axis)
    if axis != 0:
        raise NotImplementedError('axis should be either 0 or "index" currently.')

    sdf = self._internal.spark_frame.select(self.spark.column)
    col = scol_for(sdf, sdf.columns[0])

    # Note that we're ignoring `None`s here for now.
    # any and every was added as of Spark 3.0
    # ret = sdf.select(F.expr("any(CAST(`%s` AS BOOLEAN))" % sdf.columns[0])).collect()[0][0]
    # Here we use max as its alternative:
    ret = sdf.select(F.max(F.coalesce(col.cast("boolean"), F.lit(False)))).collect()[0][0]
    if ret is None:
        # max over zero rows yields null; an empty series has no True element.
        return False
    else:
        return ret
# TODO: add frep and axis parameter
def shift(self, periods=1, fill_value=None) -> Union["Series", "Index"]:
    """
    Shift Series/Index by desired number of periods.

    .. note:: the current implementation of shift uses Spark's Window without
        specifying partition specification. This leads to move all data into
        single partition in single machine and could cause serious
        performance degradation. Avoid this method against very large dataset.

    Parameters
    ----------
    periods : int
        Number of periods to shift. Can be positive or negative.
    fill_value : object, optional
        The scalar value to use for newly introduced missing values.
        The default depends on the dtype of self. For numeric data, np.nan is used.

    Returns
    -------
    Copy of input Series/Index, shifted.

    Examples
    --------
    >>> df = ks.DataFrame({'Col1': [10, 20, 15, 30, 45],
    ...                    'Col2': [13, 23, 18, 33, 48],
    ...                    'Col3': [17, 27, 22, 37, 52]},
    ...                   columns=['Col1', 'Col2', 'Col3'])

    >>> df.Col1.shift(periods=3)
    0     NaN
    1     NaN
    2     NaN
    3    10.0
    4    20.0
    Name: Col1, dtype: float64

    >>> df.Col2.shift(periods=3, fill_value=0)
    0     0
    1     0
    2     0
    3    13
    4    23
    Name: Col2, dtype: int64

    >>> df.index.shift(periods=3, fill_value=0)
    Int64Index([0, 0, 0, 0, 1], dtype='int64')
    """
    # Public entry point; the shared implementation also serves groupby.shift.
    return self._shift(periods, fill_value)
def _shift(self, periods, fill_value, part_cols=()):
    """Shared shift implementation; `part_cols` partitions the window so the
    same code serves groupby-level shifts."""
    if not isinstance(periods, int):
        raise ValueError("periods should be an int; however, got [%s]" % type(periods).__name__)
    col = self.spark.column
    # A window of exactly the row `periods` positions earlier (later for
    # negative periods) in natural order.
    window = (
        Window.partitionBy(*part_cols)
        .orderBy(NATURAL_ORDER_COLUMN_NAME)
        .rowsBetween(-periods, -periods)
    )
    lag_col = F.lag(col, periods).over(window)
    # Rows shifted in from beyond the boundary are null/NaN; replace them
    # with fill_value.
    # NOTE(review): F.isnan is applied regardless of the column's type —
    # presumably only numeric columns reach here; confirm for non-numeric dtypes.
    col = F.when(lag_col.isNull() | F.isnan(lag_col), fill_value).otherwise(lag_col)
    return self._with_new_scol(col)
# TODO: Update Documentation for Bins Parameter when its supported
def value_counts(
    self, normalize=False, sort=True, ascending=False, bins=None, dropna=True
) -> "Series":
    """
    Return a Series containing counts of unique values.
    The resulting object will be in descending order so that the
    first element is the most frequently-occurring element.
    Excludes NA values by default.

    Parameters
    ----------
    normalize : boolean, default False
        If True then the object returned will contain the relative
        frequencies of the unique values.
    sort : boolean, default True
        Sort by values.
    ascending : boolean, default False
        Sort in ascending order.
    bins : Not Yet Supported
    dropna : boolean, default True
        Don't include counts of NaN.

    Returns
    -------
    counts : Series

    See Also
    --------
    Series.count: Number of non-NA elements in a Series.

    Examples
    --------
    For Series

    >>> df = ks.DataFrame({'x':[0, 0, 1, 1, 1, np.nan]})
    >>> df.x.value_counts()  # doctest: +NORMALIZE_WHITESPACE
    1.0    3
    0.0    2
    Name: x, dtype: int64

    With `normalize` set to `True`, returns the relative frequency by
    dividing all values by the sum of values.

    >>> df.x.value_counts(normalize=True)  # doctest: +NORMALIZE_WHITESPACE
    1.0    0.6
    0.0    0.4
    Name: x, dtype: float64

    **dropna**
    With `dropna` set to `False` we can also see NaN index values.

    >>> df.x.value_counts(dropna=False)  # doctest: +NORMALIZE_WHITESPACE
    1.0    3
    0.0    2
    NaN    1
    Name: x, dtype: int64

    For Index

    >>> idx = ks.Index([3, 1, 2, 3, 4, np.nan])
    >>> idx
    Float64Index([3.0, 1.0, 2.0, 3.0, 4.0, nan], dtype='float64')

    >>> idx.value_counts().sort_index()
    1.0    1
    2.0    1
    3.0    2
    4.0    1
    dtype: int64

    **sort**

    With `sort` set to `False`, the result wouldn't be sorted by number of count.

    >>> idx.value_counts(sort=False).sort_index()
    1.0    1
    2.0    1
    3.0    2
    4.0    1
    dtype: int64

    **normalize**

    With `normalize` set to `True`, returns the relative frequency by
    dividing all values by the sum of values.

    >>> idx.value_counts(normalize=True).sort_index()
    1.0    0.2
    2.0    0.2
    3.0    0.4
    4.0    0.2
    dtype: float64

    **dropna**

    With `dropna` set to `False` we can also see NaN index values.

    >>> idx.value_counts(dropna=False).sort_index()  # doctest: +SKIP
    1.0    1
    2.0    1
    3.0    2
    4.0    1
    NaN    1
    dtype: int64

    For MultiIndex.

    >>> midx = pd.MultiIndex([['lama', 'cow', 'falcon'],
    ...                       ['speed', 'weight', 'length']],
    ...                      [[0, 0, 0, 1, 1, 1, 2, 2, 2],
    ...                       [1, 1, 1, 1, 1, 2, 1, 2, 2]])
    >>> s = ks.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
    >>> s.index  # doctest: +SKIP
    MultiIndex([(  'lama', 'weight'),
                (  'lama', 'weight'),
                (  'lama', 'weight'),
                (   'cow', 'weight'),
                (   'cow', 'weight'),
                (   'cow', 'length'),
                ('falcon', 'weight'),
                ('falcon', 'length'),
                ('falcon', 'length')],
               )

    >>> s.index.value_counts().sort_index()
    (cow, length)       1
    (cow, weight)       2
    (falcon, length)    2
    (falcon, weight)    1
    (lama, weight)      3
    dtype: int64

    >>> s.index.value_counts(normalize=True).sort_index()
    (cow, length)       0.111111
    (cow, weight)       0.222222
    (falcon, length)    0.222222
    (falcon, weight)    0.111111
    (lama, weight)      0.333333
    dtype: float64

    If Index has name, keep the name up.

    >>> idx = ks.Index([0, 0, 0, 1, 1, 2, 3], name='koalas')
    >>> idx.value_counts().sort_index()
    0    3
    1    2
    2    1
    3    1
    Name: koalas, dtype: int64
    """
    from databricks.koalas.series import first_series

    if bins is not None:
        raise NotImplementedError("value_counts currently does not support bins")

    if dropna:
        sdf_dropna = self._internal.spark_frame.select(self.spark.column).dropna()
    else:
        sdf_dropna = self._internal.spark_frame.select(self.spark.column)
    index_name = SPARK_DEFAULT_INDEX_NAME
    column_name = self._internal.data_spark_column_names[0]
    # Group by the values themselves; each distinct value becomes the index.
    sdf = sdf_dropna.groupby(scol_for(sdf_dropna, column_name).alias(index_name)).count()
    if sort:
        if ascending:
            sdf = sdf.orderBy(F.col("count"))
        else:
            sdf = sdf.orderBy(F.col("count").desc())

    if normalize:
        # Avoid shadowing the builtin `sum`; this is the total number of rows
        # counted (after dropna, if requested).
        total = sdf_dropna.count()
        sdf = sdf.withColumn("count", F.col("count") / F.lit(total))

    internal = InternalFrame(
        spark_frame=sdf,
        index_spark_columns=[scol_for(sdf, index_name)],
        column_labels=self._internal.column_labels,
        data_spark_columns=[scol_for(sdf, "count")],
        column_label_names=self._internal.column_label_names,
    )

    return first_series(DataFrame(internal))
def nunique(self, dropna: bool = True, approx: bool = False, rsd: float = 0.05) -> int:
    """
    Return number of unique elements in the object.
    Excludes NA values by default.

    Parameters
    ----------
    dropna : bool, default True
        Don't include NaN in the count.
    approx: bool, default False
        If False, will use the exact algorithm and return the exact number of unique.
        If True, it uses the HyperLogLog approximate algorithm, which is significantly faster
        for large amount of data.
        Note: This parameter is specific to Koalas and is not found in pandas.
    rsd: float, default 0.05
        Maximum estimation error allowed in the HyperLogLog algorithm.
        Note: Just like ``approx`` this parameter is specific to Koalas.

    Returns
    -------
    int

    See Also
    --------
    DataFrame.nunique: Method nunique for DataFrame.
    Series.count: Count non-NA/null observations in the Series.

    Examples
    --------
    >>> ks.Series([1, 2, 3, np.nan]).nunique()
    3

    >>> ks.Series([1, 2, 3, np.nan]).nunique(dropna=False)
    4

    On big data, we recommend using the approximate algorithm to speed up this function.
    The result will be very close to the exact unique count.

    >>> ks.Series([1, 2, 3, np.nan]).nunique(approx=True)
    3

    >>> idx = ks.Index([1, 1, 2, None])
    >>> idx
    Float64Index([1.0, 1.0, 2.0, nan], dtype='float64')

    >>> idx.nunique()
    2

    >>> idx.nunique(dropna=False)
    3
    """
    # Build the aggregation expression and run a single Spark job.
    res = self._internal.spark_frame.select([self._nunique(dropna, approx, rsd)])
    return res.collect()[0][0]
def _nunique(self, dropna=True, approx=False, rsd=0.05):
    """Build the Spark aggregation expression counting distinct values of
    this column (exact, or HyperLogLog-approximate when `approx`)."""
    colname = self._internal.data_spark_column_names[0]
    if approx:
        count_fn = partial(F.approx_count_distinct, rsd=rsd)
    else:
        count_fn = F.countDistinct

    if dropna:
        return count_fn(self.spark.column).alias(colname)

    # Distinct counting ignores nulls, so add one when any null is present.
    any_null = F.count(F.when(self.spark.column.isNull(), 1).otherwise(None)) >= 1
    return (count_fn(self.spark.column) + F.when(any_null, 1).otherwise(0)).alias(colname)
def take(self, indices) -> Union["Series", "Index"]:
    """
    Return the elements in the given *positional* indices along an axis.

    This means that we are not indexing according to actual values in
    the index attribute of the object. We are indexing according to the
    actual position of the element in the object.

    Parameters
    ----------
    indices : array-like
        An array of ints indicating which positions to take.

    Returns
    -------
    taken : same type as caller
        An array-like containing the elements taken from the object.

    See Also
    --------
    DataFrame.loc : Select a subset of a DataFrame by labels.
    DataFrame.iloc : Select a subset of a DataFrame by positions.
    numpy.take : Take elements from an array along an axis.

    Examples
    --------
    Series

    >>> kser = ks.Series([100, 200, 300, 400, 500])
    >>> kser
    0    100
    1    200
    2    300
    3    400
    4    500
    dtype: int64

    >>> kser.take([0, 2, 4]).sort_index()
    0    100
    2    300
    4    500
    dtype: int64

    Index

    >>> kidx = ks.Index([100, 200, 300, 400, 500])
    >>> kidx
    Int64Index([100, 200, 300, 400, 500], dtype='int64')

    >>> kidx.take([0, 2, 4]).sort_values()
    Int64Index([100, 300, 500], dtype='int64')

    MultiIndex

    >>> kmidx = ks.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("x", "c")])
    >>> kmidx  # doctest: +SKIP
    MultiIndex([('x', 'a'),
                ('x', 'b'),
                ('x', 'c')],
               )

    >>> kmidx.take([0, 2])  # doctest: +SKIP
    MultiIndex([('x', 'a'),
                ('x', 'c')],
               )
    """
    # dict/set pass is_list_like but have no positional order, so reject them.
    if not is_list_like(indices) or isinstance(indices, (dict, set)):
        raise ValueError("`indices` must be a list-like except dict or set")
    if isinstance(self, ks.Series):
        return cast(ks.Series, self.iloc[indices])
    else:
        # Index/MultiIndex: take positionally via the anchoring frame.
        return self._kdf.iloc[indices].index
# (dataset-extraction residue: per-file statistics "35.1883 | 100 | 0.551988" —
# avg_line_length / max_line_length / alphanum_fraction — not part of the source)
from abc import ABCMeta, abstractmethod
import datetime
from functools import wraps, partial
from typing import Any, Callable, Tuple, Union, cast, TYPE_CHECKING
import warnings
import numpy as np
import pandas as pd
from pandas.api.types import is_list_like
from pyspark import sql as spark
from pyspark.sql import functions as F, Window, Column
from pyspark.sql.types import (
BooleanType,
DateType,
DoubleType,
FloatType,
IntegralType,
LongType,
StringType,
TimestampType,
)
from databricks import koalas as ks
from databricks.koalas import numpy_compat
from databricks.koalas.config import get_option, option_context
from databricks.koalas.internal import (
InternalFrame,
DEFAULT_SERIES_NAME,
NATURAL_ORDER_COLUMN_NAME,
SPARK_DEFAULT_INDEX_NAME,
)
from databricks.koalas.spark import functions as SF
from databricks.koalas.spark.accessors import SparkIndexOpsMethods
from databricks.koalas.typedef import as_spark_type, spark_type_to_pandas_dtype
from databricks.koalas.utils import (
combine_frames,
same_anchor,
scol_for,
validate_axis,
ERROR_MESSAGE_CANNOT_COMBINE,
)
from databricks.koalas.frame import DataFrame
if TYPE_CHECKING:
from databricks.koalas.indexes import Index
from databricks.koalas.series import Series
def should_alignment_for_column_op(self: "IndexOpsMixin", other: "IndexOpsMixin") -> bool:
    """Return whether `self` and `other` need row alignment before a column op."""
    from databricks.koalas.series import Series

    both_series = isinstance(self, Series) and isinstance(other, Series)
    if both_series:
        # Two Series only avoid alignment when anchored to the same DataFrame.
        return not same_anchor(self, other)
    # Otherwise, alignment is needed whenever the Spark frames differ.
    return self._internal.spark_frame is not other._internal.spark_frame
def align_diff_index_ops(func, this_index_ops: "IndexOpsMixin", *args) -> "IndexOpsMixin":
    """Apply `func` column-wise after aligning `this_index_ops` with the
    Series/Index operands in `args` that are anchored to different DataFrames.

    An Index is returned only when every index-ops operand is an Index;
    otherwise the result is a Series.
    """
    from databricks.koalas.indexes import Index
    from databricks.koalas.series import Series, first_series

    cols = [arg for arg in args if isinstance(arg, IndexOpsMixin)]

    if isinstance(this_index_ops, Series) and all(isinstance(col, Series) for col in cols):
        # All-Series case: full outer join of the anchoring frames, then
        # re-apply the op on columns of the single combined frame.
        combined = combine_frames(this_index_ops.to_frame(), *cols, how="full")

        return column_op(func)(
            combined["this"]._kser_for(combined["this"]._internal.column_labels[0]),
            *[
                combined["that"]._kser_for(label)
                for label in combined["that"]._internal.column_labels
            ]
        )
    else:
        # only work between at most two `Index`s. We might need to fix it in the future.
        self_len = len(this_index_ops)
        if any(len(col) != self_len for col in args if isinstance(col, IndexOpsMixin)):
            raise ValueError("operands could not be broadcast together with shapes")

        with option_context("compute.default_index_type", "distributed-sequence"):
            if isinstance(this_index_ops, Index) and all(isinstance(col, Index) for col in cols):
                # All-Index case: align positionally via temporary Series,
                # then rebuild an Index from the result.
                return (
                    cast(
                        Series,
                        column_op(func)(
                            this_index_ops.to_series().reset_index(drop=True),
                            *[
                                arg.to_series().reset_index(drop=True)
                                if isinstance(arg, Index)
                                else arg
                                for arg in args
                            ]
                        ),
                    )
                    .sort_index()
                    .to_frame(DEFAULT_SERIES_NAME)
                    .set_index(DEFAULT_SERIES_NAME)
                    .index.rename(this_index_ops.name)
                )
            elif isinstance(this_index_ops, Series):
                # Series (this) vs. Index operand(s): align positionally,
                # keeping this Series' index on the result.
                this = this_index_ops.reset_index()
                that = [
                    cast(Series, col.to_series() if isinstance(col, Index) else col).reset_index(
                        drop=True
                    )
                    for col in cols
                ]

                combined = combine_frames(this, *that, how="full").sort_index()
                combined = combined.set_index(
                    combined._internal.column_labels[: this_index_ops._internal.index_level]
                )
                combined.index.names = this_index_ops._internal.index_names

                return column_op(func)(
                    first_series(combined["this"]),
                    *[
                        combined["that"]._kser_for(label)
                        for label in combined["that"]._internal.column_labels
                    ]
                )
            else:
                # Index (this) vs. Series operand(s): align positionally; the
                # result carries the Series' index names.
                this = cast(Index, this_index_ops).to_frame().reset_index(drop=True)

                that_series = next(col for col in cols if isinstance(col, Series))
                that_frame = that_series._kdf[
                    [col.to_series() if isinstance(col, Index) else col for col in cols]
                ]

                combined = combine_frames(this, that_frame.reset_index()).sort_index()

                self_index = (
                    combined["this"].set_index(combined["this"]._internal.column_labels).index
                )

                other = combined["that"].set_index(
                    combined["that"]._internal.column_labels[: that_series._internal.index_level]
                )
                other.index.names = that_series._internal.index_names

                return column_op(func)(
                    self_index, *[other._kser_for(label) for label in other._internal.column_labels]
                )
def booleanize_null(left_scol, scol, f) -> Column:
    """Coerce null results of comparison/boolean Spark ops into booleans,
    matching pandas where comparisons against missing values are False
    (True only for ``!=``)."""
    comparison_names = ("eq", "ne", "lt", "le", "ge", "gt")
    comp_ops = [getattr(Column, "__{}__".format(name)) for name in comparison_names]

    if f in comp_ops:
        # if `f` is "!=", fill null with True otherwise False
        filler = f == Column.__ne__
        return F.when(scol.isNull(), filler).otherwise(scol)

    if f == Column.__or__:
        # null | null and null-involving ORs collapse to False.
        return F.when(left_scol.isNull() | scol.isNull(), False).otherwise(scol)
    if f == Column.__and__:
        return F.when(scol.isNull(), False).otherwise(scol)

    return scol
def column_op(f):
    """Decorator turning a Spark-Column operator `f` into one that accepts
    Koalas Series/Index operands, aligning operands anchored to different
    DataFrames when "compute.ops_on_diff_frames" permits it."""

    @wraps(f)
    def wrapper(self, *args):
        from databricks.koalas.series import Series

        # It is possible for the function `f` takes other arguments than Spark Column.
        # To cover this case, explicitly check if the argument is Koalas Series and
        # extract Spark Column. For other arguments, they are used as are.
        cols = [arg for arg in args if isinstance(arg, IndexOpsMixin)]

        if all(not should_alignment_for_column_op(self, col) for col in cols):
            # Same DataFrame anchors
            args = [arg.spark.column if isinstance(arg, IndexOpsMixin) else arg for arg in args]
            scol = f(self.spark.column, *args)
            scol = booleanize_null(self.spark.column, scol, f)

            if isinstance(self, Series) or not any(isinstance(col, Series) for col in cols):
                index_ops = self._with_new_scol(scol)
            else:
                # `self` is an Index but a Series operand exists: anchor the
                # result on that Series.
                kser = next(col for col in cols if isinstance(col, Series))
                index_ops = kser._with_new_scol(scol)
        elif get_option("compute.ops_on_diff_frames"):
            index_ops = align_diff_index_ops(f, self, *args)
        else:
            raise ValueError(ERROR_MESSAGE_CANNOT_COMBINE)

        # pandas drops the name when operand names differ.
        if not all(self.name == col.name for col in cols):
            index_ops = index_ops.rename(None)

        return index_ops

    return wrapper
def numpy_column_op(f):
    """Like :func:`column_op`, but first converts NumPy arguments PySpark
    cannot handle (currently np.timedelta64 against long columns) into plain
    Python values."""

    @wraps(f)
    def wrapper(self, *args):
        # TODO: This is a quick hack to support NumPy type. We should revisit this.
        column_is_long = isinstance(self.spark.data_type, LongType)
        converted = [
            float(arg / np.timedelta64(1, "s"))
            if column_is_long and isinstance(arg, np.timedelta64)
            else arg
            for arg in args
        ]
        return column_op(f)(self, *converted)

    return wrapper
class IndexOpsMixin(object, metaclass=ABCMeta):
    """Mixin of arithmetic/comparison operators and common reductions shared
    between Koalas ``Series`` and ``Index``.

    Subclasses supply the abstract accessors below; every concrete method here
    is implemented purely in terms of Spark columns.
    """

    @property
    @abstractmethod
    def _internal(self) -> InternalFrame:
        # Internal Spark-frame metadata of this object.
        pass

    @property
    @abstractmethod
    def _kdf(self) -> DataFrame:
        # The Koalas DataFrame this Series/Index is anchored to.
        pass

    @abstractmethod
    def _with_new_scol(self, scol: spark.Column):
        # Build a new Series/Index of the same kind backed by `scol`.
        pass

    @property
    @abstractmethod
    def _column_label(self) -> Tuple:
        # The (possibly multi-level) column label of this object.
        pass

    @property
    @abstractmethod
    def spark(self) -> SparkIndexOpsMethods:
        # Accessor exposing Spark-specific views (`.column`, `.data_type`, ...).
        pass

    @property
    def spark_column(self) -> Column:
        # Deprecated alias of `spark.column`; kept for backward compatibility.
        warnings.warn(
            "Series.spark_column is deprecated as of Series.spark.column. "
            "Please use the API instead.",
            FutureWarning,
        )
        return self.spark.column

    spark_column.__doc__ = SparkIndexOpsMethods.column.__doc__

    # arithmetic operators
    __neg__ = column_op(Column.__neg__)

    def __add__(self, other) -> Union["Series", "Index"]:
        """self + other; `+` on string operands means concatenation."""
        if not isinstance(self.spark.data_type, StringType) and (
            (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))
            or isinstance(other, str)
        ):
            raise TypeError("string addition can only be applied to string series or literals.")

        if isinstance(self.spark.data_type, StringType):
            # Concatenate string columns
            if isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType):
                return column_op(F.concat)(self, other)
            # Handle df['col'] + 'literal'
            elif isinstance(other, str):
                return column_op(F.concat)(self, F.lit(other))
            else:
                raise TypeError("string addition can only be applied to string series or literals.")
        else:
            return column_op(Column.__add__)(self, other)

    def __sub__(self, other) -> Union["Series", "Index"]:
        """self - other; timestamp/date subtraction yields integer seconds/days."""
        if (
            isinstance(self.spark.data_type, StringType)
            or (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))
            or isinstance(other, str)
        ):
            raise TypeError("substraction can not be applied to string series or literals.")

        if isinstance(self.spark.data_type, TimestampType):
            # Note that timestamp subtraction casts arguments to integer. This is to mimic pandas's
            # behavior as closely as Spark allows; pandas would return 'timedelta64[ns]'.
            msg = (
                "Note that there is a behavior difference of timestamp subtraction. "
                "The timestamp subtraction returns an integer in seconds, "
                "whereas pandas returns 'timedelta64[ns]'."
            )
            if isinstance(other, IndexOpsMixin) and isinstance(
                other.spark.data_type, TimestampType
            ):
                warnings.warn(msg, UserWarning)
                return self.astype("long") - other.astype("long")
            elif isinstance(other, datetime.datetime):
                warnings.warn(msg, UserWarning)
                return self.astype("long") - F.lit(other).cast(as_spark_type("long"))
            else:
                raise TypeError("datetime subtraction can only be applied to datetime series.")
        elif isinstance(self.spark.data_type, DateType):
            # Date subtraction similarly returns integer days instead of pandas's
            # behaviors. pandas returns 'timedelta64[ns]' in days from date's subtraction.
            msg = (
                "Note that there is a behavior difference of date subtraction. "
                "The date subtraction returns an integer in days, "
                "whereas pandas returns 'timedelta64[ns]'."
            )
            if isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, DateType):
                warnings.warn(msg, UserWarning)
                return column_op(F.datediff)(self, other).astype("long")
            elif isinstance(other, datetime.date) and not isinstance(other, datetime.datetime):
                warnings.warn(msg, UserWarning)
                return column_op(F.datediff)(self, F.lit(other)).astype("long")
            else:
                raise TypeError("date subtraction can only be applied to date series.")
        return column_op(Column.__sub__)(self, other)

    def __mul__(self, other) -> Union["Series", "Index"]:
        """self * other; int * string repeats the string, like pandas."""
        if isinstance(other, str):
            raise TypeError("multiplication can not be applied to a string literal.")

        if (
            isinstance(self.spark.data_type, IntegralType)
            and isinstance(other, IndexOpsMixin)
            and isinstance(other.spark.data_type, StringType)
        ):
            return column_op(SF.repeat)(other, self)

        if isinstance(self.spark.data_type, StringType):
            if (
                isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, IntegralType)
            ) or isinstance(other, int):
                return column_op(SF.repeat)(self, other)
            else:
                raise TypeError(
                    "a string series can only be multiplied to an int series or literal"
                )

        return column_op(Column.__mul__)(self, other)

    def __truediv__(self, other) -> Union["Series", "Index"]:
        """self / other with NumPy-like semantics for division by zero:
        x / 0 -> +/-inf (sign taken from x), and inf stays inf."""
        if (
            isinstance(self.spark.data_type, StringType)
            or (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))
            or isinstance(other, str)
        ):
            raise TypeError("division can not be applied on string series or literals.")

        def truediv(left, right):
            return F.when(F.lit(right != 0) | F.lit(right).isNull(), left.__div__(right)).otherwise(
                F.when(F.lit(left == np.inf) | F.lit(left == -np.inf), left).otherwise(
                    F.lit(np.inf).__div__(left)
                )
            )

        return numpy_column_op(truediv)(self, other)

    def __mod__(self, other) -> Union["Series", "Index"]:
        """self % other; adjusted so the result takes the sign of the divisor,
        matching Python/pandas rather than Spark's C-style modulo."""
        if (
            isinstance(self.spark.data_type, StringType)
            or (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))
            or isinstance(other, str)
        ):
            raise TypeError("modulo can not be applied on string series or literals.")

        def mod(left, right):
            return ((left % right) + right) % right

        return column_op(mod)(self, other)

    def __radd__(self, other) -> Union["Series", "Index"]:
        """other + self; supports 'literal' + df['col'] for strings."""
        # Handle 'literal' + df['col']
        if not isinstance(self.spark.data_type, StringType) and isinstance(other, str):
            raise TypeError("string addition can only be applied to string series or literals.")

        if isinstance(self.spark.data_type, StringType):
            if isinstance(other, str):
                return self._with_new_scol(F.concat(F.lit(other), self.spark.column))
            else:
                raise TypeError("string addition can only be applied to string series or literals.")
        else:
            return column_op(Column.__radd__)(self, other)

    def __rsub__(self, other) -> Union["Series", "Index"]:
        """other - self; see __sub__ for the timestamp/date integer semantics."""
        if isinstance(self.spark.data_type, StringType) or isinstance(other, str):
            raise TypeError("substraction can not be applied to string series or literals.")

        if isinstance(self.spark.data_type, TimestampType):
            # Timestamp subtraction casts arguments to integer seconds to mimic pandas's
            # behaviors. pandas returns 'timedelta64[ns]' from 'datetime64[ns]'s subtraction.
            msg = (
                "Note that there is a behavior difference of timestamp subtraction. "
                "The timestamp subtraction returns an integer in seconds, "
                "whereas pandas returns 'timedelta64[ns]'."
            )
            if isinstance(other, datetime.datetime):
                warnings.warn(msg, UserWarning)
                return -(self.astype("long") - F.lit(other).cast(as_spark_type("long")))
            else:
                raise TypeError("datetime subtraction can only be applied to datetime series.")
        elif isinstance(self.spark.data_type, DateType):
            # Note that date subtraction casts arguments to integer. This is to mimic pandas's
            # behavior; pandas would return 'timedelta64[ns]' in days.
            msg = (
                "Note that there is a behavior difference of date subtraction. "
                "The date subtraction returns an integer in days, "
                "whereas pandas returns 'timedelta64[ns]'."
            )
            if isinstance(other, datetime.date) and not isinstance(other, datetime.datetime):
                warnings.warn(msg, UserWarning)
                return -column_op(F.datediff)(self, F.lit(other)).astype("long")
            else:
                raise TypeError("date subtraction can only be applied to date series.")
        return column_op(Column.__rsub__)(self, other)

    def __rmul__(self, other) -> Union["Series", "Index"]:
        """other * self; int * string series repeats each string."""
        if isinstance(other, str):
            raise TypeError("multiplication can not be applied to a string literal.")

        if isinstance(self.spark.data_type, StringType):
            if isinstance(other, int):
                return column_op(SF.repeat)(self, other)
            else:
                raise TypeError(
                    "a string series can only be multiplied to an int series or literal"
                )

        return column_op(Column.__rmul__)(self, other)

    def __rtruediv__(self, other) -> Union["Series", "Index"]:
        """other / self, with other / 0 -> inf (NumPy-like)."""
        if isinstance(self.spark.data_type, StringType) or isinstance(other, str):
            raise TypeError("division can not be applied on string series or literals.")

        def rtruediv(left, right):
            return F.when(left == 0, F.lit(np.inf).__div__(right)).otherwise(
                F.lit(right).__truediv__(left)
            )

        return numpy_column_op(rtruediv)(self, other)

    def __floordiv__(self, other) -> Union["Series", "Index"]:
        """self // other with NumPy-like handling of 0, nan and +/-inf."""
        if (
            isinstance(self.spark.data_type, StringType)
            or (isinstance(other, IndexOpsMixin) and isinstance(other.spark.data_type, StringType))
            or isinstance(other, str)
        ):
            raise TypeError("division can not be applied on string series or literals.")

        def floordiv(left, right):
            return F.when(F.lit(right is np.nan), np.nan).otherwise(
                F.when(
                    F.lit(right != 0) | F.lit(right).isNull(), F.floor(left.__div__(right))
                ).otherwise(
                    F.when(F.lit(left == np.inf) | F.lit(left == -np.inf), left).otherwise(
                        F.lit(np.inf).__div__(left)
                    )
                )
            )

        return numpy_column_op(floordiv)(self, other)

    def __rfloordiv__(self, other) -> Union["Series", "Index"]:
        """other // self, mirroring __floordiv__'s special cases."""
        if isinstance(self.spark.data_type, StringType) or isinstance(other, str):
            raise TypeError("division can not be applied on string series or literals.")

        def rfloordiv(left, right):
            return F.when(F.lit(left == 0), F.lit(np.inf).__div__(right)).otherwise(
                F.when(F.lit(left) == np.nan, np.nan).otherwise(F.floor(F.lit(right).__div__(left)))
            )

        return numpy_column_op(rfloordiv)(self, other)

    def __rmod__(self, other) -> Union["Series", "Index"]:
        """other % self; sign of the result follows the divisor (Python-style)."""
        if isinstance(self.spark.data_type, StringType) or isinstance(other, str):
            raise TypeError("modulo can not be applied on string series or literals.")

        def rmod(left, right):
            return ((right % left) + left) % left

        return column_op(rmod)(self, other)

    __pow__ = column_op(Column.__pow__)
    __rpow__ = column_op(Column.__rpow__)
    __abs__ = column_op(F.abs)

    # comparison operators
    __eq__ = column_op(Column.__eq__)
    __ne__ = column_op(Column.__ne__)
    __lt__ = column_op(Column.__lt__)
    __le__ = column_op(Column.__le__)
    __ge__ = column_op(Column.__ge__)
    __gt__ = column_op(Column.__gt__)

    # `and`, `or`, `not` cannot be overloaded in Python,
    # so use bitwise operators as boolean operators
    __and__ = column_op(Column.__and__)
    __or__ = column_op(Column.__or__)
    __invert__ = column_op(Column.__invert__)
    __rand__ = column_op(Column.__rand__)
    __ror__ = column_op(Column.__ror__)

    def __len__(self):
        # Delegates to the anchoring DataFrame; triggers a Spark count.
        return len(self._kdf)

    # NDArray Compat
    def __array_ufunc__(self, ufunc: Callable, method: str, *inputs: Any, **kwargs: Any):
        """Dispatch NumPy ufuncs applied to this object (NEP 13 protocol)."""
        # Try dunder methods first.
        result = numpy_compat.maybe_dispatch_ufunc_to_dunder_op(
            self, ufunc, method, *inputs, **kwargs
        )

        # After that, we try with PySpark APIs.
        if result is NotImplemented:
            result = numpy_compat.maybe_dispatch_ufunc_to_spark_func(
                self, ufunc, method, *inputs, **kwargs
            )

        if result is not NotImplemented:
            return result
        else:
            # TODO: support more APIs?
            raise NotImplementedError("Koalas objects currently do not support %s." % ufunc)

    @property
    def dtype(self) -> np.dtype:
        """Return the NumPy dtype corresponding to the underlying Spark type."""
        return spark_type_to_pandas_dtype(self.spark.data_type)

    @property
    def empty(self) -> bool:
        """True if this object holds no rows. Triggers a Spark job."""
        return self._internal.resolved_copy.spark_frame.rdd.isEmpty()

    @property
    def hasnans(self) -> bool:
        """True if any value is null (or NaN, for float columns). Triggers a Spark job."""
        sdf = self._internal.spark_frame
        scol = self.spark.column

        if isinstance(self.spark.data_type, (DoubleType, FloatType)):
            return sdf.select(F.max(scol.isNull() | F.isnan(scol))).collect()[0][0]
        else:
            return sdf.select(F.max(scol.isNull())).collect()[0][0]

    @property
    def is_monotonic(self) -> bool:
        """True if the values are monotonically increasing."""
        return self._is_monotonic("increasing")

    is_monotonic_increasing = is_monotonic

    @property
    def is_monotonic_decreasing(self) -> bool:
        """True if the values are monotonically decreasing."""
        return self._is_monotonic("decreasing")

    def _is_locally_monotonic_spark_column(self, order):
        # Boolean column: each row compared against its predecessor WITHIN the
        # same Spark partition, in natural (insertion) order.
        window = (
            Window.partitionBy(F.col("__partition_id"))
            .orderBy(NATURAL_ORDER_COLUMN_NAME)
            .rowsBetween(-1, -1)
        )

        if order == "increasing":
            return (F.col("__origin") >= F.lag(F.col("__origin"), 1).over(window)) & F.col(
                "__origin"
            ).isNotNull()
        else:
            return (F.col("__origin") <= F.lag(F.col("__origin"), 1).over(window)) & F.col(
                "__origin"
            ).isNotNull()

    def _is_monotonic(self, order):
        """Check monotonicity in two stages to avoid a single global sort:
        per-partition row comparisons, then a comparison of adjacent partitions'
        min/max summaries."""
        assert order in ("increasing", "decreasing")

        sdf = self._internal.spark_frame

        sdf = (
            sdf.select(
                F.spark_partition_id().alias(
                    "__partition_id"
                ),  # Make sure we use the same partition id in the whole job.
                F.col(NATURAL_ORDER_COLUMN_NAME),
                self.spark.column.alias("__origin"),
            )
            .select(
                F.col("__partition_id"),
                F.col("__origin"),
                self._is_locally_monotonic_spark_column(order).alias(
                    "__comparison_within_partition"
                ),
            )
            .groupby(F.col("__partition_id"))
            .agg(
                F.min(F.col("__origin")).alias("__partition_min"),
                F.max(F.col("__origin")).alias("__partition_max"),
                F.min(F.coalesce(F.col("__comparison_within_partition"), F.lit(True))).alias(
                    "__comparison_within_partition"
                ),
            )
        )

        # Now we're windowing the aggregation results without partition specification.
        # The row count here equals the partition count, which should be small.
        window = Window.orderBy(F.col("__partition_id")).rowsBetween(-1, -1)
        if order == "increasing":
            comparison_col = F.col("__partition_min") >= F.lag(F.col("__partition_max"), 1).over(
                window
            )
        else:
            comparison_col = F.col("__partition_min") <= F.lag(F.col("__partition_max"), 1).over(
                window
            )

        sdf = sdf.select(
            comparison_col.alias("__comparison_between_partitions"),
            F.col("__comparison_within_partition"),
        )

        # Null comparisons default to True via coalesce (vacuously monotonic).
        ret = sdf.select(
            F.min(F.coalesce(F.col("__comparison_between_partitions"), F.lit(True)))
            & F.min(F.coalesce(F.col("__comparison_within_partition"), F.lit(True)))
        ).collect()[0][0]
        if ret is None:
            return True
        else:
            return ret

    @property
    def ndim(self) -> int:
        """Number of dimensions; always 1 for Series/Index."""
        return 1

    def astype(self, dtype) -> Union["Index", "Series"]:
        """Cast to `dtype`, with pandas-like null handling for bool/str targets."""
        spark_type = as_spark_type(dtype)
        if not spark_type:
            raise ValueError("Type {} not understood".format(dtype))

        if isinstance(spark_type, BooleanType):
            if isinstance(self.spark.data_type, StringType):
                # Non-empty strings become True; nulls become False.
                scol = F.when(self.spark.column.isNull(), F.lit(False)).otherwise(
                    F.length(self.spark.column) > 0
                )
            elif isinstance(self.spark.data_type, (FloatType, DoubleType)):
                # NaN/null become True (mirrors pandas: bool(nan) is True).
                scol = F.when(
                    self.spark.column.isNull() | F.isnan(self.spark.column), F.lit(True)
                ).otherwise(self.spark.column.cast(spark_type))
            else:
                scol = F.when(self.spark.column.isNull(), F.lit(False)).otherwise(
                    self.spark.column.cast(spark_type)
                )
        elif isinstance(spark_type, StringType):
            # Nulls become the literal string "None".
            scol = F.when(self.spark.column.isNull(), str(None)).otherwise(
                self.spark.column.cast(spark_type)
            )
        else:
            scol = self.spark.column.cast(spark_type)
        return self._with_new_scol(scol)

    def isin(self, values) -> Union["Series", "Index"]:
        """Boolean mask of whether each element is contained in `values`."""
        if not is_list_like(values):
            raise TypeError(
                "only list-like objects are allowed to be passed"
                " to isin(), you passed a [{values_type}]".format(values_type=type(values).__name__)
            )

        return self._with_new_scol(self.spark.column.isin(list(values)))

    def isnull(self) -> Union["Series", "Index"]:
        """Boolean mask of missing values (null, plus NaN for float columns)."""
        from databricks.koalas.indexes import MultiIndex

        if isinstance(self, MultiIndex):
            raise NotImplementedError("isna is not defined for MultiIndex")
        if isinstance(self.spark.data_type, (FloatType, DoubleType)):
            return self._with_new_scol(self.spark.column.isNull() | F.isnan(self.spark.column))
        else:
            return self._with_new_scol(self.spark.column.isNull())

    isna = isnull

    def notnull(self) -> Union["Series", "Index"]:
        """Boolean mask of non-missing values; the inverse of `isnull`."""
        from databricks.koalas.indexes import MultiIndex

        if isinstance(self, MultiIndex):
            raise NotImplementedError("notna is not defined for MultiIndex")
        # `~isnull()` loses the name; restore it explicitly.
        return (~self.isnull()).rename(
            self.name
        )

    notna = notnull

    def all(self, axis: Union[int, str] = 0) -> bool:
        """Return whether all elements are truthy. Nulls are ignored."""
        axis = validate_axis(axis)
        if axis != 0:
            raise NotImplementedError('axis should be either 0 or "index" currently.')

        sdf = self._internal.spark_frame.select(self.spark.column)
        col = scol_for(sdf, sdf.columns[0])

        # any and every was added as of Spark 3.0
        # ret = sdf.select(F.expr("every(CAST(`%s` AS BOOLEAN))" % sdf.columns[0])).collect()[0][0]
        # Here we use min as its alternative:
        ret = sdf.select(F.min(F.coalesce(col.cast("boolean"), F.lit(True)))).collect()[0][0]
        if ret is None:
            return True
        else:
            return ret

    # TODO: axis, skipna, and many arguments should be implemented.
    def any(self, axis: Union[int, str] = 0) -> bool:
        """Return whether any element is truthy. Nulls count as False."""
        axis = validate_axis(axis)
        if axis != 0:
            raise NotImplementedError('axis should be either 0 or "index" currently.')

        sdf = self._internal.spark_frame.select(self.spark.column)
        col = scol_for(sdf, sdf.columns[0])

        # Note that we're ignoring `None`s here for now.
        ret = sdf.select(F.max(F.coalesce(col.cast("boolean"), F.lit(False)))).collect()[0][0]
        if ret is None:
            return False
        else:
            return ret

    def shift(self, periods=1, fill_value=None) -> Union["Series", "Index"]:
        """Shift values by `periods` positions, filling holes with `fill_value`."""
        return self._shift(periods, fill_value)

    def _shift(self, periods, fill_value, part_cols=()):
        # `part_cols` lets groupby-based shifts restrict the window per group.
        if not isinstance(periods, int):
            raise ValueError("periods should be an int; however, got [%s]" % type(periods).__name__)

        col = self.spark.column
        window = (
            Window.partitionBy(*part_cols)
            .orderBy(NATURAL_ORDER_COLUMN_NAME)
            .rowsBetween(-periods, -periods)
        )
        lag_col = F.lag(col, periods).over(window)
        col = F.when(lag_col.isNull() | F.isnan(lag_col), fill_value).otherwise(lag_col)
        return self._with_new_scol(col)

    def value_counts(
        self, normalize=False, sort=True, ascending=False, bins=None, dropna=True
    ) -> "Series":
        """Return a Series of counts (or relative frequencies) of unique values."""
        from databricks.koalas.series import first_series

        if bins is not None:
            raise NotImplementedError("value_counts currently does not support bins")

        if dropna:
            sdf_dropna = self._internal.spark_frame.select(self.spark.column).dropna()
        else:
            sdf_dropna = self._internal.spark_frame.select(self.spark.column)
        index_name = SPARK_DEFAULT_INDEX_NAME
        column_name = self._internal.data_spark_column_names[0]
        sdf = sdf_dropna.groupby(scol_for(sdf_dropna, column_name).alias(index_name)).count()
        if sort:
            if ascending:
                sdf = sdf.orderBy(F.col("count"))
            else:
                sdf = sdf.orderBy(F.col("count").desc())

        if normalize:
            # Normalize against the (possibly null-dropped) row count.
            sum = sdf_dropna.count()
            sdf = sdf.withColumn("count", F.col("count") / F.lit(sum))

        internal = InternalFrame(
            spark_frame=sdf,
            index_spark_columns=[scol_for(sdf, index_name)],
            column_labels=self._internal.column_labels,
            data_spark_columns=[scol_for(sdf, "count")],
            column_label_names=self._internal.column_label_names,
        )

        return first_series(DataFrame(internal))

    def nunique(self, dropna: bool = True, approx: bool = False, rsd: float = 0.05) -> int:
        """Number of unique values; `approx=True` uses HyperLogLog with error `rsd`."""
        res = self._internal.spark_frame.select([self._nunique(dropna, approx, rsd)])
        return res.collect()[0][0]

    def _nunique(self, dropna=True, approx=False, rsd=0.05):
        # Build the aggregate expression used by `nunique` (and DataFrame.nunique).
        colname = self._internal.data_spark_column_names[0]
        count_fn = partial(F.approx_count_distinct, rsd=rsd) if approx else F.countDistinct

        if dropna:
            return count_fn(self.spark.column).alias(colname)
        else:
            # countDistinct ignores nulls; add 1 back if any null exists.
            return (
                count_fn(self.spark.column)
                + F.when(
                    F.count(F.when(self.spark.column.isNull(), 1).otherwise(None)) >= 1, 1
                ).otherwise(0)
            ).alias(colname)

    def take(self, indices) -> Union["Series", "Index"]:
        """Return the elements at the given positional `indices`."""
        if not is_list_like(indices) or isinstance(indices, (dict, set)):
            raise ValueError("`indices` must be a list-like except dict or set")
        if isinstance(self, ks.Series):
            return cast(ks.Series, self.iloc[indices])
        else:
            return self._kdf.iloc[indices].index
| true | true |
f735c8f12c8094d4f4ec79448a80403fe86acc03 | 10,531 | py | Python | main.py | abhi526691/Covid-Guard | 9c050ef44201c01f512169ffb146ad0da5278ec1 | [
"MIT"
] | 5 | 2020-08-26T18:39:29.000Z | 2020-11-16T13:05:11.000Z | main.py | abhi526691/Covid-Guard | 9c050ef44201c01f512169ffb146ad0da5278ec1 | [
"MIT"
] | null | null | null | main.py | abhi526691/Covid-Guard | 9c050ef44201c01f512169ffb146ad0da5278ec1 | [
"MIT"
] | 2 | 2020-10-07T15:12:51.000Z | 2020-10-22T08:49:13.000Z | # import the necessary packages
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.models import load_model
from imutils.video import VideoStream,FileVideoStream
import imutils
import numpy as np
import time
import os
import cv2
import math
def mainc():
    """Run the combined social-distancing + face-mask detection webcam demo.

    Opens the default webcam, runs YOLOv3 person detection to flag pairs of
    people closer than a pixel-distance threshold, runs a Caffe face detector
    plus a Keras mask classifier on each frame, draws the annotated overlay
    full-screen, and exits when 'q' is pressed. Requires the model files under
    ./Model and ./Face Detector relative to the working directory.
    """
    scale_percent = 20  # percentage of original size
    width = 0
    height = 0

    labelsPath = "Model/coco.names"  # path for model
    LABELS = open(labelsPath).read().strip().split("\n")

    np.random.seed(42)
    COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),
                               dtype="uint8")

    weightsPath = "Model/yolov3.weights"  # path for yolov3 weights
    configPath = "Model/yolov3.cfg"  # path for yolov3 configuration file

    net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)

    cap = cv2.VideoCapture(0)

    if not cap.isOpened():
        print("Could not open webcam")
        exit()
    else:  # get dimension info
        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        dim = (width, height)
        print('Original Dimensions : ',dim)
        width = int(width * scale_percent / 100)
        height = int(height * scale_percent / 100)
        dim = (width, height)
        print('Resized Dimensions : ', dim)

    def detect_and_predict_mask(frame, faceNet, maskNet):
        """Detect faces in `frame` with `faceNet`, then classify each detected
        face as mask/no-mask with `maskNet`. Returns (locations, predictions)."""
        # grab the dimensions of the frame and then construct a blob from it
        (h, w) = frame.shape[:2]
        blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
                                     (104.0, 177.0, 123.0))

        # pass the blob through the network and obtain the face detections
        faceNet.setInput(blob)
        detections = faceNet.forward()

        # initialize our list of faces, their corresponding locations,
        # and the list of predictions from our face mask network
        faces = []
        locs = []
        preds = []

        # loop over the detections
        for i in range(0, detections.shape[2]):
            # extract the confidence (i.e., probability) associated with
            # the detection
            confidence = detections[0, 0, i, 2]

            # filter out weak detections by ensuring the confidence is
            # greater than the minimum confidence
            if confidence > 0.5:
                # compute the (x, y)-coordinates of the bounding box for
                # the object
                box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")

                # ensure the bounding boxes fall within the dimensions of
                # the frame
                (startX, startY) = (max(0, startX), max(0, startY))
                (endX, endY) = (min(w - 1, endX), min(h - 1, endY))

                # extract the face ROI, convert it from BGR to RGB channel
                # ordering, resize it to 224x224, and preprocess it
                face = frame[startY:endY, startX:endX]
                face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
                face = cv2.resize(face, (224, 224))
                face = img_to_array(face)
                face = preprocess_input(face)

                # add the face and bounding boxes to their respective
                # lists
                faces.append(face)
                locs.append((startX, startY, endX, endY))

        # only make a predictions if at least one face was detected
        if len(faces) > 0:
            # for faster inference we'll make batch predictions on *all*
            # faces at the same time rather than one-by-one predictions
            # in the above `for` loop
            faces = np.array(faces, dtype="float32")
            preds = maskNet.predict(faces, batch_size=32)

        # return a 2-tuple of the face locations and their corresponding
        # locations
        return (locs, preds)

    base_dir=os.getcwd()
    base_dir=base_dir.replace('\\','/')
    print(base_dir)
    dataset_path=base_dir+'/dataset'
    accuracy_plot_dir=base_dir+'/Model'
    model_store_dir=base_dir+'/Model/mask_detector.model'
    example=base_dir+'/Image/1.jpg'
    # NOTE(review): `confidence` set here is shadowed inside the detection loop
    # below — confirm whether it was meant to be the YOLO threshold.
    confidence=0.4
    face_detector_caffe=base_dir+'/Face Detector/res10_300x300_ssd_iter_140000.caffemodel'

    # load our serialized face detector model from disk
    print("[INFO] loading face detector model...")
    prototxtPath = base_dir+'/Face Detector/deploy.prototxt'
    weightsPath = face_detector_caffe
    faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)

    # load the face mask detector model from disk
    print("[INFO] loading face mask detector model...")
    maskNet = load_model(model_store_dir)

    # initialize the video stream and allow the camera sensor to warm up
    print("[INFO] starting video stream...")
    vs = VideoStream(src=0).start()
    #time.sleep(2.0)

    # loop over the frames from the video stream
    # NOTE(review): `iter` is never incremented, so `iter % 3 == 0` below is
    # always true and the distancing overlay runs on every frame; the name
    # also shadows the builtin `iter` — confirm intent.
    iter=0
    while True:
        # grab the frame from the threaded video stream and resize it
        # to have a maximum width of 400 pixels
        frame = vs.read()
        frame = imutils.resize(frame, width=1200)
        resized = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
        (H, W) = frame.shape[:2]
        ln = net.getLayerNames()
        ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
        blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (224, 224), swapRB=True, crop=False)
        net.setInput(blob)
        start = time.time()
        layerOutputs = net.forward(ln)
        end = time.time()
        # print("Frame Prediction Time : {:.6f} seconds".format(end - start))
        boxes = []
        confidences = []
        classIDs = []
        for output in layerOutputs:
            for detection in output:
                scores = detection[5:]
                classID = np.argmax(scores)
                confidence = scores[classID]
                # classID 0 is "person" in the COCO label set.
                if confidence > 0.1 and classID == 0:
                    box = detection[0:4] * np.array([W, H, W, H])
                    (centerX, centerY, width, height) = box.astype("int")
                    x = int(centerX - (width / 2))
                    y = int(centerY - (height / 2))
                    boxes.append([x, y, int(width), int(height)])
                    confidences.append(float(confidence))
                    classIDs.append(classID)
        if iter % 3 == 0:
            # Non-maxima suppression to drop overlapping person boxes.
            idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.3)
            ind = []
            for i in range(0, len(classIDs)):
                if (classIDs[i] == 0):
                    ind.append(i)
            a = []
            b = []
            if len(idxs) > 0:
                for i in idxs.flatten():
                    (x, y) = (boxes[i][0], boxes[i][1])
                    (w, h) = (boxes[i][2], boxes[i][3])
                    a.append(x)
                    b.append(y)
            distance = []
            nsd = []
            # Pairwise pixel distance between box corners; pairs closer than
            # the threshold are flagged as "not socially distanced" (nsd).
            for i in range(0, len(a) - 1):
                for k in range(1, len(a)):
                    if (k == i):
                        break
                    else:
                        x_dist = (a[k] - a[i])
                        y_dist = (b[k] - b[i])
                        d = math.sqrt(x_dist * x_dist + y_dist * y_dist)
                        distance.append(d)
                        # NOTE(review): 6912 is a squared-looking pixel
                        # threshold compared against an unsquared distance —
                        # confirm the intended cutoff.
                        if (d <= 6912):
                            nsd.append(i)
                            nsd.append(k)
            nsd = list(dict.fromkeys(nsd))
            # print(nsd)
            color = (0, 0, 255)
            for i in nsd:
                (x, y) = (boxes[i][0], boxes[i][1])
                (w, h) = (boxes[i][2], boxes[i][3])
                cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
                text = "Alert"
                cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
            color = (0, 255, 0)
            if len(idxs) > 0:
                for i in idxs.flatten():
                    if (i in nsd):
                        break
                    else:
                        (x, y) = (boxes[i][0], boxes[i][1])
                        (w, h) = (boxes[i][2], boxes[i][3])
                        cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
                        text = 'OK'
                        cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
            # Overlay: counters and the color-code legend.
            text = "Social Distancing Violators: {}".format(len(nsd))
            cv2.putText(frame, text, (660, frame.shape[0] - 45),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 4)
            cv2.putText(frame, "Covid Guard: Team TrojanWave", (140, 45),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
            cv2.rectangle(frame, (20, 60), (1170, 100), (170, 170, 170), 2)
            cv2.putText(frame, "COLOR CODE: RISK ANALYSIS", (30, 85),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 1)
            cv2.putText(frame, "--- GREEN : SAFE", (500, 85),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
            cv2.putText(frame, "--- RED: UNSAFE", (1000, 85),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
            tot_str = "TOTAL: " + str(len(idxs))
            high_str = "HIGH RISK: " + str(len(nsd))
            low_str = "LOW RISK: " + str(0)
            safe_str = "SAFE: " + str(len(idxs)-len(nsd))
            # Darkened stats panel in the bottom-left corner.
            sub_img = frame[H - 270: H , 0:240]
            black_rect = np.ones(sub_img.shape, dtype=np.uint8) * 0
            res = cv2.addWeighted(sub_img, 0.8, black_rect, 0.2, 1.0)
            frame[H - 270:H, 0:240] = res
            cv2.putText(frame, tot_str, (10, H - 235),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
            cv2.putText(frame, safe_str, (10, H - 200),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
            cv2.putText(frame, low_str, (10, H - 165),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 120, 255), 2)
            cv2.putText(frame, high_str, (10, H - 130),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 150), 2)
            #cv2.imshow("Social Distancing Detector", frame)
            cv2.rectangle(frame, (10, H-100 ), (600, H-10), (170, 170, 170), 2)
            cv2.putText(frame, "COLOR CODE: MASK DETECTION", (40, H-40),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 0), 2)
            cv2.putText(frame, "--- RED : NO MASK", (420, H-70),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
            cv2.putText(frame, "--- GREEN : MASK", (420, H-35),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
            # cv2.putText(frame, "-- GREEN: SAFE", (565, 150),
            #             cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)

        # detect faces in the frame and determine if they are wearing a
        # face mask or not
        (locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet)

        # loop over the detected face locations and their corresponding
        # locations
        for (box, pred) in zip(locs, preds):
            # unpack the bounding box and predictions
            (startX, startY, endX, endY) = box
            (mask, withoutMask) = pred

            # determine the class label and color we'll use to draw
            # the bounding box and text
            label = "Mask" if mask > withoutMask else "No Mask"
            color = (0, 255, 0) if label == "Mask" else (0, 0, 255)

            # include the probability in the label
            label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)

            # display the label and bounding box rectangle on the output
            # frame
            cv2.putText(frame, label, (startX, startY - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
            cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)

        # show the output frame
        cv2.namedWindow('frame', cv2.WINDOW_NORMAL)
        cv2.setWindowProperty('frame', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
        cv2.imshow('frame', frame)
        key = cv2.waitKey(1) & 0xFF
        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
| 32.603715 | 88 | 0.628145 |
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.models import load_model
from imutils.video import VideoStream,FileVideoStream
import imutils
import numpy as np
import time
import os
import cv2
import math
def mainc():
scale_percent = 20
width = 0
height = 0
labelsPath = "Model/coco.names"
LABELS = open(labelsPath).read().strip().split("\n")
np.random.seed(42)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),
dtype="uint8")
weightsPath = "Model/yolov3.weights"
configPath = "Model/yolov3.cfg"
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
cap = cv2.VideoCapture(0)
if not cap.isOpened():
print("Could not open webcam")
exit()
else:
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
dim = (width, height)
print('Original Dimensions : ',dim)
width = int(width * scale_percent / 100)
height = int(height * scale_percent / 100)
dim = (width, height)
print('Resized Dimensions : ', dim)
def detect_and_predict_mask(frame, faceNet, maskNet):
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
(104.0, 177.0, 123.0))
faceNet.setInput(blob)
detections = faceNet.forward()
faces = []
locs = []
preds = []
for i in range(0, detections.shape[2]):
confidence = detections[0, 0, i, 2]
if confidence > 0.5:
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
(startX, startY) = (max(0, startX), max(0, startY))
(endX, endY) = (min(w - 1, endX), min(h - 1, endY))
face = frame[startY:endY, startX:endX]
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
face = cv2.resize(face, (224, 224))
face = img_to_array(face)
face = preprocess_input(face)
faces.append(face)
locs.append((startX, startY, endX, endY))
if len(faces) > 0:
# faces at the same time rather than one-by-one predictions
# in the above `for` loop
faces = np.array(faces, dtype="float32")
preds = maskNet.predict(faces, batch_size=32)
# return a 2-tuple of the face locations and their corresponding
# locations
return (locs, preds)
# Resolve all model/data paths relative to the current working directory;
# backslashes are normalized so the paths work on Windows too.
base_dir=os.getcwd()
base_dir=base_dir.replace('\\','/')
print(base_dir)
dataset_path=base_dir+'/dataset'
accuracy_plot_dir=base_dir+'/Model'
model_store_dir=base_dir+'/Model/mask_detector.model'
example=base_dir+'/Image/1.jpg'
# NOTE(review): this module-level ``confidence`` is shadowed by the loop
# variable of the same name in the detection loop below — confirm intent.
confidence=0.4
face_detector_caffe=base_dir+'/Face Detector/res10_300x300_ssd_iter_140000.caffemodel'
# load our serialized face detector model from disk
print("[INFO] loading face detector model...")
prototxtPath = base_dir+'/Face Detector/deploy.prototxt'
weightsPath = face_detector_caffe
faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)
# load the face mask detector model from disk
print("[INFO] loading face mask detector model...")
maskNet = load_model(model_store_dir)
# initialize the video stream and allow the camera sensor to warm up
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
#time.sleep(2.0)
# loop over the frames from the video stream
# NOTE(review): ``iter`` shadows the builtin and is never incremented in
# the loop below, so ``iter % 3 == 0`` is always true — confirm intent.
iter=0
# NOTE(review): the original indentation of this loop was lost; the
# nesting below is a reconstruction — verify against the source project.
while True:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=1200)
    resized = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
    (H, W) = frame.shape[:2]
    # Run the YOLO-style person detector (``net`` is loaded earlier, out
    # of view here) on the full frame.
    ln = net.getLayerNames()
    ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (224, 224), swapRB=True, crop=False)
    net.setInput(blob)
    start = time.time()
    layerOutputs = net.forward(ln)
    end = time.time()
    # print("Frame Prediction Time : {:.6f} seconds".format(end - start))
    boxes = []
    confidences = []
    classIDs = []
    for output in layerOutputs:
        for detection in output:
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]
            # Keep only class 0 ("person") detections above a low gate.
            if confidence > 0.1 and classID == 0:
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))
                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))
                classIDs.append(classID)
    # ``iter`` stays 0 (never incremented), so this branch always runs.
    if iter % 3 == 0:
        # Non-maximum suppression, then pairwise distance screening.
        idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.3)
        ind = []
        for i in range(0, len(classIDs)):
            if (classIDs[i] == 0):
                ind.append(i)
        a = []
        b = []
        if len(idxs) > 0:
            for i in idxs.flatten():
                (x, y) = (boxes[i][0], boxes[i][1])
                (w, h) = (boxes[i][2], boxes[i][3])
                a.append(x)
                b.append(y)
        distance = []
        nsd = []
        # Pairwise pixel distance between box corners; pairs closer than
        # the hard-coded threshold are flagged as violators.
        for i in range(0, len(a) - 1):
            for k in range(1, len(a)):
                if (k == i):
                    break
                else:
                    x_dist = (a[k] - a[i])
                    y_dist = (b[k] - b[i])
                    d = math.sqrt(x_dist * x_dist + y_dist * y_dist)
                    distance.append(d)
                    if (d <= 6912):
                        nsd.append(i)
                        nsd.append(k)
        # Deduplicate while preserving insertion order.
        nsd = list(dict.fromkeys(nsd))
        # print(nsd)
    # Draw violators in red ...
    color = (0, 0, 255)
    for i in nsd:
        (x, y) = (boxes[i][0], boxes[i][1])
        (w, h) = (boxes[i][2], boxes[i][3])
        cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
        text = "Alert"
        cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
    # ... and everyone else in green.
    color = (0, 255, 0)
    if len(idxs) > 0:
        for i in idxs.flatten():
            if (i in nsd):
                break
            else:
                (x, y) = (boxes[i][0], boxes[i][1])
                (w, h) = (boxes[i][2], boxes[i][3])
                cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
                text = 'OK'
                cv2.putText(frame, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
    text = "Social Distancing Violators: {}".format(len(nsd))
    cv2.putText(frame, text, (660, frame.shape[0] - 45),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 4)
    # Static HUD: banner and color-code legend.
    cv2.putText(frame, "Covid Guard: Team TrojanWave", (140, 45),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
    cv2.rectangle(frame, (20, 60), (1170, 100), (170, 170, 170), 2)
    cv2.putText(frame, "COLOR CODE: RISK ANALYSIS", (30, 85),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 1)
    cv2.putText(frame, "--- GREEN : SAFE", (500, 85),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
    cv2.putText(frame, "--- RED: UNSAFE", (1000, 85),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
    # Summary panel on a darkened sub-image in the lower-left corner.
    tot_str = "TOTAL: " + str(len(idxs))
    high_str = "HIGH RISK: " + str(len(nsd))
    low_str = "LOW RISK: " + str(0)
    safe_str = "SAFE: " + str(len(idxs)-len(nsd))
    sub_img = frame[H - 270: H , 0:240]
    black_rect = np.ones(sub_img.shape, dtype=np.uint8) * 0
    res = cv2.addWeighted(sub_img, 0.8, black_rect, 0.2, 1.0)
    frame[H - 270:H, 0:240] = res
    cv2.putText(frame, tot_str, (10, H - 235),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)
    cv2.putText(frame, safe_str, (10, H - 200),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
    cv2.putText(frame, low_str, (10, H - 165),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 120, 255), 2)
    cv2.putText(frame, high_str, (10, H - 130),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 150), 2)
    #cv2.imshow("Social Distancing Detector", frame)
    cv2.rectangle(frame, (10, H-100 ), (600, H-10), (170, 170, 170), 2)
    cv2.putText(frame, "COLOR CODE: MASK DETECTION", (40, H-40),
                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 0), 2)
    cv2.putText(frame, "--- RED : NO MASK", (420, H-70),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
    cv2.putText(frame, "--- GREEN : MASK", (420, H-35),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
    # cv2.putText(frame, "-- GREEN: SAFE", (565, 150),
    # cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
    # detect faces in the frame and determine if they are wearing a
    # face mask or not
    (locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet)
    # loop over the detected face locations and their corresponding
    # locations
    for (box, pred) in zip(locs, preds):
        # unpack the bounding box and predictions
        (startX, startY, endX, endY) = box
        (mask, withoutMask) = pred
        # determine the class label and color we'll use to draw
        label = "Mask" if mask > withoutMask else "No Mask"
        color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
        label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
        cv2.putText(frame, label, (startX, startY - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
        cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
    # Display full screen until the user presses 'q'.
    cv2.namedWindow('frame', cv2.WINDOW_NORMAL)
    cv2.setWindowProperty('frame', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    cv2.imshow('frame', frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord("q"):
        break
cv2.destroyAllWindows()
vs.stop()
| true | true |
f735c99cac1d4b91c2534863c9a3f45ff76fd782 | 2,618 | py | Python | day16-part1/main.py | KSonny4/advent-of-code-2021 | ca4cf2e94799174468950019a562ed9063001599 | [
"MIT"
] | 4 | 2021-12-02T21:17:43.000Z | 2021-12-05T22:15:28.000Z | day16-part1/main.py | KSonny4/advent-of-code-2021 | ca4cf2e94799174468950019a562ed9063001599 | [
"MIT"
] | null | null | null | day16-part1/main.py | KSonny4/advent-of-code-2021 | ca4cf2e94799174468950019a562ed9063001599 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from enum import Enum
from typing import List, Tuple, Union, Any
# Running total of packet version numbers (the part-1 answer); mutated by
# parse_literal_value() and parse_operator().
VERSIONS_SUM = 0


class TypeID(Enum):
    """Packet type IDs with special parsing; 4 marks a literal value."""
    LITERAL_VALUE = 4
@dataclass
class LiteralValue:
    """A parsed BITS literal-value packet (type ID 4)."""
    packet_version: int
    packet_type_id: int
    # Raw 4-bit groups in transmission order.
    parts: List[str]
    # Integer decoded from the concatenated groups.
    decimal: int
@dataclass
class OperatorPacket:
    """A parsed BITS operator packet (any type ID other than 4)."""
    packet_version: int
    packet_type_id: int
    # 0 = 15-bit total-bit-length mode, 1 = 11-bit sub-packet-count mode.
    length_type_id: int
    length: int
    # Parsed sub-packets (LiteralValue / OperatorPacket instances).
    packets: List[Any]
def parse_literal_value_recursive(bits: str, vals: List[str]) -> Tuple[List[str], str]:
    """Collect the 5-bit groups of a literal-value payload.

    Each group starts with a continuation flag; the four bits that follow
    are appended to ``vals``.  Parsing stops after the first group whose
    flag is "0" and the unconsumed bits are returned alongside the groups.
    """
    while bits[0] == "1":
        vals.append(bits[1:5])
        bits = bits[5:]
    vals.append(bits[1:5])
    return vals, bits[5:]
def parse_literal_value(bits: str) -> Tuple[LiteralValue, str]:
    """Decode one literal-value packet starting at the head of ``bits``.

    Adds the packet version to the global VERSIONS_SUM and returns the
    parsed LiteralValue together with the unconsumed bit string.
    """
    global VERSIONS_SUM

    header_version = int(bits[:3], 2)
    header_type = int(bits[3:6], 2)
    VERSIONS_SUM += header_version

    # Everything after the 6-bit header belongs to the nibble groups.
    groups, remainder = parse_literal_value_recursive(bits[6:], [])

    literal = LiteralValue(
        packet_version=header_version,
        packet_type_id=header_type,
        parts=groups,
        decimal=int("".join(groups), 2),
    )
    return literal, remainder
def parse_operator(bits):
    """Decode an operator packet header and parse its payload.

    Adds the packet version to the global VERSIONS_SUM.  The length field
    is read but only recorded: the parser simply consumes every remaining
    bit via parse_packet, so the returned "rest" is always "" (which is
    sufficient for the part-1 version sum).
    """
    global VERSIONS_SUM

    version = int(bits[0:3], 2)
    type_id = int(bits[3:6], 2)
    VERSIONS_SUM += version

    length_type = bits[6]
    if length_type == "0":
        # 15-bit total-length-in-bits mode.
        length = int(bits[7:22], 2)
        payload = bits[22:]
    elif length_type == "1":
        # 11-bit number-of-sub-packets mode.
        length = int(bits[7:18], 2)
        payload = bits[18:]
    else:
        # Unreachable for a well-formed binary string.
        raise Exception("Could not parse length properly.")

    sub_packets = parse_packet(payload, [])
    operator = OperatorPacket(
        packet_version=version,
        packet_type_id=type_id,
        length_type_id=int(length_type),
        length=length,
        packets=sub_packets,
    )
    return operator, ""
def parse_packet(bits, values):
    """Parse consecutive packets from ``bits``, accumulating into ``values``.

    Recurses until fewer than 4 bits remain or only "0" padding is left.
    """
    if len(bits) < 4 or set(bits) == {"0"}:
        return values

    type_id = int(bits[3:6], 2)
    if type_id == TypeID.LITERAL_VALUE.value:
        packet, rest = parse_literal_value(bits)
    else:
        packet, rest = parse_operator(bits)
    return parse_packet(rest, values + [packet])
def main() -> None:
    """Read the hex transmission from input.txt and print the version sum."""
    with open("input.txt", encoding="utf-8") as f:
        packet = f.read().strip()
        print(packet)
        # Prefix '1' before converting so leading zero bits survive the
        # int() round-trip, then drop that marker bit with [3:].
        bits = bin(int("1" + packet, 16))[3:]
        parsed_bites = parse_packet(bits, [])
        print(parsed_bites)
        print(VERSIONS_SUM)


if __name__ == "__main__":
    main()
| 21.459016 | 87 | 0.607716 | from dataclasses import dataclass
from enum import Enum
from typing import List, Tuple, Union, Any
VERSIONS_SUM = 0
class TypeID(Enum):
LITERAL_VALUE = 4
@dataclass
class LiteralValue:
packet_version: int
packet_type_id: int
parts: List[str]
decimal: int
@dataclass
class OperatorPacket:
packet_version: int
packet_type_id: int
length_type_id: int
length: int
packets: List[Any]
def parse_literal_value_recursive(bits: str, vals: List[str]) -> Tuple[List[str], str]:
if bits[0] == "1":
return parse_literal_value_recursive(bits[5:], vals + [bits[1:5]])
vals += [bits[1:5]]
return vals, bits[5:]
def parse_literal_value(bits: str) -> Tuple[LiteralValue, str]:
global VERSIONS_SUM
version = int(bits[0:3], 2)
VERSIONS_SUM += version
type_id = int(bits[3:6], 2)
bits_without_header = bits[6:]
parsed_literal_value, rest_bites = parse_literal_value_recursive(
bits_without_header, []
)
return (
LiteralValue(
packet_version=version,
packet_type_id=type_id,
parts=parsed_literal_value,
decimal=int("".join(parsed_literal_value), 2),
),
rest_bites,
)
def parse_operator(bits):
global VERSIONS_SUM
version = int(bits[0:3], 2)
type_id = int(bits[3:6], 2)
VERSIONS_SUM += version
length = -1
new_bits = ""
if bits[6] == "0":
length = int(bits[7:22], 2)
new_bits = bits[22:]
if bits[6] == "1":
length = int(bits[7:18], 2)
new_bits = bits[18:]
if length == -1:
raise Exception("Could not parse length properly.")
parsed_packet = parse_packet(new_bits, [])
return (
OperatorPacket(
packet_version=version,
packet_type_id=type_id,
length_type_id=int(bits[6]),
length=length,
packets=parsed_packet,
),
"",
)
def parse_packet(bits, values):
if len(bits) < 4 or set(x for x in bits) == {"0"}:
return values
type_id = int(bits[3:6], 2)
if type_id == TypeID.LITERAL_VALUE.value:
parsed_bites, rest = parse_literal_value(bits)
else:
parsed_bites, rest = parse_operator(bits)
return parse_packet(rest, values + [parsed_bites])
def main():
with open("input.txt", encoding="utf-8") as f:
packet = f.read().strip()
print(packet)
bits = bin(int("1" + packet, 16))[3:]
parsed_bites = parse_packet(bits, [])
print(parsed_bites)
print(VERSIONS_SUM)
if __name__ == "__main__":
main()
| true | true |
f735ca903d6e0f2818ec50f7707d4002984535b4 | 3,119 | py | Python | strings/string_search/z_algorithm/python/Z_algorithm.py | avi-pal/al-go-rithms | 5167a20f1db7b366ff19f2962c1746a02e4f5067 | [
"CC0-1.0"
] | 1,253 | 2017-06-06T07:19:25.000Z | 2022-03-30T17:07:58.000Z | strings/string_search/z_algorithm/python/Z_algorithm.py | rishabh99-rc/al-go-rithms | 4df20d7ef7598fda4bc89101f9a99aac94cdd794 | [
"CC0-1.0"
] | 554 | 2017-09-29T18:56:01.000Z | 2022-02-21T15:48:13.000Z | strings/string_search/z_algorithm/python/Z_algorithm.py | rishabh99-rc/al-go-rithms | 4df20d7ef7598fda4bc89101f9a99aac94cdd794 | [
"CC0-1.0"
] | 2,226 | 2017-09-29T19:59:59.000Z | 2022-03-25T08:59:55.000Z | #!/bin/python3
import sys, string
from random import *
from timeit import default_timer as timer
def randstr(N, alphabet=string.ascii_lowercase):
    """Return a random string of length ``N`` drawn from ``alphabet``."""
    last = len(alphabet) - 1
    return "".join(alphabet[randint(0, last)] for _ in range(N))
def timefunc(func, *args, **kwargs):
    """Time ``func(*args, **kwargs)`` and return its (last) result.

    Keyword-only option:
        iterations=1 -- number of calls; the printed time is the
                        average per call.

    Usage example:
        timefunc(myfunc, 1, b=2)
    """
    # Pop so ``iterations`` is not forwarded to ``func``; defaulting via
    # pop() replaces the old try/except KeyError dance.
    iterations = kwargs.pop('iterations', 1)

    # Fixed: the original pre-seeded ``elapsed = sys.maxsize`` which was
    # dead code (immediately overwritten below).
    start = timer()
    for _ in range(iterations):
        result = func(*args, **kwargs)
    elapsed = (timer() - start) / iterations

    print('{}() : {:.9f}'.format(func.__name__, elapsed))
    return result
#res=timefunc( searchKMP, S, P, iterations=rep)
#if(res!=res0): print("Wrong")
def getZ_naive(S):
    """Compute the Z-array of ``S`` by direct comparison (O(n^2)).

    Z[i] is the length of the longest common prefix of S and S[i:];
    Z[0] is left as 0 by convention.
    """
    n = len(S)
    z = [0] * n
    for start in range(1, n):
        match = 0
        while start + match < n and S[start + match] == S[match]:
            match += 1
        z[start] = match
    return z
#int L = 0, R = 0;
#for (int i = 1; i < n; i++) {
# if (i > R) {
# L = R = i;
# while (R < n && s[R-L] == s[R]) R++;
# z[i] = R-L; R--;
# } else {
# int k = i-L;
# if (z[k] < R-i+1) z[i] = z[k];
# else {
# L = i;
# while (R < n && s[R-L] == s[R]) R++;
# z[i] = R-L; R--;
# }
# }
#}
def getZ_0(S):
    """Z-array via the classic [left, right] Z-box algorithm (O(n))."""
    n = len(S)
    z = [0] * n
    left = right = 0
    for idx in range(1, n):
        if idx > right:
            # Outside the current Z-box: match from scratch.
            left = right = idx
            while right < n and S[right - left] == S[right]:
                right += 1
            z[idx] = right - left
            right -= 1
        else:
            mirror = idx - left
            if z[mirror] < right - idx + 1:
                # Strictly inside the box: copy the mirrored value.
                z[idx] = z[mirror]
            else:
                # Touches the right edge: extend the match past it.
                left = idx
                while right < n and S[right - left] == S[right]:
                    right += 1
                z[idx] = right - left
                right -= 1
    return z
#from rookie rank 4, dna
#not optimal....
def getZ_1(S):
    """Z-array variant that also walks index 0, then zeroes Z[0].

    Mirrors the "rookie rank 4, dna" version: because ``left`` only moves
    when the match extends past ``right``, this can degrade toward the
    naive scan (hence "not optimal").  Note it raises IndexError for the
    empty string due to the final ``z[0] = 0`` write, matching the
    original behavior.
    """
    n = len(S)
    z = [0] * n
    left, right = -1, -1
    for pos in range(n):
        if pos < right:
            z[pos] = min(right - pos, z[pos - left])
        # Extend the match naively from the (possibly copied) value.
        while pos + z[pos] < n and S[z[pos]] == S[pos + z[pos]]:
            z[pos] += 1
        if pos + z[pos] > right:
            left, right = pos, pos + z[pos]
    # Index 0 matched the whole string; reset per Z-array convention.
    z[0] = 0
    return z
#void z_func(string s){
# int n = s.length();
# int z[n];
# z[0] = 0;
#
# for (int i = 1, l = 0, r = 1; i < n; i++, r = i < r ? r : i)
# for (z[i] = min(r - i, z[i - l]); i + z[i]<n && s[i + z[i]] == s[z[i]]; z[i]++, r = i + z[i], l = i);
#
# for (int i = 0; i < n; i++)
# cout << z[i] << " ";
#}
def getZ_2(S):
    """Z-array with the window updated inside the extension loop (O(n))."""
    n = len(S)
    z = [0] * n
    left, right = 0, 1
    for pos in range(1, n):
        # right >= pos holds at loop entry, so this min() is >= 0.
        z[pos] = min(right - pos, z[pos - left])
        while pos + z[pos] < n and S[pos + z[pos]] == S[z[pos]]:
            z[pos] += 1
            right = pos + z[pos]
            left = pos
        # Keep the window's right edge at least one past the cursor.
        if pos + 1 >= right:
            right = pos + 1
    return z
if __name__ == "__main__":
alpha="AB"
#alpha=string.ascii_lowercase
S = randstr(30,alphabet=alpha)
#S=['A']*10000
rep=1
print(S)
res0=timefunc( getZ_naive, S, iterations=rep)
print(res0)
res=timefunc( getZ_0, S, iterations=rep)
print(res0==res)
res=timefunc( getZ_1, S, iterations=rep)
print(res0==res)
res=timefunc( getZ_2, S, iterations=rep)
print(res0==res)
| 20.385621 | 110 | 0.436358 |
import sys, string
from random import *
from timeit import default_timer as timer
def randstr(N,alphabet=string.ascii_lowercase):
l=len(alphabet)
return "".join( alphabet[randint(0,l-1)] for _ in range(N))
def timefunc(func, *args, **kwargs):
try:
iterations = kwargs.pop('iterations')
except KeyError:
iterations = 1
elapsed = sys.maxsize
start = timer()
for _ in range(iterations):
result = func(*args, **kwargs)
elapsed = (timer() - start)/iterations
print(('{}() : {:.9f}'.format(func.__name__, elapsed)))
return result
def getZ_naive(S):
N=len(S)
Z=[0]*N
for i in range(1,N):
k=0
while( i+k<N and S[i+k]==S[k]):
k+=1
Z[i]=k
return Z
def getZ_0(S):
N=len(S)
Z=[0]*N
L,R=0,0
for i in range(1,N):
if i>R:
L=R=i
while R<N and S[R-L]==S[R]:
R+=1
Z[i]=R-L
R-=1
else:
k=i-L
if Z[k]<R-i+1:
Z[i]=Z[k]
else:
L=i
while R<N and S[R-L]==S[R]:
R+=1
Z[i]=R-L
R-=1
return Z
def getZ_1(S):
N=len(S)
Z=[0]*N
L,R=-1,-1
for i in range(N):
if i<R:
Z[i]=min(R-i, Z[i-L])
while i+Z[i]<N and S[Z[i]]==S[i+Z[i]]:
Z[i]+=1
if i+Z[i]>R:
L=i
R=i+Z[i]
Z[0]=0
return Z
def getZ_2(S):
N=len(S)
Z=[0]*N
i=1
L=0
R=1
while i<N:
Z[i]=min(R-i,Z[i-L])
while i+Z[i]<N and S[i+Z[i]]==S[Z[i]]:
Z[i]+=1
R=i+Z[i]
L=i
i+=1
if i>=R:
R=i
return Z
if __name__ == "__main__":
alpha="AB"
S = randstr(30,alphabet=alpha)
rep=1
print(S)
res0=timefunc( getZ_naive, S, iterations=rep)
print(res0)
res=timefunc( getZ_0, S, iterations=rep)
print(res0==res)
res=timefunc( getZ_1, S, iterations=rep)
print(res0==res)
res=timefunc( getZ_2, S, iterations=rep)
print(res0==res)
| true | true |
f735caac43d63be3a59c8ea26a0d53fa7897631d | 131,132 | py | Python | pyNastran/op2/tables/geom/ept.py | fmamitrotta/pyNastran | 6f9592cf3a2ccb8c509918acb735282d6eef75aa | [
"BSD-3-Clause"
] | 293 | 2015-03-22T20:22:01.000Z | 2022-03-14T20:28:24.000Z | pyNastran/op2/tables/geom/ept.py | sean-engelstad/pyNastran | 90f957887a4f68f8e58b07c15e1ac69c66b9c6f4 | [
"BSD-3-Clause"
] | 512 | 2015-03-14T18:39:27.000Z | 2022-03-31T16:15:43.000Z | pyNastran/op2/tables/geom/ept.py | sean-engelstad/pyNastran | 90f957887a4f68f8e58b07c15e1ac69c66b9c6f4 | [
"BSD-3-Clause"
] | 136 | 2015-03-19T03:26:06.000Z | 2022-03-25T22:14:54.000Z | """
defines readers for BDF objects in the OP2 EPT/EPTS table
"""
#pylint: disable=C0103,R0914
from __future__ import annotations
from struct import unpack, Struct
from functools import partial
from typing import Tuple, List, TYPE_CHECKING
import numpy as np
#from pyNastran import is_release
from pyNastran.bdf.cards.properties.mass import PMASS, NSM, NSML
from pyNastran.bdf.cards.properties.bars import PBAR, PBARL, PBEND, PBEAM3
from pyNastran.bdf.cards.properties.beam import PBEAM, PBEAML, PBCOMP
from pyNastran.bdf.cards.properties.bush import PBUSH, PBUSHT
from pyNastran.bdf.cards.properties.damper import PDAMP, PVISC
from pyNastran.bdf.cards.properties.properties import PFAST, PGAP
from pyNastran.bdf.cards.properties.rods import PROD, PTUBE
from pyNastran.bdf.cards.properties.shell import PSHEAR, PSHELL, PCOMP
from pyNastran.bdf.cards.properties.solid import PSOLID
from pyNastran.bdf.cards.properties.springs import PELAS, PELAST
from pyNastran.bdf.cards.thermal.thermal import PCONV, PHBDY, PCONVM
# PCOMPG, PBUSH1D, PBEAML, PBEAM3
from pyNastran.op2.op2_interface.op2_reader import (
mapfmt, reshape_bytes_block_size) # reshape_bytes_block,
from .utils import get_minus1_start_end
from .geom2 import DoubleCardError
if TYPE_CHECKING: # pragma: no cover
from pyNastran.op2.op2_geom import OP2Geom
class EPT:
"""defines methods for reading op2 properties"""
    @property
    def size(self) -> int:
        """Word size in bytes of the OP2 being read (pass-through)."""
        return self.op2.size
    @property
    def factor(self) -> int:
        """Word-size scale factor of the OP2 (pass-through to op2.factor)."""
        return self.op2.factor
    def _read_fake(self, data: bytes, n: int) -> int:
        """Delegate to the op2 stub that skips an unsupported record."""
        return self.op2._read_fake(data, n)
    def read_ept_4(self, data: bytes, ndata: int) -> int:
        """Dispatch one EPT/EPTS record to its handler via ``ept_map``."""
        return self.op2._read_geom_4(self.ept_map, data, ndata)
    def __init__(self, op2: OP2Geom):
        """Build the (marker-tuple -> [card name, handler]) dispatch map."""
        self.op2 = op2
        self.ept_map = {
            (3201, 32, 55): ['NSM', self._read_nsm],  # record 2
            (52, 20, 181): ['PBAR', self._read_pbar],  # record 11 - buggy
            (9102, 91, 52): ['PBARL', self._read_pbarl],  # record 12 - almost there...
            (2706, 27, 287): ['PCOMP', self._read_pcomp],  # record 22 - buggy
            (302, 3, 46): ['PELAS', self._read_pelas],  # record 39
            (2102, 21, 121): ['PGAP', self._read_pgap],  # record 42
            (902, 9, 29): ['PROD', self._read_prod],  # record 49
            (1002, 10, 42): ['PSHEAR', self._read_pshear],  # record 50
            (2402, 24, 281): ['PSOLID', self._read_psolid],  # record 51
            (2302, 23, 283): ['PSHELL', self._read_pshell],  # record 52
            (1602, 16, 30): ['PTUBE', self._read_ptube],  # record 56

            (5402, 54, 262): ['PBEAM', self._read_pbeam],  # record 14 - not done
            (9202, 92, 53): ['PBEAML', self._read_pbeaml],  # record 15
            (2502, 25, 248): ['PBEND', self._read_pbend],  # record 16 - not done
            (1402, 14, 37): ['PBUSH', self._read_pbush],  # record 19 - not done
            (3101, 31, 219): ['PBUSH1D', self._read_pbush1d],  # record 20 - not done
            (152, 19, 147): ['PCONEAX', self._read_pconeax],  # record 24 - not done
            (11001, 110, 411): ['PCONV', self._read_pconv],  # record 25 - not done
            # record 26
            (202, 2, 45): ['PDAMP', self._read_pdamp],  # record 27 - not done
            (2802, 28, 236): ['PHBDY', self._read_phbdy],  # record 43 - not done
            (402, 4, 44): ['PMASS', self._read_pmass],  # record 48
            (1802, 18, 31): ['PVISC', self._read_pvisc],  # record 59
            (10201, 102, 400): ['PVAL', self._read_pval],  # record 58 - not done
            (2606, 26, 289): ['VIEW', self._read_view],  # record 62 - not done

            # NX-specific NSM records
            (3201, 32, 991) : ['NSM', self._read_nsm_2],  # record
            (3301, 33, 992) : ['NSM1', self._read_nsm1],  # record
            (3701, 37, 995) : ['NSML1', self._read_nsml1_nx],  # record
            (3601, 36, 62): ['NSML1', self._read_nsml1_msc],  # record 7
            (15006, 150, 604): ['PCOMPG', self._read_pcompg],  # record

            (702, 7, 38): ['PBUSHT', self._read_pbusht],  # record 1
            (3301, 33, 56): ['NSM1', self._read_fake],  # record 3
            (3401, 34, 57) : ['NSMADD', self._read_fake],  # record 5
            (3501, 35, 58): ['NSML', self._read_fake],  # record 6
            (3501, 35, 994) : ['NSML', self._read_nsml],
            (1502, 15, 36): ['PAABSF', self._read_fake],  # record 8
            (8300, 83, 382): ['PACABS', self._read_fake],  # record 9
            (8500, 85, 384): ['PACBAR', self._read_fake],  # record 10
            (5403, 55, 349): ['PBCOMP', self._read_pbcomp],  # record 13
            (13301, 133, 509): ['PBMSECT', self._read_fake],  # record 17
            (2902, 29, 420): ['PCONVM', self._read_pconvm],  # record 26
            (1202, 12, 33): ['PDAMPT', self._read_pdampt],  # record 28
            (8702, 87, 412): ['PDAMP5', self._read_pdamp5],  # record 29
            (6802, 68, 164): ['PDUM8', self._read_fake],  # record 37
            (6902, 69, 165): ['PDUM9', self._read_fake],  # record 38
            (1302, 13, 34): ['PELAST', self._read_pelast],  # record 41
            (12001, 120, 480): ['PINTC', self._read_fake],  # record 44
            (12101, 121, 484): ['PINTS', self._read_fake],  # record 45
            (4606, 46, 375): ['PLPLANE', self._read_plplane],  # record 46
            (4706, 47, 376): ['PLSOLID', self._read_plsolid],  # record 47
            (10301, 103, 399): ['PSET', self._read_pset],  # record 57
            (3002, 30, 415): ['VIEW3D', self._read_fake],  # record 63

            (13501, 135, 510) : ['PFAST', self._read_pfast_msc],  # MSC-specific
            (3601, 36, 55) : ['PFAST', self._read_pfast_nx],  # NX-specific
            (3801, 38, 979) : ['PPLANE', self._read_pplane],
            (11801, 118, 560) : ['PWELD', self._read_fake],
            (3401, 34, 993) : ['NSMADD', self._read_nsmadd],
            (9300, 93, 684) : ['ELAR', self._read_fake],
            (9400, 94, 685) : ['ELAR2', self._read_fake],
            (16006, 160, 903) : ['PCOMPS', self._read_fake],

            # MSC-specific
            (14602, 146, 692): ['PSLDN1', self._read_fake],
            # NOTE(review): (16502, 165, 916) appears twice in this dict
            # literal; the later '???' entry silently wins over 'PAXSYMH'.
            (16502, 165, 916): ['PAXSYMH', self._read_fake],
            (13201, 132, 513): ['PBRSECT', self._read_fake],
            (13701, 137, 638): ['PWSEAM', self._read_fake],
            (7001, 70, 632): ['???', self._read_fake],
            (15106, 151, 953): ['PCOMPG1', self._read_fake],
            (3901, 39, 969): ['PSHL3D', self._read_fake],
            (17006, 170, 901): ['MATCID', self._read_fake],
            (9601, 96, 691): ['PJOINT', self._read_fake],
            (16502, 165, 916): ['???', self._read_fake],
            (9701, 97, 692): ['PJOINT2', self._read_fake],
            (13401, 134, 611): ['PBEAM3', self._read_pbeam3],
            (8901, 89, 905): ['PSOLCZ', self._read_fake],
            (9801, 98, 698): ['DESC', self._read_desc],
            #(9701, 97, 692): ['???', self._read_fake],
            #(9701, 97, 692): ['???', self._read_fake],
            #(9701, 97, 692): ['???', self._read_fake],
        }
    def _add_op2_property(self, prop):
        """Add a parsed property card to the model.

        Overwrites are only allowed when EPT/EPTS appears more than once
        and the existing property has the same pid and card type.
        """
        op2 = self.op2
        #if prop.pid > 100000000:
            #raise RuntimeError('bad parsing; pid > 100000000...%s' % str(prop))
        #print(str(prop)[:-1])
        ntables = op2.table_names.count(b'EPT') + op2.table_names.count(b'EPTS')
        pid = prop.pid
        allow_overwrites = (
            ntables > 1 and
            pid in op2.properties and
            op2.properties[pid].type == prop.type)
        op2._add_methods._add_property_object(prop, allow_overwrites=allow_overwrites)
    def _add_op2_property_mass(self, prop):
        """Add a parsed mass property (e.g. PMASS) to the model.

        Same overwrite rule as ``_add_op2_property`` but against the
        ``properties_mass`` dictionary.
        """
        op2 = self.op2
        #if prop.pid > 100000000:
            #raise RuntimeError('bad parsing; pid > 100000000...%s' % str(prop))
        #print(str(prop)[:-1])
        ntables = op2.table_names.count(b'EPT') + op2.table_names.count(b'EPTS')
        pid = prop.pid
        allow_overwrites = (
            ntables > 1 and
            pid in op2.properties_mass and
            op2.properties_mass[pid].type == prop.type)
        op2._add_methods._add_property_mass_object(prop, allow_overwrites=allow_overwrites)
    def _add_pconv(self, prop: PCONV) -> None:
        """Store a PCONV, rejecting ids that indicate a parse failure."""
        if prop.pconid > 100000000:
            raise RuntimeError('bad parsing pconid > 100000000...%s' % str(prop))
        self.op2._add_methods._add_convection_property_object(prop)
# HGSUPPR
def _read_desc(self, data: bytes, n: int) -> int:
"""
RECORD – DESC(9801,98,698)
Word Name Type Description
1 DID I Description identification number
2 NWORDS I Number of words for the description string
3 DESC CHAR4 Description
Words 3 repeats NWORDS times
data = (1, 14, 'FACE CONTACT(1) ')
"""
op2 = self.op2
assert self.size == 4, 'DESC size={self.size} is not supported'
#op2.show_data(data[n:], types='ifs')
struct_2i = Struct(op2._endian + b'2i')
while n < len(data):
datai = data[n:n+8]
desc_id, nwords = struct_2i.unpack(datai)
#print(desc_id, nwords)
ndatai = 8 + nwords * 4
word_bytes = data[n+8:n+ndatai]
word = word_bytes.decode('ascii').rstrip()
assert len(word_bytes) == nwords * 4
#print('word_bytes =', word_bytes)
op2.log.warning(f'geom skipping DESC={desc_id}: {word!r}')
n += ndatai
assert n == len(data), n
return n
    def _read_nsml(self, data: bytes, n: int) -> int:
        """
        NX 2019.2
        RECORD - NSML(3501, 35, 994)

        Defines a set of lumped nonstructural mass by ID.

        Word Name    Type  Description
        1    SID     I     Set identification number
        2    PROP(2) CHAR4 Set of properties or elements
        4    ID      I     Property of element identification number
        5    VALUE   RS    Lumped nonstructural mass value
        Words 4 and 5 repeat until -1 occurs

        ints   = (3, ELEMENT, 0,   200, 0.7, -1, 4, PSHELL, 0,   6401, 4.2, -1)
        floats = (3, ELEMENT, 0.0, 200, 0.7, -1, 4, PSHELL, 0.0, 6401, 4.2, -1)
        """
        op2 = self.op2
        n0 = n
        #op2.show_data(data[n:])
        # View the record both as ints and floats; fields are picked from
        # whichever interpretation the word actually is.
        ints = np.frombuffer(data[n:], op2.idtype8).copy()
        floats = np.frombuffer(data[n:], op2.fdtype8).copy()
        istart, iend = get_minus1_start_end(ints)

        ncards = 0
        size = self.size
        for (i0, i1) in zip(istart, iend):
            #data = (4, ELEMENT, 2.1, 1, 3301, -1, -2)
            assert ints[i1] == -1, ints[i1]
            sid = ints[i0]
            # PROP occupies 2 character words after SID.
            prop_bytes = data[n0+(i0+1)*size:n0+(i0+3)*size]
            #print(sid, prop_bytes)
            # Alternating (ID, VALUE) pairs until the -1 terminator.
            ids = ints[i0+4:i1:2].tolist()
            values = floats[i0+5:i1:2].tolist()
            #print(ids, values)
            assert len(ids) == len(values)
            nsm_type = prop_bytes.decode('latin1').rstrip()
            nsml = op2.add_nsml(sid, nsm_type, ids, values)
            #print(nsml)
            str(nsml)
            n += (i1 - i0 + 1) * size
            ncards += 1
        op2.card_count['NSML'] = ncards
        return n
    def _read_nsmadd(self, data: bytes, n: int) -> int:
        """
        NX 2019.2
        RECORD - NSMADD(3401,34,993)

        Combines the nonstructural mass inputs.

        Word Name Type Description
        1    SID  I    Set identification number
        2    ID   I    Set of properties or elements
        Word 2 repeats until End of Record

        (1, 2, 3, 4, -1)
        """
        op2 = self.op2
        ints = np.frombuffer(data[n:], op2.idtype8).copy()
        istart, iend = get_minus1_start_end(ints)
        ncards = 0
        # NOTE(review): istart from the helper is discarded and rebuilt
        # from iend; the trailing extra start index is dropped by zip().
        istart = [0] + list(iend + 1)
        size = self.size
        for (i0, i1) in zip(istart, iend):
            assert ints[i1] == -1, ints[i1]
            # First word is SID; the rest (up to the -1) are the NSM ids.
            sid, *nsms = ints[i0:i1]
            nsmadd = op2.add_nsmadd(sid, nsms)
            #print(nsmadd)
            str(nsmadd)
            n += (i1 - i0 + 1) * size
            ncards += 1
        op2.card_count['NSMADD'] = ncards
        return n
    def _read_nsml1_nx(self, data: bytes, n: int) -> int:
        """
        NSML1(3701, 37, 995)

        Alternate form of NSML entry. Defines lumped nonstructural mass
        entries by VALUE, ID list.

        Word Name Type Description
        1 SID   I     Set identification number
        2 PROP  CHAR4 Set of properties
        3 TYPE  CHAR4 Set of elements
        4 VALUE RS    Lumped nonstructural mass value
        5 SPECOPT I   Specification option
        SPECOPT=1 By IDs
          6 ID I Property of element identification number
          Word 6 repeats until -1 occurs
        SPECOPT=2 All
          6 ALL(2) CHAR4 Keyword ALL
          Words 6 and 7 repeat until -1 occurs
        SPECOPT=3 Thru range
          6 ID1 I; 7 THRU(2) CHAR4; 9 ID2 I
          Words 6 through 9 repeat until -1 occurs
        SPECOPT=4 Thru range with by
          6 ID1 I; 7 THRU(2) CHAR4; 9 ID2 I; 10 BY(2) CHAR4; 12 N I
          Words 6 through 12 repeat until -1 occurs

        data = (
            3701, 37, 995,
            1, ELEMENT, 466.2,
            3, 249311, THRU, 250189, -1,
            3, 250656, THRU, 251905, -1,
            -2,
            2, ELEMENT, 77.7,
            3, 225740, THRU, 227065, -1,
            3, 338071, THRU, 341134, -1, -2)
        """
        op2 = self.op2
        n0 = n
        #op2.show_data(data[n:])
        ints = np.frombuffer(data[n:], op2.idtype8).copy()
        floats = np.frombuffer(data[n:], op2.fdtype8).copy()
        # Cards are terminated by -2; id lists within a card end with -1.
        iminus2 = np.where(ints == -2)[0]
        istart = [0] + list(iminus2[:-1] + 1)
        iend = iminus2
        #print(istart, iend)
        assert len(data[n:]) > 12, data[n:]
        #op2.show_data(data[n:], types='ifs')
        ncards = 0
        istart = [0] + list(iend + 1)
        size = self.size
        for (i0, i1) in zip(istart, iend):
            #data = (4, ELEMENT, 2.1, 1, 3301, -1, -2)
            assert ints[i1] == -2, ints[i1]
            sid = ints[i0]
            nsm_type = data[n0+(i0+1)*size:n0+(i0+2)*size].decode('latin1').rstrip()
            value = float(floats[i0+3])
            #print(f'sid={sid} nsm_type={nsm_type} value={value}')
            iminus1 = i0 + np.where(ints[i0:i1] == -1)[0]
            istart2 = [i0 + 4] + list(iminus1[:-1] + 1)
            iend2 = iminus1
            #print(istart2, iend2)
            for istarti, iendi in zip(istart2, iend2):
                spec_opt = ints[istarti]
                #print(f'ints[{istarti}] = spec_opt = {spec_opt}')
                if spec_opt == 1:
                    # 6 ID I Property of element identification number
                    ivalues = list(range(istarti, iendi))
                    pid_eids = ints[ivalues].tolist()
                    #print('pid_eids =', pid_eids)
                elif spec_opt == 3:
                    # datai = (3, 249311, 'THRU ', 250189)
                    # expand the inclusive THRU range; ids[1]=ID1,
                    # ids[-1]=ID2 (the 2 THRU words in between are ignored)
                    ids = ints[istarti:iendi]
                    istart = ids[1]
                    iend = ids[-1]
                    pid_eids = list(range(istart, iend+1))
                else:
                    # SPECOPT=2 (ALL) and SPECOPT=4 (THRU/BY) not handled
                    raise NotImplementedError(spec_opt)

                if nsm_type == 'ELEM':
                    nsm_type = 'ELEMENT'
                #for pid_eid in pid_eids:
                    #nsml = op2.add_nsml1(sid, nsm_type, pid_eids, [value])
                assert len(pid_eids) > 0, pid_eids
                nsml1 = op2.add_nsml1(sid, nsm_type, value, pid_eids)
                #print(nsml1)
                str(nsml1)
            n += (i1 - i0 + 1) * size
            ncards += 1
        # NOTE(review): counts NSML1 cards under the 'NSML' key — confirm.
        op2.card_count['NSML'] = ncards
        return n
    def _read_nsml1_msc(self, data: bytes, n: int) -> int:
        r"""
        NSML1(3601, 36, 62) - MSC form; currently logged and skipped.

        Word Name Type Description
        1 SID   I     Set identification number
        2 PROP  CHAR4 Set of property or elements
        3 VALUE RS    Lumped nonstructural mass value
        4 SPECOPT I   Specification option
        SPECOPT=1 By IDs (=FLG1LIST in ixidlst.prm)
          5 IDs; 6 ID I Property or element ID
          Word 6 repeats until End of Record
        SPECOPT=2 ALL (=FLG1ALL)
          5 ALL(2) CHAR4 Keyword ALL
          Words 5 through 6 repeat until End of Record
        SPECOPT=3 THRU range (=FLG1THRU)
          5 ID1 I; 6 THRU(2) CHAR4; 8 ID2 I
          Words 5 through 8 repeat until End of Record
        SPECOPT=4 THRU range with BY (=FLG1THBY)
          5 ID1 I; 6 THRU(2) CHAR4; 8 ID2 I; 9 BY(2) CHAR4; 11 N I
          Words 5 through 11 repeat until End of Record
        End SPECOPT
        Words 4 through max repeat until End of Record

        C:\MSC.Software\simcenter_nastran_2019.2\tpl_post2\elsum15.op2
        data = (4, ELEMENT, 2.1, 1, 3301, -1, -2)
        """
        op2 = self.op2
        # Parsing of the MSC NSML1 layout is not implemented; skip record.
        op2.log.info(f'geom skipping NSML1 in {op2.table_name}; ndata={len(data)-12}')
        #op2.show_data(data[n:], types='ifs')
        #bbb
        return len(data)
    def _read_nsm1(self, data: bytes, n: int) -> int:
        """
        NSM1(3301, 33, 992)

        Defines the properties of a nonstructural mass.

        Word Name Type Description
        1 SID    I     Set identification number
        2 PROP   CHAR4 Set of properties
        3 TYPE   CHAR4 Set of elements
        4 ORIGIN I     Entry origin
        5 VALUE  RS    Nonstructural mass value
        6 SPECOPT I    Specification option
        SPECOPT=1 By IDs
          7 ID I; Word 7 repeats until -1 occurs
        SPECOPT=2 All
          7 ALL(2) CHAR4; Words 7 and 8 repeat until -1 occurs
        SPECOPT=3 Thru range
          7 ID I; 8 THRU(2) CHAR4; 10 ID I
          Words 7 through 10 repeat until -1 occurs
        SPECOPT=4 Thru range with by
          7 ID I; 8 THRU(2) CHAR4; 10 ID I; 11 BY(2) CHAR4; 13 N I
          Words 7 through 13 repeat until -1 occurs

        data = (3, PCOMP, 0, 0.37, 2, ALL, -1,
                4, ELEMENT, 2, 2.1, 1, 3301, -1)
        """
        op2 = self.op2
        #op2.show_data(data[n:], types='ifs')
        n0 = n
        #op2.show_data(data[n:])
        ints = np.frombuffer(data[n:], op2.idtype8).copy()
        floats = np.frombuffer(data[n:], op2.fdtype8).copy()
        istart, iend = get_minus1_start_end(ints)
        ncards = 0
        size = self.size
        for (i0, i1) in zip(istart, iend):
            assert ints[i1] == -1, ints[i1]
            # 1 SID I Set identification number
            sid = ints[i0]
            # 2 PROP CHAR4 Set of properties
            # 3 TYPE CHAR4 Set of elements
            # 4 ORIGIN I Entry origin
            # 5 VALUE RS Nonstructural mass value
            # 6 SPECOPT I Specification option
            nsm_type = data[n0+(i0+1)*size:n0+(i0+3)*size].decode('latin1').rstrip()
            zero_two = ints[i0+3]
            value = float(floats[i0+4])
            spec_opt = ints[i0+5]
            # ORIGIN has only been observed as 0 or 2.
            assert zero_two in [0, 2], zero_two
            #nii = 6
            #print(ints[i0+nii:i1])
            #print(floats[i0+nii:i1])
            iminus1 = i0 + np.where(ints[i0:i1] == -1)[0]
            istart2 = [i0 + 5] + list(iminus1[:-1] + 1)
            iend2 = iminus1
            #print(istart2, iend2)
            if spec_opt == 1:
                # 7 ID I
                ids = ints[i0+6:i1]
            elif spec_opt == 2:
                # 'ALL' keyword — kept as the raw bytes.
                word = data[n0+(i0+6)*size:n0+i1*size]
                ids = word
            elif spec_opt == 3:  # thru
                # datai = (249311, 'THRU ', 250189) -> inclusive range
                #datai = data[n0+(i0+6)*size:n0+i1*size]
                ids = ints[i0+6:i1]
                istart = ids[0]
                iend = ids[-1]
                ids = list(range(istart, iend+1))
            else:
                # SPECOPT=4 (THRU/BY) not handled
                raise NotImplementedError(spec_opt)
            #print(sid, nsm_type, zero_two, value, ids)
            #if nsm_type == 'ELEM':
                #nsm_type = 'ELEMENT'
            #for pid_eid in pid_eids:
                #nsml = self.add_nsml1(sid, nsm_type, pid_eids, [value])
            nsm1 = op2.add_nsm1(sid, nsm_type, value, ids)
            #print(nsm1)
            str(nsm1)
            n += (i1 - i0 + 1) * size
            ncards += 1
        op2.card_count['NSM1'] = ncards
        return n
def _read_nsm(self, data: bytes, n: int) -> int:
    """Reads the NSM card by trying the NX reader first and the MSC reader second."""
    op2 = self.op2
    return op2.reader_geom2._read_dual_card(
        data, n,
        self._read_nsm_nx, self._read_nsm_msc,
        'NSM', op2._add_methods._add_nsm_object)
def _read_nsm_2(self, data: bytes, n: int) -> int:
    """
    Reads the NSM card in the NX 2019.2 format.

    NX 2019.2
    NSM(3201, 32, 991)
    RECORD - NSM(3201,32,991)

    Defines the properties of a nonstructural mass.

    Word Name Type Description
    1 SID I Set identification number
    2 PROP CHAR4 Set of properties
    3 TYPE CHAR4 Set of elements <---- not right...it's an integer and not used...
    4 ID I Property or element identification number
    5 VALUE RS Nonstructural mass value
    Words 5 through 6 repeat until End of Record

    NSM,2,conrod,1007,0.3

    data = (2, CONROD, 0, 1007, 0.3, -1,
            2, ELEMENT, 0, 200, 0.20, -1,
            3, PSHELL, 0, 3301, 0.20, -1,
            3, ELEMENT, 2, 200, 1.0, -1,
            4, PSHELL, 2, 6401, 4.2, -1)
    """
    op2 = self.op2
    n0 = n  # byte offset of the start of the record; needed to slice CHAR fields
    # view the remaining buffer as both ints and floats; each card is a
    # mixed int/char/float record terminated by a -1 sentinel word
    ints = np.frombuffer(data[n:], op2.idtype8).copy()
    floats = np.frombuffer(data[n:], op2.fdtype8).copy()
    istart, iend = get_minus1_start_end(ints)

    ncards = 0
    size = self.size  # word size in bytes (4 or 8)
    for (i0, i1) in zip(istart, iend):
        #data = (4, ELEMENT, 2.1, 1, 3301, -1, -2)
        assert ints[i1] == -1, ints[i1]
        sid = ints[i0]
        # words 2-3: property/element type as two CHAR4 (or CHAR8) words
        prop_type = data[n0+(i0+1)*size:n0+(i0+3)*size]
        elem_type = data[n0+(i0+3)*size:n0+(i0+4)*size]
        nsm_type = prop_type.decode('latin1').rstrip()
        # word 4: labeled TYPE/CHAR4 in the DMAP, but it is actually an
        # integer flag (only 0 and 2 observed); its meaning is not used here
        dunno_int = ints[i0+3]
        #print(ints[i0+4:i1])
        #print(floats[i0+4:i1])
        # words 5+: alternating (ID, VALUE) pairs up to the -1 sentinel
        ids = ints[i0+4:i1:2].tolist()
        values = floats[i0+5:i1:2].tolist()
        assert len(ids) == len(values)
        assert dunno_int in [0, 2], (sid, prop_type, (ints[i0+3], floats[i0+4]), ids, values)
        #print(sid, prop_type, (ints[i0+3], floats[i0+4]), ids, values)
        nsm = op2.add_nsm(sid, nsm_type, ids, values)
        #print(nsm[0])
        str(nsm)  # verify the card can be written
        n += (i1 - i0 + 1) * size  # advance past the card and its -1 terminator
        ncards += 1
    op2.card_count['NSM'] = ncards
    return n
def _read_nsm_msc(self, data: bytes, n: int) -> int:
    """
    Reads the MSC form of the NSM card; returns ``(n, properties)`` for
    use by ``_read_dual_card``.

    NSM(3201,32,55) - the marker for Record 2

    MSC
    1 SID I Set identification number
    2 PROP CHAR4 Set of property or elements
    3 ID I Property or element identification number
    4 VALUE RS Nonstructural mass value
    ORIGIN=0 NSM Bulk Data entry
      5 ID I Property or element ID
      6 VALUE RS Nonstructural mass value
      Words 5 through 6 repeat until End of Record
    ORIGIN=2 NSML Bulk Data entry
      5 ID I Property or element ID
      6 VALUE RS Nonstructural mass value
      Words 5 through 6 repeat until End of Record
    Words 3 through 4 repeat until End of Record
    """
    op2 = self.op2
    properties = []
    struct1 = Struct(op2._endian + b'i 4s if')  # sid, prop_set, pid, value
    ndelta = 16  # 4 words * 4 bytes

    # i is the word index into ints/floats, tracked in lockstep with the
    # byte offset n
    i = 0
    ints = np.frombuffer(data[n:], op2.idtype).copy()
    floats = np.frombuffer(data[n:], op2.fdtype).copy()

    while n < len(data):
        edata = data[n:n+ndelta]
        out = struct1.unpack(edata)
        (sid, prop_set, pid, value) = out
        # 538976312 is b'    ' reinterpreted as an int (a blank field)
        assert pid < 100000000
        i += 4
        n += ndelta

        prop_set = prop_set.decode('utf8').rstrip(' ')  # \x00
        values = [value]
        #print('ints[i:]=', ints[i:])
        # accumulate trailing values until the -1 end-of-record marker
        while ints[i] != -1:
            value2 = floats[i]
            values.append(value2)
            n += 4
            i += 1

        op2.log.info("MSC: NSM-sid=%s prop_set=%s pid=%s values=%s" % (
            sid, prop_set, pid, values))
        # NOTE(review): only the first `value` is passed on here; the extra
        # `values` collected above are logged but dropped — confirm this is
        # intentional
        prop = NSM.add_op2_data([sid, prop_set, pid, value])
        #op2._add_methods._add_nsm_object(prop)
        properties.append(prop)

        # handle the trailing -1
        i += 1
        n += 4
    return n, properties
def _read_nsm_nx(self, data: bytes, n: int) -> int:
    """
    Reads the NX form of the NSM/NSML card; returns ``(n, properties)``
    for use by ``_read_dual_card``.

    NSM(3201,32,55) - the marker for Record 2

    1 SID I Set identification number
    2 PROP(2) CHAR4 Set of properties or elements
    4 ORIGIN I Entry origin
    5 ID I Property or element identification number
    6 VALUE RS Nonstructural mass value
    Words 5 through 6 repeat until End of Record
    """
    op2 = self.op2
    properties = []

    #NX: C:\Users\sdoyle\Dropbox\move_tpl\nsmlcr2s.op2
    struct1 = Struct(op2._endian + b'i 8s ii f')  # sid, prop_set, origin, pid, value
    ndelta = 24  # 6 words * 4 bytes

    #op2.show_data(data[12:], 'ifs')
    # i is the word index into ints/floats, tracked in lockstep with the
    # byte offset n
    i = 0
    ints = np.frombuffer(data[n:], op2.idtype).copy()
    floats = np.frombuffer(data[n:], op2.fdtype).copy()

    unused_packs = break_by_minus1(ints)
    #for pack in packs:
        #print(pack)

    #ipack = 0
    while n < len(data):
        #print('ints[i:]=', ints[i:].tolist())
        #i1, i2 = packs[ipack]
        #print('idata=%s' % idata[i1:i2])
        #print('fdata=%s' % fdata[i1:i2])
        #print(idata[i1:i2])
        edata = data[n:n+ndelta]
        out = struct1.unpack(edata)
        (sid, prop_set, origin, pid, value) = out
        # 538976312 is b'    ' reinterpreted as an int (a blank field)
        assert pid < 100000000
        i += 6
        n += ndelta

        prop_set = prop_set.decode('utf8').rstrip(' ')  # \x00
        pids = [pid]
        values = [value]
        #print('ints[i:]=', ints[i:].tolist())
        # accumulate (pid, value) pairs until the -1 end-of-record marker
        while ints[i] != -1:
            pid = ints[i]
            value2 = floats[i+1]
            assert pid != -1
            pids.append(pid)
            values.append(value2)
            n += 8
            i += 2

        for pid, value in zip(pids, values):
            # ORIGIN selects the bulk-data entry type: 0 -> NSM, 2 -> NSML
            if origin == 0:
                #op2.log.info("NX: NSM-sid=%s prop_set=%s pid=%s values=%s" % (
                    #sid, prop_set, pid, values))
                prop = NSM.add_op2_data([sid, prop_set, pid, value])
            elif origin == 2:
                #op2.log.info("NX: NSML-sid=%s prop_set=%s pid=%s values=%s" % (
                    #sid, prop_set, pid, values))
                prop = NSML.add_op2_data([sid, prop_set, pid, value])

            #print(prop.rstrip(), pid, value)
            #op2._add_methods._add_nsm_object(prop)
            properties.append(prop)
        #print('----')

        # handle the trailing -1
        i += 1
        n += 4
        #ipack += 1
    return n, properties
# NSM1
# NSML1
# NSMADD
# NSML
# NSML1
# PAABSF
# PACABS
# PACBAR
def _read_pbar(self, data: bytes, n: int) -> int:
    """
    Reads the PBAR(52,20,181) card - Record 11.

    .. warning:: this makes a funny property...

    MSC 2016/NX10
    Word Name Type Description
    1 PID I Property identification number
    2 MID I Material identification number
    3 A RS Area
    4 I1 RS Area moment of inertia in plane 1
    5 I2 RS Area moment of inertia in plane 2
    6 J RS Torsional constant
    7 NSM RS Nonstructural mass per unit length
    8 FE RS
    9-16 C1,C2,D1,D2,E1,E2,F1,F2 RS Stress recovery locations at C/D/E/F
    17 K1 RS Area factor for shear in plane 1
    18 K2 RS Area factor for shear in plane 2
    19 I12 RS Area product of inertia for plane 1 and 2
    """
    op2 = self.op2
    ntotal = 76 * self.factor  # 19 words * 4 bytes
    struct1 = Struct(mapfmt(op2._endian + b'2i17f', self.size))
    nentries = (len(data) - n) // ntotal
    # walk fixed-size records; each one maps directly onto a PBAR
    for offset in range(n, n + nentries * ntotal, ntotal):
        out = struct1.unpack(data[offset:offset + ntotal])
        self._add_op2_property(PBAR.add_op2_data(out))
    n += nentries * ntotal
    op2.card_count['PBAR'] = nentries
    return n
def _read_pbarl(self, data: bytes, n: int) -> int:
    """
    Reads the PBARL(9102,91,52) card - Record 12.

    TODO: buggy
    It's possible to have a PBARL and a PBAR at the same time.
    NSM is at the end of the element.
    """
    op2 = self.op2
    # maps the cross-section TYPE to the number of trailing floats
    # (the DIMs plus the NSM) that follow the fixed header
    valid_types = {
        'ROD': 1,
        'TUBE': 2,
        'TUBE2': 2,
        'I': 6,
        'CHAN': 4,
        'T': 4,
        'BOX': 4,
        'BAR': 2,
        'CROSS': 4,
        'H': 4,
        'T1': 4,
        'I1': 4,
        'CHAN1': 4,
        'Z': 4,
        'CHAN2': 4,
        "T2": 4,
        'BOX1': 6,
        'HEXA': 3,
        'HAT': 4,
        'HAT1': 5,
        'DBOX': 10,  # was 12
        #'MLO TUBE' : 2,
    }  # for GROUP="MSCBML0"
    size = self.size
    ntotal = 28 * self.factor  # 7*4 - ROD - shortest entry...could be buggy... # TODO fix this
    # header: pid, mid, GROUP (CHAR8/CHAR16), TYPE (CHAR8/CHAR16), first value
    if size == 4:
        struct1 = Struct(op2._endian + b'2i 8s 8s f')
    else:
        struct1 = Struct(op2._endian + b'2q 16s 16s d')

    #nentries = (len(data) - n) // ntotal
    #print(self.show_ndata(80))
    ndata = len(data)
    # entries are variable-length (header + TYPE-dependent dims + -1 flag),
    # so walk the buffer instead of iterating a fixed count
    while ndata - n > ntotal:
        edata = data[n:n+ntotal]
        n += ntotal

        out = struct1.unpack(edata)
        (pid, mid, group, beam_type, value) = out
        if pid > 100000000 or pid < 1:
            op2.log.debug("  pid=%s mid=%s group=%r beam_type=%r value=%s" % (
                pid, mid, group, beam_type, value))
            raise RuntimeError('bad parsing...')

        beam_type = reshape_bytes_block_size(beam_type, size=size)
        group = reshape_bytes_block_size(group, size=size)
        data_in = [pid, mid, group, beam_type, value]

        # read the TYPE-dependent number of floats (dims + nsm)
        expected_length = valid_types[beam_type]
        iformat = op2._endian + b'%if' % expected_length

        ndelta = expected_length * 4
        dims_nsm = list(unpack(iformat, data[n:n+ndelta]))
        data_in += dims_nsm
        #print("  pid=%s mid=%s group=%r beam_type=%r value=%s dims_nsm=%s" % (
            #pid, mid, group, beam_type, value, dims_nsm))

        # TODO why do i need the +4???
        # is that for the nsm?
        #min_len =  expected_length * 4 + 4
        #if len(data)
        #data = data[n + expected_length * 4 + 4:]
        n += ndelta

        #prin( "len(out) = ",len(out)))
        #print("PBARL = %s" % data_in)
        prop = PBARL.add_op2_data(data_in)  # last value is nsm
        pid = prop.pid
        # a PBARL overrides any previously-read PBAR with the same pid
        if pid in op2.properties:
            #op2.log.debug("removing:\n%s" % op2.properties[pid])
            op2._type_to_id_map['PBAR'].remove(pid)
            del op2.properties[pid]
        self._add_op2_property(prop)
        #op2.properties[pid] = prop
        #print(prop.get_stats())
        #print(op2.show_data(data[n-8:-100]))

        # the PBARL ends with a -1 flag
        #value, = unpack(op2._endian + b'i', data[n:n+4])
        n += 4 * self.factor
        # if every PBAR was replaced, drop the now-empty bookkeeping
        if len(op2._type_to_id_map['PBAR']) == 0 and 'PBAR' in op2.card_count:
            del op2._type_to_id_map['PBAR']
            del op2.card_count['PBAR']
        op2.increase_card_count('PBARL')
        #assert len(data) == n
    if self.size == 8:
        # trailing padding in the 64-bit format
        n += 16
        #n += 8  # same for 32/64 bit - not 100% that it's always active
    return n
def _read_pbcomp(self, data: bytes, n: int) -> int:
    """
    Reads the PBCOMP(5403, 55, 349) card.

    An entry is a 15-word fixed header (the last word is NSECT) followed
    by NSECT 5-word lumped-area sections.

            pid mid   A        I1       I2    I12   J        NSM
    PBCOMP  3   2     2.00E-4  6.67E-9  1.67E-9  0.0   4.58E-9  0.0  +
    floats = (3, 2, 0.0002, 6.67e-09, 1.67e-09, 0.0, 4.58e-09, 0.0, 1.0, 1.0,
              0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
    ints = (3, 2, 0.0002, 6.67E-9, 1.67E-9, 0, 4.58E-9, 0, 1.0, 1.0,
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
    """
    op2 = self.op2
    struct1 = Struct(mapfmt(op2._endian + b'2i 12f i', self.size))  # header
    struct2 = Struct(mapfmt(op2._endian + b'3f 2i', self.size))     # one section
    nproperties = 0
    ntotal1 = 60 * self.factor  # 15 words * 4 bytes (header)
    ntotal2 = 20 * self.factor  # 5 words * 4 bytes (per section)

    ndata = len(data)
    #print(ntotal1, ntotal2)
    if self.factor == 2:
        op2.show_data(data[12*self.factor:], types='qd')
        #print(len(data[12*self.factor:]))
    while n < ndata:
        #op2.log.debug(f"n={n} ndata={ndata}")
        edata = data[n:n+ntotal1]
        #if len(edata) == ntotal1:
        data1 = struct1.unpack(edata)
        #else:
            #op2.show_data(edata, types='qdi')
            #n += ntotal2
            #continue
        nsections = data1[-1]  # word 15: NSECT
        if op2.is_debug_file:
            (pid, mid, a, i1, i2, i12, j, nsm, k1, k2,
             m1, m2, n1, n2, unused_nsections) = data1
            op2.log.info(f'PBCOMP pid={pid} mid={mid} nsections={nsections} '
                         f'k1={k1} k2={k2} m=({m1},{m2}) n=({n1},{n2})\n')

        #if pid > 0 and nsections == 0:
            #print('n1')
            #n += ntotal1
            #continue
        #if pid == 0 and nsections == 0:
            #print('n2')
            #n += ntotal2
            #continue

        data2 = []
        n += ntotal1
        if nsections in [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]:
            # 16 Y    RS    Lumped area location along element's y-axis
            # 17 Z    RS    Lumped area location along element's z-axis
            # 18 C    RS    Fraction of the total area for the lumped area
            # 19 MID  I     Material identification number
            # 20      UNDEF None
            # Words 16 through 20 repeat NSECT times
            for unused_i in range(nsections):
                datai = data[n:n+ntotal2]
                xi, yi, ci, mid, unused_null = struct2.unpack(datai)
                data2.append((xi, yi, ci, mid))
                n += ntotal2
        else:
            # a PBCOMP with no sections cannot be represented; stop parsing
            op2.log.error(f'PBCOMP={data1[0]} has no sections; check your bdf')
            return n
            #raise NotImplementedError('PBCOMP nsections=%r' % nsections)

        if op2.is_debug_file:
            op2.binary_debug.write('     PBCOMP: %s\n' % str([data1, data2]))
            msg = (
                '    i=%-2s so=%s xxb=%.1f a=%g i1=%g i2=%g i12=%g j=%g nsm=%g '
                'c=[%s,%s] d=[%s,%s] e=[%s,%s] f=[%s,%s]' % (
                    nsections, None, -9999., a, i1, i2, i12, j, nsm,
                    None, None, None, None, None, None, None, None,)
            )
            op2.log.debug(msg)
        #op2.log.debug(data1)
        #op2.log.debug(data2)

        data_in = [data1, data2]
        prop = PBCOMP.add_op2_data(data_in)
        pid = data1[0]
        # a PBCOMP overrides any previously-read PBEAM with the same pid
        if pid in op2.properties:
            op2._type_to_id_map['PBEAM'].remove(pid)
            del op2.properties[pid]

        self._add_op2_property(prop)
        nproperties += 1
        #print(f"n={n} ndata={ndata}")
    assert nproperties > 0, 'PBCOMP nproperties=%s' % (nproperties)
    # if every PBEAM was replaced, drop the now-empty bookkeeping
    if len(op2._type_to_id_map['PBEAM']) == 0 and 'PBEAM' in op2.card_count:
        del op2._type_to_id_map['PBEAM']
        del op2.card_count['PBEAM']
    op2.card_count['PBCOMP'] = nproperties
    return n
def _read_pbeam(self, data: bytes, n: int) -> int:
    """
    Reads the PBEAM(5402,54,262) card - Record 14.

    An entry is a 5-word header, 11 station packs of 16 floats each, and a
    final 16-float end pack; entries that look like PBCOMPs (bad SO or
    x/xb values) are skipped here and handled by _read_pbcomp.

    .. todo:: add object
    """
    op2 = self.op2
    cross_section_type_map = {
        0 : 'variable',
        1 : 'constant',
        2 : '???',
    }
    struct1 = Struct(mapfmt(op2._endian + b'4if', self.size))   # header
    struct2 = Struct(mapfmt(op2._endian + b'16f', self.size))   # station pack
    struct3 = Struct(mapfmt(op2._endian + b'16f', self.size))   # end pack
    unused_ntotal = 768 # 4*(5+16*12)
    #nproperties = (len(data) - n) // ntotal
    #assert nproperties > 0, 'ndata-n=%s n=%s datai\n%s' % (len(data)-n, n, op2.show_data(data[n:100+n]))
    ndata = len(data)
    #op2.show_data(data[12:], 'if')
    #assert ndata % ntotal == 0, 'ndata-n=%s n=%s ndata%%ntotal=%s' % (len(data)-n, n, ndata % ntotal)
    nproperties = 0
    ntotal1 = 20 * self.factor  # 5 words (header)
    ntotal2 = 64 * self.factor  # 16 words (per pack)
    while n < ndata:
    #while 1: #for i in range(nproperties):
        edata = data[n:n+ntotal1]
        n += ntotal1
        data_in = list(struct1.unpack(edata))
        #if op2.is_debug_file:
            #op2.log.info('PBEAM pid=%s mid=%s nsegments=%s ccf=%s x=%s\n' % tuple(data_in))
        (pid, unused_mid, unused_nsegments, ccf, unused_x) = data_in
        #op2.log.info('PBEAM pid=%s mid=%s nsegments=%s ccf=%s x=%s' % tuple(data_in))

        # Constant cross-section flag: 1=yes and 0=no
        # what is 2?
        if ccf not in [0, 1, 2]:
            msg = ('  PBEAM pid=%s mid=%s nsegments=%s ccf=%s x=%s; '
                   'ccf must be in [0, 1, 2]\n' % tuple(data_in))
            raise ValueError(msg)

        cross_section_type = cross_section_type_map[ccf]
        #print('cross_section_type = %s' % cross_section_type)

        # collect the SO/x-xb flags so we can detect PBCOMP-shaped records
        is_pbcomp = False
        is_bad_so = False
        so = []
        xxb = []
        for i in range(11):
            edata = data[n:n+ntotal2]
            if len(edata) != ntotal2:
                endpack = []
                raise RuntimeError(f'PBEAM unexpected length i={i:d}...')
            n += ntotal2
            pack = struct2.unpack(edata)
            (soi, xxbi, a, i1, i2, i12, j, nsm, c1, c2,
             d1, d2, e1, e2, f1, f2) = pack
            xxb.append(xxbi)
            so.append(soi)

            # SO is encoded numerically: 0.0 -> 'NO', 1.0 -> 'YES';
            # anything else flags the record as a possible PBCOMP
            if soi == 0.0:
                so_str = 'NO'
            elif soi == 1.0:
                so_str = 'YES'
            else:
                so_str = str(soi)
                is_bad_so = True
                #msg = 'PBEAM pid=%s i=%s x/xb=%s soi=%s; soi not in 0.0 or 1.0' % (
                    #pid, i, xxb, soi)
                #raise NotImplementedError(msg)

            #if xxb != 0.0:
                #msg = 'PBEAM pid=%s i=%s x/xb=%s soi=%s; xxb not in 0.0 or 1.0' % (
                    #pid, i, xxb, soi)
                #raise NotImplementedError(msg)

            pack2 = (so_str, xxbi, a, i1, i2, i12, j, nsm, c1, c2,
                     d1, d2, e1, e2, f1, f2)
            data_in.append(pack2)
            if op2.is_debug_file:
                op2.binary_debug.write(f'     {pack}\n')
                msg = (
                    '    i=%-2s' % i + ' so=%s xxb=%.1f a=%g i1=%g i2=%g i12=%g j=%g nsm=%g '
                    'c=[%s,%s] d=[%s,%s] e=[%s,%s] f=[%s,%s]' % (tuple(pack2))
                )
                op2.binary_debug.write(msg)
            #msg = (
                #'    i=%-2s' % i + ' so=%s xxb=%.1f a=%g i1=%g i2=%g i12=%g j=%g nsm=%g '
                #'c=[%s,%s] d=[%s,%s] e=[%s,%s] f=[%s,%s]' % (tuple(pack2))
            #)
            #print(msg)

        # end pack: K1,K2,S1,S2,NSI(A/B),CW(A/B),M1/M2(A/B),N1/N2(A/B)
        edata = data[n:n+ntotal2]
        if len(edata) != ntotal2:
            endpack = []
            raise RuntimeError('PBEAM unexpected length 2...')
        endpack = struct3.unpack(edata)
        n += ntotal2

        assert len(endpack) == 16, endpack
        #(k1, k2, s1, s2, nsia, nsib, cwa, cwb, # 8
         #m1a, m2a, m1b, m2b, n1a, n2a, n1b, n2b) = endpack # 8 -> 16
        if op2.is_debug_file:
            op2.binary_debug.write('    k=[%s,%s] s=[%s,%s] nsi=[%s,%s] cw=[%s,%s] '
                                   'ma=[%s,%s] mb=[%s,%s] na=[%s,%s] nb=[%s,%s]' % (
                                       tuple(endpack)))
        data_in.append(endpack)

        if is_bad_so:
            #if soi < 0.:
            xxb_str = ', '.join(['%g' % xxbi for xxbi in xxb])
            so_str = ', '.join(['%g' % soi for soi in so])
            msg = (f'PBEAM pid={pid} i={i} soi=[{so_str}]; '
                   'soi not 0.0 or 1.0; assuming PBCOMP & dropping')
            op2.log.error(msg)
            is_pbcomp = True

        if min(xxb) < 0.0 or max(xxb) > 1.0:
            xxb_str = ', '.join(['%g' % xxbi for xxbi in xxb])
            msg = (f'PBEAM pid={pid} i={i} x/xb=[{xxb_str}]; '
                   'x/xb must be between 0.0 and 1.0; assuming PBCOMP & dropping')
            op2.log.error(msg)
            is_pbcomp = True

        # skip PBCOMP-shaped records; _read_pbcomp owns those pids
        if is_pbcomp:
            continue
        if pid in op2.properties:
            if op2.properties[pid].type == 'PBCOMP':
                continue

        prop = PBEAM.add_op2_data(data_in)
        nproperties += 1
        self._add_op2_property(prop)
    if nproperties:
        op2.card_count['PBEAM'] = nproperties
    return n
def _read_pbeaml(self, data: bytes, n: int) -> int:
    """
    Reads the PBEAML(9202,92,53) card.

    Word Name Type Description
    1 PID I Property identification number
    2 MID I Material identification number
    3 GROUP(2) CHAR4 Cross-section group name
    5 TYPE(2) CHAR4 Cross section type
    7 VALUE RS Cross section values for XXB, SO, NSM, and dimensions
    Word 7 repeats until (-1) occurs
    """
    op2 = self.op2
    #strs = numpy.core.defchararray.reshapesplit(data, sep=",")
    #ints = np.frombuffer(data[n:], self._uendian + 'i').copy()
    #floats = np.frombuffer(data[n:], self._uendian + 'f').copy()
    # each card is variable-length and -1 terminated; find the card
    # boundaries from the int view
    ints = np.frombuffer(data[n:], op2.idtype8).copy()
    floats = np.frombuffer(data[n:], op2.fdtype8).copy()
    istart, iend = get_minus1_start_end(ints)

    size = self.size
    nproperties = len(istart)
    # 6-word fixed header: pid, mid, GROUP, TYPE
    if size == 4:
        struct1 = Struct(op2._endian + b'2i 8s 8s')
    else:
        struct1 = Struct(op2._endian + b'2q 16s 16s')
    for unused_i, (istarti, iendi) in enumerate(zip(istart, iend)):
        idata = data[n+istarti*size : n+(istarti+6)*size]
        pid, mid, group, beam_type = struct1.unpack(idata)
        group = group.decode('latin1').strip()
        beam_type = beam_type.decode('latin1').strip()
        # everything after the header up to the -1 is the float payload
        # (dims/SO/X-XB/NSM per station)
        fvalues = floats[istarti+6: iendi]
        if op2.is_debug_file:
            op2.binary_debug.write('     %s\n' % str(fvalues))
            op2.log.debug(f'pid={pid:d} mid={mid:d} group={group} beam_type={beam_type}')
            op2.log.debug(fvalues)

        #op2.log.debug(f'pid={pid:d} mid={mid:d} group={group} beam_type={beam_type}')
        data_in = [pid, mid, group, beam_type, fvalues]
        prop = PBEAML.add_op2_data(data_in)
        if pid in op2.properties:
            # a PBEAM with the same pid already exists; keep it and drop
            # this PBEAML
            propi = op2.properties[pid]
            assert propi.type in ['PBEAM'], propi.get_stats()
            nproperties -= 1
            continue
        self._add_op2_property(prop)
    if nproperties:
        op2.card_count['PBEAML'] = nproperties
    # the whole buffer is consumed via the -1 markers, so return len(data)
    return len(data)
def _read_pbend(self, data: bytes, n: int) -> int:
    """Reads the PBEND card by trying the NX reader first and the MSC reader second."""
    op2 = self.op2
    return op2.reader_geom2._read_dual_card(
        data, n,
        self._read_pbend_nx, self._read_pbend_msc,
        'PBEND', op2._add_methods._add_property_object)
def _read_pbend_msc(self, data: bytes, n: int) -> Tuple[int, List[PBEND]]:
    """
    Reads the MSC form of the PBEND card (26 words per entry); returns
    ``(n, properties)`` for use by ``_read_dual_card``.

    PBEND
    1 PID I Property identification number
    2 MID I Material identification number
    3 A RS Area
    4 I1 RS Area moment of inertia in plane 1
    5 I2 RS Area moment of inertia in plane 2
    6 J RS Torsional constant
    7 FSI I flexibility and stress intensification factors
    8 RM RS Mean cross-sectional radius of the curved pipe
    9 T RS Wall thickness of the curved pipe
    10 P RS Internal pressure
    11 RB RS Bend radius of the line of centroids
    12 THETAB RS Arc angle of element
    13-20 C1,C2,D1,D2,E1,E2,F1,F2 RS Stress recovery locations at C/D/E/F
    21 K1 RS Area factor for shear in plane 1
    22 K2 RS Area factor for shear in plane 2
    23 NSM RS Nonstructural mass per unit length
    24 RC RS Radial offset of the geometric centroid
    25 ZC RS Offset of the geometric centroid
    26 DELTAN I Radial offset of the neutral axis from the geometric
    centroid
    """
    op2 = self.op2
    ntotal = 104  # 26*4
    struct1 = Struct(op2._endian + b'2i 4f i 18f f')  # delta_n is a float, not an integer
    nproperties = (len(data) - n) // ntotal
    assert (len(data) - n) % ntotal == 0
    assert nproperties > 0, 'table=%r len=%s' % (op2.table_name, len(data) - n)
    properties = []
    for unused_i in range(nproperties):
        # consistency fix: slice by ntotal instead of a hard-coded 104
        edata = data[n:n+ntotal]
        out = struct1.unpack(edata)
        (pid, mid, area, i1, i2, j, fsi, rm, t, p, rb, theta_b,
         c1, c2, d1, d2, e1, e2, f1, f2, k1, k2, nsm, rc, zc,
         delta_n) = out
        beam_type = fsi

        # an all-zero (area, rm, t, p) means the geometric-section fields
        # were blank, which corresponds to the beam_type=2 form
        if (area, rm, t, p) == (0., 0., 0., 0.):
            area = None
            rm = None
            t = None
            p = None
            delta_n = None
            beam_type = 2
        if delta_n == 0:
            #: Radial offset of the neutral axis from the geometric
            #: centroid, positive is toward the center of curvature
            delta_n = None
        pbend = PBEND(pid, mid, beam_type, area, i1, i2, j,
                      c1, c2, d1, d2, e1, e2, f1, f2, k1, k2,
                      nsm, rc, zc, delta_n, fsi, rm, t, p, rb, theta_b)
        #print(pbend)
        pbend.validate()
        properties.append(pbend)
        n += ntotal
    return n, properties
def _read_pbend_nx(self, data: bytes, n: int) -> Tuple[int, List[PBEND]]:
    """
    Reads the NX form of the PBEND card (33 words per entry); returns
    ``(n, properties)`` for use by ``_read_dual_card``.

    PBEND
    1 PID I Property identification number
    2 MID I Material identification number
    3 A RS Area
    4 I1 RS Area moment of inertia in plane 1
    5 I2 RS Area moment of inertia in plane 2
    6 J RS Torsional constant
    7 FSI I Flexibility and stress intensification factors
    8 RM RS Mean cross-sectional radius of the curved pipe
    9 T RS Wall thickness of the curved pipe
    10 P RS Internal pressure
    11 RB RS Bend radius of the line of centroids
    12 THETAB RS Arc angle of element
    13-20 C1,C2,D1,D2,E1,E2,F1,F2 RS Stress recovery locations at C/D/E/F
    21 K1 RS Area factor for shear in plane 1
    22 K2 RS Area factor for shear in plane 2
    23 NSM RS Nonstructural mass per unit length
    24 RC RS Radial offset of the geometric centroid
    25 ZC RS Offset of the geometric centroid
    26 DELTAN RS Radial offset of the neutral axis from the geometric
    centroid
    27 SACL RS Miter spacing at center line.
    28 ALPHA RS One-half angle between the adjacent miter axis
    (Degrees).
    29 FLANGE I For FSI=5, defines the number of flanges attached.
    30 KX RS For FSI=6, the user defined flexibility factor for the
    torsional moment.
    31 KY RS For FSI=6, the user defined flexibility factor for the
    out-of-plane bending moment.
    32 KZ RS For FSI=6, the user defined flexbility factor for the
    in-plane bending moment.
    33 Not used
    """
    op2 = self.op2
    #op2.log.info('geom skipping PBEND in EPT')
    #return len(data)
    ntotal = 132  # 33*4
    struct1 = Struct(op2._endian + b'2i 4f i 21f i 4f')
    nproperties = (len(data) - n) // ntotal
    assert (len(data) - n) % ntotal == 0
    assert nproperties > 0, 'table=%r len=%s' % (op2.table_name, len(data) - n)
    properties = []
    for unused_i in range(nproperties):
        # consistency fix: slice by ntotal instead of a hard-coded 132
        edata = data[n:n+ntotal]
        out = struct1.unpack(edata)
        (pid, mid, area, i1, i2, j, fsi, rm, t, p, rb, theta_b,
         c1, c2, d1, d2, e1, e2, f1, f2, k1, k2, nsm, rc, zc,
         delta_n, unused_sacl, unused_alpha, unused_flange,
         unused_kx, unused_ky, unused_kz, unused_junk,) = out
        beam_type = fsi

        pbend = PBEND(pid, mid, beam_type, area, i1, i2, j,
                      c1, c2, d1, d2, e1, e2, f1, f2, k1, k2,
                      nsm, rc, zc, delta_n, fsi, rm, t, p, rb, theta_b)
        pbend.validate()
        properties.append(pbend)
        n += ntotal
    return n, properties
# PBMSECT
# PBRSECT
def _read_pbush(self, data: bytes, n: int) -> int:
    """
    Reads the PBUSH card, which is different between MSC and NX Nastran.

    DMAP NX 11
    ----------
    NX has 23 fields in NX 11-NX 2019.2 (same as MSC 2005)
    NX has 18 fields in the pre-2001 format

    DMAP MSC 2005
    -------------
    MSC has 23 fields in 2005
    MSC has 18 fields in the pre-2001 format

    DMAP MSC 2016
    -------------
    MSC has 24 fields in 2016.1
    MSC has 18 fields in the pre-2001 format

    DMAP MSC 2021
    -------------
    MSC has 27 fields in 2021
    """
    op2 = self.op2
    card_name = 'PBUSH'
    card_obj = PBUSH
    # dispatch on the per-entry byte size; each reader handles one format
    methods = {
        72 : self._read_pbush_nx_72,    # 72=4*18
        92 : self._read_pbush_msc_92,   # 92=4*23
        96 : self._read_pbush_msc_96,   # 96=4*24
        108 : self._read_pbush_msc_108, # 108=4*27
    }
    try:
        n = op2.reader_geom2._read_double_card(
            card_name, card_obj, self._add_op2_property,
            methods, data, n)
    except DoubleCardError:
        # size-based dispatch was ambiguous; fall back to trying the
        # legacy NX/MSC 18-field reader, then the 23-field MSC reader
        nx_method = partial(self._read_pbush_nx_72, card_obj)
        msc_method = partial(self._read_pbush_msc_92, card_obj)
        n = op2.reader_geom2._read_dual_card(
            data, n,
            nx_method, msc_method,
            card_name, self._add_op2_property)

    # we're listing nx twice because NX/MSC used to be consistent
    # the new form for MSC is not supported
    #n = self._read_dual_card(data, n, self._read_pbush_nx, self._read_pbush_msc,
                             #'PBUSH', self._add_op2_property)
    return n
def _read_pbush_nx_72(self, card_obj: PBUSH, data: bytes, n: int) -> Tuple[int, List[PBUSH]]:
    """
    Reads the legacy MSC/NX 18-field PBUSH(1402,14,37).

    Word 1 is the pid, words 2-13 are K1-K6/B1-B6, word 14 is a single
    structural damping value that is broadcast to G1-G6, and words 15-18
    are SA/ST/EA/ET.
    """
    op2 = self.op2
    ntotal = 72 * self.factor  # 18 words * 4 bytes
    struct1 = Struct(mapfmt(op2._endian + b'i17f', self.size))
    ndata = len(data) - n
    nentries = ndata // ntotal
    # bug fix: these messages were missing the f-prefix, so the
    # {op2.table_name}/{ndata} placeholders were never interpolated
    assert nentries > 0, f'table={op2.table_name} len={ndata}'
    assert ndata % ntotal == 0, f'table={op2.table_name} leftover = {ndata} % {ntotal} = {ndata % ntotal}'

    props = []
    for unused_i in range(nentries):
        edata = data[n:n+ntotal]
        out = struct1.unpack(edata)
        (pid,
         k1, k2, k3, k4, k5, k6,
         b1, b2, b3, b4, b5, b6,
         g1, sa, st, ea, et) = out
        #op2.log.debug(out)
        assert pid > 0, pid
        # the legacy format carries one damping value; broadcast it to
        # all six DOFs
        g2 = g3 = g4 = g5 = g6 = g1
        data_in = (pid, k1, k2, k3, k4, k5, k6, b1, b2, b3, b4, b5, b6,
                   g1, g2, g3, g4, g5, g6, sa, st, ea, et)
        prop = PBUSH.add_op2_data(data_in)
        props.append(prop)
        n += ntotal
    return n, props
def _read_pbush_msc_92(self, card_obj: PBUSH, data: bytes, n: int) -> Tuple[int, List[PBUSH]]:
    """
    Reads the 23-field PBUSH(1402,14,37).

    MSC 2005r2 to <MSC 2016; also used by NX 11-2019.2.
    Fields: pid, K1-K6, B1-B6, G1-G6, SA, ST, EA, ET.
    """
    op2 = self.op2
    ntotal = 92 * self.factor  # 23*4
    struct1 = Struct(mapfmt(op2._endian + b'i22f', self.size))
    ndata = len(data) - n
    nentries = ndata // ntotal
    # bug fix: these messages were missing the f-prefix, so the
    # {op2.table_name}/{ndata} placeholders were never interpolated
    assert nentries > 0, f'table={op2.table_name} len={ndata}'
    assert ndata % ntotal == 0, f'table={op2.table_name} leftover = {ndata} % {ntotal} = {ndata % ntotal}'

    props = []
    for unused_i in range(nentries):
        edata = data[n:n+ntotal]
        out = struct1.unpack(edata)
        #(pid, k1, k2, k3, k4, k5, k6, b1, b2, b3, b4, b5, b6,
         #g1, g2, g3, g4, g5, g6, sa, st, ea, et) = out
        pid = out[0]
        assert pid > 0, pid
        prop = PBUSH.add_op2_data(out)
        props.append(prop)
        n += ntotal
    return n, props
def _read_pbush_msc_96(self, card_obj: PBUSH, data: bytes, n: int) -> Tuple[int, List[PBUSH]]:
    """
    Reads the 24-field PBUSH(1402,14,37).

    MSC 2016.1? to 2020.
    Fields: pid, K1-K6, B1-B6, G1-G6, SA, ST, EA, ET, MASS.
    """
    op2 = self.op2
    ntotal = 96 * self.factor  # 24*4
    struct1 = Struct(mapfmt(op2._endian + b'i22f f', self.size))
    ndata = len(data) - n
    nentries = ndata // ntotal
    # bug fix: these messages were missing the f-prefix, so the
    # {op2.table_name}/{ndata} placeholders were never interpolated
    assert nentries > 0, f'table={op2.table_name} len={ndata}'
    assert ndata % ntotal == 0, f'table={op2.table_name} leftover = {ndata} % {ntotal} = {ndata % ntotal}'

    props = []
    for unused_i in range(nentries):
        edata = data[n:n+ntotal]
        out = struct1.unpack(edata)
        #(pid, k1, k2, k3, k4, k5, k6, b1, b2, b3, b4, b5, b6,
         #g1, g2, g3, g4, g5, g6, sa, st, ea, et, mass) = out
        pid = out[0]
        assert pid > 0, pid
        prop = PBUSH.add_op2_data(out)
        props.append(prop)
        n += ntotal
    return n, props
def _read_pbush_msc_108(self, card_obj: PBUSH, data: bytes, n: int) -> Tuple[int, List[PBUSH]]:
    """
    Reads the 27-field PBUSH(1402,14,37).

    MSC 2021 to current.

    ints = (1402, 14, 37, 2, 100000.0, 200000.0, 300000.0, 0.15, 0.25, 0.35, 1000.0, 2000.0, 3000.0, 0.0015, 0.0025, 0.0035, 0,
            -1577048263, -1577048263, -1577048263, -1577048263, -1577048263, 1065353216, 1065353216, 1065353216, 1065353216, 0, 0, 0, 0)
    floats = (1402, 14, 37,
              2, 100000.0, 200000.0, 300000.0, 0.15, 0.25, 0.35, 1000.0, 2000.0, 3000.0, 0.0015, 0.0025, 0.0035, 0.0,
              -1.7367999061094683e-18, -1.7367999061094683e-18, -1.7367999061094683e-18, -1.7367999061094683e-18, -1.7367999061094683e-18, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0)
    """
    op2 = self.op2
    ntotal = 108 * self.factor  # 27*4
    struct1 = Struct(mapfmt(op2._endian + b'i22f 4f', self.size))
    #op2.show_data(data, types='ifs')
    ndata = len(data) - n
    nentries = ndata // ntotal
    # bug fix: these messages were missing the f-prefix, so the
    # {op2.table_name}/{ndata} placeholders were never interpolated
    assert nentries > 0, f'table={op2.table_name} len={ndata}'
    assert ndata % ntotal == 0, f'table={op2.table_name} leftover = {ndata} % {ntotal} = {ndata % ntotal}'

    props = []
    for unused_i in range(nentries):
        edata = data[n:n+ntotal]
        out = struct1.unpack(edata)
        #(pid, k1, k2, k3, k4, k5, k6, b1, b2, b3, b4, b5, b6,
         #g1, g2, g3, g4, g5, g6, sa, st, ea, et) = out
        pid = out[0]
        assert pid > 0, pid
        prop = PBUSH.add_op2_data(out)
        str(prop)  # verify the card can be written
        props.append(prop)
        n += ntotal
    return n, props
def _read_pbush1d(self, data: bytes, n: int) -> int:
    """
    Reads the PBUSH1D(3101,31,219) card - Record 18.

    1 PID I Property identification number
    2 K RS Stiffness
    3 C RS Viscous Damping
    4 M RS Mass
    5 ALPHA RS Temperature coefficient
    6 SA RS Stress recovery coefficient
    7 EA/SE RS Strain recovery coefficient

    8 TYPEA I Shock data type:0=Null, 1=Table, 2=Equation
    9 CVT RS Coefficient of translation velocity tension
    10 CVC RS Coefficient of translation velocity compression
    11 EXPVT RS Exponent of velocity tension
    12 EXPVC RS Exponent of velocity compression
    13 IDTSU I TABLEDi or DEQATN entry identification number for scale factor vs displacement
    14 IDTCU I DEQATN entry identification number for scale factor vs displacement
    15 IDTSUD I DEQATN entry identification number for derivative tension
    16 IDCSUD I DEQATN entry identification number for derivative compression

    17 TYPES I Spring data type: 0=Null, 1=Table, 2=Equation
    18 IDTS I TABLEDi or DEQATN entry identification number for tension compression
    19 IDCS I DEQATN entry identification number for compression
    20 IDTDU I DEQATN entry identification number for scale factor vs displacement
    21 IDCDU I DEQATN entry identification number for force vs displacement

    22 TYPED I Damper data type: 0=Null, 1=Table, 2=Equation
    23 IDTD I TABLEDi or DEQATN entry identification number for tension compression
    24 IDCD I DEQATN entry identification number for compression
    25 IDTDV I DEQATN entry identification number for scale factor versus velocity
    26 IDCDV I DEQATN entry identification number for force versus velocity

    27 TYPEG I General data type: 0=Null, 1=Table, 2=Equation
    28 IDTG I TABLEDi or DEQATN entry identification number for tension compression
    29 IDCG I DEQATN entry identification number for compression
    30 IDTDU I DEQATN entry identification number for scale factor versus displacement
    31 IDCDU I DEQATN entry identification number for force versus displacement
    32 IDTDV I DEQATN entry identification number for scale factor versus velocity
    33 IDCDV I DEQATN entry identification number for force vs velocity

    34 TYPEF I Fuse data type: 0=Null, 1=Table
    35 IDTF I TABLEDi entry identification number for tension
    36 IDCF I TABLEDi entry identification number for compression

    37 UT RS Ultimate tension
    38 UC RS Ultimate compression
    """
    op2 = self.op2
    # maps the numeric TYPEx flags to the bulk-data keyword form
    type_map = {
        0 : None,  # NULL
        1 : 'TABLE',
        2 : 'EQUAT',
    }
    ntotal = 152 * self.factor  # 38 words * 4 bytes
    struct1 = Struct(mapfmt(op2._endian + b'i 6f i 4f 24i 2f', self.size))
    nentries = (len(data) - n) // ntotal
    for unused_i in range(nentries):
        edata = data[n:n+ntotal]
        out = struct1.unpack(edata)
        (pid, k, c, m, unused_alpha, sa, se,
         typea, cvt, cvc, expvt, expvc, idtsu, idtcu, idtsud, idcsud,
         types, idts, idcs, idtdus, idcdus,
         typed, idtd, idcd, idtdvd, idcdvd,
         typeg, idtg, idcg, idtdug, idcdug, idtdvg, idcdvg,
         typef, idtf, idcf,
         unused_ut, unused_uc) = out

        # test_op2_other_05
        #pbush1d, 204, 1.e+5, 1000., , , , , , +pb1
        #+pb1, spring, table, 205, , , , , , +pb2
        #+pb2, damper, table, 206
        #pid=204 k=100000.0 c=1000.0 m=0.0 sa=nan se=nan

        msg = f'PBUSH1D pid={pid} k={k} c={c} m={m} sa={sa} se={se}'
        # optional_vars collects the SHOCKA/SPRING/DAMPER/GENER sections
        # that are actually present (TYPEx != 0)
        optional_vars = {}
        typea_str = type_map[typea]
        types_str = type_map[types]
        typed_str = type_map[typed]
        unused_typeg_str = type_map[typeg]
        unused_typef_str = type_map[typef]

        if min([typea, types, typed, typeg, typef]) < 0:
            raise RuntimeError(f'typea={typea} types={types} typed={typed} typeg={typeg} typef={typef}')

        if typea in [1, 2]:  # SHOCKA?
            #pbush1d, 204, 1.e+5, 1000., , , , , , +pb4
            #+pb4, shocka, table, 1000., , 1., , 214, , +pb41
            #+pb41, spring, table, 205

            # IDTSU serves as both the table and equation id; which one
            # applies depends on typea_str
            idts = idtsu  # if typea_str == 'TABLE' else 0
            idets = idtsu  # if typea_str == 'EQUAT' else 0

            optional_vars['SHOCKA'] = [typea_str, cvt, cvc, expvt, expvc,
                                       idts, idets, idtcu, idtsud, idcsud]

            #(shock_type, shock_cvt, shock_cvc, shock_exp_vt, shock_exp_vc,
             #shock_idts, shock_idets, shock_idecs, shock_idetsd, shock_idecsd
            #)
            #print('shock_idts, shock_idets', typea_str, idtsu, idtsu)
            msg += (
                f' SHOCKA type={typea} cvt={cvt} cvc={cvc} expvt={expvt} expvc={expvc}\n'
                f'        idtsu={idtsu} (idts={idts} idets={idets}) idtcu={idtcu} idtsud={idtsud} idcsud={idcsud}')
        if types in [1, 2]:  # SPRING: Spring data type: 0=Null, 1=Table, 2=Equation
            #(spring_type, spring_idt, spring_idc, spring_idtdu, spring_idcdu) = values
            # SPRING, TYPE IDT IDC IDTDU IDCDU
            optional_vars['SPRING'] = [types_str, idts, idcs, idtdus, idcdus]
            msg += f' SPRING type={types} idt={idts} idc={idcs} idtdu={idtdus} idcdu={idcdus}'

        if typed in [1, 2]:  # Damper data type: 0=Null, 1=Table, 2=Equation
            optional_vars['DAMPER'] = [typed_str, idtd, idcd, idtdvd, idcdvd]
            msg += f' DAMPER type={typed} idt={idtd} idc={idtd} idtdv={idtdvd} idcdv={idcdvd}'

        if typeg in [1, 2]:  # general, GENER?: 0=Null, 1=Table 2=Equation
            # C:\NASA\m4\formats\git\examples\move_tpl\ar29scbt.bdf
            #pbush1d, 206, 1.e+3, 10., , , , , , +pb6
            #+pb6, gener, equat, 315, , 3015, , 3016
            msg += f' GENER type={typeg} idt={idtg} idc={idcg} idtdu={idtdug} idcdu={idcdug} idtdv={idtdvg} idcdv={idcdvg}'
            optional_vars['GENER'] = [idtg, idcg, idtdug, idcdug, idtdvg, idcdvg]

        if typef in [1, 2]:  # Fuse data type: 0=Null, 1=Table
            raise NotImplementedError(f'typef={typef} idtf={idtf} idcf={idcf}')

        if op2.is_debug_file:
            op2.binary_debug.write(msg)

        pbush1d = op2.add_pbush1d(pid, k=k, c=c, m=m, sa=sa, se=se,
                                  optional_vars=optional_vars,)
        str(pbush1d)  # verify the card can be written
        n += ntotal
    op2.card_count['PBUSH1D'] = nentries
    return n
#def _read_pbusht(self, data: bytes, n: int) -> int:
#"""reads the PBUSHT(702, 7, 38)"""
#n, props = self._read_pbusht_nx(data, n)
#for prop in props:
##print(prop)
#op2._add_pbusht_object(prop)
#return n
def _read_pbusht(self, data: bytes, n: int) -> int:
"""
NX 12 / MSC 2005
Word Name Type Description
1 PID I Property identification number
2 TKID(6) I TABLEDi entry identification numbers for stiffness
8 TBID(6) I TABLEDi entry identification numbers for viscous damping
14 TGEID(6) I TABLEDi entry identification number for structural damping
20 TKNID(6) I TABLEDi entry identification numbers for force versus deflection
old style
Word Name Type Description
1 PID I Property identification number
2 TKID(6) I TABLEDi entry identification numbers for stiffness
8 TBID(6) I TABLEDi entry identification numbers for viscous damping
14 TGEID I TABLEDi entry identification number for structural damping
15 TKNID(6) I TABLEDi entry IDs for force versus deflection
"""
op2 = self.op2
card_name = 'PBUSHT'
card_obj = PBUSHT
methods = {
80 : self._read_pbusht_80,
100 : self._read_pbusht_100,
136 : self._read_pbusht_136,
}
try:
n = op2.reader_geom2._read_double_card(
card_name, card_obj, op2._add_methods._add_pbusht_object,
methods, data, n)
except DoubleCardError:
raise
op2.log.warning(f'try-except {card_name}')
#n = self._read_split_card(data, n,
#self._read_cquad8_current, self._read_cquad8_v2001,
#card_name, self.add_op2_element)
#nelements = op2.card_count['CQUAD8']
#op2.log.debug(f'nCQUAD8 = {nelements}')
#n = self._read_dual_card(data, n, self._read_ctriax_8, self._read_ctriax_9,
#'CTRIAX', self.add_op2_element)
return n
    def _read_pbusht_nx_old(self, data: bytes, n: int) -> int:
        """
        Older PBUSHT reader that guesses the record length (100 vs. 80 bytes)
        from the total field count instead of dispatching on the entry size
        the way ``_read_pbusht`` does.

        NOTE(review): despite the ``-> int`` annotation this returns
        ``(n, props)``; also it calls ``_read_pbusht_100``/``_read_pbusht_80``
        with ``(data, n)`` while those methods take ``(card_obj, data, n)`` --
        confirm this method is dead code before relying on it.
        """
        op2 = self.op2
        #op2.show_data(data[12:])
        ndata = (len(data) - n) // self.factor
        # ambiguous length: the buffer divides evenly into both layouts,
        # so we cannot tell which format is present -> skip the table
        if ndata % 100 == 0 and ndata % 80 == 0:
            op2.log.warning(f"skipping PBUSHT in EPT because nfields={ndata//4}, which is "
                            'nproperties*25 or nproperties*20')
            return len(data), []
        if ndata % 100 == 0:
            n, props = self._read_pbusht_100(data, n)
        elif ndata % 80 == 0:
            n, props = self._read_pbusht_80(data, n)
        else:
            # C:\MSC.Software\msc_nastran_runs\mbsh14.op2
            # ints    = (1,
            #            51, 51, 0, 0, 0, 0,
            #            61, 61, 0, 0, 0, 0,
            #            0, 0, 0, 0, 0, 0,
            #            0, '', '', 0, 0, '', '', 0, 0, 925353388, 0, 0, 0, 0, 0,
            #            7,
            #            51, 51, 0, 0, 0, 0,
            #            61, 61, 0, 0, 0, 0,
            #            0, 0, 0, 0, 0, 0,
            #            0, '', '', 0, 0, '', '', 0, 0, 925353388, 0, 0, 0, 0, 0)
            # strings = (b"1 51 51 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00=\x00\x00\x00=\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x00\x00\xac\xc5'7\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x003\x00\x00\x003\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00=\x00\x00\x00=\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x00\x00\xac\xc5'7\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",)
            # ints = (1,  51, 51, 0, 0, 0, 0,  61, 61, 0, 0, 0, 0,  0, 0, 0, 0, 0, 0,  0, '    ', '    ', 0, 0, '    ', '    ', 0, 0, 1e-5, 0, 0, 0, 0 , 0,
            #
            #         7,  51, 51, 0, 0, 0, 0,  61, 61, 0, 0, 0, 0,  0, 0, 0, 0, 0, 0,  0, '    ', '    ', 0, 0, '    ', '    ', 0, 0, 1e-5, 0, 0, 0, 0, 0)
            #op2.show_data(data[n:], types='is')
            raise NotImplementedError('You have blank lines in your PBUSHT')
        return n, props
def _read_pbusht_80(self, card_obj, data: bytes, n: int) -> int:
"""
Word Name Type Description
1 PID I Property identification number
2 TKID(6) I TABLEDi entry identification numbers for stiffness
8 TBID(6) I TABLEDi entry identification numbers for viscous damping
14 TGEID I TABLEDi entry identification number for structural damping
15 TKNID(6) I TABLEDi entry identification numbers for force versus deflection
16,17,18,19,20
???
"""
op2 = self.op2
ntotal = 80 * self.factor
struct1 = Struct(op2._endian + b'20i')
nentries = (len(data) - n) // ntotal
assert nentries > 0, 'table=%r len=%s' % (op2.table_name, len(data) - n)
props = []
for unused_i in range(nentries):
edata = data[n:n+ntotal]
out = struct1.unpack(edata)
#(pid,
#k1, k2, k3, k4, k5, k6,
#b1, b2, b3, b4, b5, b6,
#g1, sa, st, ea, et) = out
(pid,
k1, k2, k3, k4, k5, k6,
b1, b2, b3, b4, b5, b6,
g1,
n1, n2, n3, n4, n5, n6) = out
g2 = g3 = g4 = g5 = g6 = g1
k_tables = [k1, k2, k3, k4, k5, k6]
b_tables = [b1, b2, b3, b4, b5, b6]
ge_tables = [g1, g2, g3, g4, g5, g6]
kn_tables = [n1, n2, n3, n4, n5, n6]
prop = PBUSHT(pid, k_tables, b_tables, ge_tables, kn_tables)
props.append(prop)
n += ntotal
return n, props
def _read_pbusht_100(self, card_obj, data: bytes, n: int) -> int:
op2 = self.op2
props = []
ntotal = 100 * self.factor
struct1 = Struct(mapfmt(op2._endian + b'25i', self.size))
nentries = (len(data) - n) // ntotal
assert nentries > 0, 'table=%r len=%s' % (op2.table_name, len(data) - n)
for unused_i in range(nentries):
edata = data[n:n+ntotal]
out = struct1.unpack(edata)
(pid,
k1, k2, k3, k4, k5, k6,
b1, b2, b3, b4, b5, b6,
g1, g2, g3, g4, g5, g6,
n1, n2, n3, n4, n5, n6) = out
k_tables = [k1, k2, k3, k4, k5, k6]
b_tables = [b1, b2, b3, b4, b5, b6]
ge_tables = [g1, g2, g3, g4, g5, g6]
kn_tables = [n1, n2, n3, n4, n5, n6]
prop = PBUSHT(pid, k_tables, b_tables, ge_tables, kn_tables)
props.append(prop)
n += ntotal
return n, props
    def _read_pbusht_136(self, card_obj, data: bytes, n: int) -> int:
        r"""
        Reads the 34-field (136 byte) PBUSHT format; returns ``(n, props)``.
        The layout is not 100% understood; only the table groups are kept.

        1 PID I Property identification number
        2 TKID(6) I TABLEDi entry identification numbers for stiffness
        8 TBID(6) I TABLEDi entry identification numbers for viscous damping
        14 TGEID(6) I TABLEDi entry identification number for structural damping
        20 TKNID(6) I TABLEDi entry IDs for force vs. deflection
        26 FDC(2) CHAR4 Force deflection curve rule
        28 FUSE I Failure level
        29 DIR I Fuse direction
        30 OPTION(2) CHAR4 Failure mode
        32 LOWER RS Lower failure bound
        33 UPPER RS Upper failure bound
        34 FRATE RS FACTOR of scales the stiffness
        35 LRGR I Controls large rotation
        36 UNDEF(4) none

        # C:\MSC.Software\msc_nastran_runs\mbsh14.op2
        PBUSHT 1 K 51 51
        B 61 61
        PBUSHT 7 K 51 51
        B 61 61
        538976288 = '    ' (four ASCII spaces read as an int -> a blank field)
        ints = (
        702, 7, 38,
        1, (51, 51, 0, 0, 0, 0), (61, 61, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0), 0, 538976288, 538976288, 0, 0, 538976288, 538976288, 0, 0, 925353388, 0, 0, 0, 0, 0,
        7, (51, 51, 0, 0, 0, 0), (61, 61, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0), 0, 538976288, 538976288, 0, 0, 538976288, 538976288, 0, 0, 925353388, 0, 0, 0, 0, 0)
        floats = (
        702, 7, 38,
        1, 51, 51, 0.0, 0.0, 0.0, 0.0, 61, 61, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 538976288, 538976288, 0.0, 0.0, 538976288, 538976288, 0.0, 0.0, 1.e-7, 0.0, 0.0, 0.0, 0.0, 0.0,
        7, 51, 51, 0.0, 0.0, 0.0, 0.0, 61, 61, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 538976288, 538976288, 0.0, 0.0, 538976288, 538976288, 0.0, 0.0, 1.e-7, 0.0, 0.0, 0.0, 0.0, 0.0)
        """
        op2 = self.op2
        props = []
        ntotal = 136 * self.factor  # k b g n fdc
        struct1 = Struct(mapfmt(op2._endian + b'i 6i 6i 6i 6i 4s 2i i 5i', self.size))
        nentries = (len(data) - n) // ntotal
        assert nentries > 0, 'table=%r len=%s' % (op2.table_name, len(data) - n)
        for unused_i in range(nentries):
            edata = data[n:n+ntotal]
            out = struct1.unpack(edata)
            (pid,
             k1, k2, k3, k4, k5, k6,
             b1, b2, b3, b4, b5, b6,
             g1, g2, g3, g4, g5, g6,
             n1, n2, n3, n4, n5, n6,
             word1, a, word2, c, *other) = out
            # 538976288 == b'    ' (a blank field); normalize blanks to 0
            k_tables = [ki if ki != 538976288 else 0
                        for ki in [k1, k2, k3, k4, k5, k6]]
            b_tables = [bi if bi != 538976288 else 0
                        for bi in [b1, b2, b3, b4, b5, b6]]
            ge_tables = [gei if gei != 538976288 else 0
                         for gei in [g1, g2, g3, g4, g5, g6]]
            kn_tables = [kni if kni != 538976288 else 0
                         for kni in [n1, n2, n3, n4, n5, n6]]
            # the FDC/FUSE/OPTION tail is parsed but not yet stored on the
            # card, so log what was seen
            op2.log.warning(
                f'PBUSHT: pid={pid} '
                f'k={k_tables} '
                f'b={b_tables} '
                f'ge={ge_tables} '
                f'n={kn_tables} ' +
                'words=' + str([word1, a, word2, c]) +
                f' other={other}')
            assert sum(other) == 0, other
            prop = PBUSHT(pid, k_tables, b_tables, ge_tables, kn_tables)
            props.append(prop)
            n += ntotal
        return n, props
def _read_pcomp(self, data: bytes, n: int) -> int:
r"""
PCOMP(2706,27,287) - the marker for Record 22
standard:
EPTS; 64-bit: C:\MSC.Software\simcenter_nastran_2019.2\tpl_post1\cqrdbxdra3lg.op2
optistruct:
ints = (2706, 27, 287,
5,
3, -2.75, 0, 0, 1, 0, 0,
2, 0.25, 0, 2, # why is sout=2?
3, 5.0, 0, 3, # why is sout=3?
2, 0.25, 0, 2, # why is sout=2?
6, 5, -3.0, 0, 0, 1, 0, 0,
2, 0.25, 0, 2,
2, 0.25, 0, 2,
3, 5.0, 0, 3,
2, 0.25, 0, 2,
2, 0.25, 0, 2, 7, 7, -1068498944, 0, 0, 1, 0, 0, 2, 0.25, 0, 2, 2, 0.25, 0, 2, 2, 0.25, 0, 2, 3, 5.0, 0, 3, 2, 0.25, 0, 2, 2, 0.25, 0, 2, 2, 0.25, 0, 2)
floats = (2706, 27, 287,
5, 3, -2.75, 0.0, 0.0, 1, 0.0, 0.0, 2, 0.25, 0.0, 2, 3, 5.0, 0.0, 3, 2, 0.25, 0.0, 2, 6, 5, -3.0, 0.0, 0.0, 1, 0.0, 0.0, 2, 0.25, 0.0, 2, 2, 0.25, 0.0, 2, 3, 5.0, 0.0, 3, 2, 0.25, 0.0, 2, 2, 0.25, 0.0, 2, 9.80908925027372e-45, 9.80908925027372e-45, -3.25, 0.0, 0.0, 1, 0.0, 0.0, 2, 0.25, 0.0, 2, 2, 0.25, 0.0, 2, 2, 0.25, 0.0, 2, 3, 5.0, 0.0, 3, 2, 0.25, 0.0, 2, 2, 0.25, 0.0, 2, 2, 0.25, 0.0, 2)
"""
op2 = self.op2
if self.size == 4:
n2, props = self._read_pcomp_32_bit(data, n)
nproperties = len(props)
for prop in props:
self._add_op2_property(prop)
op2.card_count['PCOMP'] = nproperties
else:
n2 = op2.reader_geom2._read_dual_card(
data, n, self._read_pcomp_32_bit,
self._read_pcomp_64_bit,
'PCOMP', self._add_op2_property)
return n2
    def _read_pcomp_64_bit(self, data: bytes, n: int) -> Tuple[int, List[PCOMP]]:
        """
        PCOMP(2706,27,287) - the marker for Record 22

        1 PID I Property identification number
        2 N(C) I Number of plies
        3 Z0 RS Distance from the reference plane to the bottom surface
        4 NSM RS Nonstructural mass per unit area
        5 SB RS Allowable shear stress of the bonding material
        6 FT I Failure theory
        7 TREF RS Reference temperature
        8 GE RS Damping coefficient
        9 MID I Material identification number
        10 T RS Thicknesses of the ply
        11 THETA RS Orientation angle of the longitudinal direction of the ply
        12 SOUT I Stress or strain output request of the ply
        Words 9 through 12 repeat N times

        In the observed 64-bit files the ply count (word 2) is written as 0,
        so each property's plies are read until a (-1, -1, -1, -1) sentinel
        and the count is recovered from the number of plies actually read.

        TODO:
          64-bit bug: why is the number of plies 0???

        doubles (float64) = (
           1, 0.0, 1.7368e-18, 0.0, 1.0, 1.5e-323, 0.0, 0.0,
           1, 0.11, 0, 1,
           1, 0.11, 0, 1,
           1, 0.11, 0, 1,
           -1, -1, -1, -1,
           21, 0.0, 1.7368e-18, 0.0, 1.0, 1.5e-323, 0.0, 0.0,
           1, 0.11, 0, 1,
           1, 0.11, 0, 1,
           1, 0.11, 0, 1,
           1, 0.11, 0, 1,
           -1, -1, -1, -1)

        C:\MSC.Software\simcenter_nastran_2019.2\tpl_post2\dbxdr12lg.op2
        data = (3321, 2, -0.5, 0.0, 1.0, 4, 0.0, 0.0,
                3, 0.5, 0, 1,
                3, 0.5, 0, 1)
        """
        op2 = self.op2
        op2.to_nx(' because PCOMP-64 was found')
        nproperties = 0
        s1 = Struct(mapfmt(op2._endian + b'2i3fi2f', self.size))
        ntotal1 = 32 * self.factor
        s2 = Struct(mapfmt(op2._endian + b'i2fi', self.size))
        # the ply list is terminated by four -1 words
        four_minus1 = Struct(mapfmt(op2._endian + b'4i', self.size))
        ndata = len(data)
        ntotal2 = 16 * self.factor
        props = []
        while n < (ndata - ntotal1):
            out = s1.unpack(data[n:n+ntotal1])
            (pid, nlayers, z0, nsm, sb, ft, tref, ge) = out
            assert pid > 0
            if op2.binary_debug:
                op2.binary_debug.write(f'PCOMP pid={pid} nlayers={nlayers} z0={z0} nsm={nsm} '
                                       f'sb={sb} ft={ft} Tref={tref} ge={ge}')
            assert isinstance(nlayers, int), out
            #print(f'PCOMP pid={pid} nlayers={nlayers} z0={z0} nsm={nsm} '
                  #f'sb={sb} ft={ft} Tref={tref} ge={ge}')
            n += ntotal1
            # None, 'SYM', 'MEM', 'BEND', 'SMEAR', 'SMCORE', 'NO'
            is_symmetrical = 'NO'
            #if nlayers < 0:
                #is_symmetrical = 'SYM'
                #nlayers = abs(nlayers)
            mids = []
            T = []
            thetas = []
            souts = []
            # read plies until the (-1, -1, -1, -1) sentinel (or end of data)
            edata2 = data[n:n+ntotal2]
            idata = four_minus1.unpack(edata2)
            while idata != (-1, -1, -1, -1):
                (mid, t, theta, sout) = s2.unpack(edata2)
                mids.append(mid)
                T.append(t)
                thetas.append(theta)
                souts.append(sout)
                if op2.is_debug_file:
                    op2.binary_debug.write(f'  mid={mid} t={t} theta={theta} sout={sout}\n')
                n += ntotal2
                #print(f'  mid={mid} t={t} theta={theta} sout={sout}')
                edata2 = data[n:n+ntotal2]
                if n == ndata:
                    op2.log.warning('    no (-1, -1, -1, -1) flag was found to close the PCOMPs')
                    break
                idata = four_minus1.unpack(edata2)
            if self.size == 4:
                assert 0 < nlayers < 400, 'pid=%s nlayers=%s z0=%s nms=%s sb=%s ft=%s Tref=%s ge=%s' % (
                    pid, nlayers, z0, nsm, sb, ft, tref, ge)
            else:
                # 64-bit quirk: the stored ply count is 0, so recover it
                # from the plies actually read
                assert nlayers == 0, nlayers
                nlayers = len(mids)
            data_in = [
                pid, z0, nsm, sb, ft, tref, ge,
                is_symmetrical, mids, T, thetas, souts]
            prop = PCOMP.add_op2_data(data_in)
            nproperties += 1
            # skip the sentinel row
            n += ntotal2
            props.append(prop)
        return n, props
    def _read_pcomp_32_bit(self, data: bytes, n: int) -> Tuple[int, List[PCOMP]]:  # pragma: no cover
        """
        PCOMP(2706,27,287) - the marker for Record 22

        Reads the 32-bit PCOMP layout: an 8-word header (pid, nlayers, z0,
        nsm, sb, ft, tref, ge) followed by ``nlayers`` 4-word plies
        (mid, t, theta, sout); returns ``(n, props)``.
        """
        op2 = self.op2
        nproperties = 0
        s1 = Struct(mapfmt(op2._endian + b'2i3fi2f', self.size))
        ntotal1 = 32 * self.factor
        s2 = Struct(mapfmt(op2._endian + b'i2fi', self.size))
        ndata = len(data)
        ntotal2 = 16 * self.factor
        props = []
        while n < (ndata - ntotal1):
            out = s1.unpack(data[n:n+ntotal1])
            (pid, nlayers, z0, nsm, sb, ft, tref, ge) = out
            assert pid > 0
            if op2.binary_debug:
                op2.binary_debug.write(f'PCOMP pid={pid} nlayers={nlayers} z0={z0} nsm={nsm} '
                                       f'sb={sb} ft={ft} Tref={tref} ge={ge}')
            assert isinstance(nlayers, int), out
            #print(f'PCOMP pid={pid} nlayers={nlayers} z0={z0} nsm={nsm} '
                  #f'sb={sb} ft={ft} Tref={tref} ge={ge}')
            n += ntotal1
            mids = []
            T = []
            thetas = []
            souts = []
            # None, 'SYM', 'MEM', 'BEND', 'SMEAR', 'SMCORE', 'NO'
            is_symmetrical = 'NO'
            # a negative ply count flags a symmetric layup
            if nlayers < 0:
                is_symmetrical = 'SYM'
                nlayers = abs(nlayers)
            assert nlayers > 0, out
            assert 0 < nlayers < 400, 'pid=%s nlayers=%s z0=%s nsm=%s sb=%s ft=%s Tref=%s ge=%s' % (
                pid, nlayers, z0, nsm, sb, ft, tref, ge)
            if op2.is_debug_file:
                op2.binary_debug.write('    pid=%s nlayers=%s z0=%s nsm=%s sb=%s ft=%s Tref=%s ge=%s\n' % (
                    pid, nlayers, z0, nsm, sb, ft, tref, ge))
            #if op2._nastran_format == 'optistruct':
                #print('    pid=%s nlayers=%s z0=%s nsm=%s sb=%s ft=%s Tref=%s ge=%s' % (
                    #pid, nlayers, z0, nsm, sb, ft, tref, ge))
            for unused_ilayer in range(nlayers):
                (mid, t, theta, sout) = s2.unpack(data[n:n+ntotal2])
                if op2._nastran_format == 'optistruct':
                    #print(f'  mid={mid} t={t} theta={theta} sout={sout}')
                    # optistruct writes sout=2/3 here; map both to YES
                    if sout in [2, 3]:  # TODO: Why is this 2/3?
                        sout = 1  # YES
                mids.append(mid)
                assert mid > 0
                T.append(t)
                thetas.append(theta)
                souts.append(sout)
                if op2.is_debug_file:
                    op2.binary_debug.write(f'      mid={mid} t={t} theta={theta} sout={sout}\n')
                n += ntotal2
            data_in = [
                pid, z0, nsm, sb, ft, tref, ge,
                is_symmetrical, mids, T, thetas, souts]
            prop = PCOMP.add_op2_data(data_in)
            #print(prop)
            props.append(prop)
            nproperties += 1
        return n, props
def _read_pcompg(self, data: bytes, n: int) -> int:
"""
PCOMP(2706,27,287)
1 PID I Property identification number
2 LAMOPT I Laminate option
3 Z0 RS Distance from the reference plane to the bottom surface
4 NSM RS Nonstructural mass per unit area
5 SB RS Allowable shear stress of the bonding material
6 FT I Failure theory
7 TREF RS Reference temperature
8 GE RS Damping coefficient
9 GPLYIDi I Global ply IDs.
10 MID I Material identification number
11 T RS Thicknesses of the ply
12 THETA RS Orientation angle of the longitudinal direction of the ply
13 SOUT I Stress or strain output request of the ply
Words 9 through 13 repeat N times (until -1, -1, -1, -1, -1 as Nplies doesn't exist...)
float = (15006, 150, 604,
5, 0.0, 1.7368e-18, 0.0, 0.0, 0.0, 20.0, 0.0,
5e-324, 5e-324, 2.0, 0.0, 0.0,
1e-323, 1e-323, 3.0, 0.0, 0.0,
1.5e-323, 1e-323, 3.0, 0.0, 0.0,
2e-323, 5e-324, 2.0, 0.0, 0.0,
nan, nan, nan, nan, nan)
int = (15006, 150, 604,
5, 0, 1.7368e-18, 0, 0, 0, 20.0, 0,
1, 1, 4611686018427387904, 0, 0,
2, 2, 4613937818241073152, 0, 0,
3, 2, 4613937818241073152, 0, 0,
4, 1, 4611686018427387904, 0, 0,
-1, -1, -1, -1, -1)
"""
op2 = self.op2
nproperties = 0
s1 = Struct(mapfmt(op2._endian + b'2i 3f i 2f', self.size))
s2 = Struct(mapfmt(op2._endian + b'2i 2f i', self.size))
struct_i5 = Struct(mapfmt(op2._endian + b'5i', self.size))
# lam - SYM, MEM, BEND, SMEAR, SMCORE, None
lam_map = {
0 : None,
# MEM
# BEND
# SMEAR
# SMCORE
}
# ft - HILL, HOFF, TSAI, STRN, None
ft_map = {
0 : None,
# HILL
# HOFF
3 : 'TSAI',
# STRN
}
# sout - YES, NO
sout_map = {
0 : 'NO',
1 : 'YES',
}
ndata = len(data)
#op2.show_data(data, types='qd')
ntotal1 = 32 * self.factor
ntotal2 = 20 * self.factor
while n < (ndata - ntotal1):
out = s1.unpack(data[n:n+ntotal1])
(pid, lam_int, z0, nsm, sb, ft_int, tref, ge) = out
if op2.binary_debug:
op2.binary_debug.write(f'PCOMPG pid={pid} lam_int={lam_int} z0={z0} nsm={nsm} '
f'sb={sb} ft_int={ft_int} tref={tref} ge={ge}')
#print(f'PCOMPG pid={pid} lam_int={lam_int} z0={z0} nsm={nsm} sb={sb} '
#f'ft_int={ft_int} tref={tref} ge={ge}')
assert isinstance(lam_int, int), out
assert pid > -1, out
n += ntotal1
mids = []
thicknesses = []
thetas = []
souts = []
global_ply_ids = []
# None, 'SYM', 'MEM', 'BEND', 'SMEAR', 'SMCORE', 'NO'
#is_symmetrical = 'NO'
#if nlayers < 0:
#is_symmetrical = 'SYM'
#nlayers = abs(nlayers)
#assert nlayers > 0, out
#assert 0 < nlayers < 400, 'pid=%s nlayers=%s z0=%s nms=%s sb=%s ft=%s tref=%s ge=%s' % (
#pid, nlayers, z0, nsm, sb, ft, tref, ge)
#if op2.is_debug_file:
#op2.binary_debug.write(' pid=%s nlayers=%s z0=%s nms=%s sb=%s ft=%s tref=%s ge=%s\n' % (
#pid, nlayers, z0, nsm, sb, ft, tref, ge))
ilayer = 0
while ilayer < 1000:
ints5 = struct_i5.unpack(data[n:n+ntotal2])
if ints5 == (-1, -1, -1, -1, -1):
if op2.is_debug_file:
op2.binary_debug.write(' global_ply=%-1 mid=%-1 t=%-1 theta=%-1 sout=-1\n')
break
(global_ply, mid, t, theta, sout_int) = s2.unpack(data[n:n+ntotal2])
#print(' ', (global_ply, mid, t, theta, sout_int))
try:
sout = sout_map[sout_int]
except KeyError:
op2.log.error('cant parse global_ply=%s sout=%s; assuming 0=NO' % (
global_ply, sout_int))
sout = 'NO'
global_ply_ids.append(global_ply)
mids.append(mid)
thicknesses.append(t)
thetas.append(theta)
souts.append(sout)
if op2.is_debug_file:
op2.binary_debug.write(' global_ply=%s mid=%s t=%s theta=%s sout_int=%s sout=%r\n' % (
global_ply, mid, t, theta, sout_int, sout))
n += ntotal2
ilayer += 1
n += ntotal2
try:
ft = ft_map[ft_int]
except KeyError:
op2.log.error('pid=%s cant parse ft=%s; should be HILL, HOFF, TSAI, STRN'
'...skipping' % (pid, ft_int))
continue
try:
lam = lam_map[lam_int]
except KeyError:
op2.log.error('pid=%s cant parse lam=%s; should be HILL, HOFF, TSAI, STRN'
'...skipping' % (pid, lam_int))
continue
# apparently Nastran makes duplicate property ids...
if pid in op2.properties and op2.properties[pid].type == 'PCOMP':
del op2.properties[pid]
op2.add_pcompg(pid, global_ply_ids, mids, thicknesses, thetas=thetas, souts=souts,
nsm=nsm, sb=sb, ft=ft, tref=tref, ge=ge, lam=lam, z0=z0, comment='')
nproperties += 1
op2.card_count['PCOMPG'] = nproperties
return n
# PCOMPA
def _read_pconeax(self, data: bytes, n: int) -> int:
"""
(152,19,147) - Record 24
"""
self.op2.log.info('geom skipping PCONEAX in EPT')
return len(data)
    def _read_pconv(self, data: bytes, n: int) -> int:
        """
        Common method for reading PCONVs; dispatches on the per-entry size
        (16 bytes = NX, 56 bytes = MSC) and de-duplicates repeated pconids.
        """
        op2 = self.op2
        #n = self._read_dual_card(data, n, self._read_pconv_nx, self._read_pconv_msc,
                                 #'PCONV', self._add_pconv)
        card_name = 'PCONV'
        card_obj = PCONV
        methods = {
            16 : self._read_pconv_nx_16,  # 16=4*4
            56 : self._read_pconv_msc_56,  # 56=4*14
        }
        try:
            n, elements = op2.reader_geom2._read_double_card_load(
                card_name, card_obj,
                methods, data, n)
        except DoubleCardError:
            # size dispatch was ambiguous/failed; try NX then MSC explicitly
            # NOTE(review): this uses op2._read_dual_card_load while the
            # happy path uses op2.reader_geom2._read_double_card_load --
            # confirm the attribute exists on the OP2 object
            nx_method = partial(self._read_pconv_nx_16, card_obj)
            msc_method = partial(self._read_pconv_msc_56, card_obj)
            n, elements = op2._read_dual_card_load(
                data, n,
                nx_method, msc_method,
                card_name, self._add_op2_property)
        nelements = len(elements)
        for prop in elements:
            key = prop.pconid
            if key in op2.convection_properties:
                prop_old = op2.convection_properties[key]
                # only re-add when the duplicate differs from what's stored
                if prop != prop_old:
                    op2.log.warning(prop.raw_fields())
                    op2.log.warning(prop_old.raw_fields())
                    op2.log.warning(f'PCONV pconid={key}; old, new\n{prop_old}{prop}')
                    # this will fail due to a duplicate id
                    self._add_pconv(prop)
                #else:
                    # already exists
            else:
                self._add_pconv(prop)
        op2.card_count['PCONV'] = nelements
        return n
def _read_pconv_nx_16(self, card_obj: PCONV, data: bytes, n: int) -> int:
"""
(11001,110,411)- NX version
"""
op2 = self.op2
ntotal = 16 # 4*4
struct_3if = Struct(op2._endian + b'3if')
nentries = (len(data) - n) // ntotal
assert (len(data) - n) % ntotal == 0
props = []
for unused_i in range(nentries):
out = struct_3if.unpack(data[n:n+ntotal])
(pconid, mid, form, expf) = out
ftype = tid = chlen = gidin = ce = e1 = e2 = e3 = None
data_in = (pconid, mid, form, expf, ftype, tid, chlen,
gidin, ce, e1, e2, e3)
prop = PCONV.add_op2_data(data_in)
props.append(prop)
n += ntotal
return n, props
def _read_pconv_msc_56(self, card_obj: PCONV, data: bytes, n: int) -> int:
"""
(11001,110,411)- MSC version - Record 25
"""
op2 = self.op2
ntotal = 56 # 14*4
s = Struct(op2._endian + b'3if 4i fii 3f')
nentries = (len(data) - n) // ntotal
assert (len(data) - n) % ntotal == 0
props = []
for unused_i in range(nentries):
out = s.unpack(data[n:n+ntotal])
(pconid, mid, form, expf, ftype, tid, unused_undef1, unused_undef2, chlen,
gidin, ce, e1, e2, e3) = out
data_in = (pconid, mid, form, expf, ftype, tid, chlen,
gidin, ce, e1, e2, e3)
prop = PCONV.add_op2_data(data_in)
props.append(prop)
n += ntotal
return n, props
def _read_pconvm(self, data: bytes, n: int) -> int:
"""Record 24 -- PCONVM(2902,29,420)
1 PID I Property identification number
2 MID I Material identification number
3 FORM I Type of formula used for free convection
4 FLAG I Flag for mass flow convection
5 COEF RS Constant coefficient used for forced convection
6 EXPR RS Reynolds number convection exponent
7 EXPPI RS Prandtl number convection exponent into the working fluid
8 EXPPO RS Prandtl number convection exponent out of the working fluid
"""
op2 = self.op2
ntotal = 32 # 8*4
structi = Struct(op2._endian + b'4i 4f')
nentries = (len(data) - n) // ntotal
for unused_i in range(nentries):
out = structi.unpack(data[n:n+ntotal])
if out != (0, 0, 0, 0, 0., 0., 0., 0.):
(pconid, mid, form, flag, coeff, expr, expri, exppo) = out
#print(out)
prop = PCONVM(pconid, mid, coeff, form=form, flag=flag,
expr=expr, exppi=expri, exppo=exppo, comment='')
op2._add_methods._add_convection_property_object(prop)
n += ntotal
op2.card_count['PCONVM'] = nentries
return n
def _read_pdamp(self, data: bytes, n: int) -> int:
"""
PDAMP(202,2,45) - the marker for Record ???
"""
op2 = self.op2
ntotal = 8 * self.factor # 2*4
struct_if = Struct(mapfmt(op2._endian + b'if', self.size))
nentries = (len(data) - n) // ntotal
for unused_i in range(nentries):
out = struct_if.unpack(data[n:n+ntotal])
#(pid, b) = out
prop = PDAMP.add_op2_data(out)
self._add_op2_property(prop)
n += ntotal
op2.card_count['PDAMP'] = nentries
return n
def _read_pdampt(self, data: bytes, n: int) -> int: # 26
self.op2.log.info('geom skipping PDAMPT in EPT')
return len(data)
def _read_pdamp5(self, data: bytes, n: int) -> int: # 26
self.op2.log.info('geom skipping PDAMP5 in EPT')
return len(data)
# PDUM1
# PDUM2
# PDUM3
# PDUM4
# PDUM5
# PDUM6
# PDUM7
# PDUM8
# PDUM9
def _read_pelas(self, data: bytes, n: int) -> int:
"""PELAS(302,3,46) - the marker for Record 39"""
op2 = self.op2
struct_i3f = Struct(mapfmt(op2._endian + b'i3f', self.size))
ntotal = 16 * self.factor # 4*4
nproperties = (len(data) - n) // ntotal
for unused_i in range(nproperties):
edata = data[n:n+ntotal]
out = struct_i3f.unpack(edata)
#(pid, k, ge, s) = out
if op2.is_debug_file:
op2.binary_debug.write(' PELAS=%s\n' % str(out))
prop = PELAS.add_op2_data(out)
self._add_op2_property(prop)
n += ntotal
op2.card_count['PELAS'] = nproperties
return n
def _read_pfast_msc(self, data: bytes, n: int) -> int:
r"""
Word Name Type Description
1 PID I Property identification number
2 MID I Material property identification number
3 D RS Diameter of the fastener
4 CONNBEH I Connection behavior (0=FF/F, 1=FR, 10=RF/R, 11=RR)
5 CONNTYPE I Connection type (0=clamp, 1=hinge, 2=bolt)
6 EXTCON I External constraint flag (0=off, 1=on)
7 CONDTYPE I Condition type (0=rigid, 1=equivalent)
8 WELDTYPE I Weld type (0=spot weld, 1=but seam, 2=T-seam)
9 MINLEN RS Minimum length of spot weld
10 MAXLEN RS Maximum length of spot weld
11 GMCHK I Perform geometry check
12 SPCGS I SPC the master grid GS
13 CMASS RS Concentrated mass
14 GE RS Structureal Damping
15 UNDEF(3) none Not used
18 MCID I Element stiffness coordinate system
19 MFLAG I Defined the coordinate system type
20 KT(3) RS Stiffness values in direction 1
23 KR(3) RS Rotation stiffness values in direction 1
C:\MSC.Software\msc_nastran_runs\cfmass.op2
pid mid D con con ext cond weld min max chk spc cmass ge und und und mcid mfag kt1 kt2 kt3 kr1 kr2 kr3
ints = (99, 0, 0.1, 0, 0, 0, 0, -1, 0.2, 5.0, 0, 0, 7.9, 0, 0, 0, 0, -1, 0, 471200.0, 181200.0, 181200.0, 226.6, 45610.0, 45610.0)
floats = (99, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, -1, 0.2, 5.0, 0.0, 0.0, 7.9, 0.0, 0.0, 0.0, 0.0, -1, 0.0, 471200.0, 181200.0, 181200.0, 226.6, 45610.0, 45610.0)
"""
op2 = self.op2
#op2.show_data(data[n:], types='ifs')
#ntotal = 92 * self.factor # 26*4
#struct1 = Struct(op2._endian + b'ifii 3f')
ntotal = 100 * self.factor # 25*4
struct1 = Struct(op2._endian + b'2if 5i 2f2i2f 3i 2i 6f')
ndatai = len(data) - n
nproperties = ndatai // ntotal
delta = ndatai % ntotal
assert delta == 0, 'len(data)-n=%s n=%s' % (ndatai, ndatai / 100.)
for unused_i in range(nproperties):
edata = data[n:n+ntotal]
out = struct1.unpack(edata)
if op2.is_debug_file:
op2.binary_debug.write(' PFAST=%s\n' % str(out))
(pid, d, mcid, unused_connbeh, unused_conntype, unused_extcon,
unused_condtype, unused_weldtype, unused_minlen, unused_maxlen,
unused_gmcheck, unused_spcgs, mass, ge,
unused_aa, unused_bb, unused_cc, mcid, mflag,
kt1, kt2, kt3, kr1, kr2, kr3) = out
data_in = (pid, d, mcid, mflag, kt1, kt2, kt3,
kr1, kr2, kr3, mass, ge)
prop = PFAST.add_op2_data(data_in)
str(prop)
#print(prop)
self._add_op2_property(prop)
n += ntotal
op2.card_count['PFAST'] = nproperties
return n
def _read_pfast_nx(self, data: bytes, n: int) -> int:
"""
PFAST(3601,36,55)
NX only
"""
op2 = self.op2
ntotal = 48
struct1 = Struct(op2._endian + b'ifii 8f')
nproperties = (len(data) - n) // ntotal
delta = (len(data) - n) % ntotal
assert delta == 0, 'len(data)-n=%s n=%s' % (len(data) - n, (len(data) - n) / 48.)
for unused_i in range(nproperties):
edata = data[n:n+ntotal]
out = struct1.unpack(edata)
if op2.is_debug_file:
op2.binary_debug.write(' PFAST=%s\n' % str(out))
(pid, d, mcid, mflag, kt1, kt2, kt3, kr1, kr2, kr3, mass, ge) = out
data_in = (pid, d, mcid, mflag, kt1, kt2, kt3,
kr1, kr2, kr3, mass, ge)
prop = PFAST.add_op2_data(data_in)
self._add_op2_property(prop)
n += ntotal
op2.card_count['PFAST'] = nproperties
op2.to_nx(' because PFAST-NX was found')
return n
def _read_pelast(self, data: bytes, n: int) -> int:
"""
Record 41 -- PELAST(1302,13,34)
1 PID I Property identification number
2 TKID I TABLEDi entry identification number for stiffness
3 TGEID I TABLEDi entry identification number for structural
damping
4 TKNID I TABLEDi entry
"""
op2 = self.op2
ntotal = 16 * self.factor
struct_4i = Struct(mapfmt(op2._endian + b'4i', self.size))
nproperties = (len(data) - n) // ntotal
for unused_i in range(nproperties):
edata = data[n:n+ntotal]
out = struct_4i.unpack(edata)
if op2.is_debug_file:
op2.binary_debug.write(' PELAST=%s\n' % str(out))
#(pid, tkid, tgeid, tknid) = out
prop = PELAST.add_op2_data(out)
op2._add_methods._add_pelast_object(prop)
n += ntotal
op2.card_count['PELAST'] = nproperties
return n
def _read_pgap(self, data: bytes, n: int) -> int:
"""
PGAP(2102,21,121) - the marker for Record 42
"""
op2 = self.op2
ntotal = 44 * self.factor
struct_i10f = Struct(mapfmt(op2._endian + b'i10f', self.size))
nproperties = (len(data) - n) // ntotal
for unused_i in range(nproperties):
edata = data[n:n+ntotal]
out = struct_i10f.unpack(edata)
if op2.is_debug_file:
op2.binary_debug.write(' PGAP=%s\n' % str(out))
#(pid,u0,f0,ka,kb,kt,mu1,mu2,tmax,mar,trmin) = out
prop = PGAP.add_op2_data(out)
self._add_op2_property(prop)
n += ntotal
op2.card_count['PGAP'] = nproperties
return n
def _read_phbdy(self, data: bytes, n: int) -> int:
"""
PHBDY(2802,28,236) - the marker for Record 43
"""
op2 = self.op2
struct_i3f = Struct(op2._endian + b'ifff')
nproperties = (len(data) - n) // 16
for unused_i in range(nproperties):
edata = data[n:n+16]
out = struct_i3f.unpack(edata)
if op2.is_debug_file:
op2.binary_debug.write(' PHBDY=%s\n' % str(out))
#(pid, af, d1, d2) = out
prop = PHBDY.add_op2_data(out)
op2._add_methods._add_phbdy_object(prop)
n += 16
op2.card_count['PHBDY'] = nproperties
return n
def _read_pintc(self, data: bytes, n: int) -> int:
self.op2.log.info('geom skipping PINTC in EPT')
return len(data)
def _read_pints(self, data: bytes, n: int) -> int:
self.op2.log.info('geom skipping PINTS in EPT')
return len(data)
def _read_pbeam3(self, data: bytes, n: int) -> int:
op2 = self.op2
card_name = 'PBUSHT'
card_obj = PBUSHT
methods = {
264 : self._read_pbeam3_264,
456 : self._read_pbeam3_456,
}
try:
n = op2.reader_geom2._read_double_card(
card_name, card_obj, self._add_op2_property,
methods, data, n)
except DoubleCardError:
raise
op2.log.warning(f'try-except {card_name}')
return n
def _read_pbeam3_456(self, card_obj, data: bytes, n: int) -> int:
r"""
# per C:\MSC.Software\msc_nastran_runs\b3plod3.op2
ints = (2201, 1, 1.0, 0.1833, 0.0833, 0, -1.0, 0, -0.5, -0.5, -0.5, 0.5, 0.5, 0.5, 0.5, -0.5,
2, 1.0, 0.1833, 0.0833, 0, -1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2, 1.0, 0.1833, 0.0833, 0, -1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1.0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2901, 2, 0.1, 0.1, 0.1, 0, 0.2, 0, 0.5, 0, 0, 0.5, -0.5, 0, 0, -0.5,
2, 0.1, 0.1, 0.1, 0, 0.2, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2, 0.1, 0.1, 0.1, 0, 0.2, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1.0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
floats = (2201, 1, 1.0, 0.1833, 0.0833, 0.0, -1.0, 0.0, -0.5, -0.5, -0.5, 0.5, 0.5, 0.5, 0.5, -0.5,
2, 1.0, 0.1833, 0.0833, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
2, 1.0, 0.1833, 0.0833, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
2901, 2, 0.1, 0.1, 0.1, 0.0, 0.2, 0.0, 0.5, 0.0, 0.0, 0.5, -0.5, 0.0, 0.0, -0.5,
2, 0.1, 0.1, 0.1, 0.0, 0.2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
2, 0.1, 0.1, 0.1, 0.0, 0.2, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
"""
op2 = self.op2
#op2.show_data(data[n:])
ntotal = 456 * self.factor # 114*4
#
struct1 = Struct(mapfmt(op2._endian +
b'2i' # pid, mid
b'3f' # A, Iy, Iz
b'5f' # # a, b, c, d, e
b'5f fi 14f i' #fj ki 14f i
b'2i3f' #aa-ee - good
b'5f' #ff-jj
b'5f' #kk-oo
b'5f' #pp-tt
b'6f' #uu-zz
b'5f' #aaa-eee
b'4i' #fff-iii
# jjj-ooo
b'2f iii f'
# ppp-ttt
b'5f'
# uuu-zzz
b'6f'
b'30f', self.size))
ndatai = len(data) - n
nentries = ndatai // ntotal
assert ndatai % ntotal == 0
props = []
for unused_i in range(nentries):
#print(n, ntotal)
datai = data[n:n+ntotal]
#op2.show_data(datai, types='ifqd')
n += ntotal
(pid, mid, A, iz, iy,
a, b, c, d, e,
f, g, h, i, j,
k, inta, l, m, ni, o, p, q, r, s, t, u, v, w, x, y, z,
aa, bb, cc, dd, ee,
ff, gg, hh, ii, jj,
kk, ll, mm, nn, oo,
pp, qq, rr, ss, tt,
uu, vv, ww, xx, yy, zz,
aaa, bbb, ccc, ddd, eee,
fff, ggg, hhh, iii,
jjj, kkk, lll, mmm, nnn, ooo,
ppp, qqq, rrr, sss, ttt,
uuu, vvv, www, xxx, yyy, zzz,
*other) = struct1.unpack(datai)
#print(pid, mid, A, iz, iy)
#print('a-e', (a, b, c, d, e))
#print('f-j', (f, g, h, i, j))
#print(k, inta, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z)
#print('aa-ee', (aa, bb, cc, dd, ee))
#print('ff-jj', (ff, gg, hh, ii, jj))
#print('kk-oo', (kk, ll, mm, nn, oo))
#print('pp-tt', (pp, qq, rr, ss, tt))
#print('uu-zz', (uu, vv, ww, xx, yy, zz))
#print('aaa-eee', (aaa, bbb, ccc, ddd, eee))
#print('fff-jjj', (fff, ggg, hhh, iii))
#print('jjj-ooo', (jjj, kkk, lll, mmm, nnn, ooo))
#print('ppp-ttt', (ppp, qqq, rrr, sss, ttt))
#print('uuu-zzz', (uuu, vvv, www, xxx, yyy, zzz))
if mid == 0:
continue
#assert sum(other) < 100, other
prop = PBEAM3(
pid, mid, A, iz, iy, iyz=None, j=None, nsm=0.,
so=None,
cy=None, cz=None,
dy=None, dz=None,
ey=None, ez=None,
fy=None, fz=None,
ky=1., kz=1.,
ny=None, nz=None, my=None, mz=None,
nsiy=None, nsiz=None, nsiyz=None,
cw=None, stress='GRID',
w=None, wy=None, wz=None, comment='')
assert pid > 0, prop.get_stats()
assert mid > 0, prop.get_stats()
str(prop)
props.append(prop)
#self._add_op2_property(prop)
#op2.card_count['PBEAM3'] = nentries
return n, props
def _read_pbeam3_264(self, card_obj, data: bytes, n: int) -> int:
    """
    Reads the 264-byte (66 word) form of the PBEAM3 record.

    TODO: partial

    # per test_cbeam_cbeam3???
    ints = (2901, 2, 0.1, 0.1, 0.1, 0, 0.02, 0, 0.5, 0, 0, 0.5, -0.5, 0, 0, -0.5,
            2, 0.1, 0.1, 0.1, 0, 0.02, 0, ..., 1.0, 1.0, 0, ..., -2, 0, 0)
    """
    op2 = self.op2
    ntotal = 264 * self.factor  # 66*4
    # pid/mid, A/Iz/Iy, a-e, f-j, k/int, then trailing words that are
    # checked against ~0 but not stored
    struct1 = Struct(mapfmt(op2._endian + b'2i 3f 5f 5f fi 14f i 30f 4i', self.size))
    nleft = len(data) - n
    assert nleft % ntotal == 0
    nentries = nleft // ntotal
    props = []
    for unused_entry in range(nentries):
        out = struct1.unpack(data[n:n+ntotal])
        pid, mid, area, iz, iy = out[:5]
        # everything past the first 17 words should be (near) zero
        trailing = out[17:]
        assert sum(trailing) < 100, trailing
        prop = PBEAM3(
            pid, mid, area, iz, iy, iyz=None, j=None, nsm=0.,
            so=None,
            cy=None, cz=None,
            dy=None, dz=None,
            ey=None, ez=None,
            fy=None, fz=None,
            ky=1., kz=1.,
            ny=None, nz=None, my=None, mz=None,
            nsiy=None, nsiz=None, nsiyz=None,
            cw=None, stress='GRID',
            w=None, wy=None, wz=None, comment='')
        assert pid > 0, prop.get_stats()
        assert mid > 0, prop.get_stats()
        str(prop)
        props.append(prop)
        n += ntotal
    return n, props
def _read_pplane(self, data: bytes, n: int) -> int:
    """
    Reads the PPLANE(3801,38,979) record.

    Word Name   Type Description
    1    PID    I    Property identification number
    2    MID    I    Material identification number
    3    T      RS   Default membrane thickness for Ti on the connection entry
    4    NSM    RS   Nonstructural mass per unit area
    5    FOROPT I    Formulation option number
    6    CSOPT  I    Reserved for coordinate system definition of plane
    7    UNDEF(2) None

    ints    = (1, 1, 1.0, 0, 0, 0, 0, 0, 2, 2, 1.0, 0, 0, 0, 0, 0)
    floats  = (1, 1, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2, 2, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0)
    """
    op2 = self.op2
    ntotal = 32 * self.factor  # 8*4
    struct1 = Struct(mapfmt(op2._endian + b'2i 2f 4i', self.size))
    ndatai = len(data) - n
    nentries = ndatai // ntotal
    assert ndatai % ntotal == 0
    for unused_i in range(nentries):
        out = struct1.unpack(data[n:n+ntotal])
        pid, mid, t, nsm, foropt, csopt = out[:6]
        # CSOPT is reserved and expected to be 0
        assert csopt == 0, csopt
        pplane = op2.add_pplane(pid, mid, t=t, nsm=nsm,
                                formulation_option=foropt)
        pplane.validate()
        str(pplane)
        n += ntotal
    # bug fix: this reader parses PPLANE cards, but the counter was written
    # under the 'PLPLANE' key, colliding with _read_plplane's counter
    op2.card_count['PPLANE'] = nentries
    return n
def _read_plplane(self, data: bytes, n: int) -> int:
"""
PLPLANE(4606,46,375)
NX 10
1 PID I Property identification number
2 MID I Material identification number
3 CID I Coordinate system identification number
4 STR CHAR4 Location of stress and strain output
5 T RS Default membrane thickness for Ti on the connection entry
6 CSOPT I Reserved for coordinate system definition of plane
7 UNDEF(5) None
MSC 2016
PID I Property identification number
2 MID I Material identification number
3 CID I Coordinate system identification number
4 STR CHAR4 Location of stress and strain output
5 UNDEF(7 ) none Not used
.. warning:: CSOPT ad T are not supported
"""
op2 = self.op2
ntotal = 44 * self.factor # 4*11
if self.size == 4:
s = Struct(op2._endian + b'3i 4s f 6i')
else:
s = Struct(op2._endian + b'3q 8s d 6q')
nentries = (len(data) - n) // ntotal
for unused_i in range(nentries):
out = s.unpack(data[n:n+ntotal])
pid, mid, cid, location, unused_t, unused_csopt = out[:6]
location = location.decode('latin1')
#op2.show_data(data[n:n+ntotal], 'ifs')
op2.add_plplane(pid, mid, cid=cid, stress_strain_output_location=location)
n += ntotal
op2.card_count['PLPLANE'] = nentries
return n
def _read_plsolid(self, data: bytes, n: int) -> int:
"""
MSC 2016
1 PID I Property identification number
2 MID I Material identification number
3 STR CHAR4 Location of stress and strain output
4 UNDEF(4 ) none Not used
NX 10
1 PID I Property identification number
2 MID I Material identification number
3 STR CHAR4 Location of stress and strain output
4 CSOPT I Reserved for coordinate system definition of plane
5 UNDEF(3) None
.. warning:: CSOPT is not supported
"""
op2 = self.op2
ntotal = 28 * self.factor # 4*7
if self.size == 4:
struct1 = Struct(op2._endian + b'2i 4s 4i')
else:
struct1 = Struct(op2._endian + b'2q 8s 4q')
nentries = (len(data) - n) // ntotal
for unused_i in range(nentries):
out = struct1.unpack(data[n:n+ntotal])
pid, mid, location, unused_csopt, unused_null_a, unused_null_b, unused_null_c = out
location = location.decode('latin1')
#op2.show_data(data[n:n+ntotal], 'ifs')
op2.add_plsolid(pid, mid, stress_strain=location, ge=0.)
n += ntotal
op2.card_count['PLSOLID'] = nentries
return n
def _read_pmass(self, data: bytes, n: int) -> int:
    """
    PMASS(402,4,44) - the marker for Record 48

    Each entry is (pid, mass).
    """
    op2 = self.op2
    ntotal = 8 * self.factor  # 2*4
    struct1 = Struct(mapfmt(op2._endian + b'if', self.size))
    nentries = (len(data) - n) // ntotal
    for unused_entry in range(nentries):
        out = struct1.unpack(data[n:n + ntotal])
        if op2.is_debug_file:
            op2.binary_debug.write(' PMASS=%s\n' % str(out))
        prop = PMASS.add_op2_data(out)
        self._add_op2_property_mass(prop)
        n += ntotal
    # NOTE(review): unlike most readers here, op2.card_count['PMASS'] is
    # not set; preserved as-is
    return n
def _read_prod(self, data: bytes, n: int) -> int:
    """
    PROD(902,9,29) - the marker for Record 49

    Each entry is (pid, mid, a, j, c, nsm).
    """
    op2 = self.op2
    ntotal = 24 * self.factor  # 6*4
    struct1 = Struct(mapfmt(op2._endian + b'2i4f', self.size))
    nproperties = (len(data) - n) // ntotal
    for unused_entry in range(nproperties):
        out = struct1.unpack(data[n:n+ntotal])
        prop = PROD.add_op2_data(out)
        if op2.is_debug_file:
            op2.binary_debug.write(' PROD=%s\n' % str(out))
        self._add_op2_property(prop)
        n += ntotal
    op2.card_count['PROD'] = nproperties
    return n
def _read_pshear(self, data: bytes, n: int) -> int:
    """
    PSHEAR(1002,10,42) - the marker for Record 50

    Each entry is (pid, mid, t, nsm, f1, f2).
    """
    op2 = self.op2
    ntotal = 24 * self.factor
    struct1 = Struct(mapfmt(op2._endian + b'2i4f', self.size))
    nproperties = (len(data) - n) // ntotal
    for unused_entry in range(nproperties):
        out = struct1.unpack(data[n:n+ntotal])
        if op2.is_debug_file:
            op2.binary_debug.write(' PSHEAR=%s\n' % str(out))
        self._add_op2_property(PSHEAR.add_op2_data(out))
        n += ntotal
    op2.card_count['PSHEAR'] = nproperties
    return n
def _read_pshell(self, data: bytes, n: int) -> int:
    """
    PSHELL(2302,23,283) - the marker for Record 51

    Reads shell properties.  A pid already present in ``op2.properties``
    is treated as a duplicate ("fake" PSHELL written alongside a
    PCOMP/PCOMPG) and skipped; ``nproperties`` is decremented so the
    card count reflects only the PSHELLs actually stored.
    """
    op2 = self.op2
    ntotal = 44 * self.factor  # 11*4
    s = Struct(mapfmt(op2._endian + b'iififi4fi', self.size))
    nproperties = (len(data) - n) // ntotal
    for unused_i in range(nproperties):
        edata = data[n:n+ntotal]
        out = s.unpack(edata)
        (pid, mid1, unused_t, mid2, unused_bk, mid3, unused_ts,
         unused_nsm, unused_z1, unused_z2, mid4) = out
        if op2.is_debug_file:
            op2.binary_debug.write(' PSHELL=%s\n' % str(out))
        prop = PSHELL.add_op2_data(out)
        n += ntotal

        if pid in op2.properties:
            # this is a fake PSHELL
            propi = op2.properties[pid]
            if prop == propi:
                # identical duplicate; drop it and fix the property count
                op2.log.warning(f'Fake PSHELL {pid:d} (skipping):\n{propi}')
                nproperties -= 1
                continue
            #assert propi.type in ['PCOMP', 'PCOMPG'], propi.get_stats()
            # same pid, different card (e.g. a PCOMP); keep the original
            op2.log.error(f'PSHELL {pid:d} is also {propi.type} (skipping PSHELL):\n{propi}{prop}')
            nproperties -= 1
            continue
        self._add_op2_property(prop)
    if nproperties:
        op2.card_count['PSHELL'] = nproperties
    return n
def _read_psolid(self, data: bytes, n: int) -> int:
    """
    PSOLID(2402,24,281) - the marker for Record 52

    Each entry is (pid, mid, cid, inp, stress, isop, fctn).  Entries whose
    fctn field is b'FAKE' are placeholders (likely PCOMPLS) and skipped.
    """
    op2 = self.op2
    if self.size == 4:
        ntotal = 28  # 7*4
        struct1 = Struct(op2._endian + b'6i4s')
    else:
        ntotal = 56
        struct1 = Struct(op2._endian + b'6q8s')
    nproperties = (len(data) - n) // ntotal
    nadded = 0
    for unused_entry in range(nproperties):
        out = struct1.unpack(data[n:n+ntotal])
        if op2.is_debug_file:
            op2.binary_debug.write(' PSOLID=%s\n' % str(out))
        n += ntotal
        if out[-1] == b'FAKE':
            op2.log.warning(' PSOLID=%s; is this a PCOMPLS?' % str(out))
            continue
        self._add_op2_property(PSOLID.add_op2_data(out))
        nadded += 1
    # only the real (non-FAKE) entries are counted
    op2.card_count['PSOLID'] = nadded
    return n
# PSOLIDL
# PTRIA6
# PTSHELL
def _read_ptube(self, data: bytes, n: int) -> int:
    """
    PTUBE(1602,16,30) - the marker for Record 56

    Each entry is (pid, mid, OD, t, nsm).

    .. todo:: OD2 only exists for heat transfer...
              how do i know if there's heat transfer at this point?
              I could store all the tubes and add them later,
              but what about themal/non-thermal subcases?

    .. warning:: assuming OD2 is not written (only done for thermal)
    """
    op2 = self.op2
    ntotal = 20  # 5*4; would be 24 if OD2 were written
    struct1 = Struct(op2._endian + b'2i3f')
    nproperties = (len(data) - n) // ntotal
    for unused_entry in range(nproperties):
        out = struct1.unpack(data[n:n+ntotal])
        (pid, mid, OD, t, nsm) = out
        if op2.is_debug_file:
            op2.binary_debug.write(' PTUBE=%s\n' % str(out))
        prop = PTUBE.add_op2_data([pid, mid, OD, t, nsm])
        self._add_op2_property(prop)
        n += ntotal
    op2.card_count['PTUBE'] = nproperties
    return n
def _read_pset(self, data: bytes, n: int) -> int:
op2 = self.op2
struct_5i4si = Struct(op2._endian + b'5i4si')
nentries = 0
while n < len(data):
edata = data[n:n+28]
out = struct_5i4si.unpack(edata)
#print(out)
idi, poly1, poly2, poly3, cid, typei, typeid = out
typei = typei.rstrip().decode('latin1')
assert typei in ['SET', 'ELID'], (idi, poly1, poly2, poly3, cid, typei, typeid)
if op2.is_debug_file:
op2.binary_debug.write(' PVAL=%s\n' % str(out))
#print(idi, poly1, poly2, poly3, cid, typei, typeid)
typeids = []
n += 28
while typeid != -1:
typeids.append(typeid)
typeid, = op2.struct_i.unpack(data[n:n+4])
n += 4
#print(val)
#print(typeids)
# PSET ID POLY1 POLY2 POLY3 CID SETTYP ID
if len(typeids) == 1:
typeids = typeids[0]
op2.add_pset(idi, poly1, poly2, poly3, cid, typei, typeids)
op2.card_count['PSET'] = nentries
return n
def _read_pval(self, data: bytes, n: int) -> int:
"""
PVAL(10201,102,400)
Word Name Type Description
1 ID I p-value set identification number
2 POLY1 I Polynomial order in 1 direction of the CID system
3 POLY2 I Polynomial order in 2 direction of the CID system
4 POLY3 I Polynomial order in 2 direction of the CID system
5 CID I Coordinate system identification number
6 TYPE CHAR4 Type of set provided: "SET" or "ELID"
7 TYPEID I SET identification number or element identification
number with this p-value specification.
Words 1 through 7 repeat until End of Record
"""
op2 = self.op2
#op2.show_data(data[n:])
if self.size == 4:
struct_5i4si = Struct(op2._endian + b'5i 4s i')
struct_i = op2.struct_i
else:
struct_5i4si = Struct(op2._endian + b'5q 8s q')
struct_i = op2.struct_q
nentries = 0
ntotal = 28 * self.factor
size = self.size
while n < len(data):
edata = data[n:n+ntotal]
out = struct_5i4si.unpack(edata)
#print(out)
idi, poly1, poly2, poly3, cid, typei, typeid = out
typei = typei.rstrip().decode('latin1')
assert typei in ['SET', 'ELID'], f'idi={idi} poly1={poly1} poly2={poly2} poly3={poly3} cid={cid} typei={typei} typeid={typeid}'
if op2.is_debug_file:
op2.binary_debug.write(' PVAL=%s\n' % str(out))
#print(idi, poly1, poly2, poly3, cid, typei, typeid)
typeids = []
n += ntotal
while typeid != -1:
typeids.append(typeid)
typeid, = struct_i.unpack(data[n:n+size])
n += size
#print(val)
#print(typeids)
# PVAL ID POLY1 POLY2 POLY3 CID SETTYP ID
op2.add_pval(idi, poly1, poly2, poly3, cid, typei, typeids)
op2.card_count['PVAL'] = nentries
return n
def _read_pvisc(self, data: bytes, n: int) -> int:
    """PVISC(1802,18,31) - the marker for Record 39

    Each entry is (pid, ce, cr).
    """
    op2 = self.op2
    ntotal = 12  # 3*4
    struct1 = Struct(op2._endian + b'i2f')
    nproperties = (len(data) - n) // ntotal
    for unused_entry in range(nproperties):
        out = struct1.unpack(data[n:n+ntotal])
        if op2.is_debug_file:
            op2.binary_debug.write(' PVISC=%s\n' % str(out))
        self._add_op2_property(PVISC.add_op2_data(out))
        n += ntotal
    op2.card_count['PVISC'] = nproperties
    return n
# PWELD
# PWSEAM
def _read_view(self, data: bytes, n: int) -> int:
self.op2.log.info('geom skipping VIEW in EPT')
return len(data)
def _read_view3d(self, data: bytes, n: int) -> int:
self.op2.log.info('geom skipping VIEW3D in EPT')
return len(data)
def break_by_minus1(idata):
    """helper for ``read_nsm_nx``

    Splits a flat sequence on -1 sentinels, returning one (start, stop)
    index pair per sentinel, where ``stop`` is the index of the -1 itself.
    A trailing run with no sentinel produces no pair.
    """
    packs = []
    start = 0
    for idx, value in enumerate(idata):
        if value == -1:
            packs.append((start, idx))
            start = idx + 1
    return packs
| 41.615995 | 952 | 0.503676 |
from __future__ import annotations
from struct import unpack, Struct
from functools import partial
from typing import Tuple, List, TYPE_CHECKING
import numpy as np
from pyNastran.bdf.cards.properties.mass import PMASS, NSM, NSML
from pyNastran.bdf.cards.properties.bars import PBAR, PBARL, PBEND, PBEAM3
from pyNastran.bdf.cards.properties.beam import PBEAM, PBEAML, PBCOMP
from pyNastran.bdf.cards.properties.bush import PBUSH, PBUSHT
from pyNastran.bdf.cards.properties.damper import PDAMP, PVISC
from pyNastran.bdf.cards.properties.properties import PFAST, PGAP
from pyNastran.bdf.cards.properties.rods import PROD, PTUBE
from pyNastran.bdf.cards.properties.shell import PSHEAR, PSHELL, PCOMP
from pyNastran.bdf.cards.properties.solid import PSOLID
from pyNastran.bdf.cards.properties.springs import PELAS, PELAST
from pyNastran.bdf.cards.thermal.thermal import PCONV, PHBDY, PCONVM
from pyNastran.op2.op2_interface.op2_reader import (
mapfmt, reshape_bytes_block_size)
from .utils import get_minus1_start_end
from .geom2 import DoubleCardError
if TYPE_CHECKING:
from pyNastran.op2.op2_geom import OP2Geom
class EPT:
@property
def size(self) -> int:
return self.op2.size
@property
def factor(self) -> int:
return self.op2.factor
def _read_fake(self, data: bytes, n: int) -> int:
return self.op2._read_fake(data, n)
def read_ept_4(self, data: bytes, ndata: int):
return self.op2._read_geom_4(self.ept_map, data, ndata)
def __init__(self, op2: OP2Geom):
self.op2 = op2
self.ept_map = {
(3201, 32, 55): ['NSM', self._read_nsm],
(52, 20, 181): ['PBAR', self._read_pbar],
(9102, 91, 52): ['PBARL', self._read_pbarl],
(2706, 27, 287): ['PCOMP', self._read_pcomp],
(302, 3, 46): ['PELAS', self._read_pelas],
(2102, 21, 121): ['PGAP', self._read_pgap],
(902, 9, 29): ['PROD', self._read_prod],
(1002, 10, 42): ['PSHEAR', self._read_pshear],
(2402, 24, 281): ['PSOLID', self._read_psolid],
(2302, 23, 283): ['PSHELL', self._read_pshell],
(1602, 16, 30): ['PTUBE', self._read_ptube],
(5402, 54, 262): ['PBEAM', self._read_pbeam],
(9202, 92, 53): ['PBEAML', self._read_pbeaml],
(2502, 25, 248): ['PBEND', self._read_pbend],
(1402, 14, 37): ['PBUSH', self._read_pbush],
(3101, 31, 219): ['PBUSH1D', self._read_pbush1d],
(152, 19, 147): ['PCONEAX', self._read_pconeax],
(11001, 110, 411): ['PCONV', self._read_pconv],
(202, 2, 45): ['PDAMP', self._read_pdamp],
(2802, 28, 236): ['PHBDY', self._read_phbdy],
(402, 4, 44): ['PMASS', self._read_pmass],
(1802, 18, 31): ['PVISC', self._read_pvisc],
(10201, 102, 400): ['PVAL', self._read_pval],
(2606, 26, 289): ['VIEW', self._read_view],
(3201, 32, 991) : ['NSM', self._read_nsm_2],
(3301, 33, 992) : ['NSM1', self._read_nsm1],
(3701, 37, 995) : ['NSML1', self._read_nsml1_nx],
(3601, 36, 62): ['NSML1', self._read_nsml1_msc],
(15006, 150, 604): ['PCOMPG', self._read_pcompg],
(702, 7, 38): ['PBUSHT', self._read_pbusht],
(3301, 33, 56): ['NSM1', self._read_fake],
(3401, 34, 57) : ['NSMADD', self._read_fake],
(3501, 35, 58): ['NSML', self._read_fake],
(3501, 35, 994) : ['NSML', self._read_nsml],
(1502, 15, 36): ['PAABSF', self._read_fake],
(8300, 83, 382): ['PACABS', self._read_fake],
(8500, 85, 384): ['PACBAR', self._read_fake],
(5403, 55, 349): ['PBCOMP', self._read_pbcomp],
(13301, 133, 509): ['PBMSECT', self._read_fake],
(2902, 29, 420): ['PCONVM', self._read_pconvm],
(1202, 12, 33): ['PDAMPT', self._read_pdampt],
(8702, 87, 412): ['PDAMP5', self._read_pdamp5],
(6802, 68, 164): ['PDUM8', self._read_fake],
(6902, 69, 165): ['PDUM9', self._read_fake],
(1302, 13, 34): ['PELAST', self._read_pelast],
(12001, 120, 480): ['PINTC', self._read_fake],
(12101, 121, 484): ['PINTS', self._read_fake],
(4606, 46, 375): ['PLPLANE', self._read_plplane],
(4706, 47, 376): ['PLSOLID', self._read_plsolid],
(10301, 103, 399): ['PSET', self._read_pset],
(3002, 30, 415): ['VIEW3D', self._read_fake],
(13501, 135, 510) : ['PFAST', self._read_pfast_msc],
(3601, 36, 55) : ['PFAST', self._read_pfast_nx],
(3801, 38, 979) : ['PPLANE', self._read_pplane],
(11801, 118, 560) : ['PWELD', self._read_fake],
(3401, 34, 993) : ['NSMADD', self._read_nsmadd],
(9300, 93, 684) : ['ELAR', self._read_fake],
(9400, 94, 685) : ['ELAR2', self._read_fake],
(16006, 160, 903) : ['PCOMPS', self._read_fake],
(14602, 146, 692): ['PSLDN1', self._read_fake],
(16502, 165, 916): ['PAXSYMH', self._read_fake],
(13201, 132, 513): ['PBRSECT', self._read_fake],
(13701, 137, 638): ['PWSEAM', self._read_fake],
(7001, 70, 632): ['???', self._read_fake],
(15106, 151, 953): ['PCOMPG1', self._read_fake],
(3901, 39, 969): ['PSHL3D', self._read_fake],
(17006, 170, 901): ['MATCID', self._read_fake],
(9601, 96, 691): ['PJOINT', self._read_fake],
(16502, 165, 916): ['???', self._read_fake],
(9701, 97, 692): ['PJOINT2', self._read_fake],
(13401, 134, 611): ['PBEAM3', self._read_pbeam3],
(8901, 89, 905): ['PSOLCZ', self._read_fake],
(9801, 98, 698): ['DESC', self._read_desc],
}
def _add_op2_property(self, prop):
op2 = self.op2
ntables = op2.table_names.count(b'EPT') + op2.table_names.count(b'EPTS')
pid = prop.pid
allow_overwrites = (
ntables > 1 and
pid in op2.properties and
op2.properties[pid].type == prop.type)
op2._add_methods._add_property_object(prop, allow_overwrites=allow_overwrites)
def _add_op2_property_mass(self, prop):
op2 = self.op2
ntables = op2.table_names.count(b'EPT') + op2.table_names.count(b'EPTS')
pid = prop.pid
allow_overwrites = (
ntables > 1 and
pid in op2.properties_mass and
op2.properties_mass[pid].type == prop.type)
op2._add_methods._add_property_mass_object(prop, allow_overwrites=allow_overwrites)
def _add_pconv(self, prop: PCONV) -> None:
if prop.pconid > 100000000:
raise RuntimeError('bad parsing pconid > 100000000...%s' % str(prop))
self.op2._add_methods._add_convection_property_object(prop)
def _read_desc(self, data: bytes, n: int) -> int:
op2 = self.op2
assert self.size == 4, 'DESC size={self.size} is not supported'
struct_2i = Struct(op2._endian + b'2i')
while n < len(data):
datai = data[n:n+8]
desc_id, nwords = struct_2i.unpack(datai)
ndatai = 8 + nwords * 4
word_bytes = data[n+8:n+ndatai]
word = word_bytes.decode('ascii').rstrip()
assert len(word_bytes) == nwords * 4
op2.log.warning(f'geom skipping DESC={desc_id}: {word!r}')
n += ndatai
assert n == len(data), n
return n
def _read_nsml(self, data: bytes, n: int) -> int:
op2 = self.op2
n0 = n
ints = np.frombuffer(data[n:], op2.idtype8).copy()
floats = np.frombuffer(data[n:], op2.fdtype8).copy()
istart, iend = get_minus1_start_end(ints)
ncards = 0
size = self.size
for (i0, i1) in zip(istart, iend):
assert ints[i1] == -1, ints[i1]
sid = ints[i0]
prop_bytes = data[n0+(i0+1)*size:n0+(i0+3)*size]
ids = ints[i0+4:i1:2].tolist()
values = floats[i0+5:i1:2].tolist()
assert len(ids) == len(values)
nsm_type = prop_bytes.decode('latin1').rstrip()
nsml = op2.add_nsml(sid, nsm_type, ids, values)
str(nsml)
n += (i1 - i0 + 1) * size
ncards += 1
op2.card_count['NSML'] = ncards
return n
def _read_nsmadd(self, data: bytes, n: int) -> int:
op2 = self.op2
ints = np.frombuffer(data[n:], op2.idtype8).copy()
istart, iend = get_minus1_start_end(ints)
ncards = 0
istart = [0] + list(iend + 1)
size = self.size
for (i0, i1) in zip(istart, iend):
assert ints[i1] == -1, ints[i1]
sid, *nsms = ints[i0:i1]
nsmadd = op2.add_nsmadd(sid, nsms)
str(nsmadd)
n += (i1 - i0 + 1) * size
ncards += 1
op2.card_count['NSMADD'] = ncards
return n
def _read_nsml1_nx(self, data: bytes, n: int) -> int:
op2 = self.op2
n0 = n
ints = np.frombuffer(data[n:], op2.idtype8).copy()
floats = np.frombuffer(data[n:], op2.fdtype8).copy()
iminus2 = np.where(ints == -2)[0]
istart = [0] + list(iminus2[:-1] + 1)
iend = iminus2
assert len(data[n:]) > 12, data[n:]
ncards = 0
istart = [0] + list(iend + 1)
size = self.size
for (i0, i1) in zip(istart, iend):
assert ints[i1] == -2, ints[i1]
sid = ints[i0]
nsm_type = data[n0+(i0+1)*size:n0+(i0+2)*size].decode('latin1').rstrip()
value = float(floats[i0+3])
iminus1 = i0 + np.where(ints[i0:i1] == -1)[0]
istart2 = [i0 + 4] + list(iminus1[:-1] + 1)
iend2 = iminus1
for istarti, iendi in zip(istart2, iend2):
spec_opt = ints[istarti]
if spec_opt == 1:
ivalues = list(range(istarti, iendi))
pid_eids = ints[ivalues].tolist()
elif spec_opt == 3:
ids = ints[istarti:iendi]
istart = ids[1]
iend = ids[-1]
pid_eids = list(range(istart, iend+1))
else:
raise NotImplementedError(spec_opt)
if nsm_type == 'ELEM':
nsm_type = 'ELEMENT'
assert len(pid_eids) > 0, pid_eids
nsml1 = op2.add_nsml1(sid, nsm_type, value, pid_eids)
str(nsml1)
n += (i1 - i0 + 1) * size
ncards += 1
op2.card_count['NSML'] = ncards
return n
def _read_nsml1_msc(self, data: bytes, n: int) -> int:
op2 = self.op2
op2.log.info(f'geom skipping NSML1 in {op2.table_name}; ndata={len(data)-12}')
return len(data)
def _read_nsm1(self, data: bytes, n: int) -> int:
op2 = self.op2
n0 = n
ints = np.frombuffer(data[n:], op2.idtype8).copy()
floats = np.frombuffer(data[n:], op2.fdtype8).copy()
istart, iend = get_minus1_start_end(ints)
ncards = 0
size = self.size
for (i0, i1) in zip(istart, iend):
assert ints[i1] == -1, ints[i1]
sid = ints[i0]
nsm_type = data[n0+(i0+1)*size:n0+(i0+3)*size].decode('latin1').rstrip()
zero_two = ints[i0+3]
value = float(floats[i0+4])
spec_opt = ints[i0+5]
assert zero_two in [0, 2], zero_two
iminus1 = i0 + np.where(ints[i0:i1] == -1)[0]
istart2 = [i0 + 5] + list(iminus1[:-1] + 1)
iend2 = iminus1
if spec_opt == 1:
ids = ints[i0+6:i1]
elif spec_opt == 2:
word = data[n0+(i0+6)*size:n0+i1*size]
ids = word
elif spec_opt == 3:
ids = ints[i0+6:i1]
istart = ids[0]
iend = ids[-1]
ids = list(range(istart, iend+1))
else:
raise NotImplementedError(spec_opt)
nsm1 = op2.add_nsm1(sid, nsm_type, value, ids)
str(nsm1)
n += (i1 - i0 + 1) * size
ncards += 1
op2.card_count['NSM1'] = ncards
return n
def _read_nsm(self, data: bytes, n: int) -> int:
op2 = self.op2
n = op2.reader_geom2._read_dual_card(
data, n,
self._read_nsm_nx, self._read_nsm_msc,
'NSM', op2._add_methods._add_nsm_object)
return n
def _read_nsm_2(self, data: bytes, n: int) -> int:
op2 = self.op2
n0 = n
ints = np.frombuffer(data[n:], op2.idtype8).copy()
floats = np.frombuffer(data[n:], op2.fdtype8).copy()
istart, iend = get_minus1_start_end(ints)
ncards = 0
size = self.size
for (i0, i1) in zip(istart, iend):
assert ints[i1] == -1, ints[i1]
sid = ints[i0]
prop_type = data[n0+(i0+1)*size:n0+(i0+3)*size]
elem_type = data[n0+(i0+3)*size:n0+(i0+4)*size]
nsm_type = prop_type.decode('latin1').rstrip()
dunno_int = ints[i0+3]
ids = ints[i0+4:i1:2].tolist()
values = floats[i0+5:i1:2].tolist()
assert len(ids) == len(values)
assert dunno_int in [0, 2], (sid, prop_type, (ints[i0+3], floats[i0+4]), ids, values)
nsm = op2.add_nsm(sid, nsm_type, ids, values)
str(nsm)
n += (i1 - i0 + 1) * size
ncards += 1
op2.card_count['NSM'] = ncards
return n
def _read_nsm_msc(self, data: bytes, n: int) -> int:
op2 = self.op2
properties = []
struct1 = Struct(op2._endian + b'i 4s if')
ndelta = 16
i = 0
ints = np.frombuffer(data[n:], op2.idtype).copy()
floats = np.frombuffer(data[n:], op2.fdtype).copy()
while n < len(data):
edata = data[n:n+ndelta]
out = struct1.unpack(edata)
(sid, prop_set, pid, value) = out
assert pid < 100000000
i += 4
n += ndelta
prop_set = prop_set.decode('utf8').rstrip(' ')
values = [value]
while ints[i] != -1:
value2 = floats[i]
values.append(value2)
n += 4
i += 1
op2.log.info("MSC: NSM-sid=%s prop_set=%s pid=%s values=%s" % (
sid, prop_set, pid, values))
prop = NSM.add_op2_data([sid, prop_set, pid, value])
properties.append(prop)
i += 1
n += 4
return n, properties
def _read_nsm_nx(self, data: bytes, n: int) -> int:
op2 = self.op2
properties = []
struct1 = Struct(op2._endian + b'i 8s ii f')
ndelta = 24
i = 0
ints = np.frombuffer(data[n:], op2.idtype).copy()
floats = np.frombuffer(data[n:], op2.fdtype).copy()
unused_packs = break_by_minus1(ints)
while n < len(data):
edata = data[n:n+ndelta]
out = struct1.unpack(edata)
(sid, prop_set, origin, pid, value) = out
assert pid < 100000000
i += 6
n += ndelta
prop_set = prop_set.decode('utf8').rstrip(' ')
pids = [pid]
values = [value]
while ints[i] != -1:
pid = ints[i]
value2 = floats[i+1]
assert pid != -1
pids.append(pid)
values.append(value2)
n += 8
i += 2
for pid, value in zip(pids, values):
if origin == 0:
prop = NSM.add_op2_data([sid, prop_set, pid, value])
elif origin == 2:
prop = NSML.add_op2_data([sid, prop_set, pid, value])
properties.append(prop)
i += 1
n += 4
return n, properties
def _read_pbar(self, data: bytes, n: int) -> int:
op2 = self.op2
ntotal = 76 * self.factor
struct1 = Struct(mapfmt(op2._endian + b'2i17f', self.size))
nentries = (len(data) - n) // ntotal
for unused_i in range(nentries):
edata = data[n:n+ntotal]
out = struct1.unpack(edata)
prop = PBAR.add_op2_data(out)
self._add_op2_property(prop)
n += ntotal
op2.card_count['PBAR'] = nentries
return n
def _read_pbarl(self, data: bytes, n: int) -> int:
op2 = self.op2
valid_types = {
'ROD': 1,
'TUBE': 2,
'TUBE2': 2,
'I': 6,
'CHAN': 4,
'T': 4,
'BOX': 4,
'BAR': 2,
'CROSS': 4,
'H': 4,
'T1': 4,
'I1': 4,
'CHAN1': 4,
'Z': 4,
'CHAN2': 4,
"T2": 4,
'BOX1': 6,
'HEXA': 3,
'HAT': 4,
'HAT1': 5,
'DBOX': 10,
}
size = self.size
ntotal = 28 * self.factor e == 4:
struct1 = Struct(op2._endian + b'2i 8s 8s f')
else:
struct1 = Struct(op2._endian + b'2q 16s 16s d')
ndata = len(data)
while ndata - n > ntotal:
edata = data[n:n+ntotal]
n += ntotal
out = struct1.unpack(edata)
(pid, mid, group, beam_type, value) = out
if pid > 100000000 or pid < 1:
op2.log.debug(" pid=%s mid=%s group=%r beam_type=%r value=%s" % (
pid, mid, group, beam_type, value))
raise RuntimeError('bad parsing...')
beam_type = reshape_bytes_block_size(beam_type, size=size)
group = reshape_bytes_block_size(group, size=size)
data_in = [pid, mid, group, beam_type, value]
expected_length = valid_types[beam_type]
iformat = op2._endian + b'%if' % expected_length
ndelta = expected_length * 4
dims_nsm = list(unpack(iformat, data[n:n+ndelta]))
data_in += dims_nsm
n += ndelta
prop = PBARL.add_op2_data(data_in)
pid = prop.pid
if pid in op2.properties:
op2._type_to_id_map['PBAR'].remove(pid)
del op2.properties[pid]
self._add_op2_property(prop)
n += 4 * self.factor
if len(op2._type_to_id_map['PBAR']) == 0 and 'PBAR' in op2.card_count:
del op2._type_to_id_map['PBAR']
del op2.card_count['PBAR']
op2.increase_card_count('PBARL')
if self.size == 8:
n += 16
es, n: int) -> int:
op2 = self.op2
struct1 = Struct(mapfmt(op2._endian + b'2i 12f i', self.size))
struct2 = Struct(mapfmt(op2._endian + b'3f 2i', self.size))
nproperties = 0
ntotal1 = 60 * self.factor # 4*15
ntotal2 = 20 * self.factor
ndata = len(data)
#print(ntotal1, ntotal2)
if self.factor == 2:
op2.show_data(data[12*self.factor:], types='qd')
#print(len(data[12*self.factor:]))
while n < ndata:
#op2.log.debug(f"n={n} ndata={ndata}")
edata = data[n:n+ntotal1]
#if len(edata) == ntotal1:
data1 = struct1.unpack(edata)
#else:
#op2.show_data(edata, types='qdi')
#n += ntotal2
#continue
nsections = data1[-1]
if op2.is_debug_file:
(pid, mid, a, i1, i2, i12, j, nsm, k1, k2,
m1, m2, n1, n2, unused_nsections) = data1
op2.log.info(f'PBCOMP pid={pid} mid={mid} nsections={nsections} '
f'k1={k1} k2={k2} m=({m1},{m2}) n=({n1},{n2})\n')
#if pid > 0 and nsections == 0:
#print('n1')
#n += ntotal1
#continue
#if pid == 0 and nsections == 0:
#print('n2')
#n += ntotal2
#continue
data2 = []
n += ntotal1
if nsections in [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]:
# 16 Y RS Lumped area location along element's y-axis
# 18 C RS Fraction of the total area for the lumped area
# 19 MID I Material identification number
# 20 UNDEF None
# Words 16 through 20 repeat NSECT times
for unused_i in range(nsections):
datai = data[n:n+ntotal2]
xi, yi, ci, mid, unused_null = struct2.unpack(datai)
data2.append((xi, yi, ci, mid))
n += ntotal2
else:
op2.log.error(f'PBCOMP={data1[0]} has no sections; check your bdf')
return n
#raise NotImplementedError('PBCOMP nsections=%r' % nsections)
if op2.is_debug_file:
op2.binary_debug.write(' PBCOMP: %s\n' % str([data1, data2]))
msg = (
' i=%-2s so=%s xxb=%.1f a=%g i1=%g i2=%g i12=%g j=%g nsm=%g '
'c=[%s,%s] d=[%s,%s] e=[%s,%s] f=[%s,%s]' % (
nsections, None, -9999., a, i1, i2, i12, j, nsm,
None, None, None, None, None, None, None, None,)
)
op2.log.debug(msg)
#op2.log.debug(data1)
#op2.log.debug(data2)
data_in = [data1, data2]
prop = PBCOMP.add_op2_data(data_in)
pid = data1[0]
if pid in op2.properties:
op2._type_to_id_map['PBEAM'].remove(pid)
del op2.properties[pid]
self._add_op2_property(prop)
nproperties += 1
#print(f"n={n} ndata={ndata}")
assert nproperties > 0, 'PBCOMP nproperties=%s' % (nproperties)
if len(op2._type_to_id_map['PBEAM']) == 0 and 'PBEAM' in op2.card_count:
del op2._type_to_id_map['PBEAM']
del op2.card_count['PBEAM']
op2.card_count['PBCOMP'] = nproperties
return n
    def _read_pbeam(self, data: bytes, n: int) -> int:
        """
        Reads PBEAM (CBEAM property) records from the EPT table.

        Record layout per property:
          - 5 header words: pid, mid, nsegments, ccf, x
          - 11 station blocks of 16 floats each:
            so, x/xb, A, I1, I2, I12, J, NSM, C1, C2, D1, D2, E1, E2, F1, F2
          - 1 trailing block of 16 floats:
            K1, K2, S1, S2, NSI(A), NSI(B), CW(A), CW(B),
            M1A, M2A, M1B, M2B, N1A, N2A, N1B, N2B

        PBCOMP entries are stored in the same table; records whose SO flags
        are not 0.0/1.0 or whose x/xb values fall outside [0, 1] are assumed
        to be PBCOMPs and dropped here (the PBCOMP reader handles them).
        """
        op2 = self.op2
        cross_section_type_map = {
            0 : 'variable',
            1 : 'constant',
            2 : '???',
        }
        struct1 = Struct(mapfmt(op2._endian + b'4if', self.size))
        struct2 = Struct(mapfmt(op2._endian + b'16f', self.size))
        struct3 = Struct(mapfmt(op2._endian + b'16f', self.size))
        unused_ntotal = 768 # 4*(5+16*12)
        #nproperties = (len(data) - n) // ntotal
        #assert nproperties > 0, 'ndata-n=%s n=%s datai\n%s' % (len(data)-n, n, op2.show_data(data[n:100+n]))
        ndata = len(data)
        #op2.show_data(data[12:], 'if')
        #assert ndata % ntotal == 0, 'ndata-n=%s n=%s ndata%%ntotal=%s' % (len(data)-n, n, ndata % ntotal)
        nproperties = 0
        # header block size (5 words) and station/end block size (16 words)
        ntotal1 = 20 * self.factor
        ntotal2 = 64 * self.factor
        # record length varies (PBCOMPs are mixed in), so scan until the
        # buffer is exhausted rather than computing a fixed record count
        while n < ndata:
        #while 1: #for i in range(nproperties):
            edata = data[n:n+ntotal1]
            n += ntotal1
            data_in = list(struct1.unpack(edata))
            #if op2.is_debug_file:
            #op2.log.info('PBEAM pid=%s mid=%s nsegments=%s ccf=%s x=%s\n' % tuple(data_in))
            (pid, unused_mid, unused_nsegments, ccf, unused_x) = data_in
            #op2.log.info('PBEAM pid=%s mid=%s nsegments=%s ccf=%s x=%s' % tuple(data_in))
            # Constant cross-section flag: 1=yes and 0=no
            # what is 2?
            if ccf not in [0, 1, 2]:
                msg = ('  PBEAM pid=%s mid=%s nsegments=%s ccf=%s x=%s; '
                       'ccf must be in [0, 1, 2]\n' % tuple(data_in))
                raise ValueError(msg)
            cross_section_type = cross_section_type_map[ccf]
            #print('cross_section_type = %s' % cross_section_type)
            is_pbcomp = False
            is_bad_so = False
            so = []
            xxb = []
            # always 11 stations on disk, even for constant cross-sections
            for i in range(11):
                edata = data[n:n+ntotal2]
                if len(edata) != ntotal2:
                    endpack = []
                    raise RuntimeError(f'PBEAM unexpected length i={i:d}...')
                n += ntotal2
                pack = struct2.unpack(edata)
                (soi, xxbi, a, i1, i2, i12, j, nsm, c1, c2,
                 d1, d2, e1, e2, f1, f2) = pack
                xxb.append(xxbi)
                so.append(soi)
                # SO is stored as a float flag: 0.0 -> 'NO', 1.0 -> 'YES';
                # anything else suggests this record is really a PBCOMP
                if soi == 0.0:
                    so_str = 'NO'
                elif soi == 1.0:
                    so_str = 'YES'
                else:
                    so_str = str(soi)
                    is_bad_so = True
                    #msg = 'PBEAM pid=%s i=%s x/xb=%s soi=%s; soi not in 0.0 or 1.0' % (
                    #pid, i, xxb, soi)
                    #raise NotImplementedError(msg)
                #if xxb != 0.0:
                #msg = 'PBEAM pid=%s i=%s x/xb=%s soi=%s; xxb not in 0.0 or 1.0' % (
                #pid, i, xxb, soi)
                #raise NotImplementedError(msg)
                pack2 = (so_str, xxbi, a, i1, i2, i12, j, nsm, c1, c2,
                         d1, d2, e1, e2, f1, f2)
                data_in.append(pack2)
                if op2.is_debug_file:
                    op2.binary_debug.write(f'     {pack}\n')
                    msg = (
                        '    i=%-2s' % i + ' so=%s xxb=%.1f a=%g i1=%g i2=%g i12=%g j=%g nsm=%g '
                        'c=[%s,%s] d=[%s,%s] e=[%s,%s] f=[%s,%s]' % (tuple(pack2))
                    )
                    op2.binary_debug.write(msg)
                #msg = (
                #' i=%-2s' % i + ' so=%s xxb=%.1f a=%g i1=%g i2=%g i12=%g j=%g nsm=%g '
                #'c=[%s,%s] d=[%s,%s] e=[%s,%s] f=[%s,%s]' % (tuple(pack2))
                #)
                #print(msg)
            # trailing 16-word block with the end-A/end-B values
            edata = data[n:n+ntotal2]
            if len(edata) != ntotal2:
                endpack = []
                raise RuntimeError('PBEAM unexpected length 2...')
            endpack = struct3.unpack(edata)
            n += ntotal2
            assert len(endpack) == 16, endpack
            #(k1, k2, s1, s2, nsia, nsib, cwa, cwb, # 8
            #m1a, m2a, m1b, m2b, n1a, n2a, n1b, n2b) = endpack # 8 -> 16
            if op2.is_debug_file:
                op2.binary_debug.write('    k=[%s,%s] s=[%s,%s] nsi=[%s,%s] cw=[%s,%s] '
                                       'ma=[%s,%s] mb=[%s,%s] na=[%s,%s] nb=[%s,%s]' % (
                                           tuple(endpack)))
            data_in.append(endpack)
            # bad SO flags or out-of-range x/xb -> the record was a PBCOMP
            if is_bad_so:
                #if soi < 0.:
                xxb_str = ', '.join(['%g' % xxbi for xxbi in xxb])
                so_str = ', '.join(['%g' % soi for soi in so])
                msg = (f'PBEAM pid={pid} i={i} soi=[{so_str}]; '
                       'soi not 0.0 or 1.0; assuming PBCOMP & dropping')
                op2.log.error(msg)
                is_pbcomp = True
            if min(xxb) < 0.0 or max(xxb) > 1.0:
                xxb_str = ', '.join(['%g' % xxbi for xxbi in xxb])
                msg = (f'PBEAM pid={pid} i={i} x/xb=[{xxb_str}]; '
                       'x/xb must be between 0.0 and 1.0; assuming PBCOMP & dropping')
                op2.log.error(msg)
                is_pbcomp = True
            if is_pbcomp:
                continue
            # a previously-read PBCOMP with the same pid also wins
            if pid in op2.properties:
                if op2.properties[pid].type == 'PBCOMP':
                    continue
            prop = PBEAM.add_op2_data(data_in)
            nproperties += 1
            self._add_op2_property(prop)
        if nproperties:
            op2.card_count['PBEAM'] = nproperties
        return n
    def _read_pbeaml(self, data: bytes, n: int) -> int:
        """
        Reads PBEAML (beam-library cross-section property) records.

        Each record is variable length:
          pid, mid, GROUP (2 words of chars), TYPE (2 words of chars),
          then floats until a -1 delimiter.  Records are located by
          scanning the int view of the buffer for the -1 separators.
        """
        op2 = self.op2
        #strs = numpy.core.defchararray.reshapesplit(data, sep=",")
        #ints = np.frombuffer(data[n:], self._uendian + 'i').copy()
        #floats = np.frombuffer(data[n:], self._uendian + 'f').copy()
        # two views of the same payload; ints for delimiters/ids,
        # floats for the dimension/NSM values
        ints = np.frombuffer(data[n:], op2.idtype8).copy()
        floats = np.frombuffer(data[n:], op2.fdtype8).copy()
        istart, iend = get_minus1_start_end(ints)

        size = self.size
        nproperties = len(istart)
        if size == 4:
            struct1 = Struct(op2._endian + b'2i 8s 8s')
        else:
            struct1 = Struct(op2._endian + b'2q 16s 16s')
        for unused_i, (istarti, iendi) in enumerate(zip(istart, iend)):
            # 6 words of fixed header: pid, mid, group, beam_type
            idata = data[n+istarti*size : n+(istarti+6)*size]
            pid, mid, group, beam_type = struct1.unpack(idata)
            group = group.decode('latin1').strip()
            beam_type = beam_type.decode('latin1').strip()
            # remaining words up to the -1 are the station values
            fvalues = floats[istarti+6: iendi]
            if op2.is_debug_file:
                op2.binary_debug.write('     %s\n' % str(fvalues))
                op2.log.debug(f'pid={pid:d} mid={mid:d} group={group} beam_type={beam_type}')
                op2.log.debug(fvalues)
            #op2.log.debug(f'pid={pid:d} mid={mid:d} group={group} beam_type={beam_type}')
            data_in = [pid, mid, group, beam_type, fvalues]
            prop = PBEAML.add_op2_data(data_in)
            if pid in op2.properties:
                # this is a fake PSHELL
                propi = op2.properties[pid]
                assert propi.type in ['PBEAM'], propi.get_stats()
                nproperties -= 1
                continue
            self._add_op2_property(prop)
        if nproperties:
            op2.card_count['PBEAML'] = nproperties
        return len(data)
def _read_pbend(self, data: bytes, n: int) -> int:
op2 = self.op2
n = op2.reader_geom2._read_dual_card(
data, n,
self._read_pbend_nx, self._read_pbend_msc,
'PBEND', op2._add_methods._add_property_object)
return n
    def _read_pbend_msc(self, data: bytes, n: int) -> Tuple[int, List[PBEND]]:
        """
        Reads the 104-byte (26-word) MSC PBEND record.

        Returns (new offset, list of PBEND objects); the caller
        (``_read_dual_card``) adds the properties to the model.
        """
        op2 = self.op2
        ntotal = 104  # 26*4
        struct1 = Struct(op2._endian + b'2i 4f i 18f f')  # delta_n is a float, not an integer
        nproperties = (len(data) - n) // ntotal
        assert (len(data) - n) % ntotal == 0
        assert nproperties > 0, 'table=%r len=%s' % (op2.table_name, len(data) - n)
        properties = []
        for unused_i in range(nproperties):
            edata = data[n:n+104]
            out = struct1.unpack(edata)
            (pid, mid, area, i1, i2, j, fsi, rm, t, p, rb, theta_b,
             c1, c2, d1, d2, e1, e2, f1, f2, k1, k2, nsm, rc, zc,
             delta_n) = out
            beam_type = fsi

            # all-zero section data means the geometric section fields
            # were left blank -> treat as a type-2 (section property) PBEND
            if (area, rm, t, p) == (0., 0., 0., 0.):
                area = None
                rm = None
                t = None
                p = None
                delta_n = None
                beam_type = 2
            if delta_n == 0:
                #: Radial offset of the neutral axis from the geometric
                #: centroid, positive is toward the center of curvature
                delta_n = None
            pbend = PBEND(pid, mid, beam_type, area, i1, i2, j,
                          c1, c2, d1, d2, e1, e2, f1, f2, k1, k2,
                          nsm, rc, zc, delta_n, fsi, rm, t, p, rb, theta_b)
            #print(pbend)
            pbend.validate()

            properties.append(pbend)
            n += ntotal
        return n, properties
    def _read_pbend_nx(self, data: bytes, n: int) -> Tuple[int, List[PBEND]]:
        """
        Reads the 132-byte (33-word) NX PBEND record; the extra 7 trailing
        words (sacl/alpha/flange/kx/ky/kz/junk) are read but discarded.

        Returns (new offset, list of PBEND objects).
        """
        op2 = self.op2
        #op2.log.info('geom skipping PBEND in EPT')
        #return len(data)
        ntotal = 132  # 33*4
        struct1 = Struct(op2._endian + b'2i 4f i 21f i 4f')
        nproperties = (len(data) - n) // ntotal
        assert (len(data) - n) % ntotal == 0
        assert nproperties > 0, 'table=%r len=%s' % (op2.table_name, len(data) - n)
        properties = []
        for unused_i in range(nproperties):
            edata = data[n:n+132]
            out = struct1.unpack(edata)
            (pid, mid, area, i1, i2, j, fsi, rm, t, p, rb, theta_b,
             c1, c2, d1, d2, e1, e2, f1, f2, k1, k2, nsm, rc, zc,
             delta_n, unused_sacl, unused_alpha, unused_flange,
             unused_kx, unused_ky, unused_kz, unused_junk,) = out
            beam_type = fsi

            pbend = PBEND(pid, mid, beam_type, area, i1, i2, j,
                          c1, c2, d1, d2, e1, e2, f1, f2, k1, k2,
                          nsm, rc, zc, delta_n, fsi, rm, t, p, rb, theta_b)
            pbend.validate()
            properties.append(pbend)
            n += ntotal
        return n, properties
# PBMSECT
# PBRSECT
    def _read_pbush(self, data: bytes, n: int) -> int:
        """
        Reads PBUSH, which has several record lengths depending on the
        Nastran flavor/version (72 bytes for NX; 92/96/108 for MSC).

        If the record length is ambiguous (``DoubleCardError``), falls back
        to trying the NX then the MSC layout on the whole buffer.
        """
        op2 = self.op2
        card_name = 'PBUSH'
        card_obj = PBUSH
        methods = {
            72 : self._read_pbush_nx_72,  # 72=4*18
            92 : self._read_pbush_msc_92,  # 92=4*23
            96 : self._read_pbush_msc_96,  # 96=4*24
            108 : self._read_pbush_msc_108,  # 108=4*27
        }
        try:
            n = op2.reader_geom2._read_double_card(
                card_name, card_obj, self._add_op2_property,
                methods, data, n)
        except DoubleCardError:
            # length-based dispatch failed; try NX first, then MSC
            # (NX/MSC used to share the 72-byte format)
            nx_method = partial(self._read_pbush_nx_72, card_obj)
            msc_method = partial(self._read_pbush_msc_92, card_obj)
            n = op2.reader_geom2._read_dual_card(
                data, n,
                nx_method, msc_method,
                card_name, self._add_op2_property)
        return n
def _read_pbush_nx_72(self, card_obj: PBUSH, data: bytes, n: int) -> Tuple[int, List[PBUSH]]:
op2 = self.op2
ntotal = 72 * self.factor
struct1 = Struct(mapfmt(op2._endian + b'i17f', self.size))
ndata = len(data) - n
nentries = ndata // ntotal
assert nentries > 0, 'table={op2.table_name} len={ndata}'
assert ndata % ntotal == 0, f'table={op2.table_name} leftover = {ndata} % {ntotal} = {ndata % ntotal}'
props = []
for unused_i in range(nentries):
edata = data[n:n+ntotal]
out = struct1.unpack(edata)
(pid,
k1, k2, k3, k4, k5, k6,
b1, b2, b3, b4, b5, b6,
g1, sa, st, ea, et) = out
assert pid > 0, pid
g2 = g3 = g4 = g5 = g6 = g1
data_in = (pid, k1, k2, k3, k4, k5, k6, b1, b2, b3, b4, b5, b6,
g1, g2, g3, g4, g5, g6, sa, st, ea, et)
prop = PBUSH.add_op2_data(data_in)
props.append(prop)
n += ntotal
return n, props
def _read_pbush_msc_92(self, card_obj: PBUSH, data: bytes, n: int) -> Tuple[int, List[PBUSH]]:
op2 = self.op2
ntotal = 92 * self.factor
struct1 = Struct(mapfmt(op2._endian + b'i22f', self.size))
ndata = len(data) - n
nentries = ndata // ntotal
assert nentries > 0, 'table={op2.table_name} len={ndata}'
assert ndata % ntotal == 0, f'table={op2.table_name} leftover = {ndata} % {ntotal} = {ndata % ntotal}'
props = []
for unused_i in range(nentries):
edata = data[n:n+ntotal]
out = struct1.unpack(edata)
pid = out[0]
assert pid > 0, pid
prop = PBUSH.add_op2_data(out)
props.append(prop)
n += ntotal
return n, props
def _read_pbush_msc_96(self, card_obj: PBUSH, data: bytes, n: int) -> Tuple[int, List[PBUSH]]:
op2 = self.op2
ntotal = 96 * self.factor
struct1 = Struct(mapfmt(op2._endian + b'i22f f', self.size))
ndata = len(data) - n
nentries = ndata // ntotal
assert nentries > 0, 'table={op2.table_name} len={ndata}'
assert ndata % ntotal == 0, f'table={op2.table_name} leftover = {ndata} % {ntotal} = {ndata % ntotal}'
props = []
for unused_i in range(nentries):
edata = data[n:n+ntotal]
out = struct1.unpack(edata)
pid = out[0]
assert pid > 0, pid
prop = PBUSH.add_op2_data(out)
props.append(prop)
n += ntotal
return n, props
def _read_pbush_msc_108(self, card_obj: PBUSH, data: bytes, n: int) -> Tuple[int, List[PBUSH]]:
op2 = self.op2
ntotal = 108 * self.factor
struct1 = Struct(mapfmt(op2._endian + b'i22f 4f', self.size))
ndata = len(data) - n
nentries = ndata // ntotal
assert nentries > 0, 'table={op2.table_name} len={ndata}'
assert ndata % ntotal == 0, f'table={op2.table_name} leftover = {ndata} % {ntotal} = {ndata % ntotal}'
props = []
for unused_i in range(nentries):
edata = data[n:n+ntotal]
out = struct1.unpack(edata)
pid = out[0]
assert pid > 0, pid
prop = PBUSH.add_op2_data(out)
str(prop)
props.append(prop)
n += ntotal
return n, props
    def _read_pbush1d(self, data: bytes, n: int) -> int:
        """
        Reads the 152-byte (38-word) PBUSH1D record.

        The record stores k/c/m/sa/se plus optional SHOCKA / SPRING /
        DAMPER / GENER sub-tables; each sub-table has a type flag
        (0=absent, 1=TABLE, 2=EQUAT) followed by table/equation ids.
        """
        op2 = self.op2
        # sub-table type flag -> keyword on the PBUSH1D card
        type_map = {
            0 : None,  # NULL
            1 : 'TABLE',
            2 : 'EQUAT',
        }
        ntotal = 152 * self.factor
        struct1 = Struct(mapfmt(op2._endian + b'i 6f i 4f 24i 2f', self.size))
        nentries = (len(data) - n) // ntotal
        for unused_i in range(nentries):
            edata = data[n:n+ntotal]
            out = struct1.unpack(edata)
            (pid, k, c, m, unused_alpha, sa, se,
             typea, cvt, cvc, expvt, expvc, idtsu, idtcu, idtsud, idcsud,
             types, idts, idcs, idtdus, idcdus,
             typed, idtd, idcd, idtdvd, idcdvd,
             typeg, idtg, idcg, idtdug, idcdug, idtdvg, idcdvg,
             typef, idtf, idcf,
             unused_ut, unused_uc) = out
            msg = f'PBUSH1D pid={pid} k={k} c={c} m={m} sa={sa} se={se}'
            optional_vars = {}
            typea_str = type_map[typea]
            types_str = type_map[types]
            typed_str = type_map[typed]
            unused_typeg_str = type_map[typeg]
            unused_typef_str = type_map[typef]

            if min([typea, types, typed, typeg, typef]) < 0:
                raise RuntimeError(f'typea={typea} types={types} typed={typed} typeg={typeg} typef={typef}')
            if typea in [1, 2]:  # SHOCKA?
                # NOTE(review): idts/idets are both taken from idtsu here;
                # presumably the tension/extension ids share one word -- confirm
                idts = idtsu
                idets = idtsu
                optional_vars['SHOCKA'] = [typea_str, cvt, cvc, expvt, expvc,
                                           idts, idets, idtcu, idtsud, idcsud]
                msg += (
                    f' SHOCKA type={typea} cvt={cvt} cvc={cvc} expvt={expvt} expvc={expvc}\n'
                    f'        idtsu={idtsu} (idts={idts} idets={idets}) idtcu={idtcu} idtsud={idtsud} idcsud={idcsud}')
            if types in [1, 2]:  # SPRING: Spring
                optional_vars['SPRING'] = [types_str, idts, idcs, idtdus, idcdus]
                msg += f' SPRING type={types} idt={idts} idc={idcs} idtdu={idtdus} idcdu={idcdus}'
            if typed in [1, 2]:  # DAMPER: Viscous Damping
                optional_vars['DAMPER'] = [typed_str, idtd, idcd, idtdvd, idcdvd]
                msg += f' DAMPER type={typed} idt={idtd} idc={idtd} idtdv={idtdvd} idcdv={idcdvd}'
            if typeg in [1, 2]:  # GENER: General Nonlinear
                msg += f' GENER type={typeg} idt={idtg} idc={idcg} idtdu={idtdug} idcdu={idcdug} idtdv={idtdvg} idcdv={idcdvg}'
                optional_vars['GENER'] = [idtg, idcg, idtdug, idcdug, idtdvg, idcdvg]
            if typef in [1, 2]:  # FUSE: failure
                raise NotImplementedError(f'typef={typef} idtf={idtf} idcf={idcf}')

            if op2.is_debug_file:
                op2.binary_debug.write(msg)
            pbush1d = op2.add_pbush1d(pid, k=k, c=c, m=m, sa=sa, se=se,
                                      optional_vars=optional_vars,)
            str(pbush1d)
            n += ntotal
        op2.card_count['PBUSH1D'] = nentries
        return n
def _read_pbusht(self, data: bytes, n: int) -> int:
op2 = self.op2
card_name = 'PBUSHT'
card_obj = PBUSHT
methods = {
80 : self._read_pbusht_80,
100 : self._read_pbusht_100,
136 : self._read_pbusht_136,
}
try:
n = op2.reader_geom2._read_double_card(
card_name, card_obj, op2._add_methods._add_pbusht_object,
methods, data, n)
except DoubleCardError:
raise
op2.log.warning(f'try-except {card_name}')
return n
def _read_pbusht_nx_old(self, data: bytes, n: int) -> int:
op2 = self.op2
ndata = (len(data) - n) // self.factor
if ndata % 100 == 0 and ndata % 80 == 0:
op2.log.warning(f"skipping PBUSHT in EPT because nfields={ndata//4}, which is "
'nproperties*25 or nproperties*20')
return len(data), []
if ndata % 100 == 0:
n, props = self._read_pbusht_100(data, n)
elif ndata % 80 == 0:
n, props = self._read_pbusht_80(data, n)
else:
raise NotImplementedError('You have blank lines in your PBUSHT')
return n, props
    def _read_pbusht_80(self, card_obj, data: bytes, n: int) -> Tuple[int, List[PBUSHT]]:
        """
        Reads the 80-byte (20-word) PBUSHT record:
        pid, K tables (6), B tables (6), one GE table, KN tables (6).
        The single GE table id is broadcast to all 6 dofs.

        Returns (new offset, list of PBUSHT objects).
        """
        op2 = self.op2
        ntotal = 80 * self.factor
        struct1 = Struct(op2._endian + b'20i')
        nentries = (len(data) - n) // ntotal
        assert nentries > 0, 'table=%r len=%s' % (op2.table_name, len(data) - n)
        props = []
        for unused_i in range(nentries):
            edata = data[n:n+ntotal]
            out = struct1.unpack(edata)
            (pid,
             k1, k2, k3, k4, k5, k6,
             b1, b2, b3, b4, b5, b6,
             g1,
             n1, n2, n3, n4, n5, n6) = out
            # only one GE table stored -> replicate it for all 6 dofs
            g2 = g3 = g4 = g5 = g6 = g1
            k_tables = [k1, k2, k3, k4, k5, k6]
            b_tables = [b1, b2, b3, b4, b5, b6]
            ge_tables = [g1, g2, g3, g4, g5, g6]
            kn_tables = [n1, n2, n3, n4, n5, n6]
            prop = PBUSHT(pid, k_tables, b_tables, ge_tables, kn_tables)
            props.append(prop)
            n += ntotal
        return n, props
    def _read_pbusht_100(self, card_obj, data: bytes, n: int) -> Tuple[int, List[PBUSHT]]:
        """
        Reads the 100-byte (25-word) PBUSHT record:
        pid, K tables (6), B tables (6), GE tables (6), KN tables (6).

        Returns (new offset, list of PBUSHT objects).
        """
        op2 = self.op2
        props = []
        ntotal = 100 * self.factor
        struct1 = Struct(mapfmt(op2._endian + b'25i', self.size))
        nentries = (len(data) - n) // ntotal
        assert nentries > 0, 'table=%r len=%s' % (op2.table_name, len(data) - n)
        for unused_i in range(nentries):
            edata = data[n:n+ntotal]
            out = struct1.unpack(edata)
            (pid,
             k1, k2, k3, k4, k5, k6,
             b1, b2, b3, b4, b5, b6,
             g1, g2, g3, g4, g5, g6,
             n1, n2, n3, n4, n5, n6) = out
            k_tables = [k1, k2, k3, k4, k5, k6]
            b_tables = [b1, b2, b3, b4, b5, b6]
            ge_tables = [g1, g2, g3, g4, g5, g6]
            kn_tables = [n1, n2, n3, n4, n5, n6]
            prop = PBUSHT(pid, k_tables, b_tables, ge_tables, kn_tables)
            props.append(prop)
            n += ntotal
        return n, props
    def _read_pbusht_136(self, card_obj, data: bytes, n: int) -> Tuple[int, List[PBUSHT]]:
        """
        Reads the 136-byte (34-word) PBUSHT record: the 25-word layout
        plus 9 trailing words (two 4-char words and ints) that are logged
        but not stored.

        Blank table fields are stored as 538976288 (0x20202020, i.e. four
        ASCII spaces read as an int) and are mapped to 0.

        Returns (new offset, list of PBUSHT objects).
        """
        op2 = self.op2
        props = []
        ntotal = 136 * self.factor
        struct1 = Struct(mapfmt(op2._endian + b'i 6i 6i 6i 6i 4s 2i i 5i', self.size))
        nentries = (len(data) - n) // ntotal
        assert nentries > 0, 'table=%r len=%s' % (op2.table_name, len(data) - n)
        for unused_i in range(nentries):
            edata = data[n:n+ntotal]
            out = struct1.unpack(edata)
            (pid,
             k1, k2, k3, k4, k5, k6,
             b1, b2, b3, b4, b5, b6,
             g1, g2, g3, g4, g5, g6,
             n1, n2, n3, n4, n5, n6,
             word1, a, word2, c, *other) = out
            # 538976288 == b'    ' (a blank field); treat as "no table"
            k_tables = [ki if ki != 538976288 else 0
                        for ki in [k1, k2, k3, k4, k5, k6]]
            b_tables = [bi if bi != 538976288 else 0
                        for bi in [b1, b2, b3, b4, b5, b6]]
            ge_tables = [gei if gei != 538976288 else 0
                         for gei in [g1, g2, g3, g4, g5, g6]]
            kn_tables = [kni if kni != 538976288 else 0
                         for kni in [n1, n2, n3, n4, n5, n6]]
            op2.log.warning(
                f'PBUSHT: pid={pid} '
                f'k={k_tables} '
                f'b={b_tables} '
                f'ge={ge_tables} '
                f'n={kn_tables} ' +
                'words=' + str([word1, a, word2, c]) +
                f' other={other}')
            # trailing words are expected to be null padding
            assert sum(other) == 0, other
            prop = PBUSHT(pid, k_tables, b_tables, ge_tables, kn_tables)
            props.append(prop)
            n += ntotal
        return n, props
def _read_pcomp(self, data: bytes, n: int) -> int:
op2 = self.op2
if self.size == 4:
n2, props = self._read_pcomp_32_bit(data, n)
nproperties = len(props)
for prop in props:
self._add_op2_property(prop)
op2.card_count['PCOMP'] = nproperties
else:
n2 = op2.reader_geom2._read_dual_card(
data, n, self._read_pcomp_32_bit,
self._read_pcomp_64_bit,
'PCOMP', self._add_op2_property)
return n2
    def _read_pcomp_64_bit(self, data: bytes, n: int) -> Tuple[int, List[PCOMP]]:
        """
        Reads the 64-bit PCOMP record: an 8-word header (pid, nlayers, z0,
        nsm, sb, ft, tref, ge) followed by 4-word plies (mid, t, theta,
        sout) terminated by a (-1, -1, -1, -1) flag.  In the 64-bit table
        the header's nlayers is 0 and the real count comes from the plies.

        Returns (new offset, list of PCOMP objects).
        """
        op2 = self.op2
        op2.to_nx(' because PCOMP-64 was found')
        nproperties = 0
        s1 = Struct(mapfmt(op2._endian + b'2i3fi2f', self.size))
        ntotal1 = 32 * self.factor
        s2 = Struct(mapfmt(op2._endian + b'i2fi', self.size))
        four_minus1 = Struct(mapfmt(op2._endian + b'4i', self.size))
        ndata = len(data)
        ntotal2 = 16 * self.factor
        props = []
        while n < (ndata - ntotal1):
            out = s1.unpack(data[n:n+ntotal1])
            (pid, nlayers, z0, nsm, sb, ft, tref, ge) = out
            assert pid > 0
            if op2.binary_debug:
                op2.binary_debug.write(f'PCOMP pid={pid} nlayers={nlayers} z0={z0} nsm={nsm} '
                                       f'sb={sb} ft={ft} Tref={tref} ge={ge}')
            assert isinstance(nlayers, int), out
            n += ntotal1
            is_symmetrical = 'NO'
            mids = []
            T = []
            thetas = []
            souts = []

            # read plies until the -1 delimiter (or buffer end)
            edata2 = data[n:n+ntotal2]
            idata = four_minus1.unpack(edata2)
            while idata != (-1, -1, -1, -1):
                (mid, t, theta, sout) = s2.unpack(edata2)
                mids.append(mid)
                T.append(t)
                thetas.append(theta)
                souts.append(sout)
                if op2.is_debug_file:
                    op2.binary_debug.write(f'  mid={mid} t={t} theta={theta} sout={sout}\n')
                n += ntotal2
                edata2 = data[n:n+ntotal2]
                if n == ndata:
                    op2.log.warning('  no (-1, -1, -1, -1) flag was found to close the PCOMPs')
                    break
                idata = four_minus1.unpack(edata2)

            if self.size == 4:
                assert 0 < nlayers < 400, 'pid=%s nlayers=%s z0=%s nms=%s sb=%s ft=%s Tref=%s ge=%s' % (
                    pid, nlayers, z0, nsm, sb, ft, tref, ge)
            else:
                # 64-bit: header nlayers is always 0 -> count the plies
                assert nlayers == 0, nlayers
                nlayers = len(mids)

            data_in = [
                pid, z0, nsm, sb, ft, tref, ge,
                is_symmetrical, mids, T, thetas, souts]
            prop = PCOMP.add_op2_data(data_in)
            nproperties += 1
            # skip the (-1, -1, -1, -1) delimiter
            n += ntotal2
            props.append(prop)
        return n, props
    def _read_pcomp_32_bit(self, data: bytes, n: int) -> Tuple[int, List[PCOMP]]:
        """
        Reads the 32-bit PCOMP record: an 8-word header (pid, nlayers, z0,
        nsm, sb, ft, tref, ge) followed by exactly ``nlayers`` 4-word plies
        (mid, t, theta, sout).  A negative nlayers flags a symmetric layup.

        Returns (new offset, list of PCOMP objects).
        """
        op2 = self.op2
        nproperties = 0
        s1 = Struct(mapfmt(op2._endian + b'2i3fi2f', self.size))
        ntotal1 = 32 * self.factor
        s2 = Struct(mapfmt(op2._endian + b'i2fi', self.size))

        ndata = len(data)
        ntotal2 = 16 * self.factor
        props = []
        while n < (ndata - ntotal1):
            out = s1.unpack(data[n:n+ntotal1])
            (pid, nlayers, z0, nsm, sb, ft, tref, ge) = out
            assert pid > 0
            if op2.binary_debug:
                op2.binary_debug.write(f'PCOMP pid={pid} nlayers={nlayers} z0={z0} nsm={nsm} '
                                       f'sb={sb} ft={ft} Tref={tref} ge={ge}')
            assert isinstance(nlayers, int), out
            n += ntotal1

            mids = []
            T = []
            thetas = []
            souts = []

            # negative nlayers -> symmetric layup, only half the plies stored
            is_symmetrical = 'NO'
            if nlayers < 0:
                is_symmetrical = 'SYM'
                nlayers = abs(nlayers)
            assert nlayers > 0, out

            assert 0 < nlayers < 400, 'pid=%s nlayers=%s z0=%s nsm=%s sb=%s ft=%s Tref=%s ge=%s' % (
                pid, nlayers, z0, nsm, sb, ft, tref, ge)

            if op2.is_debug_file:
                op2.binary_debug.write('    pid=%s nlayers=%s z0=%s nsm=%s sb=%s ft=%s Tref=%s ge=%s\n' % (
                    pid, nlayers, z0, nsm, sb, ft, tref, ge))
            for unused_ilayer in range(nlayers):
                (mid, t, theta, sout) = s2.unpack(data[n:n+ntotal2])
                if op2._nastran_format == 'optistruct':
                    # optistruct uses sout flags 2/3 where Nastran uses 1 (YES)
                    if sout in [2, 3]:
                        sout = 1
                mids.append(mid)
                assert mid > 0
                T.append(t)
                thetas.append(theta)
                souts.append(sout)
                if op2.is_debug_file:
                    op2.binary_debug.write(f'      mid={mid} t={t} theta={theta} sout={sout}\n')
                n += ntotal2

            data_in = [
                pid, z0, nsm, sb, ft, tref, ge,
                is_symmetrical, mids, T, thetas, souts]
            prop = PCOMP.add_op2_data(data_in)
            props.append(prop)
            nproperties += 1
        return n, props
    def _read_pcompg(self, data: bytes, n: int) -> int:
        """
        Reads PCOMPG (composite with global ply ids): an 8-word header
        (pid, lam, z0, nsm, sb, ft, tref, ge) followed by 5-word plies
        (global_ply, mid, t, theta, sout) terminated by a
        (-1, -1, -1, -1, -1) flag.

        Only a subset of the LAM/FT integer codes is currently mapped;
        records with unmapped codes are logged and skipped.
        """
        op2 = self.op2
        nproperties = 0
        s1 = Struct(mapfmt(op2._endian + b'2i 3f i 2f', self.size))
        s2 = Struct(mapfmt(op2._endian + b'2i 2f i', self.size))
        struct_i5 = Struct(mapfmt(op2._endian + b'5i', self.size))

        # integer code -> card value maps (incomplete; see except blocks)
        lam_map = {
            0 : None,
            # 1 : 'SYM',
        }
        ft_map = {
            0 : None,
            # 1 : 'HILL',
            # 2 : 'HOFF',
            3 : 'TSAI',
            # 4 : 'STRN',
        }
        sout_map = {
            0 : 'NO',
            1 : 'YES',
        }
        ndata = len(data)
        ntotal1 = 32 * self.factor
        ntotal2 = 20 * self.factor
        while n < (ndata - ntotal1):
            out = s1.unpack(data[n:n+ntotal1])
            (pid, lam_int, z0, nsm, sb, ft_int, tref, ge) = out
            if op2.binary_debug:
                op2.binary_debug.write(f'PCOMPG pid={pid} lam_int={lam_int} z0={z0} nsm={nsm} '
                                       f'sb={sb} ft_int={ft_int} tref={tref} ge={ge}')
            assert isinstance(lam_int, int), out
            assert pid > -1, out
            n += ntotal1

            mids = []
            thicknesses = []
            thetas = []
            souts = []
            global_ply_ids = []
            # 1000 is just a sanity bound on the ply count
            ilayer = 0
            while ilayer < 1000:
                ints5 = struct_i5.unpack(data[n:n+ntotal2])
                if ints5 == (-1, -1, -1, -1, -1):
                    if op2.is_debug_file:
                        op2.binary_debug.write('      global_ply=%-1 mid=%-1 t=%-1 theta=%-1 sout=-1\n')
                    break
                (global_ply, mid, t, theta, sout_int) = s2.unpack(data[n:n+ntotal2])
                try:
                    sout = sout_map[sout_int]
                except KeyError:
                    op2.log.error('cant parse global_ply=%s sout=%s; assuming 0=NO' % (
                        global_ply, sout_int))
                    sout = 'NO'

                global_ply_ids.append(global_ply)
                mids.append(mid)
                thicknesses.append(t)
                thetas.append(theta)
                souts.append(sout)
                if op2.is_debug_file:
                    op2.binary_debug.write('      global_ply=%s mid=%s t=%s theta=%s sout_int=%s sout=%r\n' % (
                        global_ply, mid, t, theta, sout_int, sout))
                n += ntotal2
                ilayer += 1
            # skip the (-1, -1, -1, -1, -1) delimiter
            n += ntotal2

            try:
                ft = ft_map[ft_int]
            except KeyError:
                op2.log.error('pid=%s cant parse ft=%s; should be HILL, HOFF, TSAI, STRN'
                              '...skipping' % (pid, ft_int))
                continue

            try:
                lam = lam_map[lam_int]
            except KeyError:
                op2.log.error('pid=%s cant parse lam=%s; should be HILL, HOFF, TSAI, STRN'
                              '...skipping' % (pid, lam_int))
                continue

            # a PCOMPG with the same pid replaces an earlier PCOMP
            if pid in op2.properties and op2.properties[pid].type == 'PCOMP':
                del op2.properties[pid]
            op2.add_pcompg(pid, global_ply_ids, mids, thicknesses, thetas=thetas, souts=souts,
                           nsm=nsm, sb=sb, ft=ft, tref=tref, ge=ge, lam=lam, z0=z0, comment='')
            nproperties += 1
        op2.card_count['PCOMPG'] = nproperties
        return n
def _read_pconeax(self, data: bytes, n: int) -> int:
self.op2.log.info('geom skipping PCONEAX in EPT')
return len(data)
    def _read_pconv(self, data: bytes, n: int) -> int:
        """
        Reads PCONV (convection property), which has two record lengths:
        16 bytes (NX) and 56 bytes (MSC).  Duplicate pconids that carry
        different data are logged, but the newer property still wins.
        """
        op2 = self.op2
        card_name = 'PCONV'
        card_obj = PCONV
        methods = {
            16 : self._read_pconv_nx_16,
            56 : self._read_pconv_msc_56,
        }
        try:
            n, elements = op2.reader_geom2._read_double_card_load(
                card_name, card_obj,
                methods, data, n)
        except DoubleCardError:
            # NOTE(review): the fallback calls op2._read_dual_card_load while
            # the siblings use op2.reader_geom2 -- confirm the attribute exists
            nx_method = partial(self._read_pconv_nx_16, card_obj)
            msc_method = partial(self._read_pconv_msc_56, card_obj)
            n, elements = op2._read_dual_card_load(
                data, n,
                nx_method, msc_method,
                card_name, self._add_op2_property)

        nelements = len(elements)
        for prop in elements:
            key = prop.pconid
            if key in op2.convection_properties:
                prop_old = op2.convection_properties[key]
                if prop != prop_old:
                    # conflicting duplicate: warn, then overwrite
                    op2.log.warning(prop.raw_fields())
                    op2.log.warning(prop_old.raw_fields())
                    op2.log.warning(f'PCONV pconid={key}; old, new\n{prop_old}{prop}')
                    self._add_pconv(prop)
            else:
                self._add_pconv(prop)
        op2.card_count['PCONV'] = nelements
        return n
    def _read_pconv_nx_16(self, card_obj: PCONV, data: bytes, n: int) -> Tuple[int, List[PCONV]]:
        """
        Reads the 16-byte (4-word) NX PCONV record: pconid, mid, form,
        expf.  The MSC-only fields are filled with None.

        Returns (new offset, list of PCONV objects).
        """
        op2 = self.op2
        ntotal = 16  # 4*4
        struct_3if = Struct(op2._endian + b'3if')
        nentries = (len(data) - n) // ntotal
        assert (len(data) - n) % ntotal == 0
        props = []
        for unused_i in range(nentries):
            out = struct_3if.unpack(data[n:n+ntotal])
            (pconid, mid, form, expf) = out
            # NX doesn't store the MSC-only fields
            ftype = tid = chlen = gidin = ce = e1 = e2 = e3 = None
            data_in = (pconid, mid, form, expf, ftype, tid, chlen,
                       gidin, ce, e1, e2, e3)

            prop = PCONV.add_op2_data(data_in)
            props.append(prop)
            n += ntotal
        return n, props
    def _read_pconv_msc_56(self, card_obj: PCONV, data: bytes, n: int) -> Tuple[int, List[PCONV]]:
        """
        Reads the 56-byte (14-word) MSC PCONV record: pconid, mid, form,
        expf, ftype, tid, 2 undefined words, chlen, gidin, ce, e1, e2, e3.

        Returns (new offset, list of PCONV objects).
        """
        op2 = self.op2
        ntotal = 56  # 14*4
        s = Struct(op2._endian + b'3if 4i fii 3f')
        nentries = (len(data) - n) // ntotal
        assert (len(data) - n) % ntotal == 0
        props = []
        for unused_i in range(nentries):
            out = s.unpack(data[n:n+ntotal])
            (pconid, mid, form, expf, ftype, tid, unused_undef1, unused_undef2, chlen,
             gidin, ce, e1, e2, e3) = out
            data_in = (pconid, mid, form, expf, ftype, tid, chlen,
                       gidin, ce, e1, e2, e3)

            prop = PCONV.add_op2_data(data_in)
            props.append(prop)
            n += ntotal
        return n, props
    def _read_pconvm(self, data: bytes, n: int) -> int:
        """
        Reads the 32-byte (8-word) PCONVM record: pconid, mid, form, flag,
        coeff, expr, exppi, exppo.  All-zero records (padding) are skipped.
        """
        op2 = self.op2
        ntotal = 32  # 8*4
        structi = Struct(op2._endian + b'4i 4f')
        nentries = (len(data) - n) // ntotal
        for unused_i in range(nentries):
            out = structi.unpack(data[n:n+ntotal])
            # skip null (padding) records
            if out != (0, 0, 0, 0, 0., 0., 0., 0.):
                (pconid, mid, form, flag, coeff, expr, expri, exppo) = out
                prop = PCONVM(pconid, mid, coeff, form=form, flag=flag,
                              expr=expr, exppi=expri, exppo=exppo, comment='')
                op2._add_methods._add_convection_property_object(prop)
            n += ntotal
        op2.card_count['PCONVM'] = nentries
        return n
def _read_pdamp(self, data: bytes, n: int) -> int:
op2 = self.op2
ntotal = 8 * self.factor
struct_if = Struct(mapfmt(op2._endian + b'if', self.size))
nentries = (len(data) - n) // ntotal
for unused_i in range(nentries):
out = struct_if.unpack(data[n:n+ntotal])
prop = PDAMP.add_op2_data(out)
self._add_op2_property(prop)
n += ntotal
op2.card_count['PDAMP'] = nentries
return n
def _read_pdampt(self, data: bytes, n: int) -> int:
self.op2.log.info('geom skipping PDAMPT in EPT')
return len(data)
def _read_pdamp5(self, data: bytes, n: int) -> int:
self.op2.log.info('geom skipping PDAMP5 in EPT')
return len(data)
def _read_pelas(self, data: bytes, n: int) -> int:
op2 = self.op2
struct_i3f = Struct(mapfmt(op2._endian + b'i3f', self.size))
ntotal = 16 * self.factor
nproperties = (len(data) - n) // ntotal
for unused_i in range(nproperties):
edata = data[n:n+ntotal]
out = struct_i3f.unpack(edata)
if op2.is_debug_file:
op2.binary_debug.write(' PELAS=%s\n' % str(out))
prop = PELAS.add_op2_data(out)
self._add_op2_property(prop)
n += ntotal
op2.card_count['PELAS'] = nproperties
return n
    def _read_pfast_msc(self, data: bytes, n: int) -> int:
        """
        Reads the 100-byte (25-word) MSC PFAST record.  Most of the
        connection-behavior words are read but discarded; only
        (pid, d, mcid, mflag, kt1-3, kr1-3, mass, ge) are kept.
        """
        op2 = self.op2
        ntotal = 100 * self.factor  # 25*4
        struct1 = Struct(op2._endian + b'2if 5i 2f2i2f 3i 2i 6f')
        ndatai = len(data) - n
        nproperties = ndatai // ntotal
        delta = ndatai % ntotal
        assert delta == 0, 'len(data)-n=%s n=%s' % (ndatai, ndatai / 100.)
        for unused_i in range(nproperties):
            edata = data[n:n+ntotal]
            out = struct1.unpack(edata)
            if op2.is_debug_file:
                op2.binary_debug.write('  PFAST=%s\n' % str(out))
            # NOTE(review): mcid is unpacked twice (word 3 and word 18);
            # the second occurrence wins -- confirm against the MSC record map
            (pid, d, mcid, unused_connbeh, unused_conntype, unused_extcon,
             unused_condtype, unused_weldtype, unused_minlen, unused_maxlen,
             unused_gmcheck, unused_spcgs, mass, ge,
             unused_aa, unused_bb, unused_cc, mcid, mflag,
             kt1, kt2, kt3, kr1, kr2, kr3) = out

            data_in = (pid, d, mcid, mflag, kt1, kt2, kt3,
                       kr1, kr2, kr3, mass, ge)
            prop = PFAST.add_op2_data(data_in)
            str(prop)
            self._add_op2_property(prop)
            n += ntotal
        op2.card_count['PFAST'] = nproperties
        return n
    def _read_pfast_nx(self, data: bytes, n: int) -> int:
        """
        Reads the 48-byte (12-word) NX PFAST record:
        (pid, d, mcid, mflag, kt1, kt2, kt3, kr1, kr2, kr3, mass, ge).

        Finding this record marks the deck as NX (``op2.to_nx``).
        """
        op2 = self.op2
        # NOTE(review): no ``* self.factor`` here, unlike the MSC reader --
        # presumably this NX record only occurs in 32-bit tables; confirm
        ntotal = 48
        struct1 = Struct(op2._endian + b'ifii 8f')
        nproperties = (len(data) - n) // ntotal
        delta = (len(data) - n) % ntotal
        assert delta == 0, 'len(data)-n=%s n=%s' % (len(data) - n, (len(data) - n) / 48.)
        for unused_i in range(nproperties):
            edata = data[n:n+ntotal]
            out = struct1.unpack(edata)
            if op2.is_debug_file:
                op2.binary_debug.write('  PFAST=%s\n' % str(out))
            (pid, d, mcid, mflag, kt1, kt2, kt3, kr1, kr2, kr3, mass, ge) = out

            data_in = (pid, d, mcid, mflag, kt1, kt2, kt3,
                       kr1, kr2, kr3, mass, ge)
            prop = PFAST.add_op2_data(data_in)
            self._add_op2_property(prop)
            n += ntotal
        op2.card_count['PFAST'] = nproperties
        op2.to_nx(' because PFAST-NX was found')
        return n
def _read_pelast(self, data: bytes, n: int) -> int:
op2 = self.op2
ntotal = 16 * self.factor
struct_4i = Struct(mapfmt(op2._endian + b'4i', self.size))
nproperties = (len(data) - n) // ntotal
for unused_i in range(nproperties):
edata = data[n:n+ntotal]
out = struct_4i.unpack(edata)
if op2.is_debug_file:
op2.binary_debug.write(' PELAST=%s\n' % str(out))
prop = PELAST.add_op2_data(out)
op2._add_methods._add_pelast_object(prop)
n += ntotal
op2.card_count['PELAST'] = nproperties
return n
def _read_pgap(self, data: bytes, n: int) -> int:
op2 = self.op2
ntotal = 44 * self.factor
struct_i10f = Struct(mapfmt(op2._endian + b'i10f', self.size))
nproperties = (len(data) - n) // ntotal
for unused_i in range(nproperties):
edata = data[n:n+ntotal]
out = struct_i10f.unpack(edata)
if op2.is_debug_file:
op2.binary_debug.write(' PGAP=%s\n' % str(out))
prop = PGAP.add_op2_data(out)
self._add_op2_property(prop)
n += ntotal
op2.card_count['PGAP'] = nproperties
return n
def _read_phbdy(self, data: bytes, n: int) -> int:
op2 = self.op2
struct_i3f = Struct(op2._endian + b'ifff')
nproperties = (len(data) - n) // 16
for unused_i in range(nproperties):
edata = data[n:n+16]
out = struct_i3f.unpack(edata)
if op2.is_debug_file:
op2.binary_debug.write(' PHBDY=%s\n' % str(out))
prop = PHBDY.add_op2_data(out)
op2._add_methods._add_phbdy_object(prop)
n += 16
op2.card_count['PHBDY'] = nproperties
return n
def _read_pintc(self, data: bytes, n: int) -> int:
self.op2.log.info('geom skipping PINTC in EPT')
return len(data)
def _read_pints(self, data: bytes, n: int) -> int:
self.op2.log.info('geom skipping PINTS in EPT')
return len(data)
def _read_pbeam3(self, data: bytes, n: int) -> int:
op2 = self.op2
card_name = 'PBUSHT'
card_obj = PBUSHT
methods = {
264 : self._read_pbeam3_264,
456 : self._read_pbeam3_456,
}
try:
n = op2.reader_geom2._read_double_card(
card_name, card_obj, self._add_op2_property,
methods, data, n)
except DoubleCardError:
raise
op2.log.warning(f'try-except {card_name}')
return n
    def _read_pbeam3_456(self, card_obj, data: bytes, n: int) -> Tuple[int, List[PBEAM3]]:
        """
        Reads the 456-byte (114-word) PBEAM3 record.

        Only pid, mid, A, Iz and Iy are currently mapped onto the PBEAM3
        object; the remaining station/end-section words are unpacked but
        not stored.  Records with mid=0 (padding) are skipped.

        Returns (new offset, list of PBEAM3 objects).
        """
        op2 = self.op2
        ntotal = 456 * self.factor  # 114*4
        struct1 = Struct(mapfmt(op2._endian +
                                b'2i'
                                b'3f'
                                b'5f' b'5f fi 14f i'
                                b'2i3f'
                                b'5f'
                                b'5f'
                                b'5f'
                                b'6f'
                                b'5f'
                                b'4i'
                                b'2f iii f'
                                b'5f'
                                b'6f'
                                b'30f', self.size))
        ndatai = len(data) - n
        nentries = ndatai // ntotal
        assert ndatai % ntotal == 0
        props = []
        for unused_i in range(nentries):
            datai = data[n:n+ntotal]
            n += ntotal
            (pid, mid, A, iz, iy,
             a, b, c, d, e,
             f, g, h, i, j,
             k, inta, l, m, ni, o, p, q, r, s, t, u, v, w, x, y, z,
             aa, bb, cc, dd, ee,
             ff, gg, hh, ii, jj,
             kk, ll, mm, nn, oo,
             pp, qq, rr, ss, tt,
             uu, vv, ww, xx, yy, zz,
             aaa, bbb, ccc, ddd, eee,
             fff, ggg, hhh, iii,
             jjj, kkk, lll, mmm, nnn, ooo,
             ppp, qqq, rrr, sss, ttt,
             uuu, vvv, www, xxx, yyy, zzz,
             *other) = struct1.unpack(datai)
            # mid=0 -> null/padding record
            if mid == 0:
                continue
            prop = PBEAM3(
                pid, mid, A, iz, iy, iyz=None, j=None, nsm=0.,
                so=None,
                cy=None, cz=None,
                dy=None, dz=None,
                ey=None, ez=None,
                fy=None, fz=None,
                ky=1., kz=1.,
                ny=None, nz=None, my=None, mz=None,
                nsiy=None, nsiz=None, nsiyz=None,
                cw=None, stress='GRID',
                w=None, wy=None, wz=None, comment='')
            assert pid > 0, prop.get_stats()
            assert mid > 0, prop.get_stats()
            str(prop)
            props.append(prop)
        return n, props
    def _read_pbeam3_264(self, card_obj, data: bytes, n: int) -> Tuple[int, List[PBEAM3]]:
        """
        Reads the 264-byte (66-word) PBEAM3 record.

        Only pid, mid, A, Iz and Iy are currently mapped onto the PBEAM3
        object; the trailing words are unpacked, sanity-checked and
        discarded.

        Returns (new offset, list of PBEAM3 objects).
        """
        op2 = self.op2
        ntotal = 264 * self.factor  # 66*4
        struct1 = Struct(mapfmt(op2._endian + b'2i 3f 5f 5f fi 14f i 30f 4i', self.size))
        ndatai = len(data) - n
        nentries = ndatai // ntotal
        assert ndatai % ntotal == 0
        props = []
        for unused_i in range(nentries):
            pid, mid, A, iz, iy, a, b, c, d, e, f, g, h, i, j, k, inta, *other = struct1.unpack(data[n:n+ntotal])
            # the trailing words are expected to be (near-)null padding
            assert sum(other) < 100, other
            prop = PBEAM3(
                pid, mid, A, iz, iy, iyz=None, j=None, nsm=0.,
                so=None,
                cy=None, cz=None,
                dy=None, dz=None,
                ey=None, ez=None,
                fy=None, fz=None,
                ky=1., kz=1.,
                ny=None, nz=None, my=None, mz=None,
                nsiy=None, nsiz=None, nsiyz=None,
                cw=None, stress='GRID',
                w=None, wy=None, wz=None, comment='')
            assert pid > 0, prop.get_stats()
            assert mid > 0, prop.get_stats()
            str(prop)
            props.append(prop)
            n += ntotal
        return n, props
def _read_pplane(self, data: bytes, n: int) -> int:
op2 = self.op2
ntotal = 32 * self.factor
struct1 = Struct(mapfmt(op2._endian + b'2i 2f 4i', self.size))
ndatai = len(data) - n
nentries = ndatai // ntotal
assert ndatai % ntotal == 0
for unused_i in range(nentries):
out = struct1.unpack(data[n:n+ntotal])
pid, mid, t, nsm, foropt, csopt = out[:6]
assert csopt == 0, csopt
pplane = op2.add_pplane(pid, mid, t=t, nsm=nsm,
formulation_option=foropt)
pplane.validate()
str(pplane)
n += ntotal
op2.card_count['PLPLANE'] = nentries
return n
def _read_plplane(self, data: bytes, n: int) -> int:
op2 = self.op2
ntotal = 44 * self.factor
if self.size == 4:
s = Struct(op2._endian + b'3i 4s f 6i')
else:
s = Struct(op2._endian + b'3q 8s d 6q')
nentries = (len(data) - n) // ntotal
for unused_i in range(nentries):
out = s.unpack(data[n:n+ntotal])
pid, mid, cid, location, unused_t, unused_csopt = out[:6]
location = location.decode('latin1')
op2.add_plplane(pid, mid, cid=cid, stress_strain_output_location=location)
n += ntotal
op2.card_count['PLPLANE'] = nentries
return n
def _read_plsolid(self, data: bytes, n: int) -> int:
op2 = self.op2
ntotal = 28 * self.factor
if self.size == 4:
struct1 = Struct(op2._endian + b'2i 4s 4i')
else:
struct1 = Struct(op2._endian + b'2q 8s 4q')
nentries = (len(data) - n) // ntotal
for unused_i in range(nentries):
out = struct1.unpack(data[n:n+ntotal])
pid, mid, location, unused_csopt, unused_null_a, unused_null_b, unused_null_c = out
location = location.decode('latin1')
op2.add_plsolid(pid, mid, stress_strain=location, ge=0.)
n += ntotal
op2.card_count['PLSOLID'] = nentries
return n
def _read_pmass(self, data: bytes, n: int) -> int:
op2 = self.op2
ntotal = 8 * self.factor
struct_if = Struct(mapfmt(op2._endian + b'if', self.size))
nentries = (len(data) - n) // ntotal
for unused_i in range(nentries):
edata = data[n:n + ntotal]
out = struct_if.unpack(edata)
if op2.is_debug_file:
op2.binary_debug.write(' PMASS=%s\n' % str(out))
prop = PMASS.add_op2_data(out)
self._add_op2_property_mass(prop)
n += ntotal
return n
def _read_prod(self, data: bytes, n: int) -> int:
op2 = self.op2
ntotal = 24 * self.factor
struct_2i4f = Struct(mapfmt(op2._endian + b'2i4f', self.size))
nproperties = (len(data) - n) // ntotal
for unused_i in range(nproperties):
edata = data[n:n+ntotal]
out = struct_2i4f.unpack(edata)
prop = PROD.add_op2_data(out)
if op2.is_debug_file:
op2.binary_debug.write(' PROD=%s\n' % str(out))
self._add_op2_property(prop)
n += ntotal
op2.card_count['PROD'] = nproperties
return n
def _read_pshear(self, data: bytes, n: int) -> int:
op2 = self.op2
ntotal = 24 * self.factor
struct_2i4f = Struct(mapfmt(op2._endian + b'2i4f', self.size))
nproperties = (len(data) - n) // ntotal
for unused_i in range(nproperties):
edata = data[n:n+ntotal]
out = struct_2i4f.unpack(edata)
if op2.is_debug_file:
op2.binary_debug.write(' PSHEAR=%s\n' % str(out))
prop = PSHEAR.add_op2_data(out)
self._add_op2_property(prop)
n += ntotal
op2.card_count['PSHEAR'] = nproperties
return n
    def _read_pshell(self, data: bytes, n: int) -> int:
        """Reads PSHELL records: (pid, mid1, t, mid2, bk, mid3, ts, nsm,
        z1, z2, mid4); 44 bytes per 32-bit record.

        Duplicate pids are skipped: identical duplicates are only warned
        about, while pid collisions with a different property are errors.
        """
        op2 = self.op2
        ntotal = 44 * self.factor
        s = Struct(mapfmt(op2._endian + b'iififi4fi', self.size))
        nproperties = (len(data) - n) // ntotal
        for unused_i in range(nproperties):
            edata = data[n:n+ntotal]
            out = s.unpack(edata)
            (pid, mid1, unused_t, mid2, unused_bk, mid3, unused_ts,
             unused_nsm, unused_z1, unused_z2, mid4) = out
            if op2.is_debug_file:
                op2.binary_debug.write(' PSHELL=%s\n' % str(out))
            prop = PSHELL.add_op2_data(out)
            n += ntotal
            if pid in op2.properties:
                # pid already exists; decide whether this is a harmless
                # duplicate or a genuine conflict, and skip it either way
                propi = op2.properties[pid]
                if prop == propi:
                    op2.log.warning(f'Fake PSHELL {pid:d} (skipping):\n{propi}')
                    nproperties -= 1
                    continue
                op2.log.error(f'PSHELL {pid:d} is also {propi.type} (skipping PSHELL):\n{propi}{prop}')
                nproperties -= 1
                continue
            self._add_op2_property(prop)
        # only record a count when at least one unique PSHELL was kept
        if nproperties:
            op2.card_count['PSHELL'] = nproperties
        return n
    def _read_psolid(self, data: bytes, n: int) -> int:
        """Reads PSOLID records: (pid, mid, cordm, in, stress, isop, fctn);
        28 bytes per 32-bit record.

        Records whose fctn field is b'FAKE' are skipped; per the warning
        below they presumably belong to a PCOMPLS table instead.
        """
        op2 = self.op2
        if self.size == 4:
            ntotal = 28
            struct_6i4s = Struct(op2._endian + b'6i4s')
        else:
            ntotal = 28 * 2
            struct_6i4s = Struct(op2._endian + b'6q8s')
        nproperties = (len(data) - n) // ntotal
        # count only the records actually turned into PSOLID cards
        nproperties_found = 0
        for unused_i in range(nproperties):
            edata = data[n:n+ntotal]
            out = struct_6i4s.unpack(edata)
            if op2.is_debug_file:
                op2.binary_debug.write(' PSOLID=%s\n' % str(out))
            n += ntotal
            fctn = out[-1]
            if fctn == b'FAKE':
                op2.log.warning(' PSOLID=%s; is this a PCOMPLS?' % str(out))
                continue
            prop = PSOLID.add_op2_data(out)
            self._add_op2_property(prop)
            nproperties_found += 1
        op2.card_count['PSOLID'] = nproperties_found
        return n
def _read_ptube(self, data: bytes, n: int) -> int:
op2 = self.op2
struct_2i3f = Struct(op2._endian + b'2i3f')
nproperties = (len(data) - n) // 20
for unused_i in range(nproperties):
edata = data[n:n+20]
out = struct_2i3f.unpack(edata)
(pid, mid, OD, t, nsm) = out
data_in = [pid, mid, OD, t, nsm]
if op2.is_debug_file:
op2.binary_debug.write(' PTUBE=%s\n' % str(out))
prop = PTUBE.add_op2_data(data_in)
self._add_op2_property(prop)
n += 20
op2.card_count['PTUBE'] = nproperties
return n
def _read_pset(self, data: bytes, n: int) -> int:
op2 = self.op2
struct_5i4si = Struct(op2._endian + b'5i4si')
nentries = 0
while n < len(data):
edata = data[n:n+28]
out = struct_5i4si.unpack(edata)
idi, poly1, poly2, poly3, cid, typei, typeid = out
typei = typei.rstrip().decode('latin1')
assert typei in ['SET', 'ELID'], (idi, poly1, poly2, poly3, cid, typei, typeid)
if op2.is_debug_file:
op2.binary_debug.write(' PVAL=%s\n' % str(out))
typeids = []
n += 28
while typeid != -1:
typeids.append(typeid)
typeid, = op2.struct_i.unpack(data[n:n+4])
n += 4
if len(typeids) == 1:
typeids = typeids[0]
op2.add_pset(idi, poly1, poly2, poly3, cid, typei, typeids)
op2.card_count['PSET'] = nentries
return n
def _read_pval(self, data: bytes, n: int) -> int:
op2 = self.op2
if self.size == 4:
struct_5i4si = Struct(op2._endian + b'5i 4s i')
struct_i = op2.struct_i
else:
struct_5i4si = Struct(op2._endian + b'5q 8s q')
struct_i = op2.struct_q
nentries = 0
ntotal = 28 * self.factor
size = self.size
while n < len(data):
edata = data[n:n+ntotal]
out = struct_5i4si.unpack(edata)
idi, poly1, poly2, poly3, cid, typei, typeid = out
typei = typei.rstrip().decode('latin1')
assert typei in ['SET', 'ELID'], f'idi={idi} poly1={poly1} poly2={poly2} poly3={poly3} cid={cid} typei={typei} typeid={typeid}'
if op2.is_debug_file:
op2.binary_debug.write(' PVAL=%s\n' % str(out))
typeids = []
n += ntotal
while typeid != -1:
typeids.append(typeid)
typeid, = struct_i.unpack(data[n:n+size])
n += size
op2.add_pval(idi, poly1, poly2, poly3, cid, typei, typeids)
op2.card_count['PVAL'] = nentries
return n
def _read_pvisc(self, data: bytes, n: int) -> int:
op2 = self.op2
struct_i2f = Struct(op2._endian + b'i2f')
nproperties = (len(data) - n) // 12
for unused_i in range(nproperties):
edata = data[n:n+12]
out = struct_i2f.unpack(edata)
if op2.is_debug_file:
op2.binary_debug.write(' PVISC=%s\n' % str(out))
prop = PVISC.add_op2_data(out)
self._add_op2_property(prop)
n += 12
op2.card_count['PVISC'] = nproperties
return n
def _read_view(self, data: bytes, n: int) -> int:
self.op2.log.info('geom skipping VIEW in EPT')
return len(data)
def _read_view3d(self, data: bytes, n: int) -> int:
self.op2.log.info('geom skipping VIEW3D in EPT')
return len(data)
def break_by_minus1(idata):
    """Split *idata* into (start, stop) index pairs at each -1 sentinel.

    ``stop`` is the index of the -1 itself; any trailing values after the
    last -1 are not reported.
    """
    packs = []
    start = 0
    for stop, value in enumerate(idata):
        if value == -1:
            packs.append((start, stop))
            start = stop + 1
    return packs
| true | true |
f735cbd60586e7a60325ea6879b020b987b34a4b | 810 | py | Python | cloneEveryHourFor48Hours.py | keotl/TA-tools | 1ad5466e2470e8009ad4825c6fd21f37f567e8dd | [
"MIT"
] | null | null | null | cloneEveryHourFor48Hours.py | keotl/TA-tools | 1ad5466e2470e8009ad4825c6fd21f37f567e8dd | [
"MIT"
] | null | null | null | cloneEveryHourFor48Hours.py | keotl/TA-tools | 1ad5466e2470e8009ad4825c6fd21f37f567e8dd | [
"MIT"
] | 1 | 2019-09-03T17:47:51.000Z | 2019-09-03T17:47:51.000Z | #!/usr/bin/python3
import subprocess
import datetime
import time
import threading
import argparse


def preleve(base_dir, branch, timestamp):
    """Run one snapshot of remise.py into a timestamped sub-directory."""
    subprocess.run(["python3", "remise.py",
                    "--destination", base_dir + "/" + timestamp,
                    "--branch", branch])


def main():
    """Take one snapshot per hour for 48 hours.

    Bug fix: the original script parsed arguments and ran the 48-hour loop
    at import time (no ``__main__`` guard), and the worker thread read the
    timestamp through a module-level global.
    """
    parser = argparse.ArgumentParser(description="sleep 3600 pour remise")
    parser.add_argument("--destination", help='base dir destination')
    parser.add_argument("--branch", help="branch to be checked out")
    args = parser.parse_args()

    # Defaults, overridable from the command line.
    base_dir = args.destination if args.destination is not None else "build"
    branch = args.branch if args.branch is not None else "remise_1"

    for _ in range(48):
        timestamp = datetime.datetime.now().strftime("%y-%m-%d_%H.%M")
        print(timestamp)
        # Pass the timestamp explicitly so a slow clone cannot observe a
        # newer loop iteration's value through a shared global.
        threading.Thread(target=preleve, args=(base_dir, branch, timestamp)).start()
        time.sleep(3600)


if __name__ == "__main__":
    main()
| 27 | 110 | 0.698765 |
import subprocess
import datetime
import time
import threading
import argparse

# Command line options: where to put the clones and which branch to grab.
parser = argparse.ArgumentParser(description="sleep 3600 pour remise")
parser.add_argument("--destination", help='base dir destination')
parser.add_argument("--branch", help="branch to be checked out")
args = parser.parse_args()

# Defaults used when the options are not given.
BASE_DIR = "build"
BRANCH = "remise_1"
if args.destination is not None:
    BASE_DIR = args.destination
if args.branch is not None:
    BRANCH = args.branch


def preleve():
    """Snapshot remise.py output into a timestamped folder (reads the globals above)."""
    subprocess.run(["python3", "remise.py", "--destination", BASE_DIR + "/" + timestamp, "--branch", BRANCH])


# One snapshot per hour for 48 hours; each clone runs in its own thread so a
# slow run does not delay the next hourly tick.
for i in range(0, 48):
    timestamp = datetime.datetime.now().strftime("%y-%m-%d_%H.%M")
    print(timestamp)
    threading.Thread(target=preleve).start()
    time.sleep(3600)
| true | true |
f735cc7446deb1248e077f55440f4483396ccf9c | 4,181 | py | Python | bot/exts/info/source.py | zeph-yrus9/bot | 5fe110b9a49144480ebca34dec65de91753994ec | [
"MIT",
"BSD-3-Clause"
] | 22 | 2018-02-03T12:39:12.000Z | 2018-10-03T22:19:23.000Z | bot/exts/info/source.py | zeph-yrus9/bot | 5fe110b9a49144480ebca34dec65de91753994ec | [
"MIT",
"BSD-3-Clause"
] | 52 | 2018-02-09T14:00:39.000Z | 2018-06-06T17:29:32.000Z | bot/exts/info/source.py | zeph-yrus9/bot | 5fe110b9a49144480ebca34dec65de91753994ec | [
"MIT",
"BSD-3-Clause"
] | 15 | 2018-02-09T13:37:17.000Z | 2018-05-10T13:17:58.000Z | import inspect
from pathlib import Path
from typing import Optional, Tuple, Union
from discord import Embed
from discord.ext import commands
from bot.bot import Bot
from bot.constants import URLs
from bot.converters import SourceConverter
from bot.exts.info.tags import TagIdentifier
SourceType = Union[commands.HelpCommand, commands.Command, commands.Cog, TagIdentifier, commands.ExtensionNotLoaded]
class BotSource(commands.Cog):
    """Displays information about the bot's source code."""
    def __init__(self, bot: Bot):
        self.bot = bot
    @commands.command(name="source", aliases=("src",))
    async def source_command(self, ctx: commands.Context, *, source_item: SourceConverter = None) -> None:
        """Display information and a GitHub link to the source code of a command, tag, or cog."""
        if not source_item:
            # No argument given: just link the whole repository.
            embed = Embed(title="Bot's GitHub Repository")
            embed.add_field(name="Repository", value=f"[Go to GitHub]({URLs.github_bot_repo})")
            embed.set_thumbnail(url="https://avatars1.githubusercontent.com/u/9919")
            await ctx.send(embed=embed)
            return
        embed = await self.build_embed(source_item)
        await ctx.send(embed=embed)
    def get_source_link(self, source_item: SourceType) -> Tuple[str, str, Optional[int]]:
        """
        Build GitHub link of source item, return this link, file location and first line number.
        Raise BadArgument if `source_item` is a dynamically-created object (e.g. via internal eval).
        """
        if isinstance(source_item, commands.Command):
            # Unwrap decorators to reach the command's real callback code object.
            source_item = inspect.unwrap(source_item.callback)
            src = source_item.__code__
            filename = src.co_filename
        elif isinstance(source_item, TagIdentifier):
            # Tags live in data files, not Python source; ask the Tags cog for the path.
            tags_cog = self.bot.get_cog("Tags")
            filename = tags_cog.tags[source_item].file_path
        else:
            src = type(source_item)
            try:
                filename = inspect.getsourcefile(src)
            except TypeError:
                raise commands.BadArgument("Cannot get source for a dynamically-created object.")
        if not isinstance(source_item, TagIdentifier):
            try:
                lines, first_line_no = inspect.getsourcelines(src)
            except OSError:
                raise commands.BadArgument("Cannot get source for a dynamically-created object.")
            # GitHub line-range anchor, e.g. "#L10-L42"
            lines_extension = f"#L{first_line_no}-L{first_line_no+len(lines)-1}"
        else:
            first_line_no = None
            lines_extension = ""
        # Handle tag file location differently than others to avoid errors in some cases
        if not first_line_no:
            file_location = Path(filename).relative_to("bot/")
        else:
            file_location = Path(filename).relative_to(Path.cwd()).as_posix()
        url = f"{URLs.github_bot_repo}/blob/main/{file_location}{lines_extension}"
        return url, file_location, first_line_no or None
    async def build_embed(self, source_object: SourceType) -> Optional[Embed]:
        """Build embed based on source object."""
        url, location, first_line = self.get_source_link(source_object)
        if isinstance(source_object, commands.HelpCommand):
            title = "Help Command"
            # second docstring line holds the one-line summary
            description = source_object.__doc__.splitlines()[1]
        elif isinstance(source_object, commands.Command):
            description = source_object.short_doc
            title = f"Command: {source_object.qualified_name}"
        elif isinstance(source_object, TagIdentifier):
            title = f"Tag: {source_object}"
            description = ""
        else:
            title = f"Cog: {source_object.qualified_name}"
            description = source_object.description.splitlines()[0]
        embed = Embed(title=title, description=description)
        embed.add_field(name="Source Code", value=f"[Go to GitHub]({url})")
        # only commands/cogs have a line number; tags do not
        line_text = f":{first_line}" if first_line else ""
        embed.set_footer(text=f"{location}{line_text}")
        return embed
async def setup(bot: Bot) -> None:
    """Entry point used by discord.py to load the BotSource cog."""
    cog = BotSource(bot)
    await bot.add_cog(cog)
| 40.201923 | 116 | 0.654867 | import inspect
from pathlib import Path
from typing import Optional, Tuple, Union
from discord import Embed
from discord.ext import commands
from bot.bot import Bot
from bot.constants import URLs
from bot.converters import SourceConverter
from bot.exts.info.tags import TagIdentifier
SourceType = Union[commands.HelpCommand, commands.Command, commands.Cog, TagIdentifier, commands.ExtensionNotLoaded]
class BotSource(commands.Cog):
    """Displays information about the bot's source code."""
    def __init__(self, bot: Bot):
        self.bot = bot
    @commands.command(name="source", aliases=("src",))
    async def source_command(self, ctx: commands.Context, *, source_item: SourceConverter = None) -> None:
        """Display information and a GitHub link to the source code of a command, tag, or cog."""
        if not source_item:
            # No argument given: just link the whole repository.
            embed = Embed(title="Bot's GitHub Repository")
            embed.add_field(name="Repository", value=f"[Go to GitHub]({URLs.github_bot_repo})")
            embed.set_thumbnail(url="https://avatars1.githubusercontent.com/u/9919")
            await ctx.send(embed=embed)
            return
        embed = await self.build_embed(source_item)
        await ctx.send(embed=embed)
    def get_source_link(self, source_item: SourceType) -> Tuple[str, str, Optional[int]]:
        """
        Build GitHub link of source item, return this link, file location and first line number.

        Raise BadArgument if `source_item` is a dynamically-created object (e.g. via internal eval).
        """
        if isinstance(source_item, commands.Command):
            # Unwrap decorators to reach the command's real callback code object.
            source_item = inspect.unwrap(source_item.callback)
            src = source_item.__code__
            filename = src.co_filename
        elif isinstance(source_item, TagIdentifier):
            # Tags live in data files, not Python source; ask the Tags cog for the path.
            tags_cog = self.bot.get_cog("Tags")
            filename = tags_cog.tags[source_item].file_path
        else:
            src = type(source_item)
            try:
                filename = inspect.getsourcefile(src)
            except TypeError:
                raise commands.BadArgument("Cannot get source for a dynamically-created object.")
        if not isinstance(source_item, TagIdentifier):
            try:
                lines, first_line_no = inspect.getsourcelines(src)
            except OSError:
                raise commands.BadArgument("Cannot get source for a dynamically-created object.")
            # GitHub line-range anchor, e.g. "#L10-L42"
            lines_extension = f"#L{first_line_no}-L{first_line_no+len(lines)-1}"
        else:
            first_line_no = None
            lines_extension = ""
        # Handle tag file location differently than others to avoid errors in some cases
        if not first_line_no:
            file_location = Path(filename).relative_to("bot/")
        else:
            file_location = Path(filename).relative_to(Path.cwd()).as_posix()
        url = f"{URLs.github_bot_repo}/blob/main/{file_location}{lines_extension}"
        return url, file_location, first_line_no or None
    async def build_embed(self, source_object: SourceType) -> Optional[Embed]:
        """Build embed based on source object."""
        url, location, first_line = self.get_source_link(source_object)
        if isinstance(source_object, commands.HelpCommand):
            title = "Help Command"
            # second docstring line holds the one-line summary
            description = source_object.__doc__.splitlines()[1]
        elif isinstance(source_object, commands.Command):
            description = source_object.short_doc
            title = f"Command: {source_object.qualified_name}"
        elif isinstance(source_object, TagIdentifier):
            title = f"Tag: {source_object}"
            description = ""
        else:
            title = f"Cog: {source_object.qualified_name}"
            description = source_object.description.splitlines()[0]
        embed = Embed(title=title, description=description)
        embed.add_field(name="Source Code", value=f"[Go to GitHub]({url})")
        # only commands/cogs have a line number; tags do not
        line_text = f":{first_line}" if first_line else ""
        embed.set_footer(text=f"{location}{line_text}")
        return embed
async def setup(bot: Bot) -> None:
    """Entry point used by discord.py to load the BotSource cog."""
    cog = BotSource(bot)
    await bot.add_cog(cog)
| true | true |
f735ce75cb3189d592ca1a91116e1da6432e4695 | 326 | py | Python | manage.py | rmboot/approvalsystem | b084ecb5fa4b401a052f3cf2a307af1d55f3c8fc | [
"MIT"
] | 3 | 2019-04-16T04:11:11.000Z | 2021-09-19T02:38:49.000Z | manage.py | rmboot/approvalsystem | b084ecb5fa4b401a052f3cf2a307af1d55f3c8fc | [
"MIT"
] | 4 | 2019-06-04T11:14:22.000Z | 2021-09-08T01:11:26.000Z | manage.py | rmboot/approvalsystem | b084ecb5fa4b401a052f3cf2a307af1d55f3c8fc | [
"MIT"
] | 4 | 2019-10-10T15:04:31.000Z | 2021-06-19T17:03:06.000Z | from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from approval_system import create_app
from approval_system.extensions import db
# Build the Flask application via the project's factory function.
app = create_app()
# flask_script's Manager exposes CLI commands for the app.
manager = Manager(app)
# Bind Flask-Migrate to the app and its SQLAlchemy instance
# (Migrate/MigrateCommand are imported above).
migrate = Migrate(app, db)
# `python manage.py db <cmd>` now runs the Alembic migration commands.
manager.add_command('db', MigrateCommand)
# Only start the CLI when executed directly, not when imported.
if __name__ == '__main__':
    manager.run()
| 19.176471 | 49 | 0.785276 | from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from approval_system import create_app
from approval_system.extensions import db
# Build the Flask application via the project's factory function.
app = create_app()
# flask_script's Manager exposes CLI commands for the app.
manager = Manager(app)
# Bind Flask-Migrate to the app and its SQLAlchemy instance
# (Migrate/MigrateCommand are imported above).
migrate = Migrate(app, db)
# `python manage.py db <cmd>` now runs the Alembic migration commands.
manager.add_command('db', MigrateCommand)
# Only start the CLI when executed directly, not when imported.
if __name__ == '__main__':
    manager.run()
| true | true |
f735cf81e2ea3e8120be5104973afe9ad14ad25d | 57,266 | py | Python | Experiments/src/Task Control - Python/UR5Class.py | FelipeH92/Task-Space-Control-Vision | 77d9f709d7cb0afb50ef9baf6ba39304aca445e5 | [
"CC-BY-4.0"
] | null | null | null | Experiments/src/Task Control - Python/UR5Class.py | FelipeH92/Task-Space-Control-Vision | 77d9f709d7cb0afb50ef9baf6ba39304aca445e5 | [
"CC-BY-4.0"
] | null | null | null | Experiments/src/Task Control - Python/UR5Class.py | FelipeH92/Task-Space-Control-Vision | 77d9f709d7cb0afb50ef9baf6ba39304aca445e5 | [
"CC-BY-4.0"
] | 1 | 2021-11-08T23:50:33.000Z | 2021-11-08T23:50:33.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
## @package UR5
# Documentação para o pacote de classes UR5.
#
# Documentação do código produzido para controle do manipulador UR5 e geração e controle de suas posições.
# Cada código aqui documentado possui uma breve descrição de sua função, suas entradas e saídas.
import numpy as np
from numpy.linalg import inv
from numpy.linalg import norm
from numpy.linalg import pinv
from scipy.signal import butter,lfilter
from scipy.signal import freqz
import struct
import time
import csv
import Transformations as tf
import os
## Documentação da Classe UR5Class para controle remoto do manipulador Universal Robots 5 (UR5).
#
# Essa classe é responsável por interpretar os dados recebidos pela caixa de controle do UR5 e controlar seu funcionamento ao longo do projeto.
# A ela cabe as funções dos cálculos de cinemática direta e inversa para as diversas posições do robô, interpretar os dados do robô, verificar
# seu estado de segurança e funcionamento, assim como realizar qualquer cálculo de calibração ou posição necessário.
class UR5Class:
_standard_DH = np.mat([[0,-.425,-.39225,0,0,0], [1.570796327, 0, 0, 1.570796327, -1.570796327, 0], [.089159,0,0,.10915,.09465,.0823], [0, 0, 0, 0, 0, 0]])
# _standard_DH é a tabela DH tradicional do Robô. As linhas correspondem respectivamente a (a, alpha, d,q)
_robot_data = []
# Lista vazia para receber os dados do robô
_data_pack_max = 133
# Tamanho maximo e esperado de valores recebidos em lista no pacote de dados
processTimeList = []
errorDB = []
error_D_DB = []
wDB = []
u = np.array([0, 0, 0, 0, 0, 0],dtype=np.float64)
errorSaturation = np.array([0, 0, 0, 0, 0, 0],dtype=np.float64)
errorPrevious = np.array([0, 0, 0, 0, 0, 0],dtype=np.float64)
errorSum = np.array([0, 0, 0, 0, 0, 0],dtype=np.float64)
normErro = np.zeros(6,dtype=np.float64)
## Construtor da classe.
# @param self O ponteiro do objeto.
# @param delta_DH Os dados de calibração da matriz Denavit-Hartenberg do robô a ser controlado.
def __init__(self, delta_DH = np.zeros((5,6))):
self.delta_standard_DH = delta_DH
self._effective_a = self._standard_DH[0,:] + self.delta_standard_DH[0,:]
self._effective_alpha = self._standard_DH[1,:] + self.delta_standard_DH[1,:]
self._effective_d = self._standard_DH[2,:] + self.delta_standard_DH[2,:]
self._effective_q = np.array(self._standard_DH[3,:] + self.delta_standard_DH[3,:])
# Os dados efetivos equivalem aos dados esperados do UR5 mais os dados de calibração do robô específico.
Rot_x_1 = np.mat([[1, 0, 0, 0], [0, np.cos(self._effective_alpha[0,0]), -np.sin(self._effective_alpha[0,0]), 0], [0, np.sin(self._effective_alpha[0,0]), np.cos(self._effective_alpha[0,0]), 0], [ 0, 0, 0, 1]])
Rot_x_2 = np.mat([[1, 0, 0, 0], [0, np.cos(self._effective_alpha[0,1]), -np.sin(self._effective_alpha[0,1]), 0], [0, np.sin(self._effective_alpha[0,1]), np.cos(self._effective_alpha[0,1]), 0], [ 0, 0, 0, 1]])
Rot_x_3 = np.mat([[1, 0, 0, 0], [0, np.cos(self._effective_alpha[0,2]), -np.sin(self._effective_alpha[0,2]), 0], [0, np.sin(self._effective_alpha[0,2]), np.cos(self._effective_alpha[0,2]), 0], [ 0, 0, 0, 1]])
Rot_x_4 = np.mat([[1, 0, 0, 0], [0, np.cos(self._effective_alpha[0,3]), -np.sin(self._effective_alpha[0,3]), 0], [0, np.sin(self._effective_alpha[0,3]), np.cos(self._effective_alpha[0,3]), 0], [ 0, 0, 0, 1]])
Rot_x_5 = np.mat([[1, 0, 0, 0], [0, np.cos(self._effective_alpha[0,4]), -np.sin(self._effective_alpha[0,4]), 0], [0, np.sin(self._effective_alpha[0,4]), np.cos(self._effective_alpha[0,4]), 0], [ 0, 0, 0, 1]])
Rot_x_6 = np.mat([[1, 0, 0, 0], [0, np.cos(self._effective_alpha[0,5]), -np.sin(self._effective_alpha[0,5]), 0], [0, np.sin(self._effective_alpha[0,5]), np.cos(self._effective_alpha[0,5]), 0], [ 0, 0, 0, 1]])
Trans_d_1 = np.mat([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, self._effective_d[0,0]], [0, 0, 0, 1]])
Trans_d_2 = np.mat([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, self._effective_d[0,1]], [0, 0, 0, 1]])
Trans_d_3 = np.mat([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, self._effective_d[0,2]], [0, 0, 0, 1]])
Trans_d_4 = np.mat([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, self._effective_d[0,3]], [0, 0, 0, 1]])
Trans_d_5 = np.mat([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, self._effective_d[0,4]], [0, 0, 0, 1]])
Trans_d_6 = np.mat([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, self._effective_d[0,5]], [0, 0, 0, 1]])
Trans_a_1 = np.mat([[1, 0, 0, self._effective_a[0,0]], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
Trans_a_2 = np.mat([[1, 0, 0, self._effective_a[0,1]], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
Trans_a_3 = np.mat([[1, 0, 0, self._effective_a[0,2]], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
Trans_a_4 = np.mat([[1, 0, 0, self._effective_a[0,3]], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
Trans_a_5 = np.mat([[1, 0, 0, self._effective_a[0,4]], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
Trans_a_6 = np.mat([[1, 0, 0, self._effective_a[0,5]], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
self._A_0_1 = Trans_d_1 * Trans_a_1 * Rot_x_1
self._A_0_2 = Trans_d_2 * Trans_a_2 * Rot_x_2
self._A_0_3 = Trans_d_3 * Trans_a_3 * Rot_x_3
self._A_0_4 = Trans_d_4 * Trans_a_4 * Rot_x_4
self._A_0_5 = Trans_d_5 * Trans_a_5 * Rot_x_5
self._A_0_6 = Trans_d_6 * Trans_a_6 * Rot_x_6
# Transformações comuns, indiferentes a movimentação, utilizadas em cálculos futuros.
return
## Método que recebe e configura o pacote de dados do robô.
# @param self O ponteiro do objeto.
# @param data O pacote de dados recebido pela conexão Ethernet com o robô.
def setRobotData(self, data):
size = len(data)
self._robot_data = []
# O primeiro dado recebido, de tempo, é um inteiro de 4 bytes.
self._robot_data.append(struct.unpack('!i', data[0:4]))
i = 4
# O resto dos dados recebidos vem em formato de double de 8 bytes.
while i < size:
self._robot_data.append(struct.unpack('!d', data[i:i+8])[0])
i += 8
# Já atualiza os dados de juntas do robô.
if (size < (4+(34*8))):
print("[WARNING] Data size smaller than expected. Bytes: " + str(size))
return
self._effective_q = np.array(self._robot_data[32:38]) + self.delta_standard_DH[3,:]
return
# setRobotData recebe o pacote de 1060 bytes e os separa nos 160 valores da lista de dados.
def setRobotDataRTDE(self, data):
#print(data.actual_TCP_pose)
self._robot_data[1] = np.asarray(data.timestamp, dtype = np.float64)
self._robot_data[2:8] = np.asarray(data.target_q, dtype = np.float64)
self._robot_data[8:14] = np.asarray(data.target_qd, dtype = np.float64)
self._robot_data[32:38] = np.asarray(data.actual_q, dtype = np.float64)
self._robot_data[38:44] = np.asarray(data.actual_qd, dtype = np.float64)
self._robot_data[56:62] = np.asarray(data.actual_TCP_pose, dtype = np.float64)
self._robot_data[62:68] = np.asarray(data.actual_TCP_speed, dtype = np.float64)
self._robot_data[68:74] = np.asarray(data.actual_TCP_force, dtype = np.float64)
self._robot_data[74:80] = np.asarray(data.target_TCP_pose, dtype = np.float64)
self._robot_data[80:86] = np.asarray(data.target_TCP_speed, dtype = np.float64)
self._robot_data[102] = np.asarray(data.safety_mode, dtype = np.int32)
self._robot_data[132] = np.asarray(data.runtime_state, dtype = np.uint32)
q = np.asarray(data.actual_q)
self._effective_q = q + self.delta_standard_DH[3,:]
# <field name="timestamp" type="DOUBLE"/>
# <field name="target_q" type="VECTOR6D"/>
# <field name="target_qd" type="VECTOR6D"/>
# <field name="target_qdd" type="VECTOR6D"/>
# <field name="target_current" type="VECTOR6D"/>
# <field name="target_moment" type="VECTOR6D"/>
# <field name="actual_q" type="VECTOR6D"/>
# <field name="actual_qd" type="VECTOR6D"/>
# <field name="actual_current" type="VECTOR6D"/>
# <field name="joint_control_output" type="VECTOR6D"/>
# <field name="actual_TCP_pose" type="VECTOR6D"/>
# <field name="actual_TCP_speed" type="VECTOR6D"/>
# <field name="actual_TCP_force" type="VECTOR6D"/>
# <field name="target_TCP_pose" type="VECTOR6D"/>
# <field name="target_TCP_speed" type="VECTOR6D"/>
# <field name="actual_digital_input_bits" type="UINT64"/>
# <field name="joint_temperatures" type="VECTOR6D"/>
# <field name="actual_execution_time" type="DOUBLE"/>
# <field name="robot_mode" type="INT32"/>
# <field name="joint_mode" type="VECTOR6INT32"/>
# <field name="safety_mode" type="INT32"/>
# <field name="actual_tool_accelerometer" type="VECTOR3D"/>
# <field name="speed_scaling" type="DOUBLE"/>
# <field name="target_speed_fraction" type="DOUBLE"/>
# <field name="actual_momentum" type="DOUBLE"/>
# <field name="actual_main_voltage" type="DOUBLE"/>
# <field name="actual_robot_voltage" type="DOUBLE"/>
# <field name="actual_robot_current" type="DOUBLE"/>
# <field name="actual_joint_voltage" type="VECTOR6D"/>
# <field name="actual_digital_output_bits" type="UINT64"/>
# <field name="runtime_state" type="UINT32"/>
return
## Retorna verdadeiro ou falso para o estado de segurança do robô.
# @param self O ponteiro do objeto.
def checkSafety(self):
try:
if self._robot_data[102] == 1:
safety = True
else:
safety = False
return safety
except:
print("[ERROR] Could not find Robot Data!")
return None
# checkSafety verifica se a variável de segurança do robô está apta a operar
## Retorna verdadeiro ou falso para o estado de operação do robô.
# @param self O ponteiro do objeto.
def programStateCheck(self):
try:
if self._robot_data[132] == 1:
state = True
else:
state = False
return state
except:
print("[ERROR] Could not find Robot Data!")
return None
# programStateCheck verifica se a variável de estado do robô está apta a operar
## Imprime em prompt de comando as 133 informações recebidas pelo pacote de dados do robô.
# @param self O ponteiro do objeto.
def printRobotData(self):
size = len(self._robot_data)
if size == self._datapackmax:
print("[INFO] Message Size in Bytes: " + str(self._robot_data[0]))
print("[INFO] Time: " + str(self._robot_data[1]))
print("[INFO] q target" + str(self._robot_data[2:8]))
print("[INFO] qd target" + str(self._robot_data[8:14]))
print("[INFO] qdd target" + str(self._robot_data[14:20]))
print("[INFO] I target" + str(self._robot_data[20:26]))
print("[INFO] M target" + str(self._robot_data[26:32]))
print("[INFO] q actual" + str(self._robot_data[32:38]))
print("[INFO] qd actual" + str(self._robot_data[38:44]))
print("[INFO] I actual" + str(self._robot_data[44:50]))
print("[INFO] I control" + str(self._robot_data[50:56]))
print("[INFO] Tool Vector Actual" + str(self._robot_data[56:62]))
print("[INFO] TCP Speed Actual" + str(self._robot_data[62:68]))
print("[INFO] TCP Force" + str(self._robot_data[68:74]))
print("[INFO] Tool Vector Target" + str(self._robot_data[74:80]))
print("[INFO] TCP Speed Target" + str(self._robot_data[80:86]))
print("[INFO] digital input bits" + str(self._robot_data[86]))
print("[INFO] Motor Temperatures" + str(self._robot_data[87:93]))
print("[INFO] Controller Timer" + str(self._robot_data[93]))
print("[INFO] Test Value" + str(self._robot_data[94]))
print("[INFO] Robot Mode" + str(self._robot_data[95]))
print("[INFO] Joint Modes" + str(self._robot_data[96:102]))
print("[INFO] Safety Mode" + str(self._robot_data[102]))
print("[INFO] Tool Acceleration Values" + str(self._robot_data[109:112]))
print("[INFO] Speed Scaling" + str(self._robot_data[118]))
print("[INFO] Linear Momentum Norm" + str(self._robot_data[119]))
print("[INFO] V Main" + str(self._robot_data[122]))
print("[INFO] V Robot" + str(self._robot_data[123]))
print("[INFO] I Robot" + str(self._robot_data[124]))
print("[INFO] V actual" + str(self._robot_data[125:131]))
print("[INFO] Digital Outputs" + str(self._robot_data[131]))
print("[INFO] Program State" + str(self._robot_data[132]))
# Exceção caso o pacote venha menor que 1060 Bytes
else:
print("[WARNING] Size of data smaller than expected: ", size)
return
# printRobotData imprime em tela todos os valores do pacote de dados traduzido, para depuração
## Retorna o vetor de posição do efetuador do robô, em formato [x, y, z, rx, ry, rz].
# @param self O ponteiro do objeto.
def getPositionTarget(self):
try:
array = np.array(self._robot_data[74:80])
return array
except:
print("[ERROR] Could not find Robot Data!")
return None
# getPosition retorna a posição atual do vetor da ferramenta.
def getPosition(self):
try:
array = np.array(self._robot_data[56:62])
return array
except:
print("[ERROR] Could not find Robot Data!")
return None
# getPosition retorna a posição atual do vetor da ferramenta.
## Retorna o vetor de velocidade do efetuador do robô, em formato [dx, dy, dz, drx, dry, drz].
# @param self O ponteiro do objeto.
def getTCPSpeed(self):
try:
array = np.array(self._robot_data[62:68])
return array
except:
print("[ERROR] Could not find Robot Data!")
return None
# getTCPSpeed retorna a velocidade da ferramenta.
## Retorna o vetor de velocidade do efetuador do robô, em formato [dx, dy, dz, drx, dry, drz].
# @param self O ponteiro do objeto.
def getTCPSpeedTarget(self):
try:
array = np.array(self._robot_data[80:86])
return array
except:
print("[ERROR] Could not find Robot Data!")
return None
# getTCPSpeed retorna a velocidade da ferramenta.
## Retorna o vetor de velocidade modular do efetuador do robô, em formato [v].
# @param self O ponteiro do objeto.
def getTCPSpeedMod(self):
try:
v = np.sqrt(self._robot_data[62]*self._robot_data[62] + self._robot_data[63]*self._robot_data[63] + self._robot_data[64]*self._robot_data[64])
return v
except:
print("[ERROR] Could not find Robot Data!")
return None
# getTCPSpeed retorna a velocidade da ferramenta.
## Retorna o vetor de posição das seis juntas do robô.
# @param self O ponteiro do objeto.
def getJointPosition(self):
try:
array = np.array(self._robot_data[32:38])
return array
except:
print("[ERROR] Could not find Robot Data!")
return None
## Retorna o vetor de posição das seis juntas do robô.
# @param self O ponteiro do objeto.
def getJointPositionTarget(self):
try:
array = np.array(self._robot_data[2:8])
return array
except:
print("[ERROR] Could not find Robot Data!")
return None
# Retorna o valor das articulações da ferramenta
## Retorna o vetor de velocidade das seis juntas do robô.
# @param self O ponteiro do objeto.
def getJointSpeed(self):
try:
array = np.array(self._robot_data[38:44])
return array
except:
print("[ERROR] Could not find Robot Data!")
return None
# getJointSpeed retorna a velocidade da ferramenta.
## Retorna o vetor de velocidade das seis juntas do robô.
# @param self O ponteiro do objeto.
def getJointSpeedTarget(self):
try:
array = np.array(self._robot_data[8:14])
return array
except:
print("[ERROR] Could not find Robot Data!")
return None
# getJointSpeed retorna a velocidade da ferramenta.
def getTCPForce(self):
try:
array = np.array(self._robot_data[68:74])
return array
except:
print("[ERROR] Could not find Robot Data!")
return None
    # (The Portuguese trailing comment above was a copy/paste of getJointSpeed's;
    # the method above actually returns the TCP force.)
    ## Returns the robot's current time counter since it was powered on.
    # @param self The object pointer.
    def getTime(self):
        # Word 1 of the robot state holds the controller timestamp
        # (see setRobotDataRTDE: timestamp).
        return self._robot_data[1]
    # getTime returns the current uptime value.
    ## Forward kinematics of the UR5 for joint vector q. Returns the 4x4
    # homogeneous matrix of the tool pose, or (with vector=True) a 6-element
    # pose vector whose orientation is a rotation vector (RV) or, with
    # rpy=True, roll-pitch-yaw.
    # @param self The object pointer.
    # @param q Joint coordinate vector (6 elements).
    # @param vector If True, return a pose vector instead of the 4x4 matrix.
    # @param rpy Together with vector, return the orientation in RPY form.
    # @param apply_offset If True, add the DH calibration offsets (delta_standard_DH row 3) to q first.
    def ur5_direct_kinematics(self, q, vector = False, rpy = False, apply_offset = False):
        if (apply_offset == True):
            # q = q + self.delta_standard_DH[3,:]
            q = np.squeeze(np.asarray(q + self.delta_standard_DH[3,:]))
        # Rotation of each joint about its z axis by the joint angle q[i].
        _rot_z_1 = np.mat([[np.cos(q[0]), -np.sin(q[0]), 0, 0],[np.sin(q[0]), np.cos(q[0]), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
        _rot_z_2 = np.mat([[np.cos(q[1]), -np.sin(q[1]), 0, 0],[np.sin(q[1]), np.cos(q[1]), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
        _rot_z_3 = np.mat([[np.cos(q[2]), -np.sin(q[2]), 0, 0],[np.sin(q[2]), np.cos(q[2]), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
        _rot_z_4 = np.mat([[np.cos(q[3]), -np.sin(q[3]), 0, 0],[np.sin(q[3]), np.cos(q[3]), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
        _rot_z_5 = np.mat([[np.cos(q[4]), -np.sin(q[4]), 0, 0],[np.sin(q[4]), np.cos(q[4]), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
        _rot_z_6 = np.mat([[np.cos(q[5]), -np.sin(q[5]), 0, 0],[np.sin(q[5]), np.cos(q[5]), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
        # Compose with the constant link transforms built in the constructor.
        # The per-link matrices are cached on self for later use by jacobian2().
        self._A_1 = _rot_z_1 * self._A_0_1
        self._A_2 = _rot_z_2 * self._A_0_2
        self._A_3 = _rot_z_3 * self._A_0_3
        self._A_4 = _rot_z_4 * self._A_0_4
        self._A_5 = _rot_z_5 * self._A_0_5
        self._A_6 = _rot_z_6 * self._A_0_6
        self._H = self._A_1 * self._A_2 * self._A_3 * self._A_4 * self._A_5 * self._A_6
        #print self._H
        if (vector == False):
            return self._H
        else:
            # Convert the rotation block to a rotation vector and prepend xyz.
            vetor = tf.matrix2RotationVector(self._H[0:3,0:3])
            array = np.array([self._H[0,3], self._H[1,3], self._H[2,3]], float)
            vetor = np.hstack((array,vetor))
            #print vetor
            if (rpy == False):
                return vetor
            else:
                vetor[3:6] = tf.rotationVector2RollPitchYaw(vetor[3:6])
                return vetor
# ur5_direct_kinematics executa a cinemática direta do UR5 e retorna a matriz 4x4 de posição e orientação do UR5
def verifyDelta(self, epsilon = 10e-6):
direct = self.ur5_direct_kinematics(self.getJointPosition(), vector = True, apply_offset = True)
real = self.getPosition()
diff = tf.computeDifference(real,direct)
print("[INFO] Direct Kinematics calculated with Delta: " + str(direct))
print("[INFO] Direct Kinematics real: " + str(real))
error = norm(diff[0:3])
print("[INFO] Error: ", error)
if (error < epsilon):
print("[INFO] Correct Delta Matrix!")
return True
else:
print("[WARNING] Incorrect Delta Matrix!")
return False
def _DH(self, a, alpha, d, theta):
Td = np.asmatrix(np.eye(4))
Td[2,3] = d
Ta = np.asmatrix(np.eye(4))
Ta[0,3] = a
Rtheta = tf.Rot_z(theta)
Rtheta = np.mat([[Rtheta[0,0], Rtheta[0,1], Rtheta[0,2], 0], [Rtheta[1,0], Rtheta[1,1], Rtheta[1,2], 0], [Rtheta[2,0], Rtheta[2,1], Rtheta[2,2], 0], [0,0,0,1]])
Ralpha = tf.Rot_x(alpha)
Ralpha = np.mat([[Ralpha[0,0], Ralpha[0,1], Ralpha[0,2], 0], [Ralpha[1,0], Ralpha[1,1], Ralpha[1,2], 0], [Ralpha[2,0], Ralpha[2,1], Ralpha[2,2], 0], [0,0,0,1]])
G = Td * Rtheta * Ta * Ralpha
return G
# _DH retorna uma matrix 4x4 de junta especifica, utilizado na cinemática inversa analítica
def _analytic_ur5_inverse_kinematics(self, p):
rvMatrix = tf.rotationVector2Matrix(p[3:6])
gd = np.mat(([[rvMatrix[0,0], rvMatrix[0,1], rvMatrix[0,2], p[0]], [rvMatrix[1,0], rvMatrix[1,1], rvMatrix[1,2], p[1]], [rvMatrix[2,0], rvMatrix[2,1], rvMatrix[2,2], p[2]], [0, 0, 0, 1]]))
theta = np.zeros((6, 8))
d1 = self._standard_DH[2,0]
d2 = self._standard_DH[2,1]
d3 = self._standard_DH[2,2]
d4 = self._standard_DH[2,3]
d5 = self._standard_DH[2,4]
d6 = self._standard_DH[2,5]
a1 = self._standard_DH[0,0]
a2 = self._standard_DH[0,1]
a3 = self._standard_DH[0,2]
a4 = self._standard_DH[0,3]
a5 = self._standard_DH[0,4]
a6 = self._standard_DH[0,5]
alpha1 = self._standard_DH[1,0]
alpha2 = self._standard_DH[1,1]
alpha3 = self._standard_DH[1,2]
alpha4 = self._standard_DH[1,3]
alpha5 = self._standard_DH[1,4]
alpha6 = self._standard_DH[1,5]
# Calculating theta1
p05 = gd * np.mat([[0], [0], [-d6], [1]])
p05 = p05 - np.mat([[0], [0], [0], [1]])
psi = np.arctan2(p05[1], p05[0])
p05xy = np.sqrt(p05[1]*p05[1] + p05[0]*p05[0])
if (d4 > p05xy):
print ("[WARNING] No solution for Theta1: d4 > P05xy")
print ("[WARNING] Creating aproximation highly inaccurate")
d4 = p05xy - 1e-10
try:
phi = np.arccos(d4 / p05xy)
except:
print("[ERROR] Division by zero: " + str(p05xy))
return None
theta[0, 0:4] = np.radians(90) + psi + phi
theta[0, 4:8] = np.radians(90) + psi - phi
theta = np.real(theta)
# Calculating theta5
cols = np.array([0, 4])
for i in range(0, cols.size):
c = cols[i];
try:
T10 = inv(self._DH(a1, alpha1, d1, theta[0,c]))
except:
print("[ERROR] Could not find inverse: " + str(self._DH(a1, alpha1, d1, theta[0,c])))
return None
T16 = T10 * gd
p16z = T16[2,3]
try:
if (((p16z-d4)/d6) > 1):
print ("[WARNING] No solution for Theta5: (p16z-d4)/d6) > 1")
print ("[WARNING] Creating aproximation highly inaccurate")
d6 = (p16z-d4) + 1e-10
t5 = np.arccos((p16z-d4)/d6)
except:
print("[ERROR] Division by zero: " + str(d6))
return None
theta[4, c:c+2] = t5
theta[4, c+2:c+4] = -t5
theta = np.real(theta)
# Calculating theta6
cols = np.array([0, 2, 4, 6])
for i in range(0, cols.size):
c = cols[i]
T01 = self._DH(a1, alpha1, d1, theta[0,c])
try:
T61 = inv(gd) * T01
except:
print("[ERROR] Could not find inverse: " + str(gd))
return None
T61zy = T61[1, 2]
T61zx = T61[0, 2]
t5 = theta[4, c]
if (np.sin(t5) == 0):
theta[5, c:c+2] = 0
else:
theta[5, c:c+2] = np.arctan2(-T61zy/np.sin(t5), T61zx/np.sin(t5))
theta = np.real(theta)
# Calculating theta3
cols = np.array([0, 2, 4, 6])
for i in range (0, cols.size):
c = cols[i]
try:
T10 = inv(self._DH(a1, alpha1, d1, theta[0,c]))
T65 = inv(self._DH(a6, alpha6, d6, theta[5,c]))
T54 = inv(self._DH(a5, alpha5, d5, theta[4,c]))
except T10:
print("[ERROR] Could not find inverse: Theta3, inverse 1, " + str(T10))
return None
except T65:
print("[ERROR] Could not find inverse: Theta3, inverse 2, " + str(T65))
return None
except T54:
print("[ERROR] Could not find inverse: Theta3, inverse 3, " + str(T54))
return None
T14 = T10 * gd * T65 * T54
p13 = T14 * np.mat([[0], [-d4], [0], [1]])
p13 = p13 - np.mat([[0], [0], [0], [1]])
p13norm2 = norm(p13) * norm(p13)
arg = (p13norm2-a2*a2-a3*a3)/(2*a2*a3)
if (arg > 1 or arg < -1):
print ("[WARNING] No solution for Theta3: arg < -1 or arg > 1")
print ("[WARNING] Creating aproximation highly inaccurate")
if (arg >1):
arg = 1 - 1e-10
else:
arg = -1 + 1e-10
t3p = np.arccos(arg)
theta[2, c] = t3p
theta[2, c+1] = -t3p
theta = np.real(theta)
# Calculating theta2 and theta4
cols = np.array([0, 1, 2, 3, 4, 5, 6, 7])
for i in range (0, cols.size):
c = cols[i]
try:
T10 = inv(self._DH(a1, alpha1, d1, theta[0,c]))
T65 = inv(self._DH(a6, alpha6, d6, theta[5,c]))
T54 = inv(self._DH(a5, alpha5, d5, theta[4,c]))
except T10:
print("[ERROR] Could not find inverse: Theta2 inverse 1, " + str(T10))
return None
except T65:
print("[ERROR] Could not find inverse: Theta2, inverse 2, " + str(T65))
return None
except T54:
print("[ERROR] Could not find inverse: Theta2, inverse 3, " + str(T54))
return None
T14 = T10 * gd * T65 * T54
p13 = T14 * np.mat([[0], [-d4], [0], [1]]) - np.mat([[0], [0], [0], [1]])
p13norm = norm(p13)
theta[1, c] = -np.arctan2(p13[1], -p13[0])+np.arcsin(a3*np.sin(theta[2,c])/p13norm)
try:
T32 = inv(self._DH(a3, alpha3, d3, theta[2,c]))
T21 = inv(self._DH(a2, alpha2, d2, theta[1,c]))
except T10:
print("[ERROR] Could not find inverse: Theta4 inverse 1, " + str(T32))
return None
except T65:
print("[ERROR] Could not find inverse: Theta4, inverse 2, " + str(T21))
return None
T34 = T32 * T21 * T14;
theta[3, c] = np.arctan2(T34[1,0], T34[0,0])
theta = np.real(theta)
for i in range (0, 5):
for j in range(0,7):
if theta[i,j] > np.pi:
theta[i,j] -= 2*np.pi
elif theta[i,j] < -np.pi:
theta[i,j] += 2*np.pi
return theta
    # _analytic_ur5_inverse_kinematics returns a 6x8 matrix with the eight
    # candidate 6-joint configurations for the requested UR5 pose.
    ## Numeric Jacobian of the pose with respect to the joints, obtained by
    # finite differences of the forward kinematics.
    # @param self The object pointer.
    # @param q_Past Column vector [6x1] of joint angles where the Jacobian is evaluated.
    # @param deltaTheta Column vector [6x1] of per-joint finite-difference steps.
    # @param rpy If True, the pose orientation is differentiated in RPY form.
    def jacobian(self, q_Past, deltaTheta, rpy = False):
        jacobian_matrix = np.zeros((6,6))
        FK_init = self.ur5_direct_kinematics(np.squeeze(np.asarray(q_Past.transpose() + self.delta_standard_DH[3,:])), vector = True, rpy = rpy)
        step = deltaTheta
        NaN_check = False
        for i in range(0,6):
            # Perturb joint i only and re-evaluate the forward kinematics.
            q_aux = np.array([[0],[0],[0],[0],[0],[0]], float)
            q_aux[i] += step[i]
            q_aux = q_Past + q_aux
            q_aux = np.squeeze(np.asarray(q_aux.transpose() + self.delta_standard_DH[3,:]))
            FK_next = self.ur5_direct_kinematics(q_aux, vector = True, rpy = rpy)
            jacobian_matrix[i,:] = (tf.computeDifference(FK_next, FK_init)/(step[i]))
            # Zero out any NaN row rather than propagating it.
            if(np.any(np.isnan(jacobian_matrix[i,:]))):
                jacobian_matrix[i,:] = np.zeros(6)
                NaN_check = True
        if(NaN_check):
            print("[WARNING] NaN found on Jacobian.")
        # Rows were filled per joint; transpose to the usual dPose/dq layout.
        return jacobian_matrix.transpose()
    ## Geometric Jacobian assembled column-by-column from the cached link
    # transforms: column i is [ z_{i-1} x (p_6 - p_{i-1}) ; z_{i-1} ].
    # @param self The object pointer.
    # @param q Joint position vector; calibration offsets are applied internally.
    def jacobian2(self, q):
        jacobian_matrix = np.zeros((6,6))
        # Refresh the cached matrices self._A_1.._A_6 and self._H for q.
        self.ur5_direct_kinematics(np.squeeze(np.asarray(q.transpose() + self.delta_standard_DH[3,:])))
        # R^0_{i-1}dot(0,0,1)cross(d^0_n - d^0_{i-1})
        auxRow = np.array([[0],[0],[1]])
        # Column 1 (base frame: rotation is the identity)
        jacobian_matrix[0:3,0] = np.cross(np.dot(np.eye(3),auxRow),self._H[0:3,3],axisa=0,axisb=0,axisc=1)
        jacobian_matrix[3:6,0] = np.dot(np.eye(3),auxRow).transpose()
        # Column 2
        jacobian_matrix[0:3,1] = np.cross(np.dot(self._A_1[0:3,0:3],auxRow),(self._H[0:3,3] - self._A_1[0:3,3]),axisa=0,axisb=0,axisc=1)
        jacobian_matrix[3:6,1] = np.dot(self._A_1[0:3,0:3],auxRow).transpose()
        # Column 3
        aux = self._A_1 * self._A_2
        jacobian_matrix[0:3,2] = np.cross(np.dot(aux[0:3,0:3],auxRow),(self._H[0:3,3] - aux[0:3,3]),axisa=0,axisb=0,axisc=1)
        jacobian_matrix[3:6,2] = np.dot(aux[0:3,0:3],auxRow).transpose()
        # Column 4
        aux = aux * self._A_3
        jacobian_matrix[0:3,3] = np.cross(np.dot(aux[0:3,0:3],auxRow),(self._H[0:3,3] - aux[0:3,3]),axisa=0,axisb=0,axisc=1)
        jacobian_matrix[3:6,3] = np.dot(aux[0:3,0:3],auxRow).transpose()
        # Column 5
        aux = aux * self._A_4
        jacobian_matrix[0:3,4] = np.cross(np.dot(aux[0:3,0:3],auxRow),(self._H[0:3,3] - aux[0:3,3]),axisa=0,axisb=0,axisc=1)
        jacobian_matrix[3:6,4] = np.dot(aux[0:3,0:3],auxRow).transpose()
        # Column 6
        aux = aux * self._A_5
        jacobian_matrix[0:3,5] = np.cross(np.dot(aux[0:3,0:3],auxRow),(self._H[0:3,3] - aux[0:3,3]),axisa=0,axisb=0,axisc=1)
        jacobian_matrix[3:6,5] = np.dot(aux[0:3,0:3],auxRow).transpose()
        return jacobian_matrix
def jacobianEndEffectorReference(self,jacobian):
fowardKinematics = self._H
jacobianTransform = np.eye(6)
#jacobianTransform[0:3,0:3] = fowardKinematics[0:3,0:3].transpose()
jacobianTransform[3:6,3:6] = fowardKinematics[0:3,0:3].transpose()
newJacobian = np.dot(jacobianTransform,jacobian)
return newJacobian
def jacobianAnalytic(self, q):
pose = self.ur5_direct_kinematics(np.squeeze(np.asarray(q.transpose() + self.delta_standard_DH[3,:])),vector = True, rpy = True)
jacobian = self.jacobian2(q)
jacobian = self.jacobianEndEffectorReference(jacobian)
# r = pose[3]
# p = pose[4]
# #y = pose[5]
# B = np.array([[1,0,np.sin(p)],[0,np.cos(r),-np.cos(p)*np.sin(r)],[0,np.sin(r),np.cos(p)*np.cos(r)]])
# invB = inv(B)
# auxMat = np.eye(6)
# auxMat[3:6,3:6] = invB
# jacobianAnalytic = np.dot(auxMat,jacobian)
#jacobianAnalytic = self.jacobianEndEffectorReference(jacobianAnalytic)
return jacobian
    ## Numeric inverse kinematics (Newton-Raphson) for the UR5. Refines one of
    # the eight analytic IK configurations until the pose-error norm falls
    # below a fixed tolerance, and returns the 6-joint vector of that
    # configuration.
    # @param self The object pointer.
    # @param cartesian_position Pose vector [1x6] to be converted.
    # @param chosen_theta Which analytic configuration to refine. Default = 2.
    # @param theta Optional initial joint guess used to seed the iteration.
    # @param rpy If True, the given orientation is RPY instead of a rotation vector.
    def ur5_inverse_kinematics_newthon_raphson(self, cartesian_position, chosen_theta = 2, theta = np.zeros(6), rpy = False):
        #t = time.clock()
        if (rpy == True):
            cartesian_position[3:6] = tf.rollPitchYaw2RotationVector(cartesian_position[3:6])
        # The analytic inverse kinematics seeds the iteration.
        if (np.all(theta == 0)):
            theta = self._analytic_ur5_inverse_kinematics(cartesian_position)
            joint_analytic_IK = theta[:,chosen_theta]
        else:
            joint_analytic_IK = theta
        NaN_check = np.isnan(joint_analytic_IK)
        if (np.any(NaN_check)):
            joint_analytic_IK = self.getJointPosition()
            print ("[WARNING] Nan position found in analytic IK solution, using Actual Joint Position as start position.")
        # Initial joint vector to be numerically corrected.
        #print joint_analytic_IK
        q_i = np.array([0,0,0,0,0,0], float)
        q_i += joint_analytic_IK
        joint_analytic_IK = joint_analytic_IK + self.delta_standard_DH[3,:]
        joint_analytic_IK = np.squeeze(np.asarray(joint_analytic_IK))
        FK = self.ur5_direct_kinematics(joint_analytic_IK, True)
        # Initial pose error between the target and the seed's pose.
        cartesian_position_rpy = cartesian_position
        erro = tf.computeDifference(cartesian_position_rpy, FK)
        norm_erro = norm(erro)
        episilon = 0.0001*0.0001
        max_iteractions = 500
        iteraction = 1
        # Reshape joints and error as column vectors for the iteration.
        q_i = np.array([[q_i[0]], [q_i[1]],[q_i[2]], [q_i[3]],[q_i[4]], [q_i[5]]])
        erro = np.array([[erro[0]], [erro[1]],[erro[2]], [erro[3]],[erro[4]], [erro[5]]])
        delta_theta = np.ones(6)*0.000006
        delta_theta = np.array([[delta_theta[0]], [delta_theta[1]],[delta_theta[2]], [delta_theta[3]],[delta_theta[4]], [delta_theta[5]]])
        while (norm_erro > episilon):
            # Newton step: q_{n+1} = q_n + pinv(J) * error.
            j = self.jacobian(q_i, delta_theta)
            try:
                jt = pinv(j)
            except:
                # SVD failed; fall back to the right pseudo-inverse formula.
                print("[WARNING] Pseudo Inverse with SVD diverged")
                jt = np.dot(j.transpose(),inv(np.dot(j,j.transpose())))
            q_in = np.array([[0],[0],[0],[0],[0],[0]], float)
            q_in = q_i + np.dot(jt,erro)
            delta_theta = q_in - q_i
            q_i = np.array([[0],[0],[0],[0],[0],[0]], float)
            q_i += q_in
            q_i = np.squeeze(np.asarray(q_i.transpose()))
            FK = self.ur5_direct_kinematics(np.squeeze(np.asarray(q_i + self.delta_standard_DH[3,:])), True)
            erro = tf.computeDifference(cartesian_position_rpy, FK)
            norm_erro = norm(erro)
            erro = np.array([[erro[0]], [erro[1]],[erro[2]], [erro[3]],[erro[4]], [erro[5]]])
            q_i = np.array([[q_i[0]], [q_i[1]],[q_i[2]], [q_i[3]],[q_i[4]], [q_i[5]]])
            iteraction += 1
            if (iteraction > max_iteractions):
                print ("[ERROR] Maximum interactions reached.")
                break
        #t2 = time.clock()
        #print ("NRa convergence time: ", t2 - t)
        q_i = q_i.transpose()
        q_aux = np.array([q_i[0,0],q_i[0,1],q_i[0,2],q_i[0,3],q_i[0,4],q_i[0,5]], float)
        return q_aux
## Esse método realiza a cinemática inversa de uma posição espacial para uma das oito configurações possíveis no espaço utilizando aproximação numérica por Cyclic Coordinate Descent.
# Ele retorna um vetor com as seis juntas que representam a configuração escolhida. Obs.: Lento.
# @param self O ponteiro do objeto.
# @param cartesian_position Vetor [1x6] da posição a ser transformada.
# @param chosen_theta Configuração escolhida. Default = 2.
def ur5_inverse_kinematics_ccd(self, cartesian_position, chosen_theta = 2):
# A cinemática inversa analitica é inicialmente calculada
t = time.clock()
theta = self._analytic_ur5_inverse_kinematics(cartesian_position)
# O vetor de juntas inicial a ser corrigido numéricamente é escolhido
joint_analytic_IK = theta[:,chosen_theta]
self._effective_q = joint_analytic_IK + self.delta_standard_DH[3,:]
Initial_DK = self.ur5_direct_kinematics(np.squeeze(np.asarray(self._effective_q.transpose())), True)
Initial_DK[3:6] = tf.rotationVector2RollPitchYaw(Initial_DK[3:6])
# Cyclic Coordinate Descent
cartesian_position_rpy = np.hstack((cartesian_position[0:3], tf.rotationVector2RollPitchYaw(cartesian_position[3:6])))
# Constantes a serem utilizadas
epsilon = 0.0001
quad_epsilon = epsilon*epsilon
joint_count = 5
max_interection = 5000
interection_count = 1
interection_count_joint = 1
direction = 1
min_step = 0.000017
max_step = 0.1
alpha_step = max_step
Radius = np.sqrt(cartesian_position[0:3].transpose()*cartesian_position[0:3])
joint_interact = np.zeros(6)
joint_interact += joint_analytic_IK
# Erros Iniciais
Error_Position = cartesian_position[0:3] - Initial_DK[0:3]
Mean_Position = np.mean(np.dot(Error_Position.transpose(),Error_Position))
Error_Rotation = tf.computeDifference(cartesian_position_rpy[3:6],Initial_DK[3:6], True)
Linear_Rotation_Error = Radius*Error_Rotation
Mean_Rotation = np.mean(np.dot(Linear_Rotation_Error,Linear_Rotation_Error.transpose()))
erro_quad = (Mean_Position + Mean_Rotation)/2
erro_quad_aux = erro_quad
# Correção numérica.
while erro_quad > quad_epsilon:
joint_interact[joint_count] = joint_interact[joint_count] + direction*alpha_step
self._effective_q = joint_interact + self.delta_standard_DH[3,:]
DK = self.ur5_direct_kinematics(np.squeeze(np.asarray(self._effective_q.transpose())), True)
DK[3:6] = rotationVector2RollPitchYaw(DK[3:6])
Error_Position = cartesian_position[0:3] - DK[0:3]
Mean_Position = np.mean(np.dot(Error_Position.transpose(),Error_Position))
Error_Rotation = computeDifference(cartesian_position_rpy[3:6],DK[3:6], True)
Linear_Rotation_Error = Radius*Error_Rotation
Mean_Rotation = np.mean(np.dot(Linear_Rotation_Error,Linear_Rotation_Error.transpose()))
erro_quad = (Mean_Position + Mean_Rotation)/2
if erro_quad > erro_quad_aux:
if interection_count_joint == 1:
direction = -1*direction
joint_interact[joint_count] = joint_interact[joint_count] + direction*alpha_step
interection_count_joint = 0
error_direction = erro_quad
else:
if alpha_step > min_step:
joint_interact[joint_count] = joint_interact[joint_count] - direction*alpha_step
alpha_step = alpha_step/2
interection_count_joint = 1
else:
joint_interact[joint_count] = joint_interact[joint_count] - direction*alpha_step
alpha_step = max_step
interection_count_joint = 1
joint_count -=1
if joint_count < 0:
joint_count = 5
interection_count +=1
else:
alpha_step = alpha_step/2
interection_count_joint = 1
erro_quad_aux = erro_quad
#if interection_count_joint == 1:
#if erro_quad < erro_quad_aux:
#erro_quad_aux = erro_quad
#interection_count_joint += 1
#joint_interact[joint_count] = joint_interact[joint_count] - direction*alpha_step
#alpha_step = alpha_step/2
#else:
#direction = -1*direction
#joint_interact[joint_count] = joint_interact[joint_count] + direction*alpha_step
#interection_count_joint += 1
#else:
#if erro_quad < erro_quad_aux:
#erro_quad_aux = erro_quad
#interection_count_joint += 1
#joint_interact[joint_count] = joint_interact[joint_count] - direction*alpha_step
#alpha_step = alpha_step/2
#else:
#if (alpha_step < 0.000017)
#joint_interact[joint_count] = joint_interact[joint_count] - direction*alpha_step
#alpha_step = alpha_step*2
#joint_interact[joint_count] = joint_interact[joint_count] + direction*alpha_step
#alpha_step = np.pi
#interection_count_joint = 1
#joint_count -=1
#if joint_count < 0:
#joint_count = 5
#interection_count +=1
#else:
#joint_interact[joint_count] = joint_interact[joint_count] - direction*alpha_step
#interection_count_joint = 1
#joint_count -=1
#if joint_count < 0:
#joint_count = 5
#interection_count +=1
if interection_count > max_interection:
print ("[ERROR] Maximum interations reached.")
break
t2 = time.clock()
print ("[INFO] CCD Total time: "+ str(t2 - t))
return joint_interact
def getMeanValueVector(self, vectorArray):
print("[INFO] Mean Value: Array, Mean, " + str(vectorArray) + ", " + str(np.mean(vectorArray, axis = 0, dtype=np.float64)))
def controlLoopTranspose(self, desiredPose, poseActual = None):
if (poseActual == None):
poseActual = self.getPosition()
poseActual[3:6] = tf.rotationVector2RollPitchYaw(poseActual[3:6])
poseActualFK = tf.pose2Matrix(poseActual)
desiredPoseFK = tf.pose2Matrix(desiredPose)
poseError = desiredPose[0:3] - poseActual[0:3]
rotationError = tf.matrix2Pose(np.dot(poseActualFK[0:3,0:3].transpose(),desiredPoseFK[0:3,0:3]), True)
if np.any(np.isnan(rotationError)):
np.nan_to_num(rotationError, False)
error = np.concatenate((poseError, rotationError),axis=0)[np.newaxis]
self.normErro = norm(poseError)
self.errorDB.append(error)
jacob = self.jacobian(self.getJointPosition()[np.newaxis].transpose(),(np.ones(6)*10e-3)[np.newaxis].transpose())
# Control
K = 0.5*np.eye(6,6)
jointControl = np.dot(np.dot(jacob.transpose(),K),error.transpose())
return np.squeeze(np.asarray(jointControl))
def controlLoopPseudoInverse(self, desiredPose, poseActual = None):
if (poseActual == None):
poseActual = self.getPosition()
poseActual[3:6] = tf.rotationVector2RollPitchYaw(poseActual[3:6])
poseActualFK = tf.pose2Matrix(poseActual)
desiredPoseFK = tf.pose2Matrix(desiredPose)
poseError = desiredPose[0:3] - poseActual[0:3]
rotationError = tf.matrix2Pose(np.dot(poseActualFK[0:3,0:3].transpose(),desiredPoseFK[0:3,0:3]), True)
if np.any(np.isnan(rotationError)):
np.nan_to_num(rotationError, False)
error = np.concatenate((poseError, rotationError),axis=0)[np.newaxis]
self.normErro = norm(poseError)
self.errorDB.append(error)
jacob = self.jacobian(self.getJointPosition()[np.newaxis].transpose(),(np.ones(6)*10e-3)[np.newaxis].transpose())
# Control
K = 0.5*np.eye(6,6)
jointControl = np.dot(np.dot(pinv(jacob),K),error.transpose())
return np.squeeze(np.asarray(jointControl))
def controlLoopInverse(self, desiredPose, poseActual = None):
if (poseActual == None):
poseActual = self.getPosition()
poseActual[3:6] = tf.rotationVector2RollPitchYaw(poseActual[3:6])
poseActual = self.getPosition()
poseActual[3:6] = tf.rotationVector2RollPitchYaw(poseActual[3:6])
poseActualFK = tf.pose2Matrix(poseActual)
desiredPoseFK = tf.pose2Matrix(desiredPose)
poseError = desiredPose[0:3] - poseActual[0:3]
rotationError = tf.matrix2Pose(np.dot(poseActualFK[0:3,0:3].transpose(),desiredPoseFK[0:3,0:3]), True)
if np.any(np.isnan(rotationError)):
np.nan_to_num(rotationError, False)
error = np.concatenate((poseError, rotationError),axis=0)[np.newaxis]
self.normErro = norm(poseError)
self.errorDB.append(error)
jacob = self.jacobian(self.getJointPosition()[np.newaxis].transpose(),(np.ones(6)*10e-6)[np.newaxis].transpose())
# Control
K = 0.5*np.eye(6,6)
jointControl = np.dot(np.dot(inv(jacob),K),error.transpose())
return np.squeeze(np.asarray(jointControl))
def controlLoopDLS(self, desiredPose, poseActual = None, step = 0.008, jointSpeedReference = np.array([0, 0, 0, 0, 0, 0]), cartesianSpeedReference = np.array([0, 0, 0, 0, 0, 0])):
if (poseActual == None):
poseActual = self.getPosition()
poseActual[3:6] = tf.rotationVector2RollPitchYaw(poseActual[3:6])
#print(self.getPosition())
#print(self.getJointPosition())
poseActual = self.getPosition()
poseActual[3:6] = tf.rotationVector2RollPitchYaw(poseActual[3:6])
poseActualFK = tf.pose2Matrix(poseActual)
desiredPoseFK = tf.pose2Matrix(desiredPose)
poseError = desiredPose[0:3] - poseActual[0:3]
rotationError = tf.matrix2Pose(np.dot(poseActualFK[0:3,0:3].transpose(),desiredPoseFK[0:3,0:3]), True)
if np.any(np.isnan(rotationError)):
print('[INFO][ControlLoopDLS] NaN found on control')
np.nan_to_num(rotationError, False)
# Error Calculation
#Kp
error = np.hstack((poseError, rotationError))
#Kd
error_D = (error - self.errorPrevious)/step
self.error_D_DB.append(error_D)
self.errorPrevious = error
errorFiltered = butter_lowpass_filter(np.asarray(self.error_D_DB, dtype=np.float32), 3, 125, order=2)
error_D = errorFiltered[errorFiltered.shape[0]-1]
#Ki
self.errorSum = self.errorSum + error
# for i in range(0,6):
# if (self.errorSum[i] > 0.1):
# self.errorSum[i] = 0.1
# elif(self.errorSum[i] < -0.1):
# self.errorSum[i] = -0.1
# print('Error Sum ' + str(self.errorSum))
# if (len(self.errorDB) > 1000):
# self.errorSum = self.errorSum - np.asarray(self.errorDB[len(self.errorDB) - 1000], dtype=np.float32)
#DB
self.normErro = norm(poseError)
self.errorDB.append(error)
#jacob = self.jacobian(self.getJointPosition()[np.newaxis].transpose(),(np.ones(6)*10e-6)[np.newaxis].transpose(), rpy = True)
#jacob = self.jacobian2(self.getJointPosition())
jacob = self.jacobianAnalytic(self.getJointPosition())
# Control
Kp = 5*np.eye(6,6) #10 #5
# Kp[0,0] = 1.5
# Kp[1,1] = 1.5
# Kp[2,2] = 1.5
# Kp[0,3] = 0.2#0.5
# Kp[0,4] = 0.1#0.5
# Kp[0,5] = 0.1#0.5
# Kp[1,3] = 0#0.5
# Kp[1,4] = 0#0.5
# Kp[1,5] = 0#0.5
# Kp[2,3] = 0#0.5
# Kp[2,4] = 0#0.5
# Kp[2,5] = 0#0.5
#Kp[3,3] = 16#0.5
# Kp[3,4] = 0#0.5
# Kp[3,5] = 0#0.5
# Kp[4,3] = 0#0.5
#Kp[4,4] = 16#0.5
# Kp[4,5] = 0#0.5
# Kp[5,3] = 0#0.5
# Kp[5,4] = 0#0.5
#Kp[5,5] = 16#0.5
Kd = 2*np.eye(6,6)
# Kd[3,3] = 0.1
# Kd[4,4] = 0.1
# Kd[5,5] = 0.1
Ki = 0.25*np.eye(6,6)
# Ki[3,3] = 0.00055 #0.55
# Ki[4,4] = 0.00055
# Ki[5,5] = 0.00055
# WindupUpperLimit = np.array([0.15, 0.15, 0.15, 0.15, 0.15, 0.15])
# WindupLowerLimit = -np.array([0.15, 0.15, 0.15, 0.15, 0.15, 0.15])
k0 = 0.01
w0 = 0.01
KpControl = np.dot(Kp,error.transpose())
KdControl = np.dot(Kd,error_D.transpose())
KiControl = np.dot(Ki,self.errorSum.transpose())
# print(KiControl)
# print('\n')
# for i in range(0,6):
# if (KiControl[i] > 0.02):
# KiControl[i] = 0.02
# elif(KiControl[i] < -0.02):
# KiControl[i] = -0.02
ControlSum = KpControl + cartesianSpeedReference #+ KiControl
t1 = time.perf_counter()
w = np.sqrt(np.linalg.det(np.dot(jacob,jacob.transpose())))
if (w < w0):
lamb = k0*(np.power((1 - (w/w0)),2))
print('[WARNING] Near Singularity: ' + str(w))
else:
lamb = 0
lamb2 = lamb*np.eye(6,6)
invJacob = np.dot(jacob.transpose(),inv(np.dot(jacob,jacob.transpose()) + lamb2))
t2 = time.perf_counter()
#t1 = time.perf_counter()
#invJacob = inv(jacob)
#t2 = time.perf_counter()
JacobianProcessTime = t2 - t1
self.processTimeList.append(JacobianProcessTime)
self.wDB.append(w)
#invJacob = jacob.transpose()
jointControl = np.dot(invJacob,ControlSum) #np.dot(np.dot(np.dot(jacob.transpose(),inv(np.dot(jacob,jacob.transpose()) + lamb2)),Kp),error.transpose())
#jointControl = jointControl + jointSpeedReference
# for i in range(0,6):
# if (jointControl[i] > WindupUpperLimit[i]):
# self.u[i] = WindupUpperLimit[i]
# elif(jointControl[i] < WindupLowerLimit[i]):
# self.u[i] = WindupLowerLimit[i]
# else:
# self.u[i] = jointControl[i]
# self.errorSaturation = jointControl - self.u
# print(self.errorSaturation)
# print('Error Sum windup' + str((np.dot(jacob,jointControl) - KpControl)/Ki[0,0]))
# for i in range(0,6):
# if (jointControl[i] > 0.4):
# jointControl[i] = 0.4
# elif (jointControl[i] < -0.4):
# jointControl[i] = -0.4
return np.squeeze(np.asarray(jointControl))
def speedTransform(self, desiredSpeed, q = None, step = 0.008):
if (q == None):
q = self.getJointPosition()
#jacobian = self.jacobian(self.getJointPosition()[np.newaxis].transpose(),(np.ones(6)*10e-6)[np.newaxis].transpose(), rpy = True)
#jacobian = self.jacobian2(q)
jacobian = self.jacobianAnalytic(q)
jointSpeed = np.dot(inv(jacobian),desiredSpeed.transpose())
return jointSpeed
def butter_lowpass(cutoff, fs, order=5):
nyq = 0.5 * fs
normal_cutoff = cutoff / nyq
b, a = butter(order, normal_cutoff, btype='low', analog=False)
return b, a
def butter_lowpass_filter(data, cutoff, fs, order=5):
b, a = butter_lowpass(cutoff, fs, order=order)
y = lfilter(b, a, data)
return y | 46.256866 | 222 | 0.524412 |
py as np
from numpy.linalg import inv
from numpy.linalg import norm
from numpy.linalg import pinv
from scipy.signal import butter,lfilter
from scipy.signal import freqz
import struct
import time
import csv
import Transformations as tf
import os
, 1.570796327, -1.570796327, 0], [.089159,0,0,.10915,.09465,.0823], [0, 0, 0, 0, 0, 0]])
_robot_data = []
_data_pack_max = 133
processTimeList = []
errorDB = []
error_D_DB = []
wDB = []
u = np.array([0, 0, 0, 0, 0, 0],dtype=np.float64)
errorSaturation = np.array([0, 0, 0, 0, 0, 0],dtype=np.float64)
errorPrevious = np.array([0, 0, 0, 0, 0, 0],dtype=np.float64)
errorSum = np.array([0, 0, 0, 0, 0, 0],dtype=np.float64)
normErro = np.zeros(6,dtype=np.float64)
    ## Constructor. Stores the DH calibration deltas and precomputes, for each
    # link, the constant part of its DH transform (Trans_d * Trans_a * Rot_x);
    # only the joint rotation about z varies per call and is applied in
    # ur5_direct_kinematics.
    # @param self The object pointer.
    # @param delta_DH 5x6 matrix of calibration offsets added to the standard DH table.
    def __init__(self, delta_DH = np.zeros((5,6))):
        self.delta_standard_DH = delta_DH
        # Effective (calibrated) DH parameters = nominal + delta.
        self._effective_a = self._standard_DH[0,:] + self.delta_standard_DH[0,:]
        self._effective_alpha = self._standard_DH[1,:] + self.delta_standard_DH[1,:]
        self._effective_d = self._standard_DH[2,:] + self.delta_standard_DH[2,:]
        self._effective_q = np.array(self._standard_DH[3,:] + self.delta_standard_DH[3,:])
        # Fixed rotations about x by each link twist alpha_i.
        Rot_x_1 = np.mat([[1, 0, 0, 0], [0, np.cos(self._effective_alpha[0,0]), -np.sin(self._effective_alpha[0,0]), 0], [0, np.sin(self._effective_alpha[0,0]), np.cos(self._effective_alpha[0,0]), 0], [ 0, 0, 0, 1]])
        Rot_x_2 = np.mat([[1, 0, 0, 0], [0, np.cos(self._effective_alpha[0,1]), -np.sin(self._effective_alpha[0,1]), 0], [0, np.sin(self._effective_alpha[0,1]), np.cos(self._effective_alpha[0,1]), 0], [ 0, 0, 0, 1]])
        Rot_x_3 = np.mat([[1, 0, 0, 0], [0, np.cos(self._effective_alpha[0,2]), -np.sin(self._effective_alpha[0,2]), 0], [0, np.sin(self._effective_alpha[0,2]), np.cos(self._effective_alpha[0,2]), 0], [ 0, 0, 0, 1]])
        Rot_x_4 = np.mat([[1, 0, 0, 0], [0, np.cos(self._effective_alpha[0,3]), -np.sin(self._effective_alpha[0,3]), 0], [0, np.sin(self._effective_alpha[0,3]), np.cos(self._effective_alpha[0,3]), 0], [ 0, 0, 0, 1]])
        Rot_x_5 = np.mat([[1, 0, 0, 0], [0, np.cos(self._effective_alpha[0,4]), -np.sin(self._effective_alpha[0,4]), 0], [0, np.sin(self._effective_alpha[0,4]), np.cos(self._effective_alpha[0,4]), 0], [ 0, 0, 0, 1]])
        Rot_x_6 = np.mat([[1, 0, 0, 0], [0, np.cos(self._effective_alpha[0,5]), -np.sin(self._effective_alpha[0,5]), 0], [0, np.sin(self._effective_alpha[0,5]), np.cos(self._effective_alpha[0,5]), 0], [ 0, 0, 0, 1]])
        # Fixed translations along z by each link offset d_i.
        Trans_d_1 = np.mat([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, self._effective_d[0,0]], [0, 0, 0, 1]])
        Trans_d_2 = np.mat([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, self._effective_d[0,1]], [0, 0, 0, 1]])
        Trans_d_3 = np.mat([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, self._effective_d[0,2]], [0, 0, 0, 1]])
        Trans_d_4 = np.mat([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, self._effective_d[0,3]], [0, 0, 0, 1]])
        Trans_d_5 = np.mat([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, self._effective_d[0,4]], [0, 0, 0, 1]])
        Trans_d_6 = np.mat([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, self._effective_d[0,5]], [0, 0, 0, 1]])
        # Fixed translations along x by each link length a_i.
        Trans_a_1 = np.mat([[1, 0, 0, self._effective_a[0,0]], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
        Trans_a_2 = np.mat([[1, 0, 0, self._effective_a[0,1]], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
        Trans_a_3 = np.mat([[1, 0, 0, self._effective_a[0,2]], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
        Trans_a_4 = np.mat([[1, 0, 0, self._effective_a[0,3]], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
        Trans_a_5 = np.mat([[1, 0, 0, self._effective_a[0,4]], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
        Trans_a_6 = np.mat([[1, 0, 0, self._effective_a[0,5]], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
        # Constant part of each link transform; the per-joint z rotation is
        # multiplied in by ur5_direct_kinematics at every call.
        self._A_0_1 = Trans_d_1 * Trans_a_1 * Rot_x_1
        self._A_0_2 = Trans_d_2 * Trans_a_2 * Rot_x_2
        self._A_0_3 = Trans_d_3 * Trans_a_3 * Rot_x_3
        self._A_0_4 = Trans_d_4 * Trans_a_4 * Rot_x_4
        self._A_0_5 = Trans_d_5 * Trans_a_5 * Rot_x_5
        self._A_0_6 = Trans_d_6 * Trans_a_6 * Rot_x_6
        return
size = len(data)
self._robot_data = []
self._robot_data.append(struct.unpack('!i', data[0:4]))
i = 4
while i < size:
self._robot_data.append(struct.unpack('!d', data[i:i+8])[0])
i += 8
if (size < (4+(34*8))):
print("[WARNING] Data size smaller than expected. Bytes: " + str(size))
return
self._effective_q = np.array(self._robot_data[32:38]) + self.delta_standard_DH[3,:]
return
def setRobotDataRTDE(self, data):
    """Copy one RTDE data packet into the internal ``_robot_data`` buffer.

    Also refreshes ``self._effective_q`` (measured joints plus the
    calibrated DH offsets from ``delta_standard_DH``).

    Args:
        data: RTDE record exposing timestamp / target / actual state
              fields (assumed to follow the UR RTDE field names —
              confirmed only by the attribute accesses below).
    """
    # (buffer slot, packet value, dtype) triplets mirroring the layout
    # used elsewhere for the raw real-time interface buffer.
    updates = (
        (1, data.timestamp, np.float64),
        (slice(2, 8), data.target_q, np.float64),
        (slice(8, 14), data.target_qd, np.float64),
        (slice(32, 38), data.actual_q, np.float64),
        (slice(38, 44), data.actual_qd, np.float64),
        (slice(56, 62), data.actual_TCP_pose, np.float64),
        (slice(62, 68), data.actual_TCP_speed, np.float64),
        (slice(68, 74), data.actual_TCP_force, np.float64),
        (slice(74, 80), data.target_TCP_pose, np.float64),
        (slice(80, 86), data.target_TCP_speed, np.float64),
        (102, data.safety_mode, np.int32),
        (132, data.runtime_state, np.uint32),
    )
    for slot, value, dtype in updates:
        self._robot_data[slot] = np.asarray(value, dtype=dtype)
    # Effective joint vector = measured joints + DH theta offsets.
    self._effective_q = np.asarray(data.actual_q) + self.delta_standard_DH[3,:]
    return
if self._robot_data[102] == 1:
safety = True
else:
safety = False
return safety
except:
print("[ERROR] Could not find Robot Data!")
return None
if self._robot_data[132] == 1:
state = True
else:
state = False
return state
except:
print("[ERROR] Could not find Robot Data!")
return None
if size == self._datapackmax:
print("[INFO] Message Size in Bytes: " + str(self._robot_data[0]))
print("[INFO] Time: " + str(self._robot_data[1]))
print("[INFO] q target" + str(self._robot_data[2:8]))
print("[INFO] qd target" + str(self._robot_data[8:14]))
print("[INFO] qdd target" + str(self._robot_data[14:20]))
print("[INFO] I target" + str(self._robot_data[20:26]))
print("[INFO] M target" + str(self._robot_data[26:32]))
print("[INFO] q actual" + str(self._robot_data[32:38]))
print("[INFO] qd actual" + str(self._robot_data[38:44]))
print("[INFO] I actual" + str(self._robot_data[44:50]))
print("[INFO] I control" + str(self._robot_data[50:56]))
print("[INFO] Tool Vector Actual" + str(self._robot_data[56:62]))
print("[INFO] TCP Speed Actual" + str(self._robot_data[62:68]))
print("[INFO] TCP Force" + str(self._robot_data[68:74]))
print("[INFO] Tool Vector Target" + str(self._robot_data[74:80]))
print("[INFO] TCP Speed Target" + str(self._robot_data[80:86]))
print("[INFO] digital input bits" + str(self._robot_data[86]))
print("[INFO] Motor Temperatures" + str(self._robot_data[87:93]))
print("[INFO] Controller Timer" + str(self._robot_data[93]))
print("[INFO] Test Value" + str(self._robot_data[94]))
print("[INFO] Robot Mode" + str(self._robot_data[95]))
print("[INFO] Joint Modes" + str(self._robot_data[96:102]))
print("[INFO] Safety Mode" + str(self._robot_data[102]))
print("[INFO] Tool Acceleration Values" + str(self._robot_data[109:112]))
print("[INFO] Speed Scaling" + str(self._robot_data[118]))
print("[INFO] Linear Momentum Norm" + str(self._robot_data[119]))
print("[INFO] V Main" + str(self._robot_data[122]))
print("[INFO] V Robot" + str(self._robot_data[123]))
print("[INFO] I Robot" + str(self._robot_data[124]))
print("[INFO] V actual" + str(self._robot_data[125:131]))
print("[INFO] Digital Outputs" + str(self._robot_data[131]))
print("[INFO] Program State" + str(self._robot_data[132]))
else:
print("[WARNING] Size of data smaller than expected: ", size)
return
ay = np.array(self._robot_data[74:80])
return array
except:
print("[ERROR] Could not find Robot Data!")
return None
def getPosition(self):
    """Return the actual TCP pose (x, y, z, rx, ry, rz).

    Returns:
        ndarray copy of ``_robot_data[56:62]``, or ``None`` when the
        robot-data buffer is missing or incomplete.
    """
    try:
        return np.array(self._robot_data[56:62])
    except Exception:
        # Narrowed from a bare ``except:`` so that KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        print("[ERROR] Could not find Robot Data!")
        return None
elf._robot_data[62:68])
return array
except:
print("[ERROR] Could not find Robot Data!")
return None
rray(self._robot_data[80:86])
return array
except:
print("[ERROR] Could not find Robot Data!")
return None
v = np.sqrt(self._robot_data[62]*self._robot_data[62] + self._robot_data[63]*self._robot_data[63] + self._robot_data[64]*self._robot_data[64])
return v
except:
print("[ERROR] Could not find Robot Data!")
return None
try:
array = np.array(self._robot_data[32:38])
return array
except:
print("[ERROR] Could not find Robot Data!")
return None
try:
array = np.array(self._robot_data[2:8])
return array
except:
print("[ERROR] Could not find Robot Data!")
return None
ry:
array = np.array(self._robot_data[38:44])
return array
except:
print("[ERROR] Could not find Robot Data!")
return None
try:
array = np.array(self._robot_data[8:14])
return array
except:
print("[ERROR] Could not find Robot Data!")
return None
def getTCPForce(self):
    """Return the generalized force at the TCP (Fx, Fy, Fz, Mx, My, Mz).

    Returns:
        ndarray copy of ``_robot_data[68:74]``, or ``None`` when the
        robot-data buffer is missing or incomplete.
    """
    try:
        return np.array(self._robot_data[68:74])
    except Exception:
        # Narrowed from a bare ``except:`` (consistent with getPosition)
        # so that KeyboardInterrupt/SystemExit propagate.
        print("[ERROR] Could not find Robot Data!")
        return None
rn self._robot_data[1]
ply_offset == True):
q = np.squeeze(np.asarray(q + self.delta_standard_DH[3,:]))
_rot_z_1 = np.mat([[np.cos(q[0]), -np.sin(q[0]), 0, 0],[np.sin(q[0]), np.cos(q[0]), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
_rot_z_2 = np.mat([[np.cos(q[1]), -np.sin(q[1]), 0, 0],[np.sin(q[1]), np.cos(q[1]), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
_rot_z_3 = np.mat([[np.cos(q[2]), -np.sin(q[2]), 0, 0],[np.sin(q[2]), np.cos(q[2]), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
_rot_z_4 = np.mat([[np.cos(q[3]), -np.sin(q[3]), 0, 0],[np.sin(q[3]), np.cos(q[3]), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
_rot_z_5 = np.mat([[np.cos(q[4]), -np.sin(q[4]), 0, 0],[np.sin(q[4]), np.cos(q[4]), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
_rot_z_6 = np.mat([[np.cos(q[5]), -np.sin(q[5]), 0, 0],[np.sin(q[5]), np.cos(q[5]), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
self._A_1 = _rot_z_1 * self._A_0_1
self._A_2 = _rot_z_2 * self._A_0_2
self._A_3 = _rot_z_3 * self._A_0_3
self._A_4 = _rot_z_4 * self._A_0_4
self._A_5 = _rot_z_5 * self._A_0_5
self._A_6 = _rot_z_6 * self._A_0_6
self._H = self._A_1 * self._A_2 * self._A_3 * self._A_4 * self._A_5 * self._A_6
if (vector == False):
return self._H
else:
vetor = tf.matrix2RotationVector(self._H[0:3,0:3])
array = np.array([self._H[0,3], self._H[1,3], self._H[2,3]], float)
vetor = np.hstack((array,vetor))
if (rpy == False):
return vetor
else:
vetor[3:6] = tf.rotationVector2RollPitchYaw(vetor[3:6])
return vetor
def verifyDelta(self, epsilon = 10e-6):
    """Validate the calibrated DH delta matrix.

    Compares the forward kinematics (computed with the delta applied)
    against the TCP pose reported by the robot.

    Args:
        epsilon: positional error tolerance (note: ``10e-6`` is 1e-5).

    Returns:
        True when the position error is below ``epsilon``.
    """
    computed_pose = self.ur5_direct_kinematics(self.getJointPosition(), vector = True, apply_offset = True)
    reported_pose = self.getPosition()
    difference = tf.computeDifference(reported_pose, computed_pose)
    print("[INFO] Direct Kinematics calculated with Delta: " + str(computed_pose))
    print("[INFO] Direct Kinematics real: " + str(reported_pose))
    # Only the translational part of the difference is checked.
    positional_error = norm(difference[0:3])
    print("[INFO] Error: ", positional_error)
    is_valid = positional_error < epsilon
    if is_valid:
        print("[INFO] Correct Delta Matrix!")
    else:
        print("[WARNING] Incorrect Delta Matrix!")
    return is_valid
def _DH(self, a, alpha, d, theta):
    """Homogeneous transform of one link from standard DH parameters.

    Composition (standard convention):
    ``Trans_z(d) * Rot_z(theta) * Trans_x(a) * Rot_x(alpha)``.

    Returns:
        4x4 numpy matrix.
    """
    trans_d = np.asmatrix(np.eye(4))
    trans_d[2, 3] = d
    trans_a = np.asmatrix(np.eye(4))
    trans_a[0, 3] = a

    def _embed(rot3):
        # Promote a 3x3 rotation to a 4x4 homogeneous matrix.
        hom = np.asmatrix(np.eye(4))
        hom[0:3, 0:3] = rot3[0:3, 0:3]
        return hom

    rot_theta = _embed(tf.Rot_z(theta))
    rot_alpha = _embed(tf.Rot_x(alpha))
    return trans_d * rot_theta * trans_a * rot_alpha
def _analytic_ur5_inverse_kinematics(self, p):
    """Closed-form (analytic) inverse kinematics of the UR5.

    Args:
        p: target TCP pose ``[x, y, z, rx, ry, rz]``; the last three
           components are a rotation vector.

    Returns:
        6x8 ndarray with one candidate joint solution per column, all
        angles wrapped to ``[-pi, pi]``, or ``None`` when a required
        matrix inversion fails.
    """
    # Desired pose as a homogeneous transform.
    rvMatrix = tf.rotationVector2Matrix(p[3:6])
    gd = np.mat(([[rvMatrix[0,0], rvMatrix[0,1], rvMatrix[0,2], p[0]], [rvMatrix[1,0], rvMatrix[1,1], rvMatrix[1,2], p[1]], [rvMatrix[2,0], rvMatrix[2,1], rvMatrix[2,2], p[2]], [0, 0, 0, 1]]))
    theta = np.zeros((6, 8))
    # Standard DH parameters: d = link offsets, a = link lengths,
    # alpha = link twists.
    d1 = self._standard_DH[2,0]
    d2 = self._standard_DH[2,1]
    d3 = self._standard_DH[2,2]
    d4 = self._standard_DH[2,3]
    d5 = self._standard_DH[2,4]
    d6 = self._standard_DH[2,5]
    a1 = self._standard_DH[0,0]
    a2 = self._standard_DH[0,1]
    a3 = self._standard_DH[0,2]
    a4 = self._standard_DH[0,3]
    a5 = self._standard_DH[0,4]
    a6 = self._standard_DH[0,5]
    alpha1 = self._standard_DH[1,0]
    alpha2 = self._standard_DH[1,1]
    alpha3 = self._standard_DH[1,2]
    alpha4 = self._standard_DH[1,3]
    alpha5 = self._standard_DH[1,4]
    alpha6 = self._standard_DH[1,5]
    # ---- Theta 1: two branches (columns 0-3 and 4-7). ----
    p05 = gd * np.mat([[0], [0], [-d6], [1]])
    p05 = p05 - np.mat([[0], [0], [0], [1]])
    psi = np.arctan2(p05[1], p05[0])
    p05xy = np.sqrt(p05[1]*p05[1] + p05[0]*p05[0])
    if (d4 > p05xy):
        print ("[WARNING] No solution for Theta1: d4 > P05xy")
        print ("[WARNING] Creating aproximation highly inaccurate")
        d4 = p05xy - 1e-10
    try:
        phi = np.arccos(d4 / p05xy)
    except Exception:
        print("[ERROR] Division by zero: " + str(p05xy))
        return None
    theta[0, 0:4] = np.radians(90) + psi + phi
    theta[0, 4:8] = np.radians(90) + psi - phi
    theta = np.real(theta)
    # ---- Theta 5: two solutions per theta1 branch. ----
    cols = np.array([0, 4])
    for i in range(0, cols.size):
        c = cols[i]
        try:
            T10 = inv(self._DH(a1, alpha1, d1, theta[0,c]))
        except Exception:
            print("[ERROR] Could not find inverse: " + str(self._DH(a1, alpha1, d1, theta[0,c])))
            return None
        T16 = T10 * gd
        p16z = T16[2,3]
        try:
            if (((p16z-d4)/d6) > 1):
                print ("[WARNING] No solution for Theta5: (p16z-d4)/d6) > 1")
                print ("[WARNING] Creating aproximation highly inaccurate")
                d6 = (p16z-d4) + 1e-10
            t5 = np.arccos((p16z-d4)/d6)
        except Exception:
            print("[ERROR] Division by zero: " + str(d6))
            return None
        theta[4, c:c+2] = t5
        theta[4, c+2:c+4] = -t5
    theta = np.real(theta)
    # ---- Theta 6 ----
    cols = np.array([0, 2, 4, 6])
    for i in range(0, cols.size):
        c = cols[i]
        T01 = self._DH(a1, alpha1, d1, theta[0,c])
        try:
            T61 = inv(gd) * T01
        except Exception:
            print("[ERROR] Could not find inverse: " + str(gd))
            return None
        T61zy = T61[1, 2]
        T61zx = T61[0, 2]
        t5 = theta[4, c]
        if (np.sin(t5) == 0):
            # Wrist singularity: theta6 is free, pick 0.
            theta[5, c:c+2] = 0
        else:
            theta[5, c:c+2] = np.arctan2(-T61zy/np.sin(t5), T61zx/np.sin(t5))
    theta = np.real(theta)
    # ---- Theta 3 (elbow): two solutions per branch. ----
    # BUGFIX: the original used ``except T10:`` / ``except T65:`` /
    # ``except T54:`` — matrices are not exception classes, so any real
    # inversion failure raised a TypeError instead of being handled.
    cols = np.array([0, 2, 4, 6])
    for i in range(0, cols.size):
        c = cols[i]
        try:
            T10 = inv(self._DH(a1, alpha1, d1, theta[0,c]))
            T65 = inv(self._DH(a6, alpha6, d6, theta[5,c]))
            T54 = inv(self._DH(a5, alpha5, d5, theta[4,c]))
        except Exception as exc:
            print("[ERROR] Could not find inverse: Theta3, " + str(exc))
            return None
        T14 = T10 * gd * T65 * T54
        p13 = T14 * np.mat([[0], [-d4], [0], [1]])
        p13 = p13 - np.mat([[0], [0], [0], [1]])
        p13norm2 = norm(p13) * norm(p13)
        # Law-of-cosines argument; clamp to [-1, 1] on unreachable poses.
        arg = (p13norm2-a2*a2-a3*a3)/(2*a2*a3)
        if (arg > 1 or arg < -1):
            print ("[WARNING] No solution for Theta3: arg < -1 or arg > 1")
            print ("[WARNING] Creating aproximation highly inaccurate")
            if (arg > 1):
                arg = 1 - 1e-10
            else:
                arg = -1 + 1e-10
        t3p = np.arccos(arg)
        theta[2, c] = t3p
        theta[2, c+1] = -t3p
    theta = np.real(theta)
    # ---- Theta 2 and Theta 4: one per column. ----
    cols = np.array([0, 1, 2, 3, 4, 5, 6, 7])
    for i in range(0, cols.size):
        c = cols[i]
        try:
            T10 = inv(self._DH(a1, alpha1, d1, theta[0,c]))
            T65 = inv(self._DH(a6, alpha6, d6, theta[5,c]))
            T54 = inv(self._DH(a5, alpha5, d5, theta[4,c]))
        except Exception as exc:
            print("[ERROR] Could not find inverse: Theta2, " + str(exc))
            return None
        T14 = T10 * gd * T65 * T54
        p13 = T14 * np.mat([[0], [-d4], [0], [1]]) - np.mat([[0], [0], [0], [1]])
        p13norm = norm(p13)
        theta[1, c] = -np.arctan2(p13[1], -p13[0])+np.arcsin(a3*np.sin(theta[2,c])/p13norm)
        try:
            T32 = inv(self._DH(a3, alpha3, d3, theta[2,c]))
            T21 = inv(self._DH(a2, alpha2, d2, theta[1,c]))
        except Exception as exc:
            print("[ERROR] Could not find inverse: Theta4, " + str(exc))
            return None
        T34 = T32 * T21 * T14
        theta[3, c] = np.arctan2(T34[1,0], T34[0,0])
    theta = np.real(theta)
    # Wrap every angle of every solution to [-pi, pi].
    # BUGFIX: the original ranges (0,5)/(0,7) skipped the last joint row
    # and the last solution column, leaving them unwrapped.
    for i in range(0, 6):
        for j in range(0, 8):
            if theta[i,j] > np.pi:
                theta[i,j] -= 2*np.pi
            elif theta[i,j] < -np.pi:
                theta[i,j] += 2*np.pi
    return theta
eta, rpy = False):
jacobian_matrix = np.zeros((6,6))
FK_init = self.ur5_direct_kinematics(np.squeeze(np.asarray(q_Past.transpose() + self.delta_standard_DH[3,:])), vector = True, rpy = rpy)
step = deltaTheta
NaN_check = False
for i in range(0,6):
q_aux = np.array([[0],[0],[0],[0],[0],[0]], float)
q_aux[i] += step[i]
q_aux = q_Past + q_aux
q_aux = np.squeeze(np.asarray(q_aux.transpose() + self.delta_standard_DH[3,:]))
FK_next = self.ur5_direct_kinematics(q_aux, vector = True, rpy = rpy)
jacobian_matrix[i,:] = (tf.computeDifference(FK_next, FK_init)/(step[i]))
if(np.any(np.isnan(jacobian_matrix[i,:]))):
jacobian_matrix[i,:] = np.zeros(6)
NaN_check = True
if(NaN_check):
print("[WARNING] NaN found on Jacobian.")
return jacobian_matrix.transpose()
def jacobian2(self, q):
    """Compute the 6x6 geometric Jacobian at joint configuration ``q``.

    Runs the direct kinematics first so that ``self._A_1 .. self._A_5``
    and ``self._H`` hold fresh per-link and full transforms, then builds
    each column i as ``[z_i x (p_ee - p_i); z_i]`` where z_i is the
    rotation axis of joint i and p_i its origin.

    Args:
        q: joint position vector; the DH offset ``delta_standard_DH[3,:]``
           is added before evaluating the kinematics.

    Returns:
        6x6 ndarray; rows 0-2 hold the linear part, rows 3-5 the
        angular part.
    """
    jacobian_matrix = np.zeros((6,6))
    # Refresh self._A_* / self._H for the offset-corrected configuration.
    self.ur5_direct_kinematics(np.squeeze(np.asarray(q.transpose() + self.delta_standard_DH[3,:])))
    # z axis of a joint frame expressed in that frame (column vector).
    auxRow = np.array([[0],[0],[1]])
    # Column 0: base frame (identity orientation, origin at the base).
    jacobian_matrix[0:3,0] = np.cross(np.dot(np.eye(3),auxRow),self._H[0:3,3],axisa=0,axisb=0,axisc=1)
    jacobian_matrix[3:6,0] = np.dot(np.eye(3),auxRow).transpose()
    # Column 1: frame after link 1.
    jacobian_matrix[0:3,1] = np.cross(np.dot(self._A_1[0:3,0:3],auxRow),(self._H[0:3,3] - self._A_1[0:3,3]),axisa=0,axisb=0,axisc=1)
    jacobian_matrix[3:6,1] = np.dot(self._A_1[0:3,0:3],auxRow).transpose()
    # Columns 2-5: accumulate the chain transform link by link.
    aux = self._A_1 * self._A_2
    jacobian_matrix[0:3,2] = np.cross(np.dot(aux[0:3,0:3],auxRow),(self._H[0:3,3] - aux[0:3,3]),axisa=0,axisb=0,axisc=1)
    jacobian_matrix[3:6,2] = np.dot(aux[0:3,0:3],auxRow).transpose()
    aux = aux * self._A_3
    jacobian_matrix[0:3,3] = np.cross(np.dot(aux[0:3,0:3],auxRow),(self._H[0:3,3] - aux[0:3,3]),axisa=0,axisb=0,axisc=1)
    jacobian_matrix[3:6,3] = np.dot(aux[0:3,0:3],auxRow).transpose()
    aux = aux * self._A_4
    jacobian_matrix[0:3,4] = np.cross(np.dot(aux[0:3,0:3],auxRow),(self._H[0:3,3] - aux[0:3,3]),axisa=0,axisb=0,axisc=1)
    jacobian_matrix[3:6,4] = np.dot(aux[0:3,0:3],auxRow).transpose()
    aux = aux * self._A_5
    jacobian_matrix[0:3,5] = np.cross(np.dot(aux[0:3,0:3],auxRow),(self._H[0:3,3] - aux[0:3,3]),axisa=0,axisb=0,axisc=1)
    jacobian_matrix[3:6,5] = np.dot(aux[0:3,0:3],auxRow).transpose()
    return jacobian_matrix
def jacobianEndEffectorReference(self,jacobian):
    """Re-express the angular rows of ``jacobian`` in the end-effector
    frame, using the orientation from the last forward-kinematics run
    (``self._H``).

    Returns:
        6x6 ndarray with the rotational block transformed.
    """
    ee_rotation = self._H[0:3, 0:3]
    frame_change = np.eye(6)
    # Only the rotational 3x3 block changes frame; the linear block
    # keeps the base-frame orientation.
    frame_change[3:6, 3:6] = ee_rotation.transpose()
    return np.dot(frame_change, jacobian)
def jacobianAnalytic(self, q):
    """Analytic Jacobian: the geometric Jacobian rotated into the
    end-effector frame.

    Args:
        q: joint position vector (without DH offsets).

    Returns:
        6x6 Jacobian expressed in the end-effector frame.
    """
    # Run FK first (with calibration offsets and RPY output) so that
    # self._H is fresh; the returned pose itself is not used here.
    offset_q = np.squeeze(np.asarray(q.transpose() + self.delta_standard_DH[3,:]))
    self.ur5_direct_kinematics(offset_q, vector = True, rpy = True)
    return self.jacobianEndEffectorReference(self.jacobian2(q))
py = False):
if (rpy == True):
cartesian_position[3:6] = tf.rollPitchYaw2RotationVector(cartesian_position[3:6])
if (np.all(theta == 0)):
theta = self._analytic_ur5_inverse_kinematics(cartesian_position)
joint_analytic_IK = theta[:,chosen_theta]
else:
joint_analytic_IK = theta
NaN_check = np.isnan(joint_analytic_IK)
if (np.any(NaN_check)):
joint_analytic_IK = self.getJointPosition()
print ("[WARNING] Nan position found in analytic IK solution, using Actual Joint Position as start position.")
q_i = np.array([0,0,0,0,0,0], float)
q_i += joint_analytic_IK
joint_analytic_IK = joint_analytic_IK + self.delta_standard_DH[3,:]
joint_analytic_IK = np.squeeze(np.asarray(joint_analytic_IK))
FK = self.ur5_direct_kinematics(joint_analytic_IK, True)
cartesian_position_rpy = cartesian_position
erro = tf.computeDifference(cartesian_position_rpy, FK)
norm_erro = norm(erro)
episilon = 0.0001*0.0001
max_iteractions = 500
iteraction = 1
q_i = np.array([[q_i[0]], [q_i[1]],[q_i[2]], [q_i[3]],[q_i[4]], [q_i[5]]])
erro = np.array([[erro[0]], [erro[1]],[erro[2]], [erro[3]],[erro[4]], [erro[5]]])
delta_theta = np.ones(6)*0.000006
delta_theta = np.array([[delta_theta[0]], [delta_theta[1]],[delta_theta[2]], [delta_theta[3]],[delta_theta[4]], [delta_theta[5]]])
while (norm_erro > episilon):
j = self.jacobian(q_i, delta_theta)
try:
jt = pinv(j)
except:
print("[WARNING] Pseudo Inverse with SVD diverged")
jt = np.dot(j.transpose(),inv(np.dot(j,j.transpose())))
q_in = np.array([[0],[0],[0],[0],[0],[0]], float)
q_in = q_i + np.dot(jt,erro)
delta_theta = q_in - q_i
q_i = np.array([[0],[0],[0],[0],[0],[0]], float)
q_i += q_in
q_i = np.squeeze(np.asarray(q_i.transpose()))
FK = self.ur5_direct_kinematics(np.squeeze(np.asarray(q_i + self.delta_standard_DH[3,:])), True)
erro = tf.computeDifference(cartesian_position_rpy, FK)
norm_erro = norm(erro)
erro = np.array([[erro[0]], [erro[1]],[erro[2]], [erro[3]],[erro[4]], [erro[5]]])
q_i = np.array([[q_i[0]], [q_i[1]],[q_i[2]], [q_i[3]],[q_i[4]], [q_i[5]]])
iteraction += 1
if (iteraction > max_iteractions):
print ("[ERROR] Maximum interactions reached.")
break
q_i = q_i.transpose()
q_aux = np.array([q_i[0,0],q_i[0,1],q_i[0,2],q_i[0,3],q_i[0,4],q_i[0,5]], float)
return q_aux
theta = self._analytic_ur5_inverse_kinematics(cartesian_position)
joint_analytic_IK = theta[:,chosen_theta]
self._effective_q = joint_analytic_IK + self.delta_standard_DH[3,:]
Initial_DK = self.ur5_direct_kinematics(np.squeeze(np.asarray(self._effective_q.transpose())), True)
Initial_DK[3:6] = tf.rotationVector2RollPitchYaw(Initial_DK[3:6])
cartesian_position_rpy = np.hstack((cartesian_position[0:3], tf.rotationVector2RollPitchYaw(cartesian_position[3:6])))
epsilon = 0.0001
quad_epsilon = epsilon*epsilon
joint_count = 5
max_interection = 5000
interection_count = 1
interection_count_joint = 1
direction = 1
min_step = 0.000017
max_step = 0.1
alpha_step = max_step
Radius = np.sqrt(cartesian_position[0:3].transpose()*cartesian_position[0:3])
joint_interact = np.zeros(6)
joint_interact += joint_analytic_IK
Error_Position = cartesian_position[0:3] - Initial_DK[0:3]
Mean_Position = np.mean(np.dot(Error_Position.transpose(),Error_Position))
Error_Rotation = tf.computeDifference(cartesian_position_rpy[3:6],Initial_DK[3:6], True)
Linear_Rotation_Error = Radius*Error_Rotation
Mean_Rotation = np.mean(np.dot(Linear_Rotation_Error,Linear_Rotation_Error.transpose()))
erro_quad = (Mean_Position + Mean_Rotation)/2
erro_quad_aux = erro_quad
while erro_quad > quad_epsilon:
joint_interact[joint_count] = joint_interact[joint_count] + direction*alpha_step
self._effective_q = joint_interact + self.delta_standard_DH[3,:]
DK = self.ur5_direct_kinematics(np.squeeze(np.asarray(self._effective_q.transpose())), True)
DK[3:6] = rotationVector2RollPitchYaw(DK[3:6])
Error_Position = cartesian_position[0:3] - DK[0:3]
Mean_Position = np.mean(np.dot(Error_Position.transpose(),Error_Position))
Error_Rotation = computeDifference(cartesian_position_rpy[3:6],DK[3:6], True)
Linear_Rotation_Error = Radius*Error_Rotation
Mean_Rotation = np.mean(np.dot(Linear_Rotation_Error,Linear_Rotation_Error.transpose()))
erro_quad = (Mean_Position + Mean_Rotation)/2
if erro_quad > erro_quad_aux:
if interection_count_joint == 1:
direction = -1*direction
joint_interact[joint_count] = joint_interact[joint_count] + direction*alpha_step
interection_count_joint = 0
error_direction = erro_quad
else:
if alpha_step > min_step:
joint_interact[joint_count] = joint_interact[joint_count] - direction*alpha_step
alpha_step = alpha_step/2
interection_count_joint = 1
else:
joint_interact[joint_count] = joint_interact[joint_count] - direction*alpha_step
alpha_step = max_step
interection_count_joint = 1
joint_count -=1
if joint_count < 0:
joint_count = 5
interection_count +=1
else:
alpha_step = alpha_step/2
interection_count_joint = 1
erro_quad_aux = erro_quad
if interection_count > max_interection:
print ("[ERROR] Maximum interations reached.")
break
t2 = time.clock()
print ("[INFO] CCD Total time: "+ str(t2 - t))
return joint_interact
def getMeanValueVector(self, vectorArray):
    """Log an array together with its column-wise mean (debug helper)."""
    column_mean = np.mean(vectorArray, axis = 0, dtype=np.float64)
    print("[INFO] Mean Value: Array, Mean, " + str(vectorArray) + ", " + str(column_mean))
def controlLoopTranspose(self, desiredPose, poseActual = None):
    """One step of a Jacobian-transpose Cartesian control loop.

    Args:
        desiredPose: target pose [x, y, z, roll, pitch, yaw].
        poseActual: current pose in the same convention; fetched from
            the robot (and converted to RPY) when None.

    Returns:
        6-vector of joint velocity commands.
    """
    # BUGFIX: ``poseActual == None`` compares elementwise for ndarray
    # arguments and raises "truth value of an array is ambiguous".
    if poseActual is None:
        poseActual = self.getPosition()
        poseActual[3:6] = tf.rotationVector2RollPitchYaw(poseActual[3:6])
    poseActualFK = tf.pose2Matrix(poseActual)
    desiredPoseFK = tf.pose2Matrix(desiredPose)
    poseError = desiredPose[0:3] - poseActual[0:3]
    # Orientation error as the relative rotation actual -> desired.
    rotationError = tf.matrix2Pose(np.dot(poseActualFK[0:3,0:3].transpose(),desiredPoseFK[0:3,0:3]), True)
    if np.any(np.isnan(rotationError)):
        # In-place NaN scrub (copy=False).
        np.nan_to_num(rotationError, False)
    error = np.concatenate((poseError, rotationError),axis=0)[np.newaxis]
    self.normErro = norm(poseError)
    self.errorDB.append(error)
    jacob = self.jacobian(self.getJointPosition()[np.newaxis].transpose(),(np.ones(6)*10e-3)[np.newaxis].transpose())
    # Proportional gain.
    K = 0.5*np.eye(6,6)
    jointControl = np.dot(np.dot(jacob.transpose(),K),error.transpose())
    return np.squeeze(np.asarray(jointControl))
def controlLoopPseudoInverse(self, desiredPose, poseActual = None):
    """One step of a Jacobian-pseudo-inverse Cartesian control loop.

    Args:
        desiredPose: target pose [x, y, z, roll, pitch, yaw].
        poseActual: current pose in the same convention; fetched from
            the robot (and converted to RPY) when None.

    Returns:
        6-vector of joint velocity commands.
    """
    # BUGFIX: ``poseActual == None`` compares elementwise for ndarray
    # arguments and raises "truth value of an array is ambiguous".
    if poseActual is None:
        poseActual = self.getPosition()
        poseActual[3:6] = tf.rotationVector2RollPitchYaw(poseActual[3:6])
    poseActualFK = tf.pose2Matrix(poseActual)
    desiredPoseFK = tf.pose2Matrix(desiredPose)
    poseError = desiredPose[0:3] - poseActual[0:3]
    rotationError = tf.matrix2Pose(np.dot(poseActualFK[0:3,0:3].transpose(),desiredPoseFK[0:3,0:3]), True)
    if np.any(np.isnan(rotationError)):
        # In-place NaN scrub (copy=False).
        np.nan_to_num(rotationError, False)
    error = np.concatenate((poseError, rotationError),axis=0)[np.newaxis]
    self.normErro = norm(poseError)
    self.errorDB.append(error)
    jacob = self.jacobian(self.getJointPosition()[np.newaxis].transpose(),(np.ones(6)*10e-3)[np.newaxis].transpose())
    # Proportional gain.
    K = 0.5*np.eye(6,6)
    jointControl = np.dot(np.dot(pinv(jacob),K),error.transpose())
    return np.squeeze(np.asarray(jointControl))
def controlLoopInverse(self, desiredPose, poseActual = None):
    """One step of a Jacobian-inverse Cartesian control loop.

    Args:
        desiredPose: target pose [x, y, z, roll, pitch, yaw].
        poseActual: current pose in the same convention; fetched from
            the robot (and converted to RPY) when None.

    Returns:
        6-vector of joint velocity commands.
    """
    # BUGFIX 1: ``poseActual == None`` raised for ndarray arguments.
    # BUGFIX 2: the original unconditionally re-fetched getPosition()
    # after the if-block, silently discarding a caller-supplied pose.
    if poseActual is None:
        poseActual = self.getPosition()
        poseActual[3:6] = tf.rotationVector2RollPitchYaw(poseActual[3:6])
    poseActualFK = tf.pose2Matrix(poseActual)
    desiredPoseFK = tf.pose2Matrix(desiredPose)
    poseError = desiredPose[0:3] - poseActual[0:3]
    rotationError = tf.matrix2Pose(np.dot(poseActualFK[0:3,0:3].transpose(),desiredPoseFK[0:3,0:3]), True)
    if np.any(np.isnan(rotationError)):
        # In-place NaN scrub (copy=False).
        np.nan_to_num(rotationError, False)
    error = np.concatenate((poseError, rotationError),axis=0)[np.newaxis]
    self.normErro = norm(poseError)
    self.errorDB.append(error)
    jacob = self.jacobian(self.getJointPosition()[np.newaxis].transpose(),(np.ones(6)*10e-6)[np.newaxis].transpose())
    # Proportional gain.
    K = 0.5*np.eye(6,6)
    jointControl = np.dot(np.dot(inv(jacob),K),error.transpose())
    return np.squeeze(np.asarray(jointControl))
def controlLoopDLS(self, desiredPose, poseActual = None, step = 0.008, jointSpeedReference = np.array([0, 0, 0, 0, 0, 0]), cartesianSpeedReference = np.array([0, 0, 0, 0, 0, 0])):
    """One step of a damped-least-squares (DLS) Cartesian control loop.

    Args:
        desiredPose: target pose [x, y, z, roll, pitch, yaw].
        poseActual: current pose; fetched from the robot when None.
        step: control period in seconds (used for the error derivative).
        jointSpeedReference: feed-forward joint speed (currently unused
            in the control sum — kept for interface compatibility).
        cartesianSpeedReference: feed-forward Cartesian speed added to
            the proportional term.

    Returns:
        6-vector of joint velocity commands.
    """
    # BUGFIX 1: ``poseActual == None`` raised for ndarray arguments.
    # BUGFIX 2: the original unconditionally re-fetched getPosition()
    # after the if-block, silently discarding a caller-supplied pose.
    if poseActual is None:
        poseActual = self.getPosition()
        poseActual[3:6] = tf.rotationVector2RollPitchYaw(poseActual[3:6])
    poseActualFK = tf.pose2Matrix(poseActual)
    desiredPoseFK = tf.pose2Matrix(desiredPose)
    poseError = desiredPose[0:3] - poseActual[0:3]
    rotationError = tf.matrix2Pose(np.dot(poseActualFK[0:3,0:3].transpose(),desiredPoseFK[0:3,0:3]), True)
    if np.any(np.isnan(rotationError)):
        print('[INFO][ControlLoopDLS] NaN found on control')
        np.nan_to_num(rotationError, False)
    error = np.hstack((poseError, rotationError))
    # Finite-difference error derivative, low-pass filtered (3 Hz cutoff
    # at an assumed 125 Hz control rate) over the stored history.
    error_D = (error - self.errorPrevious)/step
    self.error_D_DB.append(error_D)
    self.errorPrevious = error
    errorFiltered = butter_lowpass_filter(np.asarray(self.error_D_DB, dtype=np.float32), 3, 125, order=2)
    error_D = errorFiltered[errorFiltered.shape[0]-1]
    self.errorSum = self.errorSum + error
    self.normErro = norm(poseError)
    self.errorDB.append(error)
    jacob = self.jacobianAnalytic(self.getJointPosition())
    # PID gains; note only the proportional term (plus the Cartesian
    # feed-forward) currently enters the control sum below.
    Kp = 5*np.eye(6,6)
    Kd = 2*np.eye(6,6)
    Ki = 0.25*np.eye(6,6)
    # Damping parameters for the singularity-robust inverse.
    k0 = 0.01
    w0 = 0.01
    KpControl = np.dot(Kp,error.transpose())
    KdControl = np.dot(Kd,error_D.transpose())
    KiControl = np.dot(Ki,self.errorSum.transpose())
    ControlSum = KpControl + cartesianSpeedReference
    t1 = time.perf_counter()
    # Manipulability measure; damp the inverse near singularities.
    w = np.sqrt(np.linalg.det(np.dot(jacob,jacob.transpose())))
    if (w < w0):
        lamb = k0*(np.power((1 - (w/w0)),2))
        print('[WARNING] Near Singularity: ' + str(w))
    else:
        lamb = 0
    lamb2 = lamb*np.eye(6,6)
    invJacob = np.dot(jacob.transpose(),inv(np.dot(jacob,jacob.transpose()) + lamb2))
    t2 = time.perf_counter()
    JacobianProcessTime = t2 - t1
    self.processTimeList.append(JacobianProcessTime)
    self.wDB.append(w)
    jointControl = np.dot(invJacob,ControlSum)
    return np.squeeze(np.asarray(jointControl))
def speedTransform(self, desiredSpeed, q = None, step = 0.008):
    """Map a desired Cartesian speed to joint speeds via the inverse
    analytic Jacobian.

    Args:
        desiredSpeed: 6-vector Cartesian twist.
        q: joint configuration; current joint position when None.
        step: unused, kept for interface compatibility.

    Returns:
        6-vector of joint speeds.
    """
    # BUGFIX: ``q == None`` compares elementwise for ndarray arguments
    # and raises "truth value of an array is ambiguous".
    if q is None:
        q = self.getJointPosition()
    jacobian = self.jacobianAnalytic(q)
    return np.dot(inv(jacobian), desiredSpeed.transpose())
def butter_lowpass(cutoff, fs, order=5):
    """Design a digital Butterworth low-pass filter.

    Args:
        cutoff: cut-off frequency in Hz.
        fs: sampling frequency in Hz.
        order: filter order.

    Returns:
        (b, a) transfer-function coefficients.
    """
    # scipy.signal.butter expects the cut-off normalised to Nyquist.
    normalised_cutoff = cutoff / (0.5 * fs)
    return butter(order, normalised_cutoff, btype='low', analog=False)
def butter_lowpass_filter(data, cutoff, fs, order=5):
    """Apply a Butterworth low-pass filter to ``data`` (single forward
    pass, so the output is phase-delayed).

    Args:
        data: sample array, filtered along its first axis.
        cutoff: cut-off frequency in Hz.
        fs: sampling frequency in Hz.
        order: filter order.
    """
    numerator, denominator = butter_lowpass(cutoff, fs, order=order)
    return lfilter(numerator, denominator, data)
f735d0e633001aab0366d4b6659ef304fb957519 | 845 | py | Python | odml/gui/__main__.py | carloscanova/python-odml | dff793bff86187b67be139c0f32c7cd036ba8db4 | [
"BSD-4-Clause"
] | null | null | null | odml/gui/__main__.py | carloscanova/python-odml | dff793bff86187b67be139c0f32c7cd036ba8db4 | [
"BSD-4-Clause"
] | null | null | null | odml/gui/__main__.py | carloscanova/python-odml | dff793bff86187b67be139c0f32c7cd036ba8db4 | [
"BSD-4-Clause"
] | null | null | null | #!/usr/bin/env python
import gtk
import Editor
def main(filenames=None):
    """Start the editor with a new empty document, or load all
    *filenames* as tabs.

    Returns the tab objects.
    """
    # BUGFIX: the default used to be a shared mutable list
    # (``filenames=[]``); ``None`` + local list avoids that pitfall.
    if filenames is None:
        filenames = []
    Editor.register_stock_icons()
    editor = Editor.EditorWindow()
    # list() keeps the documents loading eagerly even where map() is
    # lazy (Python 3); under Python 2 it is a plain copy.
    tabs = list(map(editor.load_document, filenames))
    if len(filenames) == 0:
        editor.welcome()
    return tabs
def run():
    """
    handle all initialisation and start main() and gtk.main()
    """
    # Rename the process so it shows up as "odMLEditor"; this syscall
    # (prctl PR_SET_NAME) only exists on Linux, so failures are ignored.
    try:
        from ctypes import cdll
        cdll.LoadLibrary("libc.so.6").prctl(15, 'odMLEditor', 0, 0, 0)
    except:
        pass
    from optparse import OptionParser
    # No options are defined; only the positional arguments are used.
    _, positional = OptionParser().parse_args()
    main(filenames=positional)
    gtk.main()
# Entry point when this module is executed directly.
if __name__=="__main__":
    run()
| 22.236842 | 61 | 0.623669 |
import gtk
import Editor
def main(filenames=[]):
    """Start the editor with a new empty document, or load all
    *filenames* as tabs; returns the tab objects."""
    # NOTE(review): mutable default argument; safe only because the
    # list is never mutated here.
    Editor.register_stock_icons()
    editor = Editor.EditorWindow()
    tabs = map(editor.load_document, filenames)
    if len(filenames) == 0:
        editor.welcome()
    return tabs
def run():
    """Handle all initialisation, then start main() and gtk.main()."""
    # Rename the process via prctl(PR_SET_NAME); Linux only, so any
    # failure is deliberately ignored.
    try:
        from ctypes import cdll
        libc = cdll.LoadLibrary("libc.so.6")
        libc.prctl(15, 'odMLEditor', 0, 0, 0)
    except:
        pass
    from optparse import OptionParser
    parser = OptionParser()
    # No options are defined; only positional arguments are used.
    (options, args) = parser.parse_args()
    main(filenames=args)
    gtk.main()
# Entry point when this module is executed directly.
if __name__=="__main__":
    run()
| true | true |
f735d103558b450b8555fe74915f3f20a5ba2d42 | 4,680 | py | Python | tests/test_reporters.py | indigoviolet/region_profiler | 119a3624ee063d86e8e66de9fa97cccd7964f943 | [
"MIT"
] | null | null | null | tests/test_reporters.py | indigoviolet/region_profiler | 119a3624ee063d86e8e66de9fa97cccd7964f943 | [
"MIT"
] | null | null | null | tests/test_reporters.py | indigoviolet/region_profiler | 119a3624ee063d86e8e66de9fa97cccd7964f943 | [
"MIT"
] | null | null | null | import sys
from typing import List
import pytest
from region_profiler import RegionProfiler
from region_profiler import reporter_columns as cols
from region_profiler.reporters import (
ConsoleReporter,
CsvReporter,
SilentReporter,
Slice,
get_profiler_slice,
)
from region_profiler.utils import SeqStatsProtocol
class FixedStats(SeqStatsProtocol):
    """Frozen stand-in for real timing statistics.

    Reporters only read ``count``/``total``/``min``/``max``; fixed
    values make the expected report output deterministic.
    """

    def __init__(self, count, total, min, max):
        self.count = count
        self.total = total
        self.min = min
        self.max = max

    def add(self, x: float):
        # Fixture stats are frozen; nothing may be recorded into them.
        raise NotImplementedError
@pytest.fixture()
def dummy_region_profiler():
    """Build a small RegionProfiler tree with fixed, deterministic stats."""
    rp = RegionProfiler()
    with rp.region("a"):
        with rp.region("b"):
            pass
        for name in ["c", "d"]:
            with rp.region(name):
                with rp.region("x"):
                    pass
    # Overwrite the measured timings with fixed values so that the
    # reporter output is deterministic: (count, total, min, max).
    node_a = rp.root.children["a"]
    fixed = [
        (rp.root, (1, 100, 100, 100)),
        (node_a, (1, 90, 90, 90)),
        (node_a.children["b"], (4, 20, 2, 10)),
        (node_a.children["c"], (2, 30, 10, 20)),
        (node_a.children["c"].children["x"], (2, 10, 5, 5)),
        (node_a.children["d"], (1, 25, 25, 25)),
        (node_a.children["d"].children["x"], (1, 10, 10, 10)),
    ]
    for node, args in fixed:
        node.stats = FixedStats(*args)
    return rp
def test_slice_generation(dummy_region_profiler):
    """Test that node tree is properly serialized in a list."""
    expected = [
        Slice(0, RegionProfiler.ROOT_NODE_NAME, None, 0, 1, 100, 10, 100, 100),
        Slice(1, "a", None, 1, 1, 90, 15, 90, 90),
        Slice(2, "c", None, 2, 2, 30, 20, 10, 20),
        Slice(3, "x", None, 3, 2, 10, 10, 5, 5),
        Slice(4, "d", None, 2, 1, 25, 15, 25, 25),
        Slice(5, "x", None, 3, 1, 10, 10, 10, 10),
        Slice(6, "b", None, 2, 4, 20, 20, 2, 10),
    ]
    # Wire up the parent links: (child index, parent index) pairs.
    for child, parent in ((1, 0), (2, 1), (3, 2), (4, 1), (5, 4), (6, 1)):
        expected[child].parent = expected[parent]
    assert get_profiler_slice(dummy_region_profiler) == expected
def test_silent_reporter(dummy_region_profiler):
    """Test :py:class:`SilentReporter` reporter."""
    reporter = SilentReporter([cols.name, cols.node_id, cols.parent_id, cols.total_us])
    reporter.dump_profiler(dummy_region_profiler)
    header = ["name", "id", "parent_id", "total_us"]
    data_rows = [
        [RegionProfiler.ROOT_NODE_NAME, "0", "", "100000000"],
        ["a", "1", "0", "90000000"],
        ["c", "2", "1", "30000000"],
        ["x", "3", "2", "10000000"],
        ["d", "4", "1", "25000000"],
        ["x", "5", "4", "10000000"],
        ["b", "6", "1", "20000000"],
    ]
    assert reporter.rows == [header] + data_rows
def test_console_reporter(dummy_region_profiler, capsys):
    """Test :py:class:`ConsoleReporter` reporter."""
    reporter = ConsoleReporter(
        [cols.name, cols.node_id, cols.parent_id, cols.total_us], stream=sys.stdout
    )
    reporter.dump_profiler(dummy_region_profiler)
    expected: List[List[str]] = [
        ["name", "id", "parent id", "total us"],
        [],  # separator line between the header and the body
        [RegionProfiler.ROOT_NODE_NAME, "0", "", "100000000"],
        ["a", "1", "0", "90000000"],
        ["c", "2", "1", "30000000"],
        ["x", "3", "2", "10000000"],
        ["d", "4", "1", "25000000"],
        ["x", "5", "4", "10000000"],
        ["b", "6", "1", "20000000"],
    ]
    captured = capsys.readouterr()[0].strip().split("\n")
    assert len(captured) == len(expected)
    for line, cells in zip(captured, expected):
        # Every table line must share the header line's width.
        assert len(line) == len(captured[0])
        for cell in cells:
            assert cell in line
def test_csv_reporter(dummy_region_profiler, capsys):
    """Test :py:class:`CsvReporter` reporter."""
    reporter = CsvReporter(
        [cols.name, cols.node_id, cols.parent_id, cols.total_us], stream=sys.stdout
    )
    reporter.dump_profiler(dummy_region_profiler)
    expected = [
        ["name", "id", "parent_id", "total_us"],
        [RegionProfiler.ROOT_NODE_NAME, "0", "", "100000000"],
        ["a", "1", "0", "90000000"],
        ["c", "2", "1", "30000000"],
        ["x", "3", "2", "10000000"],
        ["d", "4", "1", "25000000"],
        ["x", "5", "4", "10000000"],
        ["b", "6", "1", "20000000"],
    ]
    raw = capsys.readouterr()[0]
    # Parse the CSV output into a table of stripped cells.
    table = [[cell.strip() for cell in line.split(",")] for line in raw.strip().split("\n")]
    assert len(table) == len(expected)
    for actual_row, expected_row in zip(table, expected):
        assert len(actual_row) == len(expected_row)
        for actual_cell, expected_cell in zip(actual_row, expected_row):
            assert actual_cell == expected_cell
| 30.588235 | 84 | 0.569231 | import sys
from typing import List
import pytest
from region_profiler import RegionProfiler
from region_profiler import reporter_columns as cols
from region_profiler.reporters import (
ConsoleReporter,
CsvReporter,
SilentReporter,
Slice,
get_profiler_slice,
)
from region_profiler.utils import SeqStatsProtocol
class FixedStats(SeqStatsProtocol):
    """Frozen stand-in for real timing statistics; fixed values make
    the expected reporter output deterministic."""

    def __init__(self, count, total, min, max):
        self.count = count
        self.total = total
        self.min = min
        self.max = max

    def add(self, x: float):
        # Fixture stats are frozen; nothing may be recorded into them.
        raise NotImplementedError
@pytest.fixture()
def dummy_region_profiler():
    """Generate a dummy region profiler tree with fixed stats."""
    rp = RegionProfiler()
    with rp.region("a"):
        with rp.region("b"):
            pass
        for n in ["c", "d"]:
            with rp.region(n):
                with rp.region("x"):
                    pass
    # Replace measured timings with fixed (count, total, min, max)
    # values so that reporter output is deterministic.
    rp.root.stats = FixedStats(1, 100, 100, 100)
    a = rp.root.children["a"]
    a.stats = FixedStats(1, 90, 90, 90)
    a.children["b"].stats = FixedStats(4, 20, 2, 10)
    a.children["c"].stats = FixedStats(2, 30, 10, 20)
    a.children["c"].children["x"].stats = FixedStats(2, 10, 5, 5)
    a.children["d"].stats = FixedStats(1, 25, 25, 25)
    a.children["d"].children["x"].stats = FixedStats(1, 10, 10, 10)
    return rp
def test_slice_generation(dummy_region_profiler):
    """Slices must mirror the profiler tree in depth-first order."""
    expected = [
        Slice(0, RegionProfiler.ROOT_NODE_NAME, None, 0, 1, 100, 10, 100, 100),
        Slice(1, "a", None, 1, 1, 90, 15, 90, 90),
        Slice(2, "c", None, 2, 2, 30, 20, 10, 20),
        Slice(3, "x", None, 3, 2, 10, 10, 5, 5),
        Slice(4, "d", None, 2, 1, 25, 15, 25, 25),
        Slice(5, "x", None, 3, 1, 10, 10, 10, 10),
        Slice(6, "b", None, 2, 4, 20, 20, 2, 10),
    ]
    # Wire up parent links: (child index, parent index) pairs.
    for child, parent in ((1, 0), (2, 1), (3, 2), (4, 1), (5, 4), (6, 1)):
        expected[child].parent = expected[parent]
    assert get_profiler_slice(dummy_region_profiler) == expected
def test_silent_reporter(dummy_region_profiler):
    """SilentReporter must accumulate rows instead of printing them."""
    reporter = SilentReporter([cols.name, cols.node_id, cols.parent_id, cols.total_us])
    reporter.dump_profiler(dummy_region_profiler)
    header = ["name", "id", "parent_id", "total_us"]
    body = [
        [RegionProfiler.ROOT_NODE_NAME, "0", "", "100000000"],
        ["a", "1", "0", "90000000"],
        ["c", "2", "1", "30000000"],
        ["x", "3", "2", "10000000"],
        ["d", "4", "1", "25000000"],
        ["x", "5", "4", "10000000"],
        ["b", "6", "1", "20000000"],
    ]
    assert reporter.rows == [header] + body
def test_console_reporter(dummy_region_profiler, capsys):
    """ConsoleReporter must print a fixed-width table containing all cells."""
    reporter = ConsoleReporter(
        [cols.name, cols.node_id, cols.parent_id, cols.total_us], stream=sys.stdout
    )
    reporter.dump_profiler(dummy_region_profiler)
    expected: List[List[str]] = [
        ["name", "id", "parent id", "total us"],
        [],
        [RegionProfiler.ROOT_NODE_NAME, "0", "", "100000000"],
        ["a", "1", "0", "90000000"],
        ["c", "2", "1", "30000000"],
        ["x", "3", "2", "10000000"],
        ["d", "4", "1", "25000000"],
        ["x", "5", "4", "10000000"],
        ["b", "6", "1", "20000000"],
    ]
    lines = capsys.readouterr()[0].strip().split("\n")
    assert len(lines) == len(expected)
    first_width = len(lines[0])
    for line, cells in zip(lines, expected):
        # The console table is fixed-width: every line matches the header line.
        assert len(line) == first_width
        for cell in cells:
            assert cell in line
def test_csv_reporter(dummy_region_profiler, capsys):
    """Test :py:class:`CsvReporter`: captured CSV must match the expected table."""
    r = CsvReporter(
        [cols.name, cols.node_id, cols.parent_id, cols.total_us], stream=sys.stdout
    )
    r.dump_profiler(dummy_region_profiler)
    expected = [
        ["name", "id", "parent_id", "total_us"],
        [RegionProfiler.ROOT_NODE_NAME, "0", "", "100000000"],
        ["a", "1", "0", "90000000"],
        ["c", "2", "1", "30000000"],
        ["x", "3", "2", "10000000"],
        ["d", "4", "1", "25000000"],
        ["x", "5", "4", "10000000"],
        ["b", "6", "1", "20000000"],
    ]
    output, err = capsys.readouterr()
    # Re-parse the CSV text into stripped cells, one row per printed line.
    # (Note: the comprehension variable ``r`` shadows the reporter above.)
    output = [[c.strip() for c in r.split(",")] for r in output.strip().split("\n")]
    assert len(output) == len(expected)
    for row, expected_vals in zip(output, expected):
        assert len(row) == len(expected_vals)
        for col, v in zip(row, expected_vals):
            assert col == v
| true | true |
f735d114acfbbc7d006b9ce0ce107c90ffa2fae2 | 324 | py | Python | simulator/kruxsim/mocks/flash.py | odudex/krux | db421a3f107c0263221e5f1e877e9c38925bb17c | [
"MIT"
] | null | null | null | simulator/kruxsim/mocks/flash.py | odudex/krux | db421a3f107c0263221e5f1e877e9c38925bb17c | [
"MIT"
] | 13 | 2022-03-21T05:35:03.000Z | 2022-03-31T14:31:46.000Z | simulator/kruxsim/mocks/flash.py | odudex/krux | db421a3f107c0263221e5f1e877e9c38925bb17c | [
"MIT"
] | null | null | null | import sys
from unittest import mock

# 8 MiB of zeroed storage standing in for the device flash chip.
flash = bytearray(8 * 1024 * 1024)


def read_data(addr, amount):
    """Return ``amount`` bytes of flash starting at ``addr``."""
    end = addr + amount
    return flash[addr:end]


def write_data(addr, data):
    """Overwrite flash at ``addr`` with the given bytes, in place."""
    end = addr + len(data)
    flash[addr:end] = data


# Register a stand-in ``flash`` module once (first import wins), exposing
# the helpers above as its read/write entry points.
if "flash" not in sys.modules:
    sys.modules["flash"] = mock.MagicMock(read=read_data, write=write_data)
| 19.058824 | 75 | 0.691358 | import sys
from unittest import mock

# Backing store for the simulated flash chip: 8 MiB, zero-initialised.
flash = bytearray(8 * 1024 * 1024)


def read_data(addr, amount):
    # Slice of the backing store; returns a new bytearray copy.
    return flash[addr : addr + amount]


def write_data(addr, data):
    # Splice the payload into the backing store in place.
    flash[addr : addr + len(data)] = data


# Install a fake ``flash`` module whose read/write delegate to the helpers
# above; only the first import registers it.
if "flash" not in sys.modules:
    sys.modules["flash"] = mock.MagicMock(read=read_data, write=write_data)
| true | true |
f735d1900984061043b641ab5d3f5887318d3656 | 6,714 | py | Python | src/mainwin.py | coljac/magicbrowse | a5b4d8c3c3ef9994918ec024b726d2b3d6eba695 | [
"MIT"
] | null | null | null | src/mainwin.py | coljac/magicbrowse | a5b4d8c3c3ef9994918ec024b726d2b3d6eba695 | [
"MIT"
] | null | null | null | src/mainwin.py | coljac/magicbrowse | a5b4d8c3c3ef9994918ec024b726d2b3d6eba695 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwindow.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MtgBrowse(object):
    """UI scaffolding for the MTGBrowser main window.

    Generated by the PyQt5 UI code generator from ``mainwindow.ui`` (see the
    file header): edit the ``.ui`` source and regenerate rather than changing
    this class by hand.
    """

    def setupUi(self, MtgBrowse):
        """Build the widget tree and layouts on the given main window."""
        MtgBrowse.setObjectName("MtgBrowse")
        MtgBrowse.resize(1120, 1073)
        # Fixed size policy: the window does not stretch with its contents.
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(MtgBrowse.sizePolicy().hasHeightForWidth())
        MtgBrowse.setSizePolicy(sizePolicy)
        self.centralWidget = QtWidgets.QWidget(MtgBrowse)
        self.centralWidget.setObjectName("centralWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.centralWidget)
        self.verticalLayout.setContentsMargins(11, 11, 11, 11)
        self.verticalLayout.setSpacing(6)
        self.verticalLayout.setObjectName("verticalLayout")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout()
        self.verticalLayout_2.setSpacing(0)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.verticalLayout_3 = QtWidgets.QVBoxLayout()
        self.verticalLayout_3.setSpacing(6)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        # Top row: text input plus Save button.
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setSpacing(6)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.lineEdit = QtWidgets.QLineEdit(self.centralWidget)
        self.lineEdit.setObjectName("lineEdit")
        self.horizontalLayout.addWidget(self.lineEdit)
        self.saveButton = QtWidgets.QPushButton(self.centralWidget)
        self.saveButton.setObjectName("saveButton")
        self.horizontalLayout.addWidget(self.saveButton)
        self.verticalLayout_3.addLayout(self.horizontalLayout)
        # Second row: Load 1-3 buttons, Standard checkbox, Zoom toggle,
        # centred between expanding spacers.
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setSpacing(6)
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem)
        self.loadButton_1 = QtWidgets.QPushButton(self.centralWidget)
        self.loadButton_1.setObjectName("loadButton_1")
        self.horizontalLayout_3.addWidget(self.loadButton_1)
        self.loadButton_2 = QtWidgets.QPushButton(self.centralWidget)
        self.loadButton_2.setObjectName("loadButton_2")
        self.horizontalLayout_3.addWidget(self.loadButton_2)
        self.loadButton_3 = QtWidgets.QPushButton(self.centralWidget)
        self.loadButton_3.setObjectName("loadButton_3")
        self.horizontalLayout_3.addWidget(self.loadButton_3)
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem1)
        self.check_standard = QtWidgets.QCheckBox(self.centralWidget)
        self.check_standard.setObjectName("check_standard")
        self.horizontalLayout_3.addWidget(self.check_standard)
        self.zoomToggleButton = QtWidgets.QPushButton(self.centralWidget)
        self.zoomToggleButton.setObjectName("zoomToggleButton")
        self.horizontalLayout_3.addWidget(self.zoomToggleButton)
        spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem2)
        self.verticalLayout_3.addLayout(self.horizontalLayout_3)
        self.verticalLayout_2.addLayout(self.verticalLayout_3)
        self.verticalLayout.addLayout(self.verticalLayout_2)
        # Main area grid: content widget, pager row and status label.
        self.gridLayout = QtWidgets.QGridLayout()
        self.gridLayout.setHorizontalSpacing(6)
        self.gridLayout.setVerticalSpacing(0)
        self.gridLayout.setObjectName("gridLayout")
        self.mainWidget = QtWidgets.QWidget(self.centralWidget)
        self.mainWidget.setMinimumSize(QtCore.QSize(1100, 930))
        self.mainWidget.setLayoutDirection(QtCore.Qt.RightToLeft)
        self.mainWidget.setObjectName("mainWidget")
        self.gridLayout.addWidget(self.mainWidget, 0, 0, 1, 1)
        self.statusBarLabel = QtWidgets.QLabel(self.centralWidget)
        self.statusBarLabel.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.statusBarLabel.setAutoFillBackground(False)
        self.statusBarLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.statusBarLabel.setObjectName("statusBarLabel")
        self.gridLayout.addWidget(self.statusBarLabel, 2, 0, 1, 1)
        # Pager row: previous/next buttons, disabled until pages exist.
        self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_5.setSpacing(6)
        self.horizontalLayout_5.setObjectName("horizontalLayout_5")
        self.prevPage = QtWidgets.QPushButton(self.centralWidget)
        self.prevPage.setEnabled(False)
        self.prevPage.setObjectName("prevPage")
        self.horizontalLayout_5.addWidget(self.prevPage)
        spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_5.addItem(spacerItem3)
        self.nextPage = QtWidgets.QPushButton(self.centralWidget)
        self.nextPage.setEnabled(False)
        self.nextPage.setObjectName("nextPage")
        self.horizontalLayout_5.addWidget(self.nextPage)
        self.gridLayout.addLayout(self.horizontalLayout_5, 1, 0, 1, 1)
        self.verticalLayout.addLayout(self.gridLayout)
        MtgBrowse.setCentralWidget(self.centralWidget)
        self.mainToolBar = QtWidgets.QToolBar(MtgBrowse)
        self.mainToolBar.setObjectName("mainToolBar")
        MtgBrowse.addToolBar(QtCore.Qt.TopToolBarArea, self.mainToolBar)
        self.retranslateUi(MtgBrowse)
        QtCore.QMetaObject.connectSlotsByName(MtgBrowse)

    def retranslateUi(self, MtgBrowse):
        """Apply translatable display texts to the widgets."""
        _translate = QtCore.QCoreApplication.translate
        MtgBrowse.setWindowTitle(_translate("MtgBrowse", "MTGBrowser"))
        self.saveButton.setText(_translate("MtgBrowse", "Save"))
        self.loadButton_1.setText(_translate("MtgBrowse", "Load 1"))
        self.loadButton_2.setText(_translate("MtgBrowse", "Load 2"))
        self.loadButton_3.setText(_translate("MtgBrowse", "Load 3"))
        self.check_standard.setText(_translate("MtgBrowse", "Standard"))
        self.zoomToggleButton.setText(_translate("MtgBrowse", "Zoom"))
        self.statusBarLabel.setText(_translate("MtgBrowse", "Initialized."))
        self.prevPage.setText(_translate("MtgBrowse", "<"))
        self.nextPage.setText(_translate("MtgBrowse", ">"))
| 55.95 | 115 | 0.737563 |
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MtgBrowse(object):
    """Auto-generated (pyuic5) UI class for the MTGBrowser main window.

    Regenerate from the ``.ui`` source instead of editing by hand.
    """

    def setupUi(self, MtgBrowse):
        """Create all widgets and layouts on the given main window."""
        MtgBrowse.setObjectName("MtgBrowse")
        MtgBrowse.resize(1120, 1073)
        # Window keeps a fixed size in both directions.
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(MtgBrowse.sizePolicy().hasHeightForWidth())
        MtgBrowse.setSizePolicy(sizePolicy)
        self.centralWidget = QtWidgets.QWidget(MtgBrowse)
        self.centralWidget.setObjectName("centralWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.centralWidget)
        self.verticalLayout.setContentsMargins(11, 11, 11, 11)
        self.verticalLayout.setSpacing(6)
        self.verticalLayout.setObjectName("verticalLayout")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout()
        self.verticalLayout_2.setSpacing(0)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.verticalLayout_3 = QtWidgets.QVBoxLayout()
        self.verticalLayout_3.setSpacing(6)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        # Row with the line edit and the Save button.
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setSpacing(6)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.lineEdit = QtWidgets.QLineEdit(self.centralWidget)
        self.lineEdit.setObjectName("lineEdit")
        self.horizontalLayout.addWidget(self.lineEdit)
        self.saveButton = QtWidgets.QPushButton(self.centralWidget)
        self.saveButton.setObjectName("saveButton")
        self.horizontalLayout.addWidget(self.saveButton)
        self.verticalLayout_3.addLayout(self.horizontalLayout)
        # Row with Load buttons, the Standard checkbox and the Zoom toggle.
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setSpacing(6)
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem)
        self.loadButton_1 = QtWidgets.QPushButton(self.centralWidget)
        self.loadButton_1.setObjectName("loadButton_1")
        self.horizontalLayout_3.addWidget(self.loadButton_1)
        self.loadButton_2 = QtWidgets.QPushButton(self.centralWidget)
        self.loadButton_2.setObjectName("loadButton_2")
        self.horizontalLayout_3.addWidget(self.loadButton_2)
        self.loadButton_3 = QtWidgets.QPushButton(self.centralWidget)
        self.loadButton_3.setObjectName("loadButton_3")
        self.horizontalLayout_3.addWidget(self.loadButton_3)
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem1)
        self.check_standard = QtWidgets.QCheckBox(self.centralWidget)
        self.check_standard.setObjectName("check_standard")
        self.horizontalLayout_3.addWidget(self.check_standard)
        self.zoomToggleButton = QtWidgets.QPushButton(self.centralWidget)
        self.zoomToggleButton.setObjectName("zoomToggleButton")
        self.horizontalLayout_3.addWidget(self.zoomToggleButton)
        spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem2)
        self.verticalLayout_3.addLayout(self.horizontalLayout_3)
        self.verticalLayout_2.addLayout(self.verticalLayout_3)
        self.verticalLayout.addLayout(self.verticalLayout_2)
        # Grid with the main content widget, pager buttons and status label.
        self.gridLayout = QtWidgets.QGridLayout()
        self.gridLayout.setHorizontalSpacing(6)
        self.gridLayout.setVerticalSpacing(0)
        self.gridLayout.setObjectName("gridLayout")
        self.mainWidget = QtWidgets.QWidget(self.centralWidget)
        self.mainWidget.setMinimumSize(QtCore.QSize(1100, 930))
        self.mainWidget.setLayoutDirection(QtCore.Qt.RightToLeft)
        self.mainWidget.setObjectName("mainWidget")
        self.gridLayout.addWidget(self.mainWidget, 0, 0, 1, 1)
        self.statusBarLabel = QtWidgets.QLabel(self.centralWidget)
        self.statusBarLabel.setLayoutDirection(QtCore.Qt.LeftToRight)
        self.statusBarLabel.setAutoFillBackground(False)
        self.statusBarLabel.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.statusBarLabel.setObjectName("statusBarLabel")
        self.gridLayout.addWidget(self.statusBarLabel, 2, 0, 1, 1)
        # Previous/next page buttons start disabled.
        self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_5.setSpacing(6)
        self.horizontalLayout_5.setObjectName("horizontalLayout_5")
        self.prevPage = QtWidgets.QPushButton(self.centralWidget)
        self.prevPage.setEnabled(False)
        self.prevPage.setObjectName("prevPage")
        self.horizontalLayout_5.addWidget(self.prevPage)
        spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_5.addItem(spacerItem3)
        self.nextPage = QtWidgets.QPushButton(self.centralWidget)
        self.nextPage.setEnabled(False)
        self.nextPage.setObjectName("nextPage")
        self.horizontalLayout_5.addWidget(self.nextPage)
        self.gridLayout.addLayout(self.horizontalLayout_5, 1, 0, 1, 1)
        self.verticalLayout.addLayout(self.gridLayout)
        MtgBrowse.setCentralWidget(self.centralWidget)
        self.mainToolBar = QtWidgets.QToolBar(MtgBrowse)
        self.mainToolBar.setObjectName("mainToolBar")
        MtgBrowse.addToolBar(QtCore.Qt.TopToolBarArea, self.mainToolBar)
        self.retranslateUi(MtgBrowse)
        QtCore.QMetaObject.connectSlotsByName(MtgBrowse)

    def retranslateUi(self, MtgBrowse):
        """Set the (translatable) user-visible texts."""
        _translate = QtCore.QCoreApplication.translate
        MtgBrowse.setWindowTitle(_translate("MtgBrowse", "MTGBrowser"))
        self.saveButton.setText(_translate("MtgBrowse", "Save"))
        self.loadButton_1.setText(_translate("MtgBrowse", "Load 1"))
        self.loadButton_2.setText(_translate("MtgBrowse", "Load 2"))
        self.loadButton_3.setText(_translate("MtgBrowse", "Load 3"))
        self.check_standard.setText(_translate("MtgBrowse", "Standard"))
        self.zoomToggleButton.setText(_translate("MtgBrowse", "Zoom"))
        self.statusBarLabel.setText(_translate("MtgBrowse", "Initialized."))
        self.prevPage.setText(_translate("MtgBrowse", "<"))
        self.nextPage.setText(_translate("MtgBrowse", ">"))
| true | true |
f735d1b207af86fc20b10c7aab955f604760663c | 118 | py | Python | learn_mongodb/p1_crud/p3_update/__init__.py | MacHu-GWU/learn_mongodb-project | e5f5a96603e71ca762bdb56951fa7e5ce143d810 | [
"MIT"
] | null | null | null | learn_mongodb/p1_crud/p3_update/__init__.py | MacHu-GWU/learn_mongodb-project | e5f5a96603e71ca762bdb56951fa7e5ce143d810 | [
"MIT"
] | null | null | null | learn_mongodb/p1_crud/p3_update/__init__.py | MacHu-GWU/learn_mongodb-project | e5f5a96603e71ca762bdb56951fa7e5ce143d810 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
ref: https://docs.mongodb.com/manual/reference/operator/update/
""" | 19.666667 | 63 | 0.644068 | true | true | |
f735d33c955450b393a844082555926f2cfe08a0 | 1,222 | py | Python | pcat2py/class/23ae8758-5cc5-11e4-af55-00155d01fe08.py | phnomcobra/PCAT2PY | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | [
"MIT"
] | null | null | null | pcat2py/class/23ae8758-5cc5-11e4-af55-00155d01fe08.py | phnomcobra/PCAT2PY | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | [
"MIT"
] | null | null | null | pcat2py/class/23ae8758-5cc5-11e4-af55-00155d01fe08.py | phnomcobra/PCAT2PY | 937c3b365cdc5ac69b78f59070be0a21bdb53db0 | [
"MIT"
] | null | null | null | #!/usr/bin/python
################################################################################
# 23ae8758-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
    """PCAT compliance check: /etc/gshadow must carry mode 0000 (no permission bits)."""

    def __init__(self):
        self.output = []            # raw lines captured by the last check()
        self.is_compliant = False   # result of the last check()
        self.uuid = "23ae8758-5cc5-11e4-af55-00155d01fe08"

    def check(self, cli):
        """Run ``ls -l /etc/gshadow`` through *cli* and report compliance.

        The finding is compliant when some non-empty output line has a first
        whitespace-separated field (the permission string) containing ten
        consecutive dashes, i.e. no permission bits are set.
        """
        self.is_compliant = False
        # Execute command and capture standard output.
        stdout = cli.system("ls -l /etc/gshadow")
        self.output = stdout.split('\n')
        # Scan each non-empty line's permission field.
        # (The original kept an unused lineNumber counter; removed.)
        for line in self.output:
            if len(line.strip()) > 0:
                subStrings = line.split(' ')
                if "----------" in subStrings[0]:
                    self.is_compliant = True
        return self.is_compliant

    def fix(self, cli):
        """Remediate by stripping every permission bit from /etc/gshadow."""
        cli.system("chmod 0000 /etc/gshadow")
| 29.095238 | 80 | 0.486088 | true | true | |
f735d366d42e472d897ac4bf09f3aebbea4834d2 | 1,822 | py | Python | app/blueprint_auth/views.py | medsci-tech/mime_analysis_flask_2017 | 4a927219f31db433f4af6a7af3085e05c08b5c3e | [
"MIT"
] | null | null | null | app/blueprint_auth/views.py | medsci-tech/mime_analysis_flask_2017 | 4a927219f31db433f4af6a7af3085e05c08b5c3e | [
"MIT"
] | null | null | null | app/blueprint_auth/views.py | medsci-tech/mime_analysis_flask_2017 | 4a927219f31db433f4af6a7af3085e05c08b5c3e | [
"MIT"
] | null | null | null | import datetime
import json
from flask import url_for
from flask import redirect
from flask import render_template
from flask_login import login_user
from flask_login import logout_user
from . import blueprint_auth
from .forms import RegisterForm
from .forms import LoginForm
from .utils_cms import generate_code
from .utils_cms import send_sms_code
from ..models import db
from ..models import User
@blueprint_auth.route('/login', methods=['GET', 'POST'])
def login():
    """Render the login page; on a valid POST, start a session and redirect."""
    form = LoginForm()
    if not form.validate_on_submit():
        return render_template('auth/login.html', form=form)
    account = User.query.filter_by(phone=form.phone.data).first()
    login_user(account)
    return redirect(url_for('blueprint_time.index'))
@blueprint_auth.route('/logout', methods=['GET', 'POST'])
def logout():
    """Terminate the current session and return to the login page."""
    logout_user()
    login_page = url_for('blueprint_auth.login')
    return redirect(login_page)
@blueprint_auth.route('/register', methods=['GET', 'POST'])
def register():
    """Set the password on an existing account, then redirect to login."""
    form = RegisterForm()
    if not form.validate_on_submit():
        return render_template('auth/register.html', form=form)
    # NOTE(review): assumes RegisterForm validation guarantees a User row
    # exists for this phone number -- confirm, otherwise this raises.
    account = User.query.filter_by(phone=form.phone.data).first()
    account.set_password(form.password.data)
    db.session.commit()
    return redirect(url_for('blueprint_auth.login'))
@blueprint_auth.route('/sms_code/<string:phone>', methods=['GET', 'POST'])
def sms_code(phone):
    """Generate and send an SMS verification code for *phone*.

    Returns a JSON string: ``{"result": "success"}`` on success, otherwise
    ``{"result": "error"}`` (unknown phone number or failed delivery).
    """
    account = User.query.filter_by(phone=phone).first()
    if not account:
        return json.dumps({'result': 'error'})
    code = generate_code()
    if not send_sms_code(phone, code):
        return json.dumps({'result': 'error'})
    account.sms_code = code
    account.updated_at = datetime.datetime.now()
    db.session.commit()
    return json.dumps({'result': 'success'})
| 27.606061 | 74 | 0.694292 | import datetime
import json
from flask import url_for
from flask import redirect
from flask import render_template
from flask_login import login_user
from flask_login import logout_user
from . import blueprint_auth
from .forms import RegisterForm
from .forms import LoginForm
from .utils_cms import generate_code
from .utils_cms import send_sms_code
from ..models import db
from ..models import User
@blueprint_auth.route('/login', methods=['GET', 'POST'])
def login():
    """Show the login form; log the user in and redirect on a valid submit."""
    form = LoginForm()
    if form.validate_on_submit():
        # Form validation is expected to have checked the credentials.
        user = User.query.filter_by(phone=form.phone.data).first()
        login_user(user)
        return redirect(url_for('blueprint_time.index'))
    return render_template('auth/login.html', form=form)
@blueprint_auth.route('/logout', methods=['GET', 'POST'])
def logout():
    """End the session and bounce back to the login page."""
    logout_user()
    return redirect(url_for('blueprint_auth.login'))
@blueprint_auth.route('/register', methods=['GET', 'POST'])
def register():
    """On a valid submit, set the password for the matching User row."""
    form = RegisterForm()
    if form.validate_on_submit():
        # NOTE(review): presumably RegisterForm validation ensures this phone
        # already has a User row -- confirm, else this raises AttributeError.
        new_user = User.query.filter_by(phone=form.phone.data).first()
        new_user.set_password(form.password.data)
        db.session.commit()
        return redirect(url_for('blueprint_auth.login'))
    return render_template('auth/register.html', form=form)
@blueprint_auth.route('/sms_code/<string:phone>', methods=['GET', 'POST'])
def sms_code(phone):
    """Send a fresh SMS code to *phone*; reply with a JSON result string."""
    user = User.query.filter_by(phone=phone).first()
    if not user:
        # Unknown phone number.
        ret = {'result': 'error'}
        return json.dumps(ret)
    code = generate_code()
    ret = send_sms_code(phone, code)
    if not ret:
        # SMS delivery failed.
        ret = {'result': 'error'}
        return json.dumps(ret)
    # Persist the code (and timestamp) so it can be verified later.
    user.sms_code = code
    user.updated_at = datetime.datetime.now()
    db.session.commit()
    ret = {'result': 'success'}
    return json.dumps(ret)
| true | true |
f735d3b5b015bc679c6b0fc8a3dfed895d1870f9 | 9,542 | py | Python | BaseTools/Source/Python/GenFds/Fd.py | CEOALT1/RefindPlusUDK | 116b957ad735f96fbb6d80a0ba582046960ba164 | [
"BSD-2-Clause"
] | 2 | 2018-09-05T02:46:11.000Z | 2020-04-14T08:38:31.000Z | BaseTools/Source/Python/GenFds/Fd.py | CEOALT1/RefindPlusUDK | 116b957ad735f96fbb6d80a0ba582046960ba164 | [
"BSD-2-Clause"
] | 5 | 2020-12-25T05:24:37.000Z | 2021-01-11T01:01:01.000Z | BaseTools/Source/Python/GenFds/Fd.py | CEOALT1/RefindPlusUDK | 116b957ad735f96fbb6d80a0ba582046960ba164 | [
"BSD-2-Clause"
] | 16 | 2020-07-31T17:57:27.000Z | 2021-03-10T14:32:36.000Z | ## @file
# process FD generation
#
# Copyright (c) 2007 - 2017, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Region
import Fv
import Common.LongFilePathOs as os
import StringIO
import sys
from struct import *
from GenFdsGlobalVariable import GenFdsGlobalVariable
from CommonDataClass.FdfClass import FDClassObject
from Common import EdkLogger
from Common.BuildToolError import *
from Common.Misc import SaveFileOnChange
from GenFds import GenFds
## generate FD
#
#
class FD(FDClassObject):
    ## The constructor
    #
    #   @param  self        The object pointer
    #
    def __init__(self):
        # All FD attributes (FdUiName, Size, BlockSizeList, RegionList, ...)
        # come from the FDF-parser-populated base class.
        FDClassObject.__init__(self)

    ## GenFd() method
    #
    #   Generate the FD image file, or reuse the cached result if this FD
    #   was already generated during the current run.
    #
    #   @param  Flag        True: lay out regions only, do not write output
    #   @retval string      Generated FD file name
    #
    def GenFd (self, Flag = False):
        if self.FdUiName.upper() + 'fd' in GenFds.ImageBinDict.keys():
            return GenFds.ImageBinDict[self.FdUiName.upper() + 'fd']
        #
        # Print Information
        #
        FdFileName = os.path.join(GenFdsGlobalVariable.FvDir, self.FdUiName + '.fd')
        if not Flag:
            GenFdsGlobalVariable.InfLogger("\nFd File Name:%s (%s)" %(self.FdUiName, FdFileName))
        # The block statements must add up to exactly the declared FD size.
        Offset = 0x00
        for item in self.BlockSizeList:
            Offset = Offset + item[0] * item[1]
        if Offset != self.Size:
            EdkLogger.error("GenFds", GENFDS_ERROR, 'FD %s Size not consistent with block array' % self.FdUiName)
        GenFdsGlobalVariable.VerboseLogger('Following Fv will be add to Fd !!!')
        for FvObj in GenFdsGlobalVariable.FdfParser.Profile.FvDict:
            GenFdsGlobalVariable.VerboseLogger(FvObj)
        GenFdsGlobalVariable.VerboseLogger('################### Gen VTF ####################')
        self.GenVtfFile()
        # If a CAPSULE region is present, first lay out every non-capsule
        # region into a throw-away buffer so their contents are resolved
        # before the capsule region itself is generated.
        HasCapsuleRegion = False
        for RegionObj in self.RegionList:
            if RegionObj.RegionType == 'CAPSULE':
                HasCapsuleRegion = True
                break
        if HasCapsuleRegion:
            TempFdBuffer = StringIO.StringIO('')
            PreviousRegionStart = -1
            PreviousRegionSize = 1
            for RegionObj in self.RegionList :
                if RegionObj.RegionType == 'CAPSULE':
                    continue
                if RegionObj.Offset + RegionObj.Size <= PreviousRegionStart:
                    pass
                elif RegionObj.Offset <= PreviousRegionStart or (RegionObj.Offset >=PreviousRegionStart and RegionObj.Offset < PreviousRegionStart + PreviousRegionSize):
                    pass
                elif RegionObj.Offset > PreviousRegionStart + PreviousRegionSize:
                    # Fill the gap before this region with a pad region.
                    if not Flag:
                        GenFdsGlobalVariable.InfLogger('Padding region starting from offset 0x%X, with size 0x%X' %(PreviousRegionStart + PreviousRegionSize, RegionObj.Offset - (PreviousRegionStart + PreviousRegionSize)))
                    PadRegion = Region.Region()
                    PadRegion.Offset = PreviousRegionStart + PreviousRegionSize
                    PadRegion.Size = RegionObj.Offset - PadRegion.Offset
                    if not Flag:
                        PadRegion.AddToBuffer(TempFdBuffer, self.BaseAddress, self.BlockSizeList, self.ErasePolarity, GenFds.ImageBinDict, self.vtfRawDict, self.DefineVarDict)
                PreviousRegionStart = RegionObj.Offset
                PreviousRegionSize = RegionObj.Size
                #
                # Call each region's AddToBuffer function
                # (size violations are ignored in this pre-pass).
                #
                if PreviousRegionSize > self.Size:
                    pass
                GenFdsGlobalVariable.VerboseLogger('Call each region\'s AddToBuffer function')
                RegionObj.AddToBuffer (TempFdBuffer, self.BaseAddress, self.BlockSizeList, self.ErasePolarity, GenFds.ImageBinDict, self.vtfRawDict, self.DefineVarDict)
        # Real pass: regions must appear in ascending offset order, must not
        # overlap, and gaps between them are padded.
        FdBuffer = StringIO.StringIO('')
        PreviousRegionStart = -1
        PreviousRegionSize = 1
        for RegionObj in self.RegionList :
            if RegionObj.Offset + RegionObj.Size <= PreviousRegionStart:
                EdkLogger.error("GenFds", GENFDS_ERROR,
                                'Region offset 0x%X in wrong order with Region starting from 0x%X, size 0x%X\nRegions in FDF must have offsets appear in ascending order.'\
                                % (RegionObj.Offset, PreviousRegionStart, PreviousRegionSize))
            elif RegionObj.Offset <= PreviousRegionStart or (RegionObj.Offset >=PreviousRegionStart and RegionObj.Offset < PreviousRegionStart + PreviousRegionSize):
                EdkLogger.error("GenFds", GENFDS_ERROR,
                                'Region offset 0x%X overlaps with Region starting from 0x%X, size 0x%X' \
                                % (RegionObj.Offset, PreviousRegionStart, PreviousRegionSize))
            elif RegionObj.Offset > PreviousRegionStart + PreviousRegionSize:
                if not Flag:
                    GenFdsGlobalVariable.InfLogger('Padding region starting from offset 0x%X, with size 0x%X' %(PreviousRegionStart + PreviousRegionSize, RegionObj.Offset - (PreviousRegionStart + PreviousRegionSize)))
                PadRegion = Region.Region()
                PadRegion.Offset = PreviousRegionStart + PreviousRegionSize
                PadRegion.Size = RegionObj.Offset - PadRegion.Offset
                if not Flag:
                    PadRegion.AddToBuffer(FdBuffer, self.BaseAddress, self.BlockSizeList, self.ErasePolarity, GenFds.ImageBinDict, self.vtfRawDict, self.DefineVarDict)
            PreviousRegionStart = RegionObj.Offset
            PreviousRegionSize = RegionObj.Size
            #
            # Verify current region fits within allocated FD section Size
            #
            if PreviousRegionStart + PreviousRegionSize > self.Size:
                EdkLogger.error("GenFds", GENFDS_ERROR,
                                'FD %s size too small to fit region with offset 0x%X and size 0x%X'
                                % (self.FdUiName, PreviousRegionStart, PreviousRegionSize))
            #
            # Call each region's AddToBuffer function
            #
            GenFdsGlobalVariable.VerboseLogger('Call each region\'s AddToBuffer function')
            RegionObj.AddToBuffer (FdBuffer, self.BaseAddress, self.BlockSizeList, self.ErasePolarity, GenFds.ImageBinDict, self.vtfRawDict, self.DefineVarDict,Flag=Flag)
        #
        # Write the buffer contents to Fd file
        #
        GenFdsGlobalVariable.VerboseLogger('Write the buffer contents to Fd file')
        if not Flag:
            SaveFileOnChange(FdFileName, FdBuffer.getvalue())
        FdBuffer.close()
        # Cache the result so repeated GenFd() calls reuse this image.
        GenFds.ImageBinDict[self.FdUiName.upper() + 'fd'] = FdFileName
        return FdFileName

    ## GenVtfFile() method
    #
    #   Collect the (base address, size) of every FV placed in this FD and,
    #   when some VTF's FV list is fully contained in this FD, generate the
    #   VTF and remember its raw data in self.vtfRawDict.
    #
    #   @param  self        The object pointer
    #
    def GenVtfFile (self) :
        #
        # Get this Fd's all Fv name
        #
        FvAddDict ={}
        FvList = []
        for RegionObj in self.RegionList:
            if RegionObj.RegionType == 'FV':
                if len(RegionObj.RegionDataList) == 1:
                    RegionData = RegionObj.RegionDataList[0]
                    FvList.append(RegionData.upper())
                    FvAddDict[RegionData.upper()] = (int(self.BaseAddress,16) + \
                                                RegionObj.Offset, RegionObj.Size)
                else:
                    # Several FVs share one region: lay them out back to back,
                    # each FV's size computed from its declared block statements.
                    Offset = RegionObj.Offset
                    for RegionData in RegionObj.RegionDataList:
                        FvList.append(RegionData.upper())
                        FvObj = GenFdsGlobalVariable.FdfParser.Profile.FvDict.get(RegionData.upper())
                        if len(FvObj.BlockSizeList) < 1:
                            EdkLogger.error("GenFds", GENFDS_ERROR,
                                            'FV.%s must point out FVs blocksize and Fv BlockNum' \
                                            % FvObj.UiFvName)
                        else:
                            Size = 0
                            for blockStatement in FvObj.BlockSizeList:
                                Size = Size + blockStatement[0] * blockStatement[1]
                            FvAddDict[RegionData.upper()] = (int(self.BaseAddress,16) + \
                                                             Offset, Size)
                            Offset = Offset + Size
        #
        # Check whether this Fd need VTF
        #
        Flag = False
        for VtfObj in GenFdsGlobalVariable.FdfParser.Profile.VtfList:
            compLocList = VtfObj.GetFvList()
            if set(compLocList).issubset(FvList):
                Flag = True
                break
        if Flag == True:
            self.vtfRawDict = VtfObj.GenVtf(FvAddDict)

    ## GenFlashMap() method -- placeholder, not implemented for FD.
    #
    #   @param  self        The object pointer
    #
    def GenFlashMap (self):
        pass
| 45.655502 | 222 | 0.581639 |
import Region
import Fv
import Common.LongFilePathOs as os
import StringIO
import sys
from struct import *
from GenFdsGlobalVariable import GenFdsGlobalVariable
from CommonDataClass.FdfClass import FDClassObject
from Common import EdkLogger
from Common.BuildToolError import *
from Common.Misc import SaveFileOnChange
from GenFds import GenFds
ClassObject):
    def __init__(self):
        # Delegate all field initialisation to the FDF-parsed base class.
        FDClassObject.__init__(self)
    def GenFd (self, Flag = False):
        """Generate the FD image (or reuse the cached one); returns its file name.

        Flag=True lays out regions without writing the output file.
        """
        if self.FdUiName.upper() + 'fd' in GenFds.ImageBinDict.keys():
            return GenFds.ImageBinDict[self.FdUiName.upper() + 'fd']
        FdFileName = os.path.join(GenFdsGlobalVariable.FvDir, self.FdUiName + '.fd')
        if not Flag:
            GenFdsGlobalVariable.InfLogger("\nFd File Name:%s (%s)" %(self.FdUiName, FdFileName))
        # Block statements must add up to exactly the declared FD size.
        Offset = 0x00
        for item in self.BlockSizeList:
            Offset = Offset + item[0] * item[1]
        if Offset != self.Size:
            EdkLogger.error("GenFds", GENFDS_ERROR, 'FD %s Size not consistent with block array' % self.FdUiName)
        GenFdsGlobalVariable.VerboseLogger('Following Fv will be add to Fd !!!')
        for FvObj in GenFdsGlobalVariable.FdfParser.Profile.FvDict:
            GenFdsGlobalVariable.VerboseLogger(FvObj)
        GenFdsGlobalVariable.VerboseLogger('################### Gen VTF ####################')
        self.GenVtfFile()
        # With a CAPSULE region present, do a throw-away pre-pass over the
        # non-capsule regions so their contents are resolved first.
        HasCapsuleRegion = False
        for RegionObj in self.RegionList:
            if RegionObj.RegionType == 'CAPSULE':
                HasCapsuleRegion = True
                break
        if HasCapsuleRegion:
            TempFdBuffer = StringIO.StringIO('')
            PreviousRegionStart = -1
            PreviousRegionSize = 1
            for RegionObj in self.RegionList :
                if RegionObj.RegionType == 'CAPSULE':
                    continue
                if RegionObj.Offset + RegionObj.Size <= PreviousRegionStart:
                    pass
                elif RegionObj.Offset <= PreviousRegionStart or (RegionObj.Offset >=PreviousRegionStart and RegionObj.Offset < PreviousRegionStart + PreviousRegionSize):
                    pass
                elif RegionObj.Offset > PreviousRegionStart + PreviousRegionSize:
                    # Fill the gap before this region with a pad region.
                    if not Flag:
                        GenFdsGlobalVariable.InfLogger('Padding region starting from offset 0x%X, with size 0x%X' %(PreviousRegionStart + PreviousRegionSize, RegionObj.Offset - (PreviousRegionStart + PreviousRegionSize)))
                    PadRegion = Region.Region()
                    PadRegion.Offset = PreviousRegionStart + PreviousRegionSize
                    PadRegion.Size = RegionObj.Offset - PadRegion.Offset
                    if not Flag:
                        PadRegion.AddToBuffer(TempFdBuffer, self.BaseAddress, self.BlockSizeList, self.ErasePolarity, GenFds.ImageBinDict, self.vtfRawDict, self.DefineVarDict)
                PreviousRegionStart = RegionObj.Offset
                PreviousRegionSize = RegionObj.Size
                # Size violations are ignored during this pre-pass.
                if PreviousRegionSize > self.Size:
                    pass
                GenFdsGlobalVariable.VerboseLogger('Call each region\'s AddToBuffer function')
                RegionObj.AddToBuffer (TempFdBuffer, self.BaseAddress, self.BlockSizeList, self.ErasePolarity, GenFds.ImageBinDict, self.vtfRawDict, self.DefineVarDict)
        # Real pass: regions must be in ascending offset order, must not
        # overlap, and gaps between them are padded.
        FdBuffer = StringIO.StringIO('')
        PreviousRegionStart = -1
        PreviousRegionSize = 1
        for RegionObj in self.RegionList :
            if RegionObj.Offset + RegionObj.Size <= PreviousRegionStart:
                EdkLogger.error("GenFds", GENFDS_ERROR,
                                'Region offset 0x%X in wrong order with Region starting from 0x%X, size 0x%X\nRegions in FDF must have offsets appear in ascending order.'\
                                % (RegionObj.Offset, PreviousRegionStart, PreviousRegionSize))
            elif RegionObj.Offset <= PreviousRegionStart or (RegionObj.Offset >=PreviousRegionStart and RegionObj.Offset < PreviousRegionStart + PreviousRegionSize):
                EdkLogger.error("GenFds", GENFDS_ERROR,
                                'Region offset 0x%X overlaps with Region starting from 0x%X, size 0x%X' \
                                % (RegionObj.Offset, PreviousRegionStart, PreviousRegionSize))
            elif RegionObj.Offset > PreviousRegionStart + PreviousRegionSize:
                if not Flag:
                    GenFdsGlobalVariable.InfLogger('Padding region starting from offset 0x%X, with size 0x%X' %(PreviousRegionStart + PreviousRegionSize, RegionObj.Offset - (PreviousRegionStart + PreviousRegionSize)))
                PadRegion = Region.Region()
                PadRegion.Offset = PreviousRegionStart + PreviousRegionSize
                PadRegion.Size = RegionObj.Offset - PadRegion.Offset
                if not Flag:
                    PadRegion.AddToBuffer(FdBuffer, self.BaseAddress, self.BlockSizeList, self.ErasePolarity, GenFds.ImageBinDict, self.vtfRawDict, self.DefineVarDict)
            PreviousRegionStart = RegionObj.Offset
            PreviousRegionSize = RegionObj.Size
            # Verify the current region fits within the allocated FD size.
            if PreviousRegionStart + PreviousRegionSize > self.Size:
                EdkLogger.error("GenFds", GENFDS_ERROR,
                                'FD %s size too small to fit region with offset 0x%X and size 0x%X'
                                % (self.FdUiName, PreviousRegionStart, PreviousRegionSize))
            # Let the region render itself into the FD buffer.
            GenFdsGlobalVariable.VerboseLogger('Call each region\'s AddToBuffer function')
            RegionObj.AddToBuffer (FdBuffer, self.BaseAddress, self.BlockSizeList, self.ErasePolarity, GenFds.ImageBinDict, self.vtfRawDict, self.DefineVarDict,Flag=Flag)
        GenFdsGlobalVariable.VerboseLogger('Write the buffer contents to Fd file')
        if not Flag:
            SaveFileOnChange(FdFileName, FdBuffer.getvalue())
        FdBuffer.close()
        # Cache the result so repeated GenFd() calls reuse this image.
        GenFds.ImageBinDict[self.FdUiName.upper() + 'fd'] = FdFileName
        return FdFileName
    def GenVtfFile (self) :
        """Generate the VTF (Volume Top File) for this FD, if one applies.

        Scans this FD's FV regions to build a map of FV name -> (absolute base
        address, size); if some [VTF] section's component FVs are all contained
        in this FD, generates the VTF and stores its raw data in
        self.vtfRawDict.
        """
        #
        # FvAddDict: FV ui-name (upper-cased) -> (absolute base address, size)
        #
        FvAddDict ={}
        FvList = []
        for RegionObj in self.RegionList:
            if RegionObj.RegionType == 'FV':
                if len(RegionObj.RegionDataList) == 1:
                    # A single FV fills the whole region: use the region's own
                    # offset and size directly.
                    RegionData = RegionObj.RegionDataList[0]
                    FvList.append(RegionData.upper())
                    FvAddDict[RegionData.upper()] = (int(self.BaseAddress,16) + \
                                                RegionObj.Offset, RegionObj.Size)
                else:
                    # Several FVs share the region: each FV's size is computed
                    # from its own block statements, laid out back to back.
                    Offset = RegionObj.Offset
                    for RegionData in RegionObj.RegionDataList:
                        FvList.append(RegionData.upper())
                        FvObj = GenFdsGlobalVariable.FdfParser.Profile.FvDict.get(RegionData.upper())
                        if len(FvObj.BlockSizeList) < 1:
                            EdkLogger.error("GenFds", GENFDS_ERROR,
                                            'FV.%s must point out FVs blocksize and Fv BlockNum' \
                                            % FvObj.UiFvName)
                        else:
                            # Total FV size = sum of (block size * block count).
                            Size = 0
                            for blockStatement in FvObj.BlockSizeList:
                                Size = Size + blockStatement[0] * blockStatement[1]
                            FvAddDict[RegionData.upper()] = (int(self.BaseAddress,16) + \
                                                             Offset, Size)
                            Offset = Offset + Size
        #
        # Check whether this Fd need VTF
        #
        Flag = False
        for VtfObj in GenFdsGlobalVariable.FdfParser.Profile.VtfList:
            compLocList = VtfObj.GetFvList()
            if set(compLocList).issubset(FvList):
                Flag = True
                break
        if Flag == True:
            # NOTE(review): relies on VtfObj still being bound to the matching
            # [VTF] section found by the loop above.
            self.vtfRawDict = VtfObj.GenVtf(FvAddDict)
    ## generate flash map file
    #
    #   @param  self        The object pointer
    #
    def GenFlashMap (self):
        # Intentionally a no-op: flash-map generation is not implemented for
        # this object (placeholder kept so callers can invoke it uniformly).
        pass
| true | true |
f735d4a1ab3e9f74bfda0300638e91a89fa8e131 | 11,805 | py | Python | samples/mnist_vae.py | piojanu/tf_utils | 169bd3334dd11954cf8f411f2c918f76cd609fab | [
"MIT"
] | 1 | 2019-04-05T11:41:02.000Z | 2019-04-05T11:41:02.000Z | samples/mnist_vae.py | piojanu/tf_utils | 169bd3334dd11954cf8f411f2c918f76cd609fab | [
"MIT"
] | null | null | null | samples/mnist_vae.py | piojanu/tf_utils | 169bd3334dd11954cf8f411f2c918f76cd609fab | [
"MIT"
] | null | null | null | import argparse
import io
import os.path
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tqdm import tqdm
from tf_utils import AttrDict, attrdict_from_yaml, lazy_property_with_scope, share_variables
tfd = tfp.distributions
tfl = tf.layers
class Model(object):
    """Variational autoencoder (VAE) for MNIST.

    Every graph piece is exposed as a ``lazy_property_with_scope``: the first
    access builds that part of the TF graph inside its own variable scope, so
    ``__init__`` simply touches each property once to construct the model.
    """

    def __init__(self, data, config):
        """
        Args:
            data: batched image tensor (the dataset iterator's output).
            config: AttrDict of hyper-parameters (code_size, hidden_size,
                beta, learning_rate, n_samples, ...).
        """
        # Initialize attributes
        self.data = data
        self.data_shape = list(self.data.shape[1:])
        self.config = config

        # Build model (each attribute access instantiates that subgraph once).
        self.prior
        self.posterior
        self.code
        self.likelihood
        self.sample
        self.samples
        self.log_prob
        self.divergence
        self.elbo
        self.loss
        self.optimiser
        self.gradients
        self.optimise

        # Define summaries
        self.summary
        self.images

    @lazy_property_with_scope
    def prior(self):
        """Standard normal distribution prior."""
        return tfd.MultivariateNormalDiag(
            loc=tf.zeros(self.config.code_size),
            scale_diag=tf.ones(self.config.code_size))

    @lazy_property_with_scope(scope_name="encoder")
    def posterior(self):
        """a.k.a the encoder"""
        x = tfl.Flatten()(self.data)
        x = tfl.Dense(self.config.hidden_size, activation='relu')(x)
        x = tfl.Dense(self.config.hidden_size, activation='relu')(x)
        loc = tfl.Dense(self.config.code_size)(x)
        scale = tfl.Dense(self.config.code_size, activation='softplus')(x)
        return tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale)

    @lazy_property_with_scope
    def code(self):
        """Code sample from the posterior."""
        return self.posterior.sample()

    @lazy_property_with_scope(scope_name="decoder")
    def likelihood(self):
        """a.k.a the decoder."""
        return self._make_decoder(self.code)

    @lazy_property_with_scope
    def sample(self):
        """Sample example."""
        return self._make_decoder(self.prior.sample(1))

    @lazy_property_with_scope
    def samples(self):
        """Generated examples."""
        return self._make_decoder(self.prior.sample(self.config.n_samples)).mean()

    @lazy_property_with_scope
    def log_prob(self):
        """Log. likelihood of data under code sampled from posterior."""
        return self.likelihood.log_prob(self.data)

    @lazy_property_with_scope
    def divergence(self):
        """KL divergence between posterior and prior."""
        return tfd.kl_divergence(self.posterior, self.prior)

    @lazy_property_with_scope
    def elbo(self):
        """Evidence lower bound with a Lagrangian multiplier beta."""
        return self.log_prob - self.config.beta * self.divergence

    @lazy_property_with_scope
    def loss(self):
        """Negative ELBO reduced over the whole batch and every pixel."""
        return -tf.reduce_mean(self.elbo)

    @lazy_property_with_scope
    def optimiser(self):
        """ADAM optimiser."""
        return tf.train.AdamOptimizer(self.config.learning_rate)

    @lazy_property_with_scope
    def gradients(self):
        """Variables values and gradients of the loss (negative ELBO)."""
        return self.optimiser.compute_gradients(self.loss)

    @lazy_property_with_scope
    def optimise(self):
        """Optimise the loss op. (apply gradients)."""
        return self.optimiser.apply_gradients(self.gradients)

    @lazy_property_with_scope
    def summary(self):
        """Merged the model's summaries."""
        return tf.summary.merge(self._define_summaries())

    @lazy_property_with_scope
    def images(self):
        """Image summary of generated examples."""
        images = tf.reshape(self.samples, (-1, self.samples.shape[2]))  # Create col. of images
        images = tf.expand_dims(images, axis=0)    # Add batch dim.
        images = tf.expand_dims(images, axis=-1)   # Add channel dim.
        return tf.summary.image("samples", images, max_outputs=1)

    @share_variables
    def _make_decoder(self, code):
        """Build decoder network (Bernoulli likelihood over pixels)."""
        x = tfl.Dense(self.config.hidden_size, activation='relu')(code)
        x = tfl.Dense(self.config.hidden_size, activation='relu')(x)
        # np.prod: np.product was deprecated and removed in NumPy 2.0.
        logits = tfl.Dense(np.prod(self.data_shape))(x)
        logits = tf.reshape(logits, [-1] + self.data_shape)
        return tfd.Independent(tfd.Bernoulli(logits), 2)

    def _define_summaries(self):
        """Define the model's summaries."""
        summaries = []

        # Learning rate.
        # NOTE(review): _lr is a private attribute of AdamOptimizer; it works
        # on TF1 but has no compatibility guarantee.
        summaries.append(tf.summary.scalar("learning_rate",
                                           self.optimiser._lr))
        # ELBO and loss
        summaries.append(tf.summary.histogram("evidence/lower_bound_log_prob/image",
                                              self.elbo))
        summaries.append(tf.summary.scalar("mean/evidence/lower_bound_log_prob/image",
                                           tf.reduce_mean(self.elbo)))
        summaries.append(tf.summary.scalar("loss",
                                           self.loss))
        # KL divergence
        summaries.append(tf.summary.histogram("divergence",
                                              self.divergence))
        summaries.append(tf.summary.scalar("mean/divergence",
                                           tf.reduce_mean(self.divergence)))
        # Gradients and variables norm
        gradients, variables = list(zip(*self.gradients))
        for gradient, variable in zip(gradients, variables):
            summaries.append(tf.summary.histogram("gradients/batch_norm/" + variable.name,
                                                  tf.norm(gradient, axis=0)))
            summaries.append(tf.summary.histogram("variables/batch_norm/" + variable.name,
                                                  tf.norm(variable, axis=0)))
        summaries.append(tf.summary.scalar("gradients/global_norm",
                                           tf.global_norm(gradients)))
        summaries.append(tf.summary.scalar("variables/global_norm",
                                           tf.global_norm(variables)))
        # Prior and posterior entropy
        summaries.append(tf.summary.histogram("prior/entropy",
                                              self.prior.entropy()))
        summaries.append(tf.summary.scalar("mean/prior/entropy",
                                           tf.reduce_mean(self.prior.entropy())))
        summaries.append(tf.summary.histogram("posterior/entropy",
                                              self.posterior.entropy()))
        summaries.append(tf.summary.scalar("mean/posterior/entropy",
                                           tf.reduce_mean(self.posterior.entropy())))
        # Prior and posterior log_prob
        summaries.append(tf.summary.histogram("prior/log_prob/image",
                                              self.sample.log_prob(self.data)))
        summaries.append(tf.summary.scalar("mean/prior/log_prob/image",
                                           tf.reduce_mean(self.sample.log_prob(self.data))))
        summaries.append(tf.summary.histogram("posterior/log_prob/image",
                                              self.log_prob))
        summaries.append(tf.summary.scalar("mean/posterior/log_prob/image",
                                           tf.reduce_mean(self.log_prob)))
        return summaries
def plot_codes(codes, labels):
    """Render a 2-D latent-code scatter plot as a TF image summary.

    Args:
        codes: array of shape (N, 2) with latent coordinates.
        labels: length-N array of class labels used for point colors.

    Returns:
        A ``tf.Summary`` protobuf containing the rendered PNG.
    """
    # Scatter plot
    fig, ax = plt.subplots()
    ax.scatter(codes[:, 0], codes[:, 1], s=2, c=labels, alpha=0.1)
    ax.set_aspect('equal')
    ax.set_xlim(codes.min() - .1, codes.max() + .1)
    ax.set_ylim(codes.min() - .1, codes.max() + .1)
    ax.tick_params(
        axis='both', which='both', left=False, bottom=False,
        labelleft=False, labelbottom=False)

    # Save to io buffer
    buf = io.BytesIO()
    fig.savefig(buf, format='png')
    buf.seek(0)
    # Close the figure explicitly: this function runs once per epoch and
    # unclosed pyplot figures accumulate in matplotlib's global state
    # (memory leak plus the "More than 20 figures" warning).
    plt.close(fig)

    # Create image summary
    image = tf.Summary.Image(encoded_image_string=buf.getvalue())
    summary = tf.Summary(value=[tf.Summary.Value(tag="images/codes/image", image=image)])
    return summary
def create_datasets(train_set, test_set, config):
    """Build TF1-style train/test input pipelines sharing one iterator.

    Returns:
        (next_batch, train_init_op, test_init_op): the iterator's output
        tensor and the two ops that (re)point the iterator at each dataset.
    """
    # Map pixel values into [0, 1] floats and reshuffle the full training set
    # each epoch.
    train_dataset = tf.data.Dataset.from_tensor_slices(
        tf.convert_to_tensor(train_set, dtype=tf.float32)) \
        .map(lambda x: x / 255) \
        .shuffle(train_set.shape[0]) \
        .batch(config.batch_size)
    # The whole test set is evaluated as one batch.
    test_dataset = tf.data.Dataset.from_tensor_slices(
        tf.convert_to_tensor(test_set, dtype=tf.float32)) \
        .map(lambda x: x / 255) \
        .batch(test_set.shape[0])

    # A reinitializable iterator lets train and eval feed the same tensor.
    iterator = tf.data.Iterator.from_structure(train_dataset.output_types,
                                               train_dataset.output_shapes)
    next_batch = iterator.get_next()

    train_init_op = iterator.make_initializer(train_dataset)
    test_init_op = iterator.make_initializer(test_dataset)

    return next_batch, train_init_op, test_init_op
def train(model, train_init_op, test_init_op, test_labels, config):
    """Run the train/eval loop, writing TensorBoard summaries for both phases.

    Each epoch first evaluates on the test split (summaries, generated images
    and a latent-code scatter plot), then runs one full pass over the
    training set, logging train summaries every ``config.log_every`` steps.
    """
    with tf.train.MonitoredSession() as sess:
        summary_writer_train = tf.summary.FileWriter(
            os.path.join(config.logs_dir, "train"), sess.graph)
        summary_writer_test = tf.summary.FileWriter(
            os.path.join(config.logs_dir, "test"))

        # Global step counter shared by the train and test summary streams.
        step = 0
        for epoch in tqdm(range(config.epochs)):
            # Test
            sess.run(test_init_op)
            test_summary, test_images, test_codes = sess.run(
                [model.summary, model.images, model.code])
            summary_writer_test.add_summary(test_summary, step)
            summary_writer_test.add_summary(test_images, step)

            # Plot codes
            # TODO: Use TensorBoard projector.
            codes = plot_codes(test_codes, test_labels)
            summary_writer_test.add_summary(codes, step)

            # Train
            # TODO: Add tfu.loop that will run whole epoch, have callbacks and reduce returns.
            sess.run(train_init_op)
            while True:
                # Iterate until the dataset iterator is exhausted (epoch end).
                try:
                    fetches = AttrDict({"optimise": model.optimise})
                    if step % config.log_every == 0:
                        fetches.summary = model.summary

                    returns = sess.run(fetches)

                    if "summary" in returns:
                        summary_writer_train.add_summary(returns.summary, step)
                    step += 1
                except tf.errors.OutOfRangeError:
                    break

        summary_writer_train.close()
        summary_writer_test.close()
if __name__ == "__main__":
    # Entry point: parse CLI config, load MNIST, build pipelines and train.
    parser = argparse.ArgumentParser(description="Train VAE for MNIST dataset.")
    parser.add_argument('--config', type=str, default="", help="YAML formatted configuration")
    user_config_json = parser.parse_args().config

    # Defaults; any key can be overridden by the YAML passed via --config.
    default_config = AttrDict({
        "batch_size": 100,
        "epochs": 20,
        "n_samples": 10,
        "hidden_size": 200,
        "code_size": 2,
        "beta": 1.,
        "learning_rate": 0.001,
        "logs_dir": "./logs",
        "log_every": 100
    })
    config = default_config.nested_update(attrdict_from_yaml(user_config_json))

    (train_set, _), (test_set, test_labels) = tf.keras.datasets.mnist.load_data()
    # TODO: Use whole test set, but batch it like train set and average summaries.
    # https://stackoverflow.com/questions/40788785/how-to-average-summaries-over-multiple-batches
    train_set, test_set, test_labels = train_set[:], test_set[:5000], test_labels[:5000]
    next_batch, train_init_op, test_init_op = create_datasets(train_set, test_set, config)

    model = Model(next_batch, config)
    train(model, train_init_op, test_init_op, test_labels, config)
| 38.452769 | 103 | 0.607624 | import argparse
import io
import os.path
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tqdm import tqdm
from tf_utils import AttrDict, attrdict_from_yaml, lazy_property_with_scope, share_variables
tfd = tfp.distributions
tfl = tf.layers
class Model(object):
def __init__(self, data, config):
self.data = data
self.data_shape = list(self.data.shape[1:])
self.config = config
self.prior
self.posterior
self.code
self.likelihood
self.sample
self.samples
self.log_prob
self.divergence
self.elbo
self.loss
self.optimiser
self.gradients
self.optimise
self.summary
self.images
@lazy_property_with_scope
def prior(self):
return tfd.MultivariateNormalDiag(
loc=tf.zeros(self.config.code_size),
scale_diag=tf.ones(self.config.code_size))
@lazy_property_with_scope(scope_name="encoder")
def posterior(self):
x = tfl.Flatten()(self.data)
x = tfl.Dense(self.config.hidden_size, activation='relu')(x)
x = tfl.Dense(self.config.hidden_size, activation='relu')(x)
loc = tfl.Dense(self.config.code_size)(x)
scale = tfl.Dense(self.config.code_size, activation='softplus')(x)
return tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale)
@lazy_property_with_scope
def code(self):
return self.posterior.sample()
@lazy_property_with_scope(scope_name="decoder")
def likelihood(self):
return self._make_decoder(self.code)
@lazy_property_with_scope
def sample(self):
return self._make_decoder(self.prior.sample(1))
@lazy_property_with_scope
def samples(self):
return self._make_decoder(self.prior.sample(self.config.n_samples)).mean()
@lazy_property_with_scope
def log_prob(self):
return self.likelihood.log_prob(self.data)
@lazy_property_with_scope
def divergence(self):
return tfd.kl_divergence(self.posterior, self.prior)
@lazy_property_with_scope
def elbo(self):
return self.log_prob - self.config.beta * self.divergence
@lazy_property_with_scope
def loss(self):
return -tf.reduce_mean(self.elbo)
@lazy_property_with_scope
def optimiser(self):
return tf.train.AdamOptimizer(self.config.learning_rate)
@lazy_property_with_scope
def gradients(self):
return self.optimiser.compute_gradients(self.loss)
@lazy_property_with_scope
def optimise(self):
return self.optimiser.apply_gradients(self.gradients)
@lazy_property_with_scope
def summary(self):
return tf.summary.merge(self._define_summaries())
@lazy_property_with_scope
def images(self):
images = tf.reshape(self.samples, (-1, self.samples.shape[2]))
images = tf.expand_dims(images, axis=0)
images = tf.expand_dims(images, axis=-1)
return tf.summary.image("samples", images, max_outputs=1)
@share_variables
def _make_decoder(self, code):
x = tfl.Dense(self.config.hidden_size, activation='relu')(code)
x = tfl.Dense(self.config.hidden_size, activation='relu')(x)
logits = tfl.Dense(np.product(self.data_shape))(x)
logits = tf.reshape(logits, [-1] + self.data_shape)
return tfd.Independent(tfd.Bernoulli(logits), 2)
def _define_summaries(self):
summaries = []
summaries.append(tf.summary.scalar("learning_rate",
self.optimiser._lr))
summaries.append(tf.summary.histogram("evidence/lower_bound_log_prob/image",
self.elbo))
summaries.append(tf.summary.scalar("mean/evidence/lower_bound_log_prob/image",
tf.reduce_mean(self.elbo)))
summaries.append(tf.summary.scalar("loss",
self.loss))
summaries.append(tf.summary.histogram("divergence",
self.divergence))
summaries.append(tf.summary.scalar("mean/divergence",
tf.reduce_mean(self.divergence)))
gradients, variables = list(zip(*self.gradients))
for gradient, variable in zip(gradients, variables):
summaries.append(tf.summary.histogram("gradients/batch_norm/" + variable.name,
tf.norm(gradient, axis=0)))
summaries.append(tf.summary.histogram("variables/batch_norm/" + variable.name,
tf.norm(variable, axis=0)))
summaries.append(tf.summary.scalar("gradients/global_norm",
tf.global_norm(gradients)))
summaries.append(tf.summary.scalar("variables/global_norm",
tf.global_norm(variables)))
summaries.append(tf.summary.histogram("prior/entropy",
self.prior.entropy()))
summaries.append(tf.summary.scalar("mean/prior/entropy",
tf.reduce_mean(self.prior.entropy())))
summaries.append(tf.summary.histogram("posterior/entropy",
self.posterior.entropy()))
summaries.append(tf.summary.scalar("mean/posterior/entropy",
tf.reduce_mean(self.posterior.entropy())))
summaries.append(tf.summary.histogram("prior/log_prob/image",
self.sample.log_prob(self.data)))
summaries.append(tf.summary.scalar("mean/prior/log_prob/image",
tf.reduce_mean(self.sample.log_prob(self.data))))
summaries.append(tf.summary.histogram("posterior/log_prob/image",
self.log_prob))
summaries.append(tf.summary.scalar("mean/posterior/log_prob/image",
tf.reduce_mean(self.log_prob)))
return summaries
def plot_codes(codes, labels):
fig, ax = plt.subplots()
ax.scatter(codes[:, 0], codes[:, 1], s=2, c=labels, alpha=0.1)
ax.set_aspect('equal')
ax.set_xlim(codes.min() - .1, codes.max() + .1)
ax.set_ylim(codes.min() - .1, codes.max() + .1)
ax.tick_params(
axis='both', which='both', left=False, bottom=False,
labelleft=False, labelbottom=False)
buf = io.BytesIO()
fig.savefig(buf, format='png')
buf.seek(0)
image = tf.Summary.Image(encoded_image_string=buf.getvalue())
summary = tf.Summary(value=[tf.Summary.Value(tag="images/codes/image", image=image)])
return summary
def create_datasets(train_set, test_set, config):
train_dataset = tf.data.Dataset.from_tensor_slices(
tf.convert_to_tensor(train_set, dtype=tf.float32)) \
.map(lambda x: x / 255) \
.shuffle(train_set.shape[0]) \
.batch(config.batch_size)
test_dataset = tf.data.Dataset.from_tensor_slices(
tf.convert_to_tensor(test_set, dtype=tf.float32)) \
.map(lambda x: x / 255) \
.batch(test_set.shape[0])
iterator = tf.data.Iterator.from_structure(train_dataset.output_types,
train_dataset.output_shapes)
next_batch = iterator.get_next()
train_init_op = iterator.make_initializer(train_dataset)
test_init_op = iterator.make_initializer(test_dataset)
return next_batch, train_init_op, test_init_op
def train(model, train_init_op, test_init_op, test_labels, config):
with tf.train.MonitoredSession() as sess:
summary_writer_train = tf.summary.FileWriter(
os.path.join(config.logs_dir, "train"), sess.graph)
summary_writer_test = tf.summary.FileWriter(
os.path.join(config.logs_dir, "test"))
step = 0
for epoch in tqdm(range(config.epochs)):
sess.run(test_init_op)
test_summary, test_images, test_codes = sess.run(
[model.summary, model.images, model.code])
summary_writer_test.add_summary(test_summary, step)
summary_writer_test.add_summary(test_images, step)
codes = plot_codes(test_codes, test_labels)
summary_writer_test.add_summary(codes, step)
sess.run(train_init_op)
while True:
try:
fetches = AttrDict({"optimise": model.optimise})
if step % config.log_every == 0:
fetches.summary = model.summary
returns = sess.run(fetches)
if "summary" in returns:
summary_writer_train.add_summary(returns.summary, step)
step += 1
except tf.errors.OutOfRangeError:
break
summary_writer_train.close()
summary_writer_test.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Train VAE for MNIST dataset.")
parser.add_argument('--config', type=str, default="", help="YAML formatted configuration")
user_config_json = parser.parse_args().config
default_config = AttrDict({
"batch_size": 100,
"epochs": 20,
"n_samples": 10,
"hidden_size": 200,
"code_size": 2,
"beta": 1.,
"learning_rate": 0.001,
"logs_dir": "./logs",
"log_every": 100
})
config = default_config.nested_update(attrdict_from_yaml(user_config_json))
(train_set, _), (test_set, test_labels) = tf.keras.datasets.mnist.load_data()
train_set, test_set, test_labels = train_set[:], test_set[:5000], test_labels[:5000]
next_batch, train_init_op, test_init_op = create_datasets(train_set, test_set, config)
model = Model(next_batch, config)
train(model, train_init_op, test_init_op, test_labels, config)
| true | true |
f735d53be2e6d930f20c1daa49019a1b2e41a6e4 | 1,898 | py | Python | General/state_machine.py | TomE8/drones | c92865556dd3df2d5f5b73589cd48e413bff3a3a | [
"MIT"
] | 14 | 2018-10-29T00:52:18.000Z | 2022-03-23T20:07:11.000Z | General/state_machine.py | TomE8/drones | c92865556dd3df2d5f5b73589cd48e413bff3a3a | [
"MIT"
] | 4 | 2020-07-12T05:19:05.000Z | 2020-09-20T12:40:47.000Z | General/state_machine.py | TomE8/drones | c92865556dd3df2d5f5b73589cd48e413bff3a3a | [
"MIT"
] | 2 | 2019-03-08T01:36:47.000Z | 2019-09-12T04:07:19.000Z | #!/usr/bin/env python3
from .general_common import States
from Control.control_common import ButtonIndex
def getState(state, my_joystick):
    """Return the next state of the drone control state machine.

    The transition is determined by the current ``state`` and which joystick
    buttons are pressed (button value == 1). If nothing matches, the current
    state is returned unchanged.
    """
    # Unconditional transitions: these states always advance.
    if state == States.STOP:
        return States.IDLE
    if state == States.STOP_BEFORE_EXIT:
        return States.EXIT

    # Button-driven transitions, checked in priority order per state.
    transitions = {
        States.IDLE: (
            (ButtonIndex.SIDE_BUTTON, States.STAND_BY),
            (ButtonIndex.EXIT, States.STOP_BEFORE_EXIT),
        ),
        States.STAND_BY: (
            (ButtonIndex.TRIGGER, States.MANUAL_CONTROL),
            (ButtonIndex.SIDE_BUTTON, States.STOP),
            (ButtonIndex.EXIT, States.STOP_BEFORE_EXIT),
        ),
        States.MANUAL_CONTROL: (
            (ButtonIndex.HOVERING, States.HOVERING),
            (ButtonIndex.TRIGGER, States.STOP),
            (ButtonIndex.EXIT, States.STOP_BEFORE_EXIT),
        ),
        States.HOVERING: (
            (ButtonIndex.HOVERING, States.MANUAL_CONTROL),
            (ButtonIndex.TRIGGER, States.STOP),
            (ButtonIndex.EXIT, States.STOP_BEFORE_EXIT),
        ),
    }
    for button, next_state in transitions.get(state, ()):
        if my_joystick.get_button_val(button) == 1:
            return next_state

    return state
| 45.190476 | 70 | 0.559536 |
from .general_common import States
from Control.control_common import ButtonIndex
def getState(state, my_joystick):
if state == States.IDLE:
if my_joystick.get_button_val(ButtonIndex.SIDE_BUTTON) == 1:
return States.STAND_BY
elif my_joystick.get_button_val(ButtonIndex.EXIT) == 1:
return States.STOP_BEFORE_EXIT
elif state==States.STAND_BY:
if my_joystick.get_button_val(ButtonIndex.TRIGGER) == 1:
return States.MANUAL_CONTROL
elif my_joystick.get_button_val(ButtonIndex.SIDE_BUTTON) == 1:
return States.STOP
elif my_joystick.get_button_val(ButtonIndex.EXIT) == 1:
return States.STOP_BEFORE_EXIT
elif state==States.MANUAL_CONTROL:
if my_joystick.get_button_val(ButtonIndex.HOVERING) == 1:
return States.HOVERING
if my_joystick.get_button_val(ButtonIndex.TRIGGER) == 1:
return States.STOP
if my_joystick.get_button_val(ButtonIndex.EXIT) == 1:
return States.STOP_BEFORE_EXIT
elif state==States.HOVERING:
if my_joystick.get_button_val(ButtonIndex.HOVERING) == 1:
return States.MANUAL_CONTROL
if my_joystick.get_button_val(ButtonIndex.TRIGGER) == 1:
return States.STOP
if my_joystick.get_button_val(ButtonIndex.EXIT) == 1:
return States.STOP_BEFORE_EXIT
elif state==States.STOP:
return States.IDLE
elif state == States.STOP_BEFORE_EXIT:
return States.EXIT
return state
| true | true |
f735d6da81a04d6a2b61934e81a4273b20c11569 | 3,568 | py | Python | FasterRCNN/layers/roi_align.py | pkyIntelligence/FasterRCNN | 230953938efdba8f8c127fcc0bb746fcce8d9463 | [
"MIT"
] | null | null | null | FasterRCNN/layers/roi_align.py | pkyIntelligence/FasterRCNN | 230953938efdba8f8c127fcc0bb746fcce8d9463 | [
"MIT"
] | null | null | null | FasterRCNN/layers/roi_align.py | pkyIntelligence/FasterRCNN | 230953938efdba8f8c127fcc0bb746fcce8d9463 | [
"MIT"
] | null | null | null | import torch
import math
from torch import nn
from ..utils.utils import point_interpolate
class ROIAlign(nn.Module):
    def __init__(self, output_size, spatial_scale, sampling_ratio):
        """
        Args:
            output_size (tuple): (h, w) of each pooled output.
            spatial_scale (float): scale applied to input boxes to map them
                onto the feature map (e.g. 1 / feature stride).
            sampling_ratio (int): number of sampling points per output bin
                along each axis; 0 samples densely (adaptive to bin size).
        Note:
            point_interpolate already accounts for alignment, just make sure
            the continuous coordinates are correct.
        """
        super(ROIAlign, self).__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale
        self.sampling_ratio = sampling_ratio

    def forward(self, input, rois):
        """
        Args:
            input: NCHW feature maps.
            rois: Bx5 boxes. First column is the index into N, the other four
                columns are xyxy.
        Returns:
            ROI-aligned output, shape (B, C, output_size[0], output_size[1]).
        """
        assert rois.dim() == 2 and rois.size(1) == 5
        batch_indices, rois_only = torch.split(rois, split_size_or_sections=[1, 4], dim=1)
        # view(-1) instead of squeeze(): squeeze() collapses the (1, 1) column
        # of a single-ROI batch to a 0-dim tensor, which breaks len()/indexing.
        batch_indices = batch_indices.view(-1).long()
        rois_only = rois_only * self.spatial_scale
        n_rois = len(batch_indices)
        pooled_height = self.output_size[0]
        pooled_width = self.output_size[1]
        channels = input.shape[1]

        output = input.new_zeros(size=(rois.shape[0], channels, pooled_height, pooled_width))

        for i in range(n_rois):
            batch_index = batch_indices[i]
            roi = rois_only[i]
            roi_start_w = roi[0]
            roi_start_h = roi[1]
            roi_end_w = roi[2]
            roi_end_h = roi[3]

            # Clamp degenerate boxes to at least 1x1 on the feature map.
            roi_width = roi_end_w - roi_start_w
            roi_height = roi_end_h - roi_start_h
            roi_width = max(roi_width, 1.)
            roi_height = max(roi_height, 1.)

            bin_size_h = roi_height / pooled_height
            bin_size_w = roi_width / pooled_width

            # sampling_ratio == 0 -> adaptive: roughly one sample per unit bin size.
            roi_bin_grid_h = self.sampling_ratio if self.sampling_ratio > 0 else math.ceil(roi_height / pooled_height)
            roi_bin_grid_w = self.sampling_ratio if self.sampling_ratio > 0 else math.ceil(roi_width / pooled_width)

            count = max(roi_bin_grid_h * roi_bin_grid_w, 1)

            # Construct pooled ROI (averaged samples) for all channels.
            for ph in range(pooled_height):
                for pw in range(pooled_width):
                    pooled_sum = input.new_zeros(size=(channels, ))
                    for sample_h in range(roi_bin_grid_h):
                        # Sample points are placed at bin-relative offsets
                        # (s + 0.5) / grid, i.e. centered within sub-bins.
                        y = roi_start_h + ph * bin_size_h + ((sample_h + 0.5) / roi_bin_grid_h) * bin_size_h
                        for sample_w in range(roi_bin_grid_w):
                            x = roi_start_w + pw * bin_size_w + ((sample_w + 0.5) / roi_bin_grid_w) * bin_size_w
                            sampled_point = point_interpolate(input[batch_index], torch.Tensor([x, y]))
                            pooled_sum = pooled_sum + sampled_point
                    output[i, :, ph, pw] = pooled_sum / count

        return output

    def __repr__(self):
        # Bug fix: the previous version appended str(self.aligned), but no
        # such attribute is ever set, so repr() raised AttributeError.
        tmpstr = self.__class__.__name__ + "("
        tmpstr += "output_size=" + str(self.output_size)
        tmpstr += ", spatial_scale=" + str(self.spatial_scale)
        tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
        tmpstr += ")"
        return tmpstr
| 36.783505 | 118 | 0.596693 | import torch
import math
from torch import nn
from ..utils.utils import point_interpolate
class ROIAlign(nn.Module):
def __init__(self, output_size, spatial_scale, sampling_ratio):
super(ROIAlign, self).__init__()
self.output_size = output_size
self.spatial_scale = spatial_scale
self.sampling_ratio = sampling_ratio
def forward(self, input, rois):
assert rois.dim() == 2 and rois.size(1) == 5
batch_indices, rois_only = torch.split(rois, split_size_or_sections=[1, 4], dim=1)
batch_indices = batch_indices.squeeze().long()
rois_only = rois_only * self.spatial_scale
n_rois = len(batch_indices)
pooled_height = self.output_size[0]
pooled_width = self.output_size[1]
channels = input.shape[1]
output = input.new_zeros(size=(rois.shape[0], channels, pooled_height, pooled_width))
for i in range(n_rois):
batch_index = batch_indices[i]
roi = rois_only[i]
roi_start_w = roi[0]
roi_start_h = roi[1]
roi_end_w = roi[2]
roi_end_h = roi[3]
roi_width = roi_end_w - roi_start_w
roi_height = roi_end_h - roi_start_h
roi_width = max(roi_width, 1.)
roi_height = max(roi_height, 1.)
bin_size_h = roi_height / pooled_height
bin_size_w = roi_width / pooled_width
roi_bin_grid_h = self.sampling_ratio if self.sampling_ratio > 0 else math.ceil(roi_height / pooled_height)
roi_bin_grid_w = self.sampling_ratio if self.sampling_ratio > 0 else math.ceil(roi_width / pooled_width)
count = max(roi_bin_grid_h * roi_bin_grid_w, 1)
for ph in range(pooled_height):
for pw in range(pooled_width):
pooled_sum = input.new_zeros(size=(channels, ))
for sample_h in range(roi_bin_grid_h):
y = roi_start_h + ph * bin_size_h + ((sample_h + 0.5) / roi_bin_grid_h) * bin_size_h
for sample_w in range(roi_bin_grid_w):
x = roi_start_w + pw * bin_size_w + ((sample_w + 0.5) / roi_bin_grid_w) * bin_size_w
sampled_point = point_interpolate(input[batch_index], torch.Tensor([x, y]))
pooled_sum = pooled_sum + sampled_point
output[i, :, ph, pw] = pooled_sum / count
return output
def __repr__(self):
tmpstr = self.__class__.__name__ + "("
tmpstr += "output_size=" + str(self.output_size)
tmpstr += ", spatial_scale=" + str(self.spatial_scale)
tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
tmpstr += ", aligned=" + str(self.aligned)
tmpstr += ")"
return tmpstr
| true | true |
f735d7fc324285a6ec875c9f0d297b2c8400e2b9 | 3,995 | py | Python | tests/test_web_cli.py | ikamensh/aiohttp | d836d9b6b6050f9827e39997163a4b194a3a1314 | [
"Apache-2.0"
] | null | null | null | tests/test_web_cli.py | ikamensh/aiohttp | d836d9b6b6050f9827e39997163a4b194a3a1314 | [
"Apache-2.0"
] | null | null | null | tests/test_web_cli.py | ikamensh/aiohttp | d836d9b6b6050f9827e39997163a4b194a3a1314 | [
"Apache-2.0"
] | null | null | null | import pytest
from aiohttp import web
def test_entry_func_empty(mocker) -> None:
    """An empty positional argument must abort with the module:function syntax error."""
    error = mocker.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
    argv = [""]
    with pytest.raises(SystemExit):
        web.main(argv)

    error.assert_called_with("'entry-func' not in 'module:function' syntax")
def test_entry_func_only_module(mocker) -> None:
    """A bare module name without ':function' must abort with the syntax error."""
    argv = ["test"]
    error = mocker.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
    with pytest.raises(SystemExit):
        web.main(argv)

    error.assert_called_with("'entry-func' not in 'module:function' syntax")
def test_entry_func_only_function(mocker) -> None:
    """A ':function' with no module part must abort with the syntax error."""
    argv = [":test"]
    error = mocker.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
    with pytest.raises(SystemExit):
        web.main(argv)

    error.assert_called_with("'entry-func' not in 'module:function' syntax")
def test_entry_func_only_separator(mocker) -> None:
    """A lone ':' (no module, no function) must abort with the syntax error."""
    argv = [":"]
    error = mocker.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
    with pytest.raises(SystemExit):
        web.main(argv)

    error.assert_called_with("'entry-func' not in 'module:function' syntax")
def test_entry_func_relative_module(mocker) -> None:
    """A relative module path (leading dot) must be rejected explicitly."""
    argv = [".a.b:c"]
    error = mocker.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
    with pytest.raises(SystemExit):
        web.main(argv)

    error.assert_called_with("relative module names not supported")
def test_entry_func_non_existent_module(mocker) -> None:
    """An ImportError while importing the module must surface in the CLI error."""
    argv = ["alpha.beta:func"]
    mocker.patch("aiohttp.web.import_module", side_effect=ImportError("Test Error"))
    error = mocker.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
    with pytest.raises(SystemExit):
        web.main(argv)

    error.assert_called_with("unable to import alpha.beta: Test Error")
def test_entry_func_non_existent_attribute(mocker) -> None:
    """A module that lacks the requested function must abort with a clear error."""
    argv = ["alpha.beta:func"]
    import_module = mocker.patch("aiohttp.web.import_module")
    error = mocker.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
    # Delete the attribute from the mocked module so getattr fails inside main.
    module = import_module("alpha.beta")
    del module.func
    with pytest.raises(SystemExit):
        web.main(argv)

    error.assert_called_with(
        "module alpha.beta has no attribute func"
    )
def test_path_when_unsupported(mocker, monkeypatch) -> None:
    """--path must be rejected on platforms without AF_UNIX sockets."""
    argv = "--path=test_path.sock alpha.beta:func".split()
    mocker.patch("aiohttp.web.import_module")
    # Simulate a platform (e.g. Windows) without unix domain socket support.
    monkeypatch.delattr("socket.AF_UNIX", raising=False)
    error = mocker.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
    with pytest.raises(SystemExit):
        web.main(argv)

    error.assert_called_with(
        "file system paths not supported by your" " operating environment"
    )
def test_entry_func_call(mocker) -> None:
    """The entry function must receive the leftover (non-CLI) arguments."""
    mocker.patch("aiohttp.web.run_app")
    import_module = mocker.patch("aiohttp.web.import_module")
    argv = (
        "-H testhost -P 6666 --extra-optional-eins alpha.beta:func "
        "--extra-optional-zwei extra positional args"
    ).split()
    module = import_module("alpha.beta")

    with pytest.raises(SystemExit):
        web.main(argv)

    # Only the unparsed extras are forwarded; -H/-P are consumed by the CLI.
    module.func.assert_called_with(
        ("--extra-optional-eins --extra-optional-zwei extra positional " "args").split()
    )
def test_running_application(mocker) -> None:
    """main() must run the app with the parsed host/port and exit with 'Stopped'."""
    run_app = mocker.patch("aiohttp.web.run_app")
    import_module = mocker.patch("aiohttp.web.import_module")
    exit = mocker.patch("aiohttp.web.ArgumentParser.exit", side_effect=SystemExit)
    argv = (
        "-H testhost -P 6666 --extra-optional-eins alpha.beta:func "
        "--extra-optional-zwei extra positional args"
    ).split()
    module = import_module("alpha.beta")
    app = module.func()

    with pytest.raises(SystemExit):
        web.main(argv)

    run_app.assert_called_with(app, host="testhost", port=6666, path=None)
    exit.assert_called_with(message="Stopped\n")
| 30.730769 | 88 | 0.701377 | import pytest
from aiohttp import web
def test_entry_func_empty(mocker) -> None:
error = mocker.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
argv = [""]
with pytest.raises(SystemExit):
web.main(argv)
error.assert_called_with("'entry-func' not in 'module:function' syntax")
def test_entry_func_only_module(mocker) -> None:
argv = ["test"]
error = mocker.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
with pytest.raises(SystemExit):
web.main(argv)
error.assert_called_with("'entry-func' not in 'module:function' syntax")
def test_entry_func_only_function(mocker) -> None:
argv = [":test"]
error = mocker.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
with pytest.raises(SystemExit):
web.main(argv)
error.assert_called_with("'entry-func' not in 'module:function' syntax")
def test_entry_func_only_separator(mocker) -> None:
argv = [":"]
error = mocker.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
with pytest.raises(SystemExit):
web.main(argv)
error.assert_called_with("'entry-func' not in 'module:function' syntax")
def test_entry_func_relative_module(mocker) -> None:
argv = [".a.b:c"]
error = mocker.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
with pytest.raises(SystemExit):
web.main(argv)
error.assert_called_with("relative module names not supported")
def test_entry_func_non_existent_module(mocker) -> None:
argv = ["alpha.beta:func"]
mocker.patch("aiohttp.web.import_module", side_effect=ImportError("Test Error"))
error = mocker.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
with pytest.raises(SystemExit):
web.main(argv)
error.assert_called_with("unable to import alpha.beta: Test Error")
def test_entry_func_non_existent_attribute(mocker) -> None:
argv = ["alpha.beta:func"]
import_module = mocker.patch("aiohttp.web.import_module")
error = mocker.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
module = import_module("alpha.beta")
del module.func
with pytest.raises(SystemExit):
web.main(argv)
error.assert_called_with(
"module alpha.beta has no attribute func"
)
def test_path_when_unsupported(mocker, monkeypatch) -> None:
argv = "--path=test_path.sock alpha.beta:func".split()
mocker.patch("aiohttp.web.import_module")
monkeypatch.delattr("socket.AF_UNIX", raising=False)
error = mocker.patch("aiohttp.web.ArgumentParser.error", side_effect=SystemExit)
with pytest.raises(SystemExit):
web.main(argv)
error.assert_called_with(
"file system paths not supported by your" " operating environment"
)
def test_entry_func_call(mocker) -> None:
mocker.patch("aiohttp.web.run_app")
import_module = mocker.patch("aiohttp.web.import_module")
argv = (
"-H testhost -P 6666 --extra-optional-eins alpha.beta:func "
"--extra-optional-zwei extra positional args"
).split()
module = import_module("alpha.beta")
with pytest.raises(SystemExit):
web.main(argv)
module.func.assert_called_with(
("--extra-optional-eins --extra-optional-zwei extra positional " "args").split()
)
def test_running_application(mocker) -> None:
run_app = mocker.patch("aiohttp.web.run_app")
import_module = mocker.patch("aiohttp.web.import_module")
exit = mocker.patch("aiohttp.web.ArgumentParser.exit", side_effect=SystemExit)
argv = (
"-H testhost -P 6666 --extra-optional-eins alpha.beta:func "
"--extra-optional-zwei extra positional args"
).split()
module = import_module("alpha.beta")
app = module.func()
with pytest.raises(SystemExit):
web.main(argv)
run_app.assert_called_with(app, host="testhost", port=6666, path=None)
exit.assert_called_with(message="Stopped\n")
| true | true |
f735d87a764933885e76c30d33d976d189285b6d | 16,617 | py | Python | mindhome_alpha/erpnext/accounts/report/general_ledger/general_ledger.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:55:29.000Z | 2021-04-29T14:55:29.000Z | mindhome_alpha/erpnext/accounts/report/general_ledger/general_ledger.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | null | null | null | mindhome_alpha/erpnext/accounts/report/general_ledger/general_ledger.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:39:01.000Z | 2021-04-29T14:39:01.000Z | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, erpnext
from erpnext import get_company_currency, get_default_company
from erpnext.accounts.report.utils import get_currency, convert_to_presentation_currency
from frappe.utils import getdate, cstr, flt, fmt_money
from frappe import _, _dict
from erpnext.accounts.utils import get_account_currency
from erpnext.accounts.report.financial_statements import get_cost_centers_with_children
from six import iteritems
from erpnext.accounts.doctype.accounting_dimension.accounting_dimension import get_accounting_dimensions, get_dimension_with_children
from collections import OrderedDict
def execute(filters=None):
	"""Report entry point: return (columns, data) for the General Ledger."""
	if not filters:
		return [], []

	# Printing in account currency only makes sense for a single account.
	if filters and filters.get('print_in_account_currency') and \
		not filters.get('account'):
		frappe.throw(_("Select an account to print in account currency"))

	account_details = {
		acc.name: acc
		for acc in frappe.db.sql("""select name, is_group from tabAccount""", as_dict=1)
	}

	if filters.get('party'):
		filters.party = frappe.parse_json(filters.get("party"))

	validate_filters(filters, account_details)
	validate_party(filters)
	filters = set_account_currency(filters)

	return get_columns(filters), get_result(filters, account_details)
def validate_filters(filters, account_details):
	"""Raise on invalid or conflicting filters; parse JSON list filters in place."""
	if not filters.get("company"):
		frappe.throw(_("{0} is mandatory").format(_("Company")))

	if not filters.get("from_date") and not filters.get("to_date"):
		frappe.throw(_("{0} and {1} are mandatory").format(
			frappe.bold(_("From Date")), frappe.bold(_("To Date"))))

	account = filters.get("account")
	if account and not account_details.get(account):
		frappe.throw(_("Account {0} does not exists").format(filters.account))

	# Grouping by account requires the chosen account to be a group node.
	if account and filters.get("group_by") == _('Group by Account') \
			and account_details[filters.account].is_group == 0:
		frappe.throw(_("Can not filter based on Account, if grouped by Account"))

	if filters.get("voucher_no") and filters.get("group_by") in [_('Group by Voucher')]:
		frappe.throw(_("Can not filter based on Voucher No, if grouped by Voucher"))

	if filters.from_date > filters.to_date:
		frappe.throw(_("From Date must be before To Date"))

	# Multi-select filters arrive as JSON strings from the UI.
	for key in ('project', 'cost_center'):
		if filters.get(key):
			filters[key] = frappe.parse_json(filters.get(key))
def validate_party(filters):
	"""Require a party type when a party is set, and check each party exists."""
	party = filters.get("party")
	if not party:
		return

	party_type = filters.get("party_type")
	if not party_type:
		frappe.throw(_("To filter based on Party, select Party Type first"))

	for name in party:
		if not frappe.db.exists(party_type, name):
			frappe.throw(_("Invalid {0}: {1}").format(party_type, name))
def set_account_currency(filters):
	"""Resolve account/company/presentation currencies and store them on *filters*."""
	# Only infer a currency when the report is scoped to a single account or
	# exactly one party; otherwise mixed currencies make the choice meaningless.
	if filters.get("account") or (filters.get('party') and len(filters.party) == 1):
		filters["company_currency"] = frappe.get_cached_value('Company', filters.company, "default_currency")
		account_currency = None

		if filters.get("account"):
			account_currency = get_account_currency(filters.account)
		elif filters.get("party"):
			# Prefer the currency actually used on this party's GL entries.
			gle_currency = frappe.db.get_value(
				"GL Entry", {
					"party_type": filters.party_type, "party": filters.party[0], "company": filters.company
				},
				"account_currency"
			)

			if gle_currency:
				account_currency = gle_currency
			else:
				# No GL entry yet: fall back to the party's default currency.
				# Party types without a default_currency field yield None.
				account_currency = (None if filters.party_type in ["Employee", "Student", "Shareholder", "Member"] else
					frappe.db.get_value(filters.party_type, filters.party[0], "default_currency"))

		filters["account_currency"] = account_currency or filters.company_currency
		# Present in the account currency when it differs from the company
		# currency, unless the user explicitly chose a presentation currency.
		if filters.account_currency != filters.company_currency and not filters.presentation_currency:
			filters.presentation_currency = filters.account_currency

	return filters
def get_result(filters, account_details):
	"""Fetch GL entries and assemble report rows with opening/total/closing."""
	dimensions = get_accounting_dimensions() if filters.get("include_dimensions") else []

	gl_entries = get_gl_entries(filters, dimensions)
	data = get_data_with_opening_closing(
		filters, account_details, dimensions, gl_entries)

	return get_result_as_list(data, filters)
def get_gl_entries(filters, accounting_dimensions):
	"""Query GL Entry rows per the filters, optionally converted to the
	presentation currency.

	When cost centers are filtered, a UNION ALL arm pro-rates entries booked
	against "Distributed Cost Center" parents by their percentage allocation.
	"""
	currency_map = get_currency(filters)
	select_fields = """, debit, credit, debit_in_account_currency,
		credit_in_account_currency """

	order_by_statement = "order by posting_date, account, creation"

	if filters.get("group_by") == _("Group by Voucher"):
		order_by_statement = "order by posting_date, voucher_type, voucher_no"

	if filters.get("include_default_book_entries"):
		# Cached for the %(company_fb)s placeholder used by get_conditions().
		filters['company_fb'] = frappe.db.get_value("Company",
			filters.get("company"), 'default_finance_book')

	dimension_fields = ""
	if accounting_dimensions:
		# Trailing comma so the fragment splices cleanly into the SELECT list.
		dimension_fields = ', '.join(accounting_dimensions) + ','

	distributed_cost_center_query = ""
	if filters and filters.get('cost_center'):
		# Scale every amount by the parent's percentage allocation.
		select_fields_with_percentage = """, debit*(DCC_allocation.percentage_allocation/100) as debit, credit*(DCC_allocation.percentage_allocation/100) as credit, debit_in_account_currency*(DCC_allocation.percentage_allocation/100) as debit_in_account_currency,
		credit_in_account_currency*(DCC_allocation.percentage_allocation/100) as credit_in_account_currency """

		# NOTE: the cost_center condition is stripped here because this arm
		# matches on DCC_allocation.parent instead of the filtered centers.
		distributed_cost_center_query = """
		UNION ALL
		SELECT name as gl_entry,
			posting_date,
			account,
			party_type,
			party,
			voucher_type,
			voucher_no, {dimension_fields}
			cost_center, project,
			against_voucher_type,
			against_voucher,
			account_currency,
			remarks, against,
			is_opening, `tabGL Entry`.creation {select_fields_with_percentage}
		FROM `tabGL Entry`,
		(
			SELECT parent, sum(percentage_allocation) as percentage_allocation
			FROM `tabDistributed Cost Center`
			WHERE cost_center IN %(cost_center)s
			AND parent NOT IN %(cost_center)s
			GROUP BY parent
		) as DCC_allocation
		WHERE company=%(company)s
		{conditions}
		AND posting_date <= %(to_date)s
		AND cost_center = DCC_allocation.parent
		""".format(dimension_fields=dimension_fields,select_fields_with_percentage=select_fields_with_percentage, conditions=get_conditions(filters).replace("and cost_center in %(cost_center)s ", ''))

	gl_entries = frappe.db.sql(
		"""
		select
			name as gl_entry, posting_date, account, party_type, party,
			voucher_type, voucher_no, {dimension_fields}
			cost_center, project,
			against_voucher_type, against_voucher, account_currency,
			remarks, against, is_opening, creation {select_fields}
		from `tabGL Entry`
		where company=%(company)s {conditions}
		{distributed_cost_center_query}
		{order_by_statement}
		""".format(
			dimension_fields=dimension_fields, select_fields=select_fields, conditions=get_conditions(filters), distributed_cost_center_query=distributed_cost_center_query,
			order_by_statement=order_by_statement
		),
		filters, as_dict=1)

	if filters.get('presentation_currency'):
		return convert_to_presentation_currency(gl_entries, currency_map, filters.get('company'))
	else:
		return gl_entries
def get_conditions(filters):
	"""Assemble the SQL WHERE fragment (prefixed with "and") for the GL query.

	Only condition strings are built here; values are bound later through
	frappe.db.sql's %(...)s placeholders. Some filter values are also
	normalised in place (cost_center subtree expansion, tree dimensions).
	"""
	conditions = []
	if filters.get("account"):
		# Match the whole subtree of the chosen account via nested-set lft/rgt.
		lft, rgt = frappe.db.get_value("Account", filters["account"], ["lft", "rgt"])
		conditions.append("""account in (select name from tabAccount
			where lft>=%s and rgt<=%s and docstatus<2)""" % (lft, rgt))

	if filters.get("cost_center"):
		filters.cost_center = get_cost_centers_with_children(filters.cost_center)
		conditions.append("cost_center in %(cost_center)s")

	if filters.get("voucher_no"):
		conditions.append("voucher_no=%(voucher_no)s")

	if filters.get("group_by") == "Group by Party" and not filters.get("party_type"):
		conditions.append("party_type in ('Customer', 'Supplier')")

	if filters.get("party_type"):
		conditions.append("party_type=%(party_type)s")

	if filters.get("party"):
		conditions.append("party in %(party)s")

	# With an account/party scope (or account/party grouping) the date window is
	# applied later so that opening balances can be accumulated; otherwise
	# restrict by posting date here (opening entries are always kept).
	if not (filters.get("account") or filters.get("party") or
		filters.get("group_by") in ["Group by Account", "Group by Party"]):
		conditions.append("posting_date >=%(from_date)s")

	conditions.append("(posting_date <=%(to_date)s or is_opening = 'Yes')")

	if filters.get("project"):
		conditions.append("project in %(project)s")

	if filters.get("finance_book"):
		if filters.get("include_default_book_entries"):
			# %(company_fb)s is populated by get_gl_entries().
			conditions.append("(finance_book in (%(finance_book)s, %(company_fb)s, '') OR finance_book IS NULL)")
		else:
			conditions.append("finance_book in (%(finance_book)s)")

	if not filters.get("show_cancelled_entries"):
		conditions.append("is_cancelled = 0")

	from frappe.desk.reportview import build_match_conditions
	match_conditions = build_match_conditions("GL Entry")

	if match_conditions:
		conditions.append(match_conditions)

	accounting_dimensions = get_accounting_dimensions(as_list=False)

	if accounting_dimensions:
		for dimension in accounting_dimensions:
			if filters.get(dimension.fieldname):
				if frappe.get_cached_value('DocType', dimension.document_type, 'is_tree'):
					# Tree dimensions: expand the filter to include descendants.
					filters[dimension.fieldname] = get_dimension_with_children(dimension.document_type,
						filters.get(dimension.fieldname))
					conditions.append("{0} in %({0})s".format(dimension.fieldname))
				else:
					conditions.append("{0} in (%({0})s)".format(dimension.fieldname))

	return "and {}".format(" and ".join(conditions)) if conditions else ""
def get_data_with_opening_closing(filters, account_details, accounting_dimensions, gl_entries):
	"""Interleave GL rows with opening/total/closing summary rows.

	Empty dicts ({}) act as blank separator rows between groups; the running
	balance in get_result_as_list() resets on them.
	"""
	data = []

	gle_map = initialize_gle_map(gl_entries, filters)

	totals, entries = get_accountwise_gle(filters, accounting_dimensions, gl_entries, gle_map)

	# Grand opening row for the whole filtered range.
	data.append(totals.opening)

	if filters.get("group_by") != _('Group by Voucher (Consolidated)'):
		for acc, acc_dict in iteritems(gle_map):
			# One section per group-by key, skipping empty groups.
			if acc_dict.entries:
				# Blank separator, then the group's opening row.
				data.append({})
				if filters.get("group_by") != _("Group by Voucher"):
					data.append(acc_dict.totals.opening)

				data += acc_dict.entries

				# Group totals row.
				data.append(acc_dict.totals.total)

				# Group closing row, then a trailing separator.
				if filters.get("group_by") != _("Group by Voucher"):
					data.append(acc_dict.totals.closing)
				data.append({})
	else:
		data += entries

	# Grand totals row.
	data.append(totals.total)

	# Grand closing row.
	data.append(totals.closing)

	return data
def get_totals_dict():
	"""Return zeroed opening/total/closing accumulator rows (debit & credit)."""
	def _zero_row(label):
		row = _dict()
		# The label is quoted so the report renders it as literal text.
		row.account = "'{0}'".format(label)
		row.debit = 0.0
		row.credit = 0.0
		row.debit_in_account_currency = 0.0
		row.credit_in_account_currency = 0.0
		return row

	return _dict(
		opening=_zero_row(_('Opening')),
		total=_zero_row(_('Total')),
		closing=_zero_row(_('Closing (Opening + Total)'))
	)
def group_by_field(group_by):
	"""Map the UI "group by" label to the GL Entry field used as group key."""
	field_by_label = {
		_('Group by Party'): 'party',
		_('Group by Voucher (Consolidated)'): 'account',
		_('Group by Account'): 'account',
	}
	return field_by_label.get(group_by, 'voucher_no')
def initialize_gle_map(gl_entries, filters):
	"""Pre-create one (totals, entries) bucket per group-by key, in entry order."""
	key_field = group_by_field(filters.get('group_by'))
	gle_map = OrderedDict()
	for entry in gl_entries:
		key = entry.get(key_field)
		if key not in gle_map:
			gle_map[key] = _dict(totals=get_totals_dict(), entries=[])
	return gle_map
def get_accountwise_gle(filters, accounting_dimensions, gl_entries, gle_map):
	"""Accumulate opening/total/closing amounts per group and overall.

	Mutates the per-group totals in *gle_map* and returns (overall totals,
	consolidated entries). *entries* is only populated in "Group by Voucher
	(Consolidated)" mode, where rows sharing voucher/account/dimension keys
	are merged into one.
	"""
	totals = get_totals_dict()
	entries = []
	consolidated_gle = OrderedDict()
	group_by = group_by_field(filters.get('group_by'))

	def update_value_in_dict(data, key, gle):
		# Fold gle's amounts into the accumulator row data[key].
		data[key].debit += flt(gle.debit)
		data[key].credit += flt(gle.credit)

		data[key].debit_in_account_currency += flt(gle.debit_in_account_currency)
		data[key].credit_in_account_currency += flt(gle.credit_in_account_currency)

		if data[key].against_voucher and gle.against_voucher:
			data[key].against_voucher += ', ' + gle.against_voucher

	from_date, to_date = getdate(filters.from_date), getdate(filters.to_date)
	for gle in gl_entries:
		# Entries before the window (or opening entries, unless explicitly
		# shown) count toward opening and closing, not the period total.
		if (gle.posting_date < from_date or
			(cstr(gle.is_opening) == "Yes" and not filters.get("show_opening_entries"))):
			update_value_in_dict(gle_map[gle.get(group_by)].totals, 'opening', gle)
			update_value_in_dict(totals, 'opening', gle)

			update_value_in_dict(gle_map[gle.get(group_by)].totals, 'closing', gle)
			update_value_in_dict(totals, 'closing', gle)

		elif gle.posting_date <= to_date:
			update_value_in_dict(gle_map[gle.get(group_by)].totals, 'total', gle)
			update_value_in_dict(totals, 'total', gle)

			if filters.get("group_by") != _('Group by Voucher (Consolidated)'):
				gle_map[gle.get(group_by)].entries.append(gle)

			elif filters.get("group_by") == _('Group by Voucher (Consolidated)'):
				# Consolidation key: voucher, account, every dimension, cost center.
				keylist = [gle.get("voucher_type"), gle.get("voucher_no"), gle.get("account")]
				for dim in accounting_dimensions:
					keylist.append(gle.get(dim))
				keylist.append(gle.get("cost_center"))
				key = tuple(keylist)
				if key not in consolidated_gle:
					consolidated_gle.setdefault(key, gle)
				else:
					update_value_in_dict(consolidated_gle, key, gle)

			update_value_in_dict(gle_map[gle.get(group_by)].totals, 'closing', gle)
			update_value_in_dict(totals, 'closing', gle)

	for key, value in consolidated_gle.items():
		entries.append(value)

	return totals, entries
def get_result_as_list(data, filters):
	"""Annotate report rows in place with running balance, account currency
	and supplier bill number, and return the same list.

	The running balance resets on every separator/summary row, i.e. any row
	without a posting_date (the blank and opening rows inserted between
	groups by get_data_with_opening_closing).
	"""
	# Fix: the original also tracked balance_in_account_currency, which was
	# assigned and reset but never read or written to the rows — dropped.
	balance = 0
	inv_details = get_supplier_invoice_details()

	for d in data:
		if not d.get('posting_date'):
			balance = 0

		balance = get_balance(d, balance, 'debit', 'credit')
		d['balance'] = balance

		d['account_currency'] = filters.account_currency
		d['bill_no'] = inv_details.get(d.get('against_voucher'), '')

	return data
def get_supplier_invoice_details():
	"""Return {purchase invoice name: bill_no} for submitted invoices that have one."""
	rows = frappe.db.sql(""" select name, bill_no from `tabPurchase Invoice`
		where docstatus = 1 and bill_no is not null and bill_no != '' """, as_dict=1)
	return {row.name: row.bill_no for row in rows}
def get_balance(row, balance, debit_field, credit_field):
	"""Return *balance* advanced by the row's debit minus credit.

	Missing fields count as 0, so separator rows leave the balance unchanged.
	"""
	delta = row.get(debit_field, 0) - row.get(credit_field, 0)
	return balance + delta
def get_columns(filters):
	"""Return the report column definitions, with amount labels carrying the
	currency symbol (presentation currency if chosen, else company currency).
	"""
	if filters.get("presentation_currency"):
		currency = filters["presentation_currency"]
	else:
		if filters.get("company"):
			currency = get_company_currency(filters["company"])
		else:
			# No company filter: fall back to the user's default company.
			company = get_default_company()
			currency = get_company_currency(company)

	columns = [
		{
			# Hidden link back to the underlying GL Entry record.
			"label": _("GL Entry"),
			"fieldname": "gl_entry",
			"fieldtype": "Link",
			"options": "GL Entry",
			"hidden": 1
		},
		{
			"label": _("Posting Date"),
			"fieldname": "posting_date",
			"fieldtype": "Date",
			"width": 90
		},
		{
			"label": _("Account"),
			"fieldname": "account",
			"fieldtype": "Link",
			"options": "Account",
			"width": 180
		},
		{
			"label": _("Debit ({0})").format(currency),
			"fieldname": "debit",
			"fieldtype": "Float",
			"width": 100
		},
		{
			"label": _("Credit ({0})").format(currency),
			"fieldname": "credit",
			"fieldtype": "Float",
			"width": 100
		},
		{
			"label": _("Balance ({0})").format(currency),
			"fieldname": "balance",
			"fieldtype": "Float",
			"width": 130
		}
	]

	columns.extend([
		{
			"label": _("Voucher Type"),
			"fieldname": "voucher_type",
			"width": 120
		},
		{
			# Dynamic link: resolves against the voucher_type column's doctype.
			"label": _("Voucher No"),
			"fieldname": "voucher_no",
			"fieldtype": "Dynamic Link",
			"options": "voucher_type",
			"width": 180
		},
		{
			"label": _("Against Account"),
			"fieldname": "against",
			"width": 120
		},
		{
			"label": _("Party Type"),
			"fieldname": "party_type",
			"width": 100
		},
		{
			"label": _("Party"),
			"fieldname": "party",
			"width": 100
		},
		{
			"label": _("Project"),
			"options": "Project",
			"fieldname": "project",
			"width": 100
		}
	])

	# One extra column per active accounting dimension, if requested.
	if filters.get("include_dimensions"):
		for dim in get_accounting_dimensions(as_list = False):
			columns.append({
				"label": _(dim.label),
				"options": dim.label,
				"fieldname": dim.fieldname,
				"width": 100
			})

	columns.extend([
		{
			"label": _("Cost Center"),
			"options": "Cost Center",
			"fieldname": "cost_center",
			"width": 100
		},
		{
			"label": _("Against Voucher Type"),
			"fieldname": "against_voucher_type",
			"width": 100
		},
		{
			"label": _("Against Voucher"),
			"fieldname": "against_voucher",
			"fieldtype": "Dynamic Link",
			"options": "against_voucher_type",
			"width": 100
		},
		{
			"label": _("Supplier Invoice No"),
			"fieldname": "bill_no",
			"fieldtype": "Data",
			"width": 100
		},
		{
			"label": _("Remarks"),
			"fieldname": "remarks",
			"width": 400
		}
	])

	return columns
| 30.658672 | 257 | 0.719624 |
from __future__ import unicode_literals
import frappe, erpnext
from erpnext import get_company_currency, get_default_company
from erpnext.accounts.report.utils import get_currency, convert_to_presentation_currency
from frappe.utils import getdate, cstr, flt, fmt_money
from frappe import _, _dict
from erpnext.accounts.utils import get_account_currency
from erpnext.accounts.report.financial_statements import get_cost_centers_with_children
from six import iteritems
from erpnext.accounts.doctype.accounting_dimension.accounting_dimension import get_accounting_dimensions, get_dimension_with_children
from collections import OrderedDict
def execute(filters=None):
if not filters:
return [], []
account_details = {}
if filters and filters.get('print_in_account_currency') and \
not filters.get('account'):
frappe.throw(_("Select an account to print in account currency"))
for acc in frappe.db.sql("""select name, is_group from tabAccount""", as_dict=1):
account_details.setdefault(acc.name, acc)
if filters.get('party'):
filters.party = frappe.parse_json(filters.get("party"))
validate_filters(filters, account_details)
validate_party(filters)
filters = set_account_currency(filters)
columns = get_columns(filters)
res = get_result(filters, account_details)
return columns, res
def validate_filters(filters, account_details):
if not filters.get("company"):
frappe.throw(_("{0} is mandatory").format(_("Company")))
if not filters.get("from_date") and not filters.get("to_date"):
frappe.throw(_("{0} and {1} are mandatory").format(frappe.bold(_("From Date")), frappe.bold(_("To Date"))))
if filters.get("account") and not account_details.get(filters.account):
frappe.throw(_("Account {0} does not exists").format(filters.account))
if (filters.get("account") and filters.get("group_by") == _('Group by Account')
and account_details[filters.account].is_group == 0):
frappe.throw(_("Can not filter based on Account, if grouped by Account"))
if (filters.get("voucher_no")
and filters.get("group_by") in [_('Group by Voucher')]):
frappe.throw(_("Can not filter based on Voucher No, if grouped by Voucher"))
if filters.from_date > filters.to_date:
frappe.throw(_("From Date must be before To Date"))
if filters.get('project'):
filters.project = frappe.parse_json(filters.get('project'))
if filters.get('cost_center'):
filters.cost_center = frappe.parse_json(filters.get('cost_center'))
def validate_party(filters):
party_type, party = filters.get("party_type"), filters.get("party")
if party:
if not party_type:
frappe.throw(_("To filter based on Party, select Party Type first"))
else:
for d in party:
if not frappe.db.exists(party_type, d):
frappe.throw(_("Invalid {0}: {1}").format(party_type, d))
def set_account_currency(filters):
if filters.get("account") or (filters.get('party') and len(filters.party) == 1):
filters["company_currency"] = frappe.get_cached_value('Company', filters.company, "default_currency")
account_currency = None
if filters.get("account"):
account_currency = get_account_currency(filters.account)
elif filters.get("party"):
gle_currency = frappe.db.get_value(
"GL Entry", {
"party_type": filters.party_type, "party": filters.party[0], "company": filters.company
},
"account_currency"
)
if gle_currency:
account_currency = gle_currency
else:
account_currency = (None if filters.party_type in ["Employee", "Student", "Shareholder", "Member"] else
frappe.db.get_value(filters.party_type, filters.party[0], "default_currency"))
filters["account_currency"] = account_currency or filters.company_currency
if filters.account_currency != filters.company_currency and not filters.presentation_currency:
filters.presentation_currency = filters.account_currency
return filters
def get_result(filters, account_details):
accounting_dimensions = []
if filters.get("include_dimensions"):
accounting_dimensions = get_accounting_dimensions()
gl_entries = get_gl_entries(filters, accounting_dimensions)
data = get_data_with_opening_closing(filters, account_details,
accounting_dimensions, gl_entries)
result = get_result_as_list(data, filters)
return result
def get_gl_entries(filters, accounting_dimensions):
currency_map = get_currency(filters)
select_fields = """, debit, credit, debit_in_account_currency,
credit_in_account_currency """
order_by_statement = "order by posting_date, account, creation"
if filters.get("group_by") == _("Group by Voucher"):
order_by_statement = "order by posting_date, voucher_type, voucher_no"
if filters.get("include_default_book_entries"):
filters['company_fb'] = frappe.db.get_value("Company",
filters.get("company"), 'default_finance_book')
dimension_fields = ""
if accounting_dimensions:
dimension_fields = ', '.join(accounting_dimensions) + ','
distributed_cost_center_query = ""
if filters and filters.get('cost_center'):
select_fields_with_percentage = """, debit*(DCC_allocation.percentage_allocation/100) as debit, credit*(DCC_allocation.percentage_allocation/100) as credit, debit_in_account_currency*(DCC_allocation.percentage_allocation/100) as debit_in_account_currency,
credit_in_account_currency*(DCC_allocation.percentage_allocation/100) as credit_in_account_currency """
distributed_cost_center_query = """
UNION ALL
SELECT name as gl_entry,
posting_date,
account,
party_type,
party,
voucher_type,
voucher_no, {dimension_fields}
cost_center, project,
against_voucher_type,
against_voucher,
account_currency,
remarks, against,
is_opening, `tabGL Entry`.creation {select_fields_with_percentage}
FROM `tabGL Entry`,
(
SELECT parent, sum(percentage_allocation) as percentage_allocation
FROM `tabDistributed Cost Center`
WHERE cost_center IN %(cost_center)s
AND parent NOT IN %(cost_center)s
GROUP BY parent
) as DCC_allocation
WHERE company=%(company)s
{conditions}
AND posting_date <= %(to_date)s
AND cost_center = DCC_allocation.parent
""".format(dimension_fields=dimension_fields,select_fields_with_percentage=select_fields_with_percentage, conditions=get_conditions(filters).replace("and cost_center in %(cost_center)s ", ''))
gl_entries = frappe.db.sql(
"""
select
name as gl_entry, posting_date, account, party_type, party,
voucher_type, voucher_no, {dimension_fields}
cost_center, project,
against_voucher_type, against_voucher, account_currency,
remarks, against, is_opening, creation {select_fields}
from `tabGL Entry`
where company=%(company)s {conditions}
{distributed_cost_center_query}
{order_by_statement}
""".format(
dimension_fields=dimension_fields, select_fields=select_fields, conditions=get_conditions(filters), distributed_cost_center_query=distributed_cost_center_query,
order_by_statement=order_by_statement
),
filters, as_dict=1)
if filters.get('presentation_currency'):
return convert_to_presentation_currency(gl_entries, currency_map, filters.get('company'))
else:
return gl_entries
def get_conditions(filters):
conditions = []
if filters.get("account"):
lft, rgt = frappe.db.get_value("Account", filters["account"], ["lft", "rgt"])
conditions.append("""account in (select name from tabAccount
where lft>=%s and rgt<=%s and docstatus<2)""" % (lft, rgt))
if filters.get("cost_center"):
filters.cost_center = get_cost_centers_with_children(filters.cost_center)
conditions.append("cost_center in %(cost_center)s")
if filters.get("voucher_no"):
conditions.append("voucher_no=%(voucher_no)s")
if filters.get("group_by") == "Group by Party" and not filters.get("party_type"):
conditions.append("party_type in ('Customer', 'Supplier')")
if filters.get("party_type"):
conditions.append("party_type=%(party_type)s")
if filters.get("party"):
conditions.append("party in %(party)s")
if not (filters.get("account") or filters.get("party") or
filters.get("group_by") in ["Group by Account", "Group by Party"]):
conditions.append("posting_date >=%(from_date)s")
conditions.append("(posting_date <=%(to_date)s or is_opening = 'Yes')")
if filters.get("project"):
conditions.append("project in %(project)s")
if filters.get("finance_book"):
if filters.get("include_default_book_entries"):
conditions.append("(finance_book in (%(finance_book)s, %(company_fb)s, '') OR finance_book IS NULL)")
else:
conditions.append("finance_book in (%(finance_book)s)")
if not filters.get("show_cancelled_entries"):
conditions.append("is_cancelled = 0")
from frappe.desk.reportview import build_match_conditions
match_conditions = build_match_conditions("GL Entry")
if match_conditions:
conditions.append(match_conditions)
accounting_dimensions = get_accounting_dimensions(as_list=False)
if accounting_dimensions:
for dimension in accounting_dimensions:
if filters.get(dimension.fieldname):
if frappe.get_cached_value('DocType', dimension.document_type, 'is_tree'):
filters[dimension.fieldname] = get_dimension_with_children(dimension.document_type,
filters.get(dimension.fieldname))
conditions.append("{0} in %({0})s".format(dimension.fieldname))
else:
conditions.append("{0} in (%({0})s)".format(dimension.fieldname))
return "and {}".format(" and ".join(conditions)) if conditions else ""
def get_data_with_opening_closing(filters, account_details, accounting_dimensions, gl_entries):
data = []
gle_map = initialize_gle_map(gl_entries, filters)
totals, entries = get_accountwise_gle(filters, accounting_dimensions, gl_entries, gle_map)
data.append(totals.opening)
if filters.get("group_by") != _('Group by Voucher (Consolidated)'):
for acc, acc_dict in iteritems(gle_map):
if acc_dict.entries:
data.append({})
if filters.get("group_by") != _("Group by Voucher"):
data.append(acc_dict.totals.opening)
data += acc_dict.entries
data.append(acc_dict.totals.total)
if filters.get("group_by") != _("Group by Voucher"):
data.append(acc_dict.totals.closing)
data.append({})
else:
data += entries
data.append(totals.total)
data.append(totals.closing)
return data
def get_totals_dict():
def _get_debit_credit_dict(label):
return _dict(
account="'{0}'".format(label),
debit=0.0,
credit=0.0,
debit_in_account_currency=0.0,
credit_in_account_currency=0.0
)
return _dict(
opening = _get_debit_credit_dict(_('Opening')),
total = _get_debit_credit_dict(_('Total')),
closing = _get_debit_credit_dict(_('Closing (Opening + Total)'))
)
def group_by_field(group_by):
if group_by == _('Group by Party'):
return 'party'
elif group_by in [_('Group by Voucher (Consolidated)'), _('Group by Account')]:
return 'account'
else:
return 'voucher_no'
def initialize_gle_map(gl_entries, filters):
gle_map = OrderedDict()
group_by = group_by_field(filters.get('group_by'))
for gle in gl_entries:
gle_map.setdefault(gle.get(group_by), _dict(totals=get_totals_dict(), entries=[]))
return gle_map
def get_accountwise_gle(filters, accounting_dimensions, gl_entries, gle_map):
totals = get_totals_dict()
entries = []
consolidated_gle = OrderedDict()
group_by = group_by_field(filters.get('group_by'))
def update_value_in_dict(data, key, gle):
data[key].debit += flt(gle.debit)
data[key].credit += flt(gle.credit)
data[key].debit_in_account_currency += flt(gle.debit_in_account_currency)
data[key].credit_in_account_currency += flt(gle.credit_in_account_currency)
if data[key].against_voucher and gle.against_voucher:
data[key].against_voucher += ', ' + gle.against_voucher
from_date, to_date = getdate(filters.from_date), getdate(filters.to_date)
for gle in gl_entries:
if (gle.posting_date < from_date or
(cstr(gle.is_opening) == "Yes" and not filters.get("show_opening_entries"))):
update_value_in_dict(gle_map[gle.get(group_by)].totals, 'opening', gle)
update_value_in_dict(totals, 'opening', gle)
update_value_in_dict(gle_map[gle.get(group_by)].totals, 'closing', gle)
update_value_in_dict(totals, 'closing', gle)
elif gle.posting_date <= to_date:
update_value_in_dict(gle_map[gle.get(group_by)].totals, 'total', gle)
update_value_in_dict(totals, 'total', gle)
if filters.get("group_by") != _('Group by Voucher (Consolidated)'):
gle_map[gle.get(group_by)].entries.append(gle)
elif filters.get("group_by") == _('Group by Voucher (Consolidated)'):
keylist = [gle.get("voucher_type"), gle.get("voucher_no"), gle.get("account")]
for dim in accounting_dimensions:
keylist.append(gle.get(dim))
keylist.append(gle.get("cost_center"))
key = tuple(keylist)
if key not in consolidated_gle:
consolidated_gle.setdefault(key, gle)
else:
update_value_in_dict(consolidated_gle, key, gle)
update_value_in_dict(gle_map[gle.get(group_by)].totals, 'closing', gle)
update_value_in_dict(totals, 'closing', gle)
for key, value in consolidated_gle.items():
entries.append(value)
return totals, entries
def get_result_as_list(data, filters):
	"""Annotate each GL row with running balance, currency and supplier bill no.

	Mutates the rows of ``data`` in place and returns the same list.

	Args:
		data: list of GL rows (dict-like), in report display order.
		filters: report filters; only ``account_currency`` is read here.

	Returns:
		The same ``data`` list with ``balance``, ``account_currency`` and
		``bill_no`` set on every row.
	"""
	# NOTE(review): the original also tracked an unused
	# `balance_in_account_currency` local; it was dead code and is removed.
	balance = 0
	inv_details = get_supplier_invoice_details()

	for d in data:
		# Rows without a posting date act as separators (group headers /
		# opening rows); the running balance restarts at them.
		if not d.get('posting_date'):
			balance = 0

		balance = get_balance(d, balance, 'debit', 'credit')
		d['balance'] = balance

		d['account_currency'] = filters.account_currency
		d['bill_no'] = inv_details.get(d.get('against_voucher'), '')

	return data
def get_supplier_invoice_details():
	"""Return a map of submitted Purchase Invoice name -> supplier bill no.

	Only invoices with a non-empty ``bill_no`` are included.
	"""
	rows = frappe.db.sql(""" select name, bill_no from `tabPurchase Invoice`
		where docstatus = 1 and bill_no is not null and bill_no != '' """, as_dict=1)
	return {row.name: row.bill_no for row in rows}
def get_balance(row, balance, debit_field, credit_field):
	"""Return ``balance`` advanced by the row's debit minus its credit.

	Missing fields are treated as zero, so header/separator rows pass the
	balance through unchanged.
	"""
	debit = row.get(debit_field, 0)
	credit = row.get(credit_field, 0)
	return balance + debit - credit
def get_columns(filters):
	"""Return the column definitions for the General Ledger report.

	Amount columns are labelled with the presentation currency when one is
	selected, otherwise with the currency of the chosen (or default) company.
	Accounting-dimension columns appear only when ``include_dimensions`` is
	set in the filters.
	"""
	currency = filters.get("presentation_currency")
	if not currency:
		# Fall back to the company currency; use the default company when
		# no company filter was chosen.
		currency = get_company_currency(filters.get("company") or get_default_company())

	columns = [
		{
			"label": _("GL Entry"),
			"fieldname": "gl_entry",
			"fieldtype": "Link",
			"options": "GL Entry",
			"hidden": 1
		},
		{"label": _("Posting Date"), "fieldname": "posting_date", "fieldtype": "Date", "width": 90},
		{
			"label": _("Account"),
			"fieldname": "account",
			"fieldtype": "Link",
			"options": "Account",
			"width": 180
		},
		{"label": _("Debit ({0})").format(currency), "fieldname": "debit", "fieldtype": "Float", "width": 100},
		{"label": _("Credit ({0})").format(currency), "fieldname": "credit", "fieldtype": "Float", "width": 100},
		{"label": _("Balance ({0})").format(currency), "fieldname": "balance", "fieldtype": "Float", "width": 130},
		{"label": _("Voucher Type"), "fieldname": "voucher_type", "width": 120},
		{
			"label": _("Voucher No"),
			"fieldname": "voucher_no",
			"fieldtype": "Dynamic Link",
			"options": "voucher_type",
			"width": 180
		},
		{"label": _("Against Account"), "fieldname": "against", "width": 120},
		{"label": _("Party Type"), "fieldname": "party_type", "width": 100},
		{"label": _("Party"), "fieldname": "party", "width": 100},
		{"label": _("Project"), "options": "Project", "fieldname": "project", "width": 100}
	]

	if filters.get("include_dimensions"):
		# One extra column per active accounting dimension.
		columns += [
			{
				"label": _(dim.label),
				"options": dim.label,
				"fieldname": dim.fieldname,
				"width": 100
			}
			for dim in get_accounting_dimensions(as_list=False)
		]

	columns += [
		{"label": _("Cost Center"), "options": "Cost Center", "fieldname": "cost_center", "width": 100},
		{"label": _("Against Voucher Type"), "fieldname": "against_voucher_type", "width": 100},
		{
			"label": _("Against Voucher"),
			"fieldname": "against_voucher",
			"fieldtype": "Dynamic Link",
			"options": "against_voucher_type",
			"width": 100
		},
		{"label": _("Supplier Invoice No"), "fieldname": "bill_no", "fieldtype": "Data", "width": 100},
		{"label": _("Remarks"), "fieldname": "remarks", "width": 400}
	]

	return columns
# --- stray dataset-viewer text (extraction artifact, not program code) ---
# true | true
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.