code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import riemann
from riemann import utils
from riemann.tx import shared
from riemann.tx.tx import TxIn, TxOut
from riemann.tx import zcash_shared as z
from typing import cast, Optional, Sequence, Tuple
class OverwinterTx(z.ZcashByteData):
    '''
    A Zcash Overwinter (v3) transaction: transparent inputs and outputs
    plus up to 5 Sprout joinsplits. The wire serialization is accumulated
    during __init__ and the instance is then frozen (immutable).
    '''

    tx_ins: Tuple[TxIn, ...]
    tx_outs: Tuple[TxOut, ...]
    lock_time: bytes
    expiry_height: bytes
    tx_joinsplits: Tuple[z.SproutJoinsplit, ...]
    joinsplit_pubkey: Optional[bytes]
    joinsplit_sig: Optional[bytes]
    header: bytes
    group_id: bytes
    hsigs: Tuple[bytes, ...]
    primary_inputs: Tuple[bytes, ...]
    tx_id_le: bytes
    tx_id: bytes

    def __init__(self,
                 tx_ins: Sequence[TxIn],
                 tx_outs: Sequence[TxOut],
                 lock_time: bytes,
                 expiry_height: bytes,
                 tx_joinsplits: Sequence[z.SproutJoinsplit],
                 joinsplit_pubkey: Optional[bytes],
                 joinsplit_sig: Optional[bytes]):
        '''
        Validate every field, serialize them in wire order, and freeze the
        instance.

        Raises ValueError if the current network does not support
        Overwinter, if any field is malformed, or if the transaction has
        neither transparent inputs nor joinsplits.
        '''
        super().__init__()
        if 'overwinter' not in riemann.get_current_network_name():
            raise ValueError(
                'OverwinterTx not supported by network {}.'
                .format(riemann.get_current_network_name()))
        self.validate_bytes(lock_time, 4)
        self.validate_bytes(expiry_height, 4)
        # Heights above 499999999 would be interpreted as timestamps.
        if utils.le2i(expiry_height) > 499999999:
            raise ValueError('Expiry time too high.'
                             'Expected <= 499999999. Got {}'
                             .format(utils.le2i(expiry_height)))
        for tx_in in tx_ins:
            if not isinstance(tx_in, TxIn):
                raise ValueError(
                    'Invalid TxIn. '
                    'Expected instance of TxIn. Got {}'
                    .format(type(tx_in).__name__))
        for tx_out in tx_outs:
            if not isinstance(tx_out, TxOut):
                raise ValueError(
                    'Invalid TxOut. '
                    'Expected instance of TxOut. Got {}'
                    .format(type(tx_out).__name__))
        if len(tx_joinsplits) > 5:
            raise ValueError('Too many joinsplits. Stop that.')
        for tx_joinsplit in tx_joinsplits:
            if not isinstance(tx_joinsplit, z.SproutJoinsplit):
                raise ValueError(
                    'Invalid Joinsplit. '
                    'Expected instance of SproutJoinsplit. Got {}'
                    .format(type(tx_joinsplit).__name__))
        # The joinsplit pubkey and signature are only on the wire when
        # joinsplits are present.
        if len(tx_joinsplits) != 0:
            self.validate_bytes(joinsplit_pubkey, 32)
            self.validate_bytes(joinsplit_sig, 64)
        if len(tx_joinsplits) == 0 and len(tx_ins) == 0:
            raise ValueError('Transaction must have tx_ins or joinsplits.')

        # Serialize in wire order.
        self += b'\x03\x00\x00\x80'  # Version 3 + fOverwintered
        self += b'\x70\x82\xc4\x03'  # Overwinter Group ID
        self += shared.VarInt(len(tx_ins))
        for tx_in in tx_ins:
            self += tx_in
        self += shared.VarInt(len(tx_outs))
        for tx_out in tx_outs:
            self += tx_out
        self += lock_time
        self += expiry_height
        self += shared.VarInt(len(tx_joinsplits))
        if len(tx_joinsplits) != 0:
            for tx_joinsplit in tx_joinsplits:
                self += tx_joinsplit
            self += cast(bytes, joinsplit_pubkey)
            self += cast(bytes, joinsplit_sig)

        self.header = b'\x03\x00\x00\x80'
        self.group_id = b'\x70\x82\xc4\x03'
        self.version = b'\x03\x00'
        self.tx_ins = tuple(tx_in for tx_in in tx_ins)
        self.tx_outs = tuple(tx_out for tx_out in tx_outs)
        self.lock_time = lock_time
        self.expiry_height = expiry_height
        if len(tx_joinsplits) != 0:
            self.tx_joinsplits = tuple(js for js in tx_joinsplits)
            self.joinsplit_pubkey = joinsplit_pubkey
            self.joinsplit_sig = joinsplit_sig
            # Zcash spec 5.4.1.4 Hsig hash function
            self.hsigs = (tuple(self._hsig(i)
                          for i in range(len(self.tx_joinsplits))))
            self.primary_inputs = (tuple(self._primary_input(i)
                                   for i in range(len(self.tx_joinsplits))))
        else:
            self.tx_joinsplits = tuple()
            self.joinsplit_pubkey = None
            self.joinsplit_sig = None
            self.hsigs = tuple()
            self.primary_inputs = tuple()
        # NB: fixed a duplicated assignment
        # (`self.tx_id_le = self.tx_id_le = ...`) here.
        self.tx_id_le = utils.hash256(self.to_bytes())
        self.tx_id = self.tx_id_le[::-1]
        self._make_immutable()
        if len(self) > 100000:
            raise ValueError(  # pragma: no cover
                'Tx is too large. '
                'Expect less than 100kB. Got: {} bytes'.format(len(self)))

    def calculate_fee(self, input_values: Sequence[int]) -> int:
        '''
        Tx, list(int) -> int
        Fee is (transparent in + joinsplit vpub_new)
        minus (transparent out + joinsplit vpub_old).
        '''
        total_in = sum(input_values)
        total_out = sum([utils.le2i(tx_out.value) for tx_out in self.tx_outs])
        for js in self.tx_joinsplits:
            total_in += utils.le2i(js.vpub_new)
            total_out += utils.le2i(js.vpub_old)
        return total_in - total_out

    def copy(self,
             tx_ins: Optional[Sequence[TxIn]] = None,
             tx_outs: Optional[Sequence[TxOut]] = None,
             lock_time: Optional[bytes] = None,
             expiry_height: Optional[bytes] = None,
             tx_joinsplits: Optional[Sequence[z.SproutJoinsplit]] = None,
             joinsplit_pubkey: Optional[bytes] = None,
             joinsplit_sig: Optional[bytes] = None) -> 'OverwinterTx':
        '''
        OverwinterTx, ... -> OverwinterTx
        Makes a copy. Allows over-writing specific pieces.
        '''
        return OverwinterTx(
            tx_ins=tx_ins if tx_ins is not None else self.tx_ins,
            tx_outs=tx_outs if tx_outs is not None else self.tx_outs,
            lock_time=(lock_time if lock_time is not None
                       else self.lock_time),
            expiry_height=(expiry_height if expiry_height is not None
                           else self.expiry_height),
            tx_joinsplits=(tx_joinsplits if tx_joinsplits is not None
                           else self.tx_joinsplits),
            joinsplit_pubkey=(joinsplit_pubkey if joinsplit_pubkey is not None
                              else self.joinsplit_pubkey),
            joinsplit_sig=(joinsplit_sig if joinsplit_sig is not None
                           else self.joinsplit_sig))

    def _hsig(self, index: int) -> bytes:
        '''hSig for the joinsplit at `index` (Zcash spec 5.4.1.4).'''
        return utils.blake2b(
            data=self._hsig_input(index),
            digest_size=32,
            person=b'ZcashComputehSig')

    def _hsig_input(self, index: int) -> bytes:
        '''
        inputs for the hsig hash
        '''
        hsig_input = z.ZcashByteData()
        hsig_input += self.tx_joinsplits[index].random_seed
        hsig_input += self.tx_joinsplits[index].nullifiers
        hsig_input += cast(bytes, self.joinsplit_pubkey)
        return hsig_input.to_bytes()

    def _primary_input(self, index: int) -> bytes:
        '''
        Primary input for the zkproof
        '''
        primary_input = z.ZcashByteData()
        primary_input += self.tx_joinsplits[index].anchor
        primary_input += self.tx_joinsplits[index].nullifiers
        primary_input += self.tx_joinsplits[index].commitments
        primary_input += self.tx_joinsplits[index].vpub_old
        primary_input += self.tx_joinsplits[index].vpub_new
        primary_input += self.hsigs[index]
        primary_input += self.tx_joinsplits[index].vmacs
        return primary_input.to_bytes()

    @classmethod
    def from_bytes(OverwinterTx, byte_string: bytes) -> 'OverwinterTx':
        '''
        byte-like -> OverwinterTx
        Deserializes a wire-format Overwinter transaction.
        '''
        header = byte_string[0:4]
        group_id = byte_string[4:8]
        if header != b'\x03\x00\x00\x80' or group_id != b'\x70\x82\xc4\x03':
            raise ValueError(
                'Bad header or group ID. Expected {} and {}. Got: {} and {}'
                .format(b'\x03\x00\x00\x80'.hex(),
                        b'\x70\x82\xc4\x03'.hex(),
                        header.hex(),
                        group_id.hex()))
        tx_ins = []
        tx_ins_num = shared.VarInt.from_bytes(byte_string[8:])
        current = 8 + len(tx_ins_num)
        for _ in range(tx_ins_num.number):
            tx_in = TxIn.from_bytes(byte_string[current:])
            current += len(tx_in)
            tx_ins.append(tx_in)
        tx_outs = []
        tx_outs_num = shared.VarInt.from_bytes(byte_string[current:])
        current += len(tx_outs_num)
        for _ in range(tx_outs_num.number):
            tx_out = TxOut.from_bytes(byte_string[current:])
            current += len(tx_out)
            tx_outs.append(tx_out)
        lock_time = byte_string[current:current + 4]
        current += 4
        expiry_height = byte_string[current:current + 4]
        current += 4
        tx_joinsplits: Sequence[z.SproutJoinsplit]
        if current == len(byte_string):
            # No joinsplits
            tx_joinsplits = tuple()
            joinsplit_pubkey = None
            joinsplit_sig = None
        else:
            tx_joinsplits = []
            tx_joinsplits_num = shared.VarInt.from_bytes(byte_string[current:])
            # NB: fixed a copy-paste bug -- this previously advanced by
            # len(tx_outs_num), misparsing txns whose output-count VarInt
            # width differs from the joinsplit-count VarInt width.
            current += len(tx_joinsplits_num)
            for _ in range(tx_joinsplits_num.number):
                tx_joinsplit = z.SproutJoinsplit.from_bytes(
                    byte_string[current:])
                current += len(tx_joinsplit)
                tx_joinsplits.append(tx_joinsplit)
            joinsplit_pubkey = byte_string[current:current + 32]
            current += 32
            joinsplit_sig = byte_string[current:current + 64]
        return OverwinterTx(
            tx_ins=tx_ins,
            tx_outs=tx_outs,
            lock_time=lock_time,
            expiry_height=expiry_height,
            tx_joinsplits=tx_joinsplits,
            joinsplit_pubkey=joinsplit_pubkey,
            joinsplit_sig=joinsplit_sig)

    def is_witness(self) -> bool:
        '''Overwinter txns have no segwit-style witnesses.'''
        return False

    def sighash_all(self, anyone_can_pay: bool = False, **kwargs) -> bytes:
        '''SIGHASH_ALL digest. NB: anyone_can_pay is now forwarded.'''
        return self.sighash(
            sighash_type=shared.SIGHASH_ALL,
            anyone_can_pay=anyone_can_pay,
            **kwargs)

    def sighash_single(self, anyone_can_pay: bool = False, **kwargs) -> bytes:
        '''SIGHASH_SINGLE digest. NB: anyone_can_pay is now forwarded.'''
        return self.sighash(
            sighash_type=shared.SIGHASH_SINGLE,
            anyone_can_pay=anyone_can_pay,
            **kwargs)

    def sighash(self,
                sighash_type: int,
                prevout_value: bytes,
                script_code: bytes,
                index: int = 0,
                joinsplit: bool = False,
                anyone_can_pay: bool = False) -> bytes:
        '''
        ZIP143
        https://github.com/zcash/zips/blob/master/zip-0143.rst
        '''
        if joinsplit and anyone_can_pay:
            raise ValueError('ANYONECANPAY can\'t be used with joinsplits')
        data = z.ZcashByteData()
        data += self.header
        data += self.group_id
        data += self._hash_prevouts(anyone_can_pay)
        data += self._hash_sequence(sighash_type, anyone_can_pay)
        data += self._hash_outputs(sighash_type, index)
        data += self._hash_joinsplits()
        data += self.lock_time
        data += self.expiry_height
        if anyone_can_pay:
            sighash_type = sighash_type | shared.SIGHASH_ANYONECANPAY
        data += utils.i2le_padded(sighash_type, 4)
        # When signing a joinsplit, the transparent-input fields are absent.
        if not joinsplit:
            data += self.tx_ins[index].outpoint
            data += script_code
            data += prevout_value
            data += self.tx_ins[index].sequence
        return utils.blake2b(
            data=data.to_bytes(),
            digest_size=32,
            person=b'ZcashSigHash' + bytes.fromhex('191ba85b'))  # Branch ID

    def _hash_prevouts(self, anyone_can_pay: bool) -> bytes:
        '''hashPrevouts per ZIP143; zeroed under ANYONECANPAY.'''
        if anyone_can_pay:
            return b'\x00' * 32
        data = z.ZcashByteData()
        for tx_in in self.tx_ins:
            data += tx_in.outpoint
        return utils.blake2b(
            data=data.to_bytes(),
            digest_size=32,
            person=b'ZcashPrevoutHash')

    def _hash_sequence(self, sighash_type: int, anyone_can_pay: bool) -> bytes:
        '''hashSequence per ZIP143; zeroed for SINGLE or ANYONECANPAY.'''
        if anyone_can_pay or sighash_type == shared.SIGHASH_SINGLE:
            return b'\x00' * 32
        data = z.ZcashByteData()
        for tx_in in self.tx_ins:
            data += tx_in.sequence
        return utils.blake2b(
            data=data.to_bytes(),
            digest_size=32,
            person=b'ZcashSequencHash')

    def _hash_outputs(self, sighash_type: int, index: int) -> bytes:
        '''hashOutputs per ZIP143: all outputs for ALL, one for SINGLE.'''
        if sighash_type not in [shared.SIGHASH_ALL, shared.SIGHASH_SINGLE]:
            return b'\x00' * 32
        data = z.ZcashByteData()
        if sighash_type == shared.SIGHASH_ALL:
            for tx_out in self.tx_outs:
                data += tx_out
        if sighash_type == shared.SIGHASH_SINGLE:
            # NB: was `index > len(...)`, which let index == len fall
            # through to an IndexError below.
            if index >= len(self.tx_outs):
                raise NotImplementedError(
                    'I refuse to implement the SIGHASH_SINGLE bug.')
            data += self.tx_outs[index]
        return utils.blake2b(
            data=data.to_bytes(),
            digest_size=32,
            person=b'ZcashOutputsHash')

    def _hash_joinsplits(self) -> bytes:
        '''hashJoinSplits per ZIP143; zeroed when there are none.'''
        if len(self.tx_joinsplits) == 0:
            return b'\x00' * 32
        data = z.ZcashByteData()
        for joinsplit in self.tx_joinsplits:
            data += joinsplit
        data += cast(bytes, self.joinsplit_pubkey)
        return utils.blake2b(
            data=data.to_bytes(),
            digest_size=32,
            person=b'ZcashJSplitsHash')
import riemann
from riemann import utils
from riemann.tx import shared
from riemann.tx.tx import TxIn, TxOut
from riemann.tx import zcash_shared as z
from typing import cast, Optional, Sequence, Tuple
class SproutTx(z.ZcashByteData):
    '''
    A Zcash Sprout transaction (version 1 or 2). Version 2 txns may carry
    up to 5 joinsplits plus a joinsplit pubkey and signature. The wire
    serialization is accumulated during __init__ and then frozen.
    '''

    tx_ins: Tuple[TxIn, ...]
    tx_outs: Tuple[TxOut, ...]
    lock_time: bytes
    tx_joinsplits: Tuple[z.SproutJoinsplit, ...]
    joinsplit_pubkey: Optional[bytes]
    joinsplit_sig: Optional[bytes]
    version: bytes
    hsigs: Tuple[bytes, ...]
    primary_inputs: Tuple[bytes, ...]
    tx_id_le: bytes
    tx_id: bytes

    def __init__(self,
                 version: bytes,
                 tx_ins: Sequence[TxIn],
                 tx_outs: Sequence[TxOut],
                 lock_time: bytes,
                 tx_joinsplits: Optional[Sequence[z.SproutJoinsplit]],
                 joinsplit_pubkey: Optional[bytes],
                 joinsplit_sig: Optional[bytes]):
        '''
        Validate every field, serialize them in wire order, and freeze
        the instance. tx_joinsplits may be None (treated as empty), which
        is what from_bytes passes for version-1 transactions.
        '''
        super().__init__()
        if 'sprout' not in riemann.get_current_network_name():
            raise ValueError(
                'SproutTx not supported by network {}.'
                .format(riemann.get_current_network_name()))
        # NB: normalize None -> empty so later len()/iteration doesn't
        # raise TypeError (from_bytes passes None for v1 txns).
        if tx_joinsplits is None:
            tx_joinsplits = tuple()
        self.validate_bytes(version, 4)
        self.validate_bytes(lock_time, 4)
        for tx_in in tx_ins:
            if not isinstance(tx_in, TxIn):
                raise ValueError(
                    'Invalid TxIn. '
                    'Expected instance of TxIn. Got {}'
                    .format(type(tx_in).__name__))
        for tx_out in tx_outs:
            if not isinstance(tx_out, TxOut):
                raise ValueError(
                    'Invalid TxOut. '
                    'Expected instance of TxOut. Got {}'
                    .format(type(tx_out).__name__))
        if utils.le2i(version) == 1:
            if len(tx_joinsplits) != 0:
                raise ValueError('Joinsplits not allowed in version 1 txns.')
            if tx_ins is None or len(tx_ins) == 0:
                raise ValueError('Version 1 txns must have at least 1 input.')
        if utils.le2i(version) == 2:
            if len(tx_joinsplits) > 5:
                raise ValueError('Too many joinsplits. Stop that.')
            for tx_joinsplit in tx_joinsplits:
                if not isinstance(tx_joinsplit, z.SproutJoinsplit):
                    raise ValueError(
                        'Invalid Joinsplit. '
                        'Expected instance of SproutJoinsplit. Got {}'
                        .format(type(tx_joinsplit).__name__))
            self.validate_bytes(joinsplit_pubkey, 32)
            # An empty sig is allowed: _sighash_prep blanks it on copies.
            if joinsplit_sig is not None and joinsplit_sig != b'':
                self.validate_bytes(joinsplit_sig, 64)
        if utils.le2i(version) not in [1, 2]:
            raise ValueError('Version must be 1 or 2. '
                             'Got: {}'.format(utils.le2i(version)))

        # Serialize in wire order.
        self += version
        self += shared.VarInt(len(tx_ins))
        for tx_in in tx_ins:
            self += tx_in
        self += shared.VarInt(len(tx_outs))
        for tx_out in tx_outs:
            self += tx_out
        self += lock_time
        if version == utils.i2le_padded(2, 4):
            self += shared.VarInt(len(tx_joinsplits))
            for tx_joinsplit in tx_joinsplits:
                self += tx_joinsplit
            self += cast(bytes, joinsplit_pubkey)
            self += cast(bytes, joinsplit_sig)

        self.version = version
        self.tx_ins = tuple(tx_in for tx_in in tx_ins)
        self.tx_outs = tuple(tx_out for tx_out in tx_outs)
        self.tx_joinsplits = tuple(js for js in tx_joinsplits)
        self.lock_time = lock_time
        if version == utils.i2le_padded(2, 4):
            self.joinsplit_pubkey = joinsplit_pubkey
            self.joinsplit_sig = joinsplit_sig
            # Zcash spec 5.4.1.4 Hsig hash function
            self.hsigs = (tuple(self._hsig(i)
                          for i in range(len(self.tx_joinsplits))))
            self.primary_inputs = (tuple(self._primary_input(i)
                                   for i in range(len(self.tx_joinsplits))))
        else:
            self.joinsplit_pubkey = None
            self.joinsplit_sig = None
            self.hsigs = tuple()
            self.primary_inputs = tuple()
        self.tx_id_le = utils.hash256(self.to_bytes())
        # NB: reuse tx_id_le rather than hashing a second time.
        self.tx_id = self.tx_id_le[::-1]
        self._make_immutable()
        if len(self) > 100000:
            raise ValueError(  # pragma: no cover
                'Tx is too large. '
                'Expect less than 100kB. Got: {} bytes'.format(len(self)))

    def _hsig(self, index: int) -> bytes:
        '''hSig for the joinsplit at `index` (Zcash spec 5.4.1.4).'''
        return utils.blake2b(
            data=self._hsig_input(index),
            digest_size=32,
            person=b'ZcashComputehSig')

    def _hsig_input(self, index: int) -> bytes:
        '''
        inputs for the hsig hash
        '''
        hsig_input = z.ZcashByteData()
        hsig_input += self.tx_joinsplits[index].random_seed
        hsig_input += self.tx_joinsplits[index].nullifiers
        hsig_input += cast(bytes, self.joinsplit_pubkey)
        return hsig_input.to_bytes()

    def _primary_input(self, index: int) -> bytes:
        '''
        Primary input for the zkproof
        '''
        primary_input = z.ZcashByteData()
        primary_input += self.tx_joinsplits[index].anchor
        primary_input += self.tx_joinsplits[index].nullifiers
        primary_input += self.tx_joinsplits[index].commitments
        primary_input += self.tx_joinsplits[index].vpub_old
        primary_input += self.tx_joinsplits[index].vpub_new
        primary_input += self.hsigs[index]
        primary_input += self.tx_joinsplits[index].vmacs
        return primary_input.to_bytes()

    @classmethod
    def from_bytes(SproutTx, byte_string: bytes) -> 'SproutTx':
        '''
        byte-like -> SproutTx
        Deserializes a wire-format Sprout transaction.
        '''
        version = byte_string[0:4]
        tx_ins = []
        tx_ins_num = shared.VarInt.from_bytes(byte_string[4:])
        current = 4 + len(tx_ins_num)
        for _ in range(tx_ins_num.number):
            tx_in = TxIn.from_bytes(byte_string[current:])
            current += len(tx_in)
            tx_ins.append(tx_in)
        tx_outs = []
        tx_outs_num = shared.VarInt.from_bytes(byte_string[current:])
        current += len(tx_outs_num)
        for _ in range(tx_outs_num.number):
            tx_out = TxOut.from_bytes(byte_string[current:])
            current += len(tx_out)
            tx_outs.append(tx_out)
        lock_time = byte_string[current:current + 4]
        current += 4
        tx_joinsplits = None
        joinsplit_pubkey = None
        joinsplit_sig = None
        if utils.le2i(version) == 2:  # If we expect joinsplits
            tx_joinsplits = []
            tx_joinsplits_num = shared.VarInt.from_bytes(byte_string[current:])
            current += len(tx_joinsplits_num)
            for _ in range(tx_joinsplits_num.number):
                joinsplit = z.SproutJoinsplit.from_bytes(byte_string[current:])
                current += len(joinsplit)
                tx_joinsplits.append(joinsplit)
            joinsplit_pubkey = byte_string[current:current + 32]
            current += 32
            joinsplit_sig = byte_string[current:current + 64]
        return SproutTx(
            version=version,
            tx_ins=tx_ins,
            tx_outs=tx_outs,
            lock_time=lock_time,
            tx_joinsplits=tx_joinsplits,
            joinsplit_pubkey=joinsplit_pubkey,
            joinsplit_sig=joinsplit_sig)

    def calculate_fee(self, input_values: Sequence[int]) -> int:
        '''
        Tx, list(int) -> int
        Fee is (transparent in + joinsplit vpub_new)
        minus (transparent out + joinsplit vpub_old).
        '''
        total_in = sum(input_values)
        total_out = sum([utils.le2i(tx_out.value) for tx_out in self.tx_outs])
        for js in self.tx_joinsplits:
            total_in += utils.le2i(js.vpub_new)
            total_out += utils.le2i(js.vpub_old)
        return total_in - total_out

    def copy(self,
             version: Optional[bytes] = None,
             tx_ins: Optional[Sequence[TxIn]] = None,
             tx_outs: Optional[Sequence[TxOut]] = None,
             lock_time: Optional[bytes] = None,
             tx_joinsplits: Optional[Sequence[z.SproutJoinsplit]] = None,
             joinsplit_pubkey: Optional[bytes] = None,
             joinsplit_sig: Optional[bytes] = None) -> 'SproutTx':
        '''
        SproutTx, ... -> SproutTx
        Makes a copy. Allows over-writing specific pieces.
        '''
        return SproutTx(
            version=version if version is not None else self.version,
            tx_ins=tx_ins if tx_ins is not None else self.tx_ins,
            tx_outs=tx_outs if tx_outs is not None else self.tx_outs,
            lock_time=(lock_time if lock_time is not None
                       else self.lock_time),
            tx_joinsplits=(tx_joinsplits if tx_joinsplits is not None
                           else self.tx_joinsplits),
            joinsplit_pubkey=(joinsplit_pubkey if joinsplit_pubkey is not None
                              else self.joinsplit_pubkey),
            joinsplit_sig=(joinsplit_sig if joinsplit_sig is not None
                           else self.joinsplit_sig))

    def _sighash_prep(self, index: int, script: bytes) -> 'SproutTx':
        '''
        SproutTx, int, byte-like -> SproutTx
        Sighashes suck
        Performs the sighash setup described here:
        https://en.bitcoin.it/wiki/OP_CHECKSIG#How_it_works
        https://bitcoin.stackexchange.com/questions/3374/how-to-redeem-a-basic-tx
        We save on complexity by refusing to support OP_CODESEPARATOR
        '''
        if len(self.tx_ins) == 0:
            return self.copy(joinsplit_sig=b'')
        # 0 out scripts in tx_ins
        copy_tx_ins = [tx_in.copy(stack_script=b'', redeem_script=b'')
                       for tx_in in self.tx_ins]
        # NB: The script for the current transaction input in txCopy is set to
        #     subScript (lead in by its length as a var-integer encoded!)
        to_strip = shared.VarInt.from_bytes(script)
        copy_tx_ins[index] = \
            copy_tx_ins[index].copy(redeem_script=script[len(to_strip):])
        return self.copy(tx_ins=copy_tx_ins, joinsplit_sig=b'')

    def sighash_all(self,
                    index: int,
                    script: bytes,
                    anyone_can_pay: bool = False):
        '''
        SproutTx, int, byte-like, byte-like, bool -> bytearray
        Sighashes suck
        Generates the hash to be signed with SIGHASH_ALL
        https://en.bitcoin.it/wiki/OP_CHECKSIG#Hashtype_SIGHASH_ALL_.28default.29
        '''
        copy_tx = self._sighash_prep(index=index, script=script)
        if anyone_can_pay:
            return self._sighash_anyone_can_pay(
                index=index, copy_tx=copy_tx, sighash_type=shared.SIGHASH_ALL)
        return self._sighash_final_hashing(copy_tx, shared.SIGHASH_ALL)

    def sighash_single(self,
                       index: int,
                       script: bytes,
                       anyone_can_pay: bool = False):
        '''
        SproutTx, int, byte-like, byte-like, bool -> bytearray
        Sighashes suck
        Generates the hash to be signed with SIGHASH_SINGLE
        https://en.bitcoin.it/wiki/OP_CHECKSIG#Procedure_for_Hashtype_SIGHASH_SINGLE
        https://bitcoin.stackexchange.com/questions/3890/for-sighash-single-do-the-outputs-other-than-at-the-input-index-have-8-bytes-or
        https://github.com/petertodd/python-bitcoinlib/blob/051ec4e28c1f6404fd46713c2810d4ebbed38de4/bitcoin/core/script.py#L913-L965
        '''
        # NB: was `if self.tx_joinsplits is not None`, which is always true
        #     (the attribute is always a tuple), so this method raised
        #     unconditionally. Only forbid when joinsplits are present.
        if len(self.tx_joinsplits) != 0:
            raise ValueError('Sighash single not permitted with joinsplits.')
        if index >= len(self.tx_outs):
            raise NotImplementedError(
                'I refuse to implement the SIGHASH_SINGLE bug.')
        copy_tx = self._sighash_prep(index=index, script=script)
        # Outputs are truncated to index + 1; all but the one we're signing
        # are set to -1 value and null scripts.
        # NB: was sized by len(tx_ins), which produced the wrong number of
        #     outputs (and an IndexError when tx_ins was shorter).
        copy_tx_outs = [TxOut(value=b'\xff' * 8, output_script=b'')
                        for _ in range(index + 1)]  # Null them all
        copy_tx_outs[index] = copy_tx.tx_outs[index]  # Fix the current one
        # Other tx_ins sequence numbers are set to 0
        copy_tx_ins = [tx_in.copy(sequence=b'\x00\x00\x00\x00')
                       for tx_in in copy_tx.tx_ins]  # Set all to 0
        copy_tx_ins[index] = copy_tx.tx_ins[index]  # Fix the current one
        copy_tx = copy_tx.copy(
            tx_ins=copy_tx_ins,
            tx_outs=copy_tx_outs)
        if anyone_can_pay:  # Forward onwards
            return self._sighash_anyone_can_pay(
                index, copy_tx, shared.SIGHASH_SINGLE)
        return self._sighash_final_hashing(copy_tx, shared.SIGHASH_SINGLE)

    def _sighash_anyone_can_pay(
            self,
            index: int,
            copy_tx: 'SproutTx',
            sighash_type: int) -> bytes:
        '''
        int, SproutTx, int -> bytes
        Applies SIGHASH_ANYONECANPAY procedure.
        Should be called by another SIGHASH procedure.
        Not on its own.
        https://en.bitcoin.it/wiki/OP_CHECKSIG#Procedure_for_Hashtype_SIGHASH_ANYONECANPAY
        '''
        # NB: was `if self.tx_joinsplits is not None` -- always true.
        if len(self.tx_joinsplits) != 0:
            raise ValueError(
                'Sighash anyonecanpay not permitted with joinsplits.')
        # The txCopy input vector is resized to a length of one.
        copy_tx_ins = [copy_tx.tx_ins[index]]
        copy_tx = copy_tx.copy(tx_ins=copy_tx_ins)
        return self._sighash_final_hashing(
            copy_tx, sighash_type | shared.SIGHASH_ANYONECANPAY)

    def _sighash_final_hashing(
            self,
            copy_tx: 'SproutTx',
            sighash_type: int) -> bytes:
        '''
        SproutTx, int -> bytes
        Returns the hash that should be signed
        https://en.bitcoin.it/wiki/OP_CHECKSIG#Procedure_for_Hashtype_SIGHASH_ANYONECANPAY
        '''
        sighash = z.ZcashByteData()
        sighash += copy_tx.to_bytes()
        sighash += utils.i2le_padded(sighash_type, 4)
        return utils.hash256(sighash.to_bytes())
import riemann
from riemann import utils
from riemann.tx import shared
from riemann.tx.tx import TxIn, TxOut
from riemann.tx import zcash_shared as z
from typing import cast, Optional, Sequence, Tuple
class SaplingZkproof(z.ZcashByteData):
    '''
    The zero-knowledge proof attached to Sapling spends/outputs: three
    fixed-width segments (48 + 96 + 48 = 192 bytes on the wire).
    Instances are immutable once constructed.
    '''

    pi_sub_a: bytes
    pi_sub_b: bytes
    pi_sub_c: bytes

    def __init__(self, pi_sub_a: bytes, pi_sub_b: bytes, pi_sub_c: bytes):
        super().__init__()
        segments = ((pi_sub_a, 48), (pi_sub_b, 96), (pi_sub_c, 48))
        # Validate all widths first, then serialize in wire order.
        for blob, width in segments:
            self.validate_bytes(blob, width)
        for blob, _ in segments:
            self += blob
        self.pi_sub_a = pi_sub_a
        self.pi_sub_b = pi_sub_b
        self.pi_sub_c = pi_sub_c
        self._make_immutable()

    @classmethod
    def from_bytes(SaplingZkproof, byte_string: bytes) -> 'SaplingZkproof':
        '''Parse a 192-byte serialized proof into its three segments.'''
        a = byte_string[0:48]
        b = byte_string[48:144]
        c = byte_string[144:192]
        return SaplingZkproof(pi_sub_a=a, pi_sub_b=b, pi_sub_c=c)
class SaplingShieldedSpend(z.ZcashByteData):
    '''
    A Sapling shielded spend: value commitment, anchor, nullifier,
    verification key, zk proof, and spend authorization signature
    (32 + 32 + 32 + 32 + 192 + 64 = 384 bytes on the wire). Immutable.
    '''

    cv: bytes
    anchor: bytes
    nullifier: bytes
    rk: bytes
    zkproof: SaplingZkproof
    spend_auth_sig: bytes

    def __init__(self,
                 cv: bytes,
                 anchor: bytes,
                 nullifier: bytes,
                 rk: bytes,
                 zkproof: SaplingZkproof,
                 spend_auth_sig: bytes):
        super().__init__()
        # Check each fixed-width field, then the proof's type.
        for blob, width in ((cv, 32), (anchor, 32), (nullifier, 32),
                            (rk, 32), (spend_auth_sig, 64)):
            self.validate_bytes(blob, width)
        if not isinstance(zkproof, SaplingZkproof):
            raise ValueError(
                'Invalid zkproof. '
                'Expected instance of SaplingZkproof. Got {}'
                .format(type(zkproof).__name__))
        # Serialize in wire order.
        for piece in (cv, anchor, nullifier, rk, zkproof, spend_auth_sig):
            self += piece
        self.cv = cv
        self.anchor = anchor
        self.nullifier = nullifier
        self.rk = rk
        self.zkproof = zkproof
        self.spend_auth_sig = spend_auth_sig
        self._make_immutable()

    @classmethod
    def from_bytes(SaplingShieldedSpend,
                   byte_string: bytes) -> 'SaplingShieldedSpend':
        '''Parse a 384-byte serialized shielded spend.'''
        fields = {}
        cursor = 0
        for name, width in (('cv', 32), ('anchor', 32), ('nullifier', 32),
                            ('rk', 32), ('zkproof', 192),
                            ('spend_auth_sig', 64)):
            fields[name] = byte_string[cursor:cursor + width]
            cursor += width
        return SaplingShieldedSpend(
            cv=fields['cv'],
            anchor=fields['anchor'],
            nullifier=fields['nullifier'],
            rk=fields['rk'],
            zkproof=SaplingZkproof.from_bytes(fields['zkproof']),
            spend_auth_sig=fields['spend_auth_sig'])
class SaplingShieldedOutput(z.ZcashByteData):
    '''
    A Sapling shielded output: value commitment, note commitment,
    ephemeral key, the two note ciphertexts, and a zk proof
    (32 + 32 + 32 + 580 + 80 + 192 = 948 bytes on the wire). Immutable.
    '''

    cv: bytes
    cmu: bytes
    ephemeral_key: bytes
    enc_ciphertext: bytes
    out_ciphertext: bytes
    zkproof: SaplingZkproof

    def __init__(self,
                 cv: bytes,
                 cmu: bytes,
                 ephemeral_key: bytes,
                 enc_ciphertext: bytes,
                 out_ciphertext: bytes,
                 zkproof: SaplingZkproof):
        super().__init__()
        # Check each fixed-width field, then the proof's type.
        for blob, width in ((cv, 32), (cmu, 32), (ephemeral_key, 32),
                            (enc_ciphertext, 580), (out_ciphertext, 80)):
            self.validate_bytes(blob, width)
        if not isinstance(zkproof, SaplingZkproof):
            raise ValueError(
                'Invalid zkproof. '
                'Expected instance of SaplingZkproof. Got {}'
                .format(type(zkproof).__name__))
        # Serialize in wire order.
        for piece in (cv, cmu, ephemeral_key, enc_ciphertext,
                      out_ciphertext, zkproof):
            self += piece
        self.cv = cv
        self.cmu = cmu
        self.ephemeral_key = ephemeral_key
        self.enc_ciphertext = enc_ciphertext
        self.out_ciphertext = out_ciphertext
        self.zkproof = zkproof
        self._make_immutable()

    @classmethod
    def from_bytes(
            SaplingShieldedOutput,
            byte_string: bytes) -> 'SaplingShieldedOutput':
        '''Parse a 948-byte serialized shielded output.'''
        fields = {}
        cursor = 0
        for name, width in (('cv', 32), ('cmu', 32), ('ephemeral_key', 32),
                            ('enc_ciphertext', 580), ('out_ciphertext', 80),
                            ('zkproof', 192)):
            fields[name] = byte_string[cursor:cursor + width]
            cursor += width
        return SaplingShieldedOutput(
            cv=fields['cv'],
            cmu=fields['cmu'],
            ephemeral_key=fields['ephemeral_key'],
            enc_ciphertext=fields['enc_ciphertext'],
            out_ciphertext=fields['out_ciphertext'],
            zkproof=SaplingZkproof.from_bytes(fields['zkproof']))
class SaplingJoinsplit(z.ZcashByteData):
    '''
    A Sprout-style joinsplit carried in a Sapling (v4) transaction.
    1698 bytes on the wire. At most one of vpub_old / vpub_new may be
    non-zero. Immutable once constructed.
    '''

    vpub_old: bytes
    vpub_new: bytes
    anchor: bytes
    nullifiers: bytes
    commitments: bytes
    ephemeral_key: bytes
    random_seed: bytes
    vmacs: bytes
    zkproof: SaplingZkproof
    encoded_notes: bytes

    def __init__(self,
                 vpub_old: bytes,
                 vpub_new: bytes,
                 anchor: bytes,
                 nullifiers: bytes,
                 commitments: bytes,
                 ephemeral_key: bytes,
                 random_seed: bytes,
                 vmacs: bytes,
                 zkproof: SaplingZkproof,
                 encoded_notes: bytes):
        super().__init__()
        if not isinstance(zkproof, SaplingZkproof):
            raise ValueError(
                'Invalid zkproof. '
                'Expected instance of SaplingZkproof. Got {}'
                .format(type(zkproof).__name__))
        # Value can flow into OR out of the shielded pool, not both.
        if (utils.le2i(vpub_old) != 0 and utils.le2i(vpub_new) != 0):
            raise ValueError('vpub_old or vpub_new must be zero')
        # Check every fixed-width field in declaration order.
        for blob, width in ((vpub_old, 8), (vpub_new, 8), (anchor, 32),
                            (nullifiers, 64), (commitments, 64),
                            (ephemeral_key, 32), (random_seed, 32),
                            (vmacs, 64), (encoded_notes, 1202)):
            self.validate_bytes(blob, width)
        # Serialize in wire order.
        for piece in (vpub_old, vpub_new, anchor, nullifiers, commitments,
                      ephemeral_key, random_seed, vmacs, zkproof,
                      encoded_notes):
            self += piece
        self.vpub_old = vpub_old
        self.vpub_new = vpub_new
        self.anchor = anchor
        self.nullifiers = nullifiers
        self.commitments = commitments
        self.ephemeral_key = ephemeral_key
        self.random_seed = random_seed
        self.vmacs = vmacs
        self.zkproof = zkproof
        self.encoded_notes = encoded_notes
        self._make_immutable()

    @classmethod
    def from_bytes(SaplingJoinsplit, byte_string: bytes) -> 'SaplingJoinsplit':
        '''Parse a 1698-byte serialized joinsplit.'''
        widths = (8, 8, 32, 64, 64, 32, 32, 64, 192, 1202)
        chunks = []
        cursor = 0
        for width in widths:
            chunks.append(byte_string[cursor:cursor + width])
            cursor += width
        return SaplingJoinsplit(
            vpub_old=chunks[0],
            vpub_new=chunks[1],
            anchor=chunks[2],
            nullifiers=chunks[3],
            commitments=chunks[4],
            ephemeral_key=chunks[5],
            random_seed=chunks[6],
            vmacs=chunks[7],
            zkproof=SaplingZkproof.from_bytes(chunks[8]),
            encoded_notes=chunks[9])
class SaplingTx(z.ZcashByteData):
tx_ins: Tuple[TxIn, ...]
tx_outs: Tuple[TxOut, ...]
lock_time: bytes
expiry_height: bytes
value_balance: bytes
tx_shielded_spends: Tuple[SaplingShieldedSpend, ...]
tx_shielded_outputs: Tuple[SaplingShieldedOutput, ...]
tx_joinsplits: Tuple[SaplingJoinsplit, ...]
joinsplit_pubkey: Optional[bytes]
joinsplit_sig: Optional[bytes]
binding_sig: Optional[bytes]
hsigs: Tuple[bytes, ...]
primary_inputs: Tuple[bytes, ...]
tx_id_le: bytes
tx_id: bytes
def __init__(self,
tx_ins: Sequence[TxIn],
tx_outs: Sequence[TxOut],
lock_time: bytes,
expiry_height: bytes,
value_balance: bytes,
tx_shielded_spends: Sequence[SaplingShieldedSpend],
tx_shielded_outputs: Sequence[SaplingShieldedOutput],
tx_joinsplits: Sequence[SaplingJoinsplit],
joinsplit_pubkey: Optional[bytes],
joinsplit_sig: Optional[bytes],
binding_sig: Optional[bytes]):
super().__init__()
if 'sapling' not in riemann.get_current_network_name():
raise ValueError(
'SaplingTx not supported by network {}.'
.format(riemann.get_current_network_name()))
self.validate_bytes(lock_time, 4)
self.validate_bytes(expiry_height, 4)
self.validate_bytes(value_balance, 8)
if utils.le2i(expiry_height) > 499999999:
raise ValueError('Expiry time too high.'
'Expected <= 499999999. Got {}'
.format(utils.le2i(expiry_height)))
if (len(tx_shielded_spends) + len(tx_shielded_outputs) == 0
and value_balance != b'\x00' * 8):
raise ValueError('If no shielded inputs or outputs, value balance '
'must be 8 0-bytes. Got {}'
.format(value_balance.hex()))
elif binding_sig is not None:
self.validate_bytes(binding_sig, 64)
for tx_in in tx_ins:
if not isinstance(tx_in, TxIn):
raise ValueError(
'Invalid TxIn. '
'Expected instance of TxOut. Got {}'
.format(type(tx_in).__name__))
for tx_out in tx_outs:
if not isinstance(tx_out, TxOut):
raise ValueError(
'Invalid TxOut. '
'Expected instance of TxOut. Got {}'
.format(type(tx_out).__name__))
if len(tx_joinsplits) > 5:
raise ValueError('Too many joinsplits. Stop that.')
for shielded_spend in tx_shielded_spends:
if not isinstance(shielded_spend, SaplingShieldedSpend):
raise ValueError(
'Invalid shielded spend. '
'Expected instance of SaplingShieldedSpend. Got {}'
.format(type(shielded_spend).__name__))
for shielded_output in tx_shielded_outputs:
if not isinstance(shielded_output, SaplingShieldedOutput):
raise ValueError(
'Invalid shielded output. '
'Expected instance of SaplingShieldedOutput. Got {}'
.format(type(shielded_output).__name__))
for tx_joinsplit in tx_joinsplits:
if not isinstance(tx_joinsplit, SaplingJoinsplit):
raise ValueError(
'Invalid Joinsplit. '
'Expected instance of SaplingJoinsplit. Got {}'
.format(type(tx_joinsplit).__name__))
if len(tx_joinsplits) != 0:
self.validate_bytes(joinsplit_pubkey, 32)
self.validate_bytes(joinsplit_sig, 64)
if len(tx_joinsplits) + len(tx_ins) + len(tx_shielded_spends) == 0:
raise ValueError('Transaction must have some input value.')
self += b'\x04\x00\x00\x80' # Sapling is always v4 with overwintered
self += b'\x85\x20\x2f\x89' # Sapling version group id
self += shared.VarInt(len(tx_ins))
for tx_in in tx_ins:
self += tx_in
self += shared.VarInt(len(tx_outs))
for tx_out in tx_outs:
self += tx_out
self += lock_time
self += expiry_height
self += value_balance
self += shared.VarInt(len(tx_shielded_spends))
if len(tx_shielded_spends) != 0:
for shielded_spend in tx_shielded_spends:
self += shielded_spend
self += shared.VarInt(len(tx_shielded_outputs))
if len(tx_shielded_outputs) != 0:
for shielded_output in tx_shielded_outputs:
self += shielded_output
self += shared.VarInt(len(tx_joinsplits))
if len(tx_joinsplits) != 0:
for tx_joinsplit in tx_joinsplits:
self += tx_joinsplit
self += cast(bytes, joinsplit_pubkey)
self += cast(bytes, joinsplit_sig)
if len(tx_shielded_outputs) + len(tx_shielded_spends) != 0:
self += cast(bytes, binding_sig)
self.binding_sig = binding_sig
else:
self.binding_sig = None
self.header = b'\x04\x00\x00\x80' # Sapling is always v4
self.group_id = b'\x85\x20\x2f\x89' # Sapling version group id
self.tx_ins = tuple(tx_in for tx_in in tx_ins)
self.tx_outs = tuple(tx_out for tx_out in tx_outs)
self.lock_time = lock_time
self.expiry_height = expiry_height
self.value_balance = value_balance
if len(tx_shielded_spends) != 0:
self.tx_shielded_spends = tuple(ss for ss in tx_shielded_spends)
else:
self.tx_shielded_spends = tuple()
if len(tx_shielded_outputs) != 0:
self.tx_shielded_outputs = tuple(so for so in tx_shielded_outputs)
else:
self.tx_shielded_outputs = tuple()
if len(tx_joinsplits) != 0:
self.tx_joinsplits = tuple(js for js in tx_joinsplits)
self.joinsplit_pubkey = joinsplit_pubkey
self.joinsplit_sig = joinsplit_sig
# Zcash spec 5.4.1.4 Hsig hash function
self.hsigs = (tuple(self._hsig(i)
for i in range(len(self.tx_joinsplits))))
self.primary_inputs = (tuple(self._primary_input(i)
for i in range(len(self.tx_joinsplits))))
else:
self.tx_joinsplits = tuple()
self.joinsplit_pubkey = None
self.joinsplit_sig = None
self.hsigs = tuple()
self.primary_inputs = tuple()
self.tx_id_le = utils.hash256(self.to_bytes())
self.tx_id = self.tx_id_le[::-1]
self._make_immutable()
if len(self) > 100000:
raise ValueError( # pragma: no cover
'Tx is too large. '
'Expect less than 100kB. Got: {} bytes'.format(len(self)))
def calculate_fee(self, input_values: Sequence[int]) -> int:
    '''
    Calculate the miner fee paid by this transaction.

    Fee is total transparent input (plus joinsplit vpub_new, which adds
    funds to the transparent pool) minus total transparent output (plus
    joinsplit vpub_old, which removes funds), plus the net shielded
    value balance.

    Args:
        input_values: the value of each transparent input being spent
    Returns:
        the fee paid to miners
    '''
    transparent_in = sum(input_values)
    transparent_in += sum(
        utils.le2i(js.vpub_new) for js in self.tx_joinsplits)
    transparent_out = sum(
        utils.le2i(tx_out.value) for tx_out in self.tx_outs)
    transparent_out += sum(
        utils.le2i(js.vpub_old) for js in self.tx_joinsplits)
    # value_balance is a signed 8-byte LE int
    shielded_net = utils.le2i(self.value_balance, signed=True)
    return transparent_in - transparent_out + shielded_net
def copy(self,
         tx_ins: Optional[Sequence[TxIn]] = None,
         tx_outs: Optional[Sequence[TxOut]] = None,
         lock_time: Optional[bytes] = None,
         expiry_height: Optional[bytes] = None,
         value_balance: Optional[bytes] = None,
         tx_shielded_spends:
         Optional[Sequence[SaplingShieldedSpend]] = None,
         tx_shielded_outputs:
         Optional[Sequence[SaplingShieldedOutput]] = None,
         tx_joinsplits: Optional[Sequence[SaplingJoinsplit]] = None,
         joinsplit_pubkey: Optional[bytes] = None,
         joinsplit_sig: Optional[bytes] = None,
         binding_sig: Optional[bytes] = None):
    '''
    Make a copy of this SaplingTx, optionally overriding specific
    fields. A None argument (the default) keeps the current value.
    '''
    def keep_or_override(override, current):
        # None signals "no override requested" for this field
        return current if override is None else override

    return SaplingTx(
        tx_ins=keep_or_override(tx_ins, self.tx_ins),
        tx_outs=keep_or_override(tx_outs, self.tx_outs),
        lock_time=keep_or_override(lock_time, self.lock_time),
        expiry_height=keep_or_override(expiry_height, self.expiry_height),
        value_balance=keep_or_override(value_balance, self.value_balance),
        tx_shielded_spends=keep_or_override(
            tx_shielded_spends, self.tx_shielded_spends),
        tx_shielded_outputs=keep_or_override(
            tx_shielded_outputs, self.tx_shielded_outputs),
        tx_joinsplits=keep_or_override(tx_joinsplits, self.tx_joinsplits),
        joinsplit_pubkey=keep_or_override(
            joinsplit_pubkey, self.joinsplit_pubkey),
        joinsplit_sig=keep_or_override(joinsplit_sig, self.joinsplit_sig),
        binding_sig=keep_or_override(binding_sig, self.binding_sig))
def _hsig(self, index: int) -> bytes:
    '''
    Compute h_sig for the joinsplit at `index`.
    Zcash spec 5.4.1.4: blake2b-256 with the ZcashComputehSig
    personalization string.
    '''
    preimage = self._hsig_input(index)
    return utils.blake2b(
        data=preimage,
        digest_size=32,
        person=b'ZcashComputehSig')
def _hsig_input(self, index: int) -> bytes:
    '''
    Serialize the hsig hash preimage for the joinsplit at `index`:
    random_seed, then nullifiers, then the tx-wide joinsplit pubkey.
    '''
    js = self.tx_joinsplits[index]
    buf = z.ZcashByteData()
    buf += js.random_seed
    buf += js.nullifiers
    buf += cast(bytes, self.joinsplit_pubkey)
    return buf.to_bytes()
def _primary_input(self, index: int) -> bytes:
    '''
    Serialize the primary input to the zkproof for the joinsplit at
    `index`. Field order matters: anchor, nullifiers, commitments,
    vpub_old, vpub_new, hsig, vmacs.
    '''
    js = self.tx_joinsplits[index]
    buf = z.ZcashByteData()
    for piece in (js.anchor,
                  js.nullifiers,
                  js.commitments,
                  js.vpub_old,
                  js.vpub_new,
                  self.hsigs[index],
                  js.vmacs):
        buf += piece
    return buf.to_bytes()
@classmethod
def from_bytes(SaplingTx, byte_string: bytes) -> 'SaplingTx':
    '''
    Parse a SaplingTx from its serialized byte representation.

    Args:
        byte_string: the serialized transaction
    Returns:
        the deserialized SaplingTx
    Raises:
        ValueError: if the version header or version group id is not
            Sapling's
    '''
    header = byte_string[0:4]
    group_id = byte_string[4:8]
    if header != b'\x04\x00\x00\x80' or group_id != b'\x85\x20\x2f\x89':
        raise ValueError(
            'Bad header or group ID. Expected {} and {}. Got: {} and {}'
            .format(b'\x04\x00\x00\x80'.hex(),
                    b'\x85\x20\x2f\x89'.hex(),
                    header.hex(),
                    group_id.hex()))

    # transparent inputs
    tx_ins = []
    tx_ins_num = shared.VarInt.from_bytes(byte_string[8:])
    current = 8 + len(tx_ins_num)
    for _ in range(tx_ins_num.number):
        tx_in = TxIn.from_bytes(byte_string[current:])
        current += len(tx_in)
        tx_ins.append(tx_in)

    # transparent outputs
    tx_outs = []
    tx_outs_num = shared.VarInt.from_bytes(byte_string[current:])
    current += len(tx_outs_num)
    for _ in range(tx_outs_num.number):
        tx_out = TxOut.from_bytes(byte_string[current:])
        current += len(tx_out)
        tx_outs.append(tx_out)

    lock_time = byte_string[current:current + 4]
    current += 4
    expiry_height = byte_string[current:current + 4]
    current += 4
    value_balance = byte_string[current:current + 8]
    current += 8

    tx_shielded_spends = []
    shielded_spends_num = shared.VarInt.from_bytes(byte_string[current:])
    current += len(shielded_spends_num)
    for _ in range(shielded_spends_num.number):
        ss = SaplingShieldedSpend.from_bytes(byte_string[current:])
        current += len(ss)
        tx_shielded_spends.append(ss)

    tx_shielded_outputs = []
    shielded_outputs_num = shared.VarInt.from_bytes(byte_string[current:])
    current += len(shielded_outputs_num)
    for _ in range(shielded_outputs_num.number):
        so = SaplingShieldedOutput.from_bytes(byte_string[current:])
        current += len(so)
        tx_shielded_outputs.append(so)

    tx_joinsplits = []
    tx_joinsplits_num = shared.VarInt.from_bytes(byte_string[current:])
    # BUGFIX: previously advanced by len(tx_outs_num) (copy/paste error),
    # which misparses any tx whose joinsplit-count VarInt differs in
    # length from the output-count VarInt.
    current += len(tx_joinsplits_num)
    for _ in range(tx_joinsplits_num.number):
        tx_joinsplit = SaplingJoinsplit.from_bytes(
            byte_string[current:])
        current += len(tx_joinsplit)
        tx_joinsplits.append(tx_joinsplit)

    # joinsplit pubkey/sig are serialized only when joinsplits exist
    joinsplit_pubkey: Optional[bytes]
    joinsplit_sig: Optional[bytes]
    if len(tx_joinsplits) > 0:
        joinsplit_pubkey = byte_string[current:current + 32]
        current += 32
        joinsplit_sig = byte_string[current:current + 64]
        current += 64
    else:
        joinsplit_pubkey = None
        joinsplit_sig = None

    # binding_sig is serialized only when shielded spends/outputs exist
    binding_sig: Optional[bytes]
    if len(tx_shielded_spends) + len(tx_shielded_outputs) > 0:
        binding_sig = byte_string[current:current + 64]
        current += 64
    else:
        binding_sig = None

    return SaplingTx(
        tx_ins=tx_ins,
        tx_outs=tx_outs,
        lock_time=lock_time,
        expiry_height=expiry_height,
        value_balance=value_balance,
        tx_shielded_spends=tx_shielded_spends,
        tx_shielded_outputs=tx_shielded_outputs,
        tx_joinsplits=tx_joinsplits,
        joinsplit_pubkey=joinsplit_pubkey,
        joinsplit_sig=joinsplit_sig,
        binding_sig=binding_sig)
def is_witness(self) -> bool:
    '''
    Sapling transactions never carry BIP141 segregated witness data.
    '''
    return False
def sighash_all(self, anyone_can_pay: bool = False, **kwargs) -> bytes:
    '''
    Calculate the ZIP243 sighash with type SIGHASH_ALL.

    BUGFIX: `anyone_can_pay` was previously accepted and silently
    discarded; it is now forwarded to sighash().

    Args:
        anyone_can_pay: whether to set the SIGHASH_ANYONECANPAY flag
        kwargs: forwarded to sighash() (prevout_value, index, etc.)
    Returns:
        the 32-byte sighash digest
    '''
    return self.sighash(
        sighash_type=shared.SIGHASH_ALL,
        anyone_can_pay=anyone_can_pay,
        **kwargs)
def sighash_single(self, anyone_can_pay: bool = False, **kwargs) -> bytes:
    '''
    Calculate the ZIP243 sighash with type SIGHASH_SINGLE.

    BUGFIX: `anyone_can_pay` was previously accepted and silently
    discarded; it is now forwarded to sighash().

    Args:
        anyone_can_pay: whether to set the SIGHASH_ANYONECANPAY flag
        kwargs: forwarded to sighash() (prevout_value, index, etc.)
    Returns:
        the 32-byte sighash digest
    '''
    return self.sighash(
        sighash_type=shared.SIGHASH_SINGLE,
        anyone_can_pay=anyone_can_pay,
        **kwargs)
def sighash(self,
            sighash_type: int,
            prevout_value: bytes,
            index: int = 0,
            joinsplit: bool = False,
            script_code: Optional[bytes] = None,
            anyone_can_pay: bool = False) -> bytes:
    '''
    Calculate the ZIP243 signature hash for this transaction.

    ZIP243
    https://github.com/zcash/zips/blob/master/zip-0243.rst

    Args:
        sighash_type: shared.SIGHASH_ALL or shared.SIGHASH_SINGLE
        prevout_value: 8-byte LE value of the output being spent
        index: index of the transparent input being signed
            (unused when joinsplit is True)
        joinsplit: True when signing for a joinsplit rather than a
            transparent input
        script_code: script code of the prevout being spent; required
            when joinsplit is False (a None would fail on concatenation)
        anyone_can_pay: whether to set the SIGHASH_ANYONECANPAY flag
    Returns:
        the 32-byte blake2b sighash digest
    Raises:
        ValueError: if joinsplit and anyone_can_pay are both requested
    '''
    if joinsplit and anyone_can_pay:
        raise ValueError('ANYONECANPAY can\'t be used with joinsplits')
    # NB: field order below is mandated by ZIP243 and must not change
    data = z.ZcashByteData()
    data += self.header
    data += self.group_id
    data += self._hash_prevouts(anyone_can_pay)
    data += self._hash_sequence(sighash_type, anyone_can_pay)
    data += self._hash_outputs(sighash_type, index)
    data += self._hash_joinsplits()
    data += self._hash_shielded_spends()
    data += self._hash_shielded_outputs()
    data += self.lock_time
    data += self.expiry_height
    data += self.value_balance
    if anyone_can_pay:
        # the flag is committed to as part of the 4-byte sighash type
        sighash_type = sighash_type | shared.SIGHASH_ANYONECANPAY
    data += utils.i2le_padded(sighash_type, 4)
    if not joinsplit:
        # data about the specific transparent input being signed
        data += self.tx_ins[index].outpoint
        data += cast(bytes, script_code)
        data += prevout_value
        data += self.tx_ins[index].sequence
    return utils.blake2b(
        data=data.to_bytes(),
        digest_size=32,
        person=b'ZcashSigHash' + bytes.fromhex('bb09b876'))  # Branch ID
def _hash_prevouts(self, anyone_can_pay: bool) -> bytes:
    '''
    ZIP243 hashPrevouts: blake2b over all input outpoints, or 32 zero
    bytes when ANYONECANPAY (other inputs are not committed to).
    '''
    if anyone_can_pay:
        return b'\x00' * 32
    buf = z.ZcashByteData()
    for txin in self.tx_ins:
        buf += txin.outpoint
    return utils.blake2b(
        data=buf.to_bytes(),
        digest_size=32,
        person=b'ZcashPrevoutHash')
def _hash_sequence(self, sighash_type: int, anyone_can_pay: bool) -> bytes:
    '''
    ZIP243 hashSequence: blake2b over all input sequence numbers, or
    32 zero bytes for ANYONECANPAY / SIGHASH_SINGLE modes.
    '''
    if anyone_can_pay or sighash_type == shared.SIGHASH_SINGLE:
        return b'\x00' * 32
    buf = z.ZcashByteData()
    for txin in self.tx_ins:
        buf += txin.sequence
    return utils.blake2b(
        data=buf.to_bytes(),
        digest_size=32,
        person=b'ZcashSequencHash')
def _hash_outputs(self, sighash_type: int, index: int) -> bytes:
    '''
    ZIP243 hashOutputs: blake2b over all outputs (SIGHASH_ALL), the
    single output at `index` (SIGHASH_SINGLE), or 32 zero bytes
    otherwise.

    Raises:
        NotImplementedError: for SIGHASH_SINGLE with no corresponding
            output (the "SIGHASH_SINGLE bug")
    '''
    if sighash_type not in [shared.SIGHASH_ALL, shared.SIGHASH_SINGLE]:
        return b'\x00' * 32
    data = z.ZcashByteData()
    if sighash_type == shared.SIGHASH_ALL:
        for tx_out in self.tx_outs:
            data += tx_out
    if sighash_type == shared.SIGHASH_SINGLE:
        # BUGFIX: was `index > len(...)`, which let index == len fall
        # through to an IndexError below instead of this explicit error.
        if index >= len(self.tx_outs):
            raise NotImplementedError(
                'I refuse to implement the SIGHASH_SINGLE bug.')
        data += self.tx_outs[index]
    return utils.blake2b(
        data=data.to_bytes(),
        digest_size=32,
        person=b'ZcashOutputsHash')
def _hash_joinsplits(self) -> bytes:
    '''
    ZIP243 hashJoinSplits: blake2b over every joinsplit plus the
    tx-wide joinsplit pubkey, or 32 zero bytes when there are none.
    '''
    if not self.tx_joinsplits:
        return b'\x00' * 32
    buf = z.ZcashByteData()
    for js in self.tx_joinsplits:
        buf += js
    buf += cast(bytes, self.joinsplit_pubkey)
    return utils.blake2b(
        data=buf.to_bytes(),
        digest_size=32,
        person=b'ZcashJSplitsHash')
def _hash_shielded_spends(self) -> bytes:
    '''
    ZIP243 hashShieldedSpends: blake2b over every shielded spend with
    its spend_auth_sig stripped, or 32 zero bytes when there are none.
    '''
    if not self.tx_shielded_spends:
        return b'\x00' * 32
    buf = z.ZcashByteData()
    for spend in self.tx_shielded_spends:
        # first 320 bytes only -- the spend_auth_sig is not committed to
        buf += spend[:320]
    return utils.blake2b(
        data=buf.to_bytes(),
        digest_size=32,
        person=b'ZcashSSpendsHash')
def _hash_shielded_outputs(self) -> bytes:
    '''
    ZIP243 hashShieldedOutputs: blake2b over every shielded output, or
    32 zero bytes when there are none.
    '''
    if not self.tx_shielded_outputs:
        return b'\x00' * 32
    buf = z.ZcashByteData()
    for output in self.tx_shielded_outputs:
        buf += output
    return utils.blake2b(
        data=buf.to_bytes(),
        digest_size=32,
        person=b'ZcashSOutputHash')
import riemann
from riemann import utils
from riemann.script.opcodes import CODE_TO_INT, INT_TO_CODE
def serialize(script_string: str) -> bytes:
    '''
    Serialize a human-readable script to bytes.

    Non-opcode tokens are interpreted as hex data and wrapped in the
    smallest suitable push operation.

    BUGFIX: now returns `bytes` as the annotation promises (previously
    returned the internal `bytearray`).

    Example:
        serialize('OP_DUP OP_CAT OP_HASH160 0011deadbeef')

    Args:
        script_string: A human-readable Bitcoin Script string
    Returns:
        The Script serialized as a bytestring
    Raises:
        NotImplementedError: for OP_CODESEPARATOR, OP_PUSHDATA4, or a
            data push too long to serialize
    '''
    string_tokens = script_string.split()
    serialized_script = bytearray()
    for token in string_tokens:
        if token in ('OP_CODESEPARATOR', 'OP_PUSHDATA4'):
            raise NotImplementedError('{} is a bad idea.'.format(token))
        if token in riemann.network.CODE_TO_INT_OVERWRITE:
            # network-specific opcode remaps take precedence
            serialized_script.append(
                riemann.network.CODE_TO_INT_OVERWRITE[token])
        elif token in CODE_TO_INT:
            serialized_script.append(CODE_TO_INT[token])
        else:
            # not an opcode: treat the token as hex data to push
            token_bytes = bytes.fromhex(token)
            if len(token_bytes) <= 75:
                # direct push: opcode value equals push length
                op = 'OP_PUSH_{}'.format(len(token_bytes))
                serialized_script.append(CODE_TO_INT[op])
                serialized_script.extend(token_bytes)
            elif len(token_bytes) <= 255:
                # OP_PUSHDATA1: 1-byte length prefix
                serialized_script.append(CODE_TO_INT['OP_PUSHDATA1'])
                serialized_script.extend(utils.i2le(len(token_bytes)))
                serialized_script.extend(token_bytes)
            elif len(token_bytes) <= 1000:
                # OP_PUSHDATA2: 2-byte LE length prefix
                # NOTE(review): 1000 is far below PUSHDATA2's 65535
                # ceiling -- presumably a deliberate sanity cap; confirm
                serialized_script.append(CODE_TO_INT['OP_PUSHDATA2'])
                serialized_script.extend(
                    utils.i2le_padded(len(token_bytes), 2))
                serialized_script.extend(token_bytes)
            else:
                raise NotImplementedError(
                    'Hex string too long to serialize.')
    return bytes(serialized_script)
def hex_serialize(script_string: str) -> str:
    '''
    Serialize a human-readable script to a hex string.

    Example:
        hex_serialize('OP_DUP OP_CAT OP_HASH160 0011deadbeef')

    Args:
        script_string: A human-readable Bitcoin Script string
    Returns:
        The Script serialized as a hex string
    '''
    serialized = serialize(script_string)
    return serialized.hex()
def deserialize(serialized_script: bytes) -> str:
    '''
    Deserialize a serialized script into a human-readable string.

    BUGFIX: OP_PUSHDATA1/OP_PUSHDATA2 pushes that run past the end of
    the script now raise IndexError, matching the existing check for
    direct pushes (previously the data was silently truncated).

    Example:
        deserialize(b'\\x19\\x76\\xa9\\x88\\xac')

    Args:
        serialized_script: The Script serialized as a bytestring
    Returns:
        A human-readable Script string
    Raises:
        IndexError: if a push runs past the end of the script
        ValueError: if an unsupported opcode is encountered
        NotImplementedError: for OP_CODESEPARATOR or OP_PUSHDATA4
    '''
    deserialized = []
    i = 0
    while i < len(serialized_script):
        current_byte = serialized_script[i]
        if current_byte == 0xab:
            raise NotImplementedError('OP_CODESEPARATOR is a bad idea.')
        if current_byte <= 75 and current_byte != 0:
            # direct push: opcode value is the push length
            deserialized.append(
                serialized_script[i + 1: i + 1 + current_byte].hex())
            i += 1 + current_byte
            if i > len(serialized_script):
                raise IndexError(
                    'Push {} caused out of bounds exception.'
                    .format(current_byte))
        elif current_byte == 76:
            # OP_PUSHDATA1: next byte is the blob length
            blob_len = serialized_script[i + 1]
            deserialized.append(
                serialized_script[i + 2: i + 2 + blob_len].hex())
            i += 2 + blob_len
            if i > len(serialized_script):
                raise IndexError(
                    'Push {} caused out of bounds exception.'
                    .format(blob_len))
        elif current_byte == 77:
            # OP_PUSHDATA2: next 2 bytes are the LE blob length
            blob_len = utils.le2i(serialized_script[i + 1: i + 3])
            deserialized.append(
                serialized_script[i + 3: i + 3 + blob_len].hex())
            i += 3 + blob_len
            if i > len(serialized_script):
                raise IndexError(
                    'Push {} caused out of bounds exception.'
                    .format(blob_len))
        elif current_byte == 78:
            raise NotImplementedError('OP_PUSHDATA4 is a bad idea.')
        else:
            # network-specific opcode remaps take precedence
            if current_byte in riemann.network.INT_TO_CODE_OVERWRITE:
                deserialized.append(
                    riemann.network.INT_TO_CODE_OVERWRITE[current_byte])
            elif current_byte in INT_TO_CODE:
                deserialized.append(INT_TO_CODE[current_byte])
            else:
                raise ValueError(
                    'Unsupported opcode. '
                    'Got 0x%x' % serialized_script[i])
            i += 1
    return ' '.join(deserialized)
def hex_deserialize(script_hex: str) -> str:
    '''
    Deserialize a human-readable script from a hex string.

    Example:
        hex_deserialize('1976a988ac')

    Args:
        script_hex: The serialized Script as a hex string
    Returns:
        A human-readable Script string
    '''
    script_bytes = bytes.fromhex(script_hex)
    return deserialize(script_bytes)
from typing import Dict, List, Tuple
# Canonical Bitcoin Script opcode table as (name, value) pairs.
# NB: some values appear twice (OP_NOP2/OP_CHECKLOCKTIMEVERIFY = 177,
# OP_NOP3/OP_CHECKSEQUENCEVERIFY = 178); in INT_TO_CODE the later
# (named) entry wins.
OPCODE_LIST: List[Tuple[str, int]] = [
    ("OP_0", 0),
    ("OP_PUSHDATA1", 76),
    ("OP_PUSHDATA2", 77),
    ("OP_PUSHDATA4", 78),
    ("OP_1NEGATE", 79),
    ("OP_RESERVED", 80),
    ("OP_1", 81),
    ("OP_2", 82),
    ("OP_3", 83),
    ("OP_4", 84),
    ("OP_5", 85),
    ("OP_6", 86),
    ("OP_7", 87),
    ("OP_8", 88),
    ("OP_9", 89),
    ("OP_10", 90),
    ("OP_11", 91),
    ("OP_12", 92),
    ("OP_13", 93),
    ("OP_14", 94),
    ("OP_15", 95),
    ("OP_16", 96),
    ("OP_NOP", 97),
    ("OP_VER", 98),
    ("OP_IF", 99),
    ("OP_NOTIF", 100),
    ("OP_VERIF", 101),
    ("OP_VERNOTIF", 102),
    ("OP_ELSE", 103),
    ("OP_ENDIF", 104),
    ("OP_VERIFY", 105),
    ("OP_RETURN", 106),
    ("OP_TOALTSTACK", 107),
    ("OP_FROMALTSTACK", 108),
    ("OP_2DROP", 109),
    ("OP_2DUP", 110),
    ("OP_3DUP", 111),
    ("OP_2OVER", 112),
    ("OP_2ROT", 113),
    ("OP_2SWAP", 114),
    ("OP_IFDUP", 115),
    ("OP_DEPTH", 116),
    ("OP_DROP", 117),
    ("OP_DUP", 118),
    ("OP_NIP", 119),
    ("OP_OVER", 120),
    ("OP_PICK", 121),
    ("OP_ROLL", 122),
    ("OP_ROT", 123),
    ("OP_SWAP", 124),
    ("OP_TUCK", 125),
    ("OP_CAT", 126),
    ("OP_SUBSTR", 127),
    ("OP_LEFT", 128),
    ("OP_RIGHT", 129),
    ("OP_SIZE", 130),
    ("OP_INVERT", 131),
    ("OP_AND", 132),
    ("OP_OR", 133),
    ("OP_XOR", 134),
    ("OP_EQUAL", 135),
    ("OP_EQUALVERIFY", 136),
    ("OP_RESERVED1", 137),
    ("OP_RESERVED2", 138),
    ("OP_1ADD", 139),
    ("OP_1SUB", 140),
    ("OP_2MUL", 141),
    ("OP_2DIV", 142),
    ("OP_NEGATE", 143),
    ("OP_ABS", 144),
    ("OP_NOT", 145),
    ("OP_0NOTEQUAL", 146),
    ("OP_ADD", 147),
    ("OP_SUB", 148),
    ("OP_MUL", 149),
    ("OP_DIV", 150),
    ("OP_MOD", 151),
    ("OP_LSHIFT", 152),
    ("OP_RSHIFT", 153),
    ("OP_BOOLAND", 154),
    ("OP_BOOLOR", 155),
    ("OP_NUMEQUAL", 156),
    ("OP_NUMEQUALVERIFY", 157),
    ("OP_NUMNOTEQUAL", 158),
    ("OP_LESSTHAN", 159),
    ("OP_GREATERTHAN", 160),
    ("OP_LESSTHANOREQUAL", 161),
    ("OP_GREATERTHANOREQUAL", 162),
    ("OP_MIN", 163),
    ("OP_MAX", 164),
    ("OP_WITHIN", 165),
    ("OP_RIPEMD160", 166),
    ("OP_SHA1", 167),
    ("OP_SHA256", 168),
    ("OP_HASH160", 169),
    ("OP_HASH256", 170),
    ("OP_CODESEPARATOR", 171),
    ("OP_CHECKSIG", 172),
    ("OP_CHECKSIGVERIFY", 173),
    ("OP_CHECKMULTISIG", 174),
    ("OP_CHECKMULTISIGVERIFY", 175),
    ("OP_NOP1", 176),
    ("OP_NOP2", 177),
    ("OP_CHECKLOCKTIMEVERIFY", 177),
    ("OP_NOP3", 178),
    ("OP_CHECKSEQUENCEVERIFY", 178),
    ("OP_NOP4", 179),
    ("OP_NOP5", 180),
    ("OP_NOP6", 181),
    ("OP_NOP7", 182),
    ("OP_NOP8", 183),
    ("OP_NOP9", 184),
    ("OP_NOP10", 185),
    ("OP_INVALIDOPCODE", 255),
]

# Direct pushes of 1-75 bytes: the opcode value is the push length.
for i in range(1, 76):
    OPCODE_LIST.append((f"OP_PUSH_{i}", i))

# Forward and reverse lookup tables built from the pair list above.
CODE_TO_INT: Dict[str, int] = dict(OPCODE_LIST)
INT_TO_CODE: Dict[int, str] = {number: name for name, number in OPCODE_LIST}
from riemann.encoding import base58, bech32, cashaddr
from typing import Dict, List, Optional, Tuple
class Network:
    '''
    Basic Network class.
    Holds the per-coin/per-subnet constants (address prefixes, encoder
    modules, opcode overrides).
    Not all features are used by all coins.
    '''
    # ticker symbol and human-readable network/subnet identifiers
    SYMBOL: Optional[str] = None
    NETWORK_NAME: Optional[str] = None
    SUBNET_NAME: Optional[str] = None
    # base58 version byte(s) for legacy address types
    P2PKH_PREFIX: bytes = b'\x00'
    P2SH_PREFIX: bytes = b'\x05'
    # segwit support flag and witness-output script prefixes
    SEGWIT: bool = False
    P2WSH_PREFIX: bytes = b'\x00\x20'
    P2WPKH_PREFIX: bytes = b'\x00\x14'
    BECH32_HRP: Optional[str] = None
    # encoder modules used for each address family
    SEGWIT_ENCODER = bech32
    LEGACY_ENCODER = base58
    CASHADDR_ENCODER = cashaddr
    # bytes used to flag segwit in serialized transactions
    SEGWIT_TX_FLAG = b'\x00\x01'
    # fork id for BIP143-style forks (e.g. BCH, BTG); None if unused
    FORKID: Optional[int] = None
    # opcode renames relative to the standard Bitcoin opcode table
    OPCODE_CHANGES: List[Tuple[str, int]] = []
    # cashaddr parameters (Bitcoin Cash family only)
    CASHADDR_PREFIX: Optional[str] = None
    CASHADDR_P2SH: Optional[bytes] = None
    CASHADDR_P2PKH: Optional[bytes] = None
    # per-network overrides applied on top of the shared opcode tables
    CODE_TO_INT_OVERWRITE: Dict[str, int] = {}
    INT_TO_CODE_OVERWRITE: Dict[int, str] = {}
# --- Bitcoin (BTC): main, test, reg ---
class BitcoinMain(Network):
    SYMBOL = 'BTC'
    NETWORK_NAME = 'bitcoin'
    SUBNET_NAME = 'main'
    P2PKH_PREFIX = b'\x00'
    P2SH_PREFIX = b'\x05'
    SEGWIT = True
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
    BECH32_HRP = 'bc'


class BitcoinTest(Network):
    SYMBOL = 'tBTC'
    NETWORK_NAME = 'bitcoin'
    SUBNET_NAME = 'test'
    P2PKH_PREFIX = b'\x6f'
    P2SH_PREFIX = b'\xc4'
    SEGWIT = True
    BECH32_HRP = 'tb'
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'


class BitcoinRegtest(Network):
    SYMBOL = 'rBTC'
    NETWORK_NAME = 'bitcoin'
    SUBNET_NAME = 'reg'
    P2PKH_PREFIX = b'\x6f'
    P2SH_PREFIX = b'\xc4'
    SEGWIT = True
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
    BECH32_HRP = 'bcrt'
# --- Litecoin (LTC): main, test, reg ---
class LitecoinMain(Network):
    SYMBOL = 'LTC'
    NETWORK_NAME = 'litecoin'
    SUBNET_NAME = 'main'
    P2PKH_PREFIX = b'\x30'
    P2SH_PREFIX = b'\x32'
    SEGWIT = True
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
    BECH32_HRP = 'ltc'


class LitecoinTest(Network):
    SYMBOL = 'tLTC'
    NETWORK_NAME = 'litecoin'
    SUBNET_NAME = 'test'
    P2PKH_PREFIX = b'\x6f'
    P2SH_PREFIX = b'\x3a'
    SEGWIT = True
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
    BECH32_HRP = 'tltc'


class LitecoinRegtest(Network):
    SYMBOL = 'rLTC'
    NETWORK_NAME = 'litecoin'
    SUBNET_NAME = 'reg'
    P2PKH_PREFIX = b'\x6f'
    P2SH_PREFIX = b'\x3a'
    SEGWIT = True
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
    BECH32_HRP = 'tltc'  # no specific reg bech32 hrp specified
# --- Bitcoin Cash (BCH): no segwit; uses forkid sighash and cashaddr ---
class BitcoinCashMain(Network):
    SYMBOL = 'BCH'
    NETWORK_NAME = 'bitcoin_cash'
    SUBNET_NAME = 'main'
    P2PKH_PREFIX = b'\x00'
    P2SH_PREFIX = b'\x05'
    SEGWIT = False
    FORKID = 0
    CASHADDR_PREFIX = 'bitcoincash'
    CASHADDR_P2SH = b'\x08'
    CASHADDR_P2PKH = b'\x00'


class BitcoinCashTest(Network):
    SYMBOL = 'tBCH'
    NETWORK_NAME = 'bitcoin_cash'
    SUBNET_NAME = 'test'
    P2PKH_PREFIX = b'\x6f'
    P2SH_PREFIX = b'\xc4'
    SEGWIT = False
    FORKID = 0
    CASHADDR_PREFIX = 'bchtest'
    CASHADDR_P2SH = b'\x08'
    CASHADDR_P2PKH = b'\x00'


class BitcoinCashRegtest(Network):
    SYMBOL = 'rBCH'
    NETWORK_NAME = 'bitcoin_cash'
    SUBNET_NAME = 'reg'
    P2PKH_PREFIX = b'\x6f'
    P2SH_PREFIX = b'\xc4'
    SEGWIT = False
    FORKID = 0
    CASHADDR_PREFIX = 'bchtest'
    CASHADDR_P2SH = b'\x08'
    CASHADDR_P2PKH = b'\x00'
# --- Bitcoin Gold (BTG): segwit plus a BIP143-style forkid ---
class BitcoinGoldMain(Network):
    SYMBOL = 'BTG'
    NETWORK_NAME = 'bitcoin_gold'
    SUBNET_NAME = 'main'
    P2PKH_PREFIX = b'\x26'
    P2SH_PREFIX = b'\x17'
    SEGWIT = True
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
    BECH32_HRP = 'btg'
    FORKID = 79


class BitcoinGoldTest(Network):
    SYMBOL = 'tBTG'
    NETWORK_NAME = 'bitcoin_gold'
    SUBNET_NAME = 'test'
    P2PKH_PREFIX = b'\x6f'
    P2SH_PREFIX = b'\xc4'
    SEGWIT = True
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
    BECH32_HRP = 'tbtg'
    FORKID = 79


class BitcoinGoldRegtest(Network):
    SYMBOL = 'rBTG'
    NETWORK_NAME = 'bitcoin_gold'
    SUBNET_NAME = 'reg'
    P2PKH_PREFIX = b'\x6f'
    P2SH_PREFIX = b'\xc4'
    SEGWIT = True
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
    BECH32_HRP = 'tbtg'  # no specific reg bech32 hrp specified
    FORKID = 79
# --- Dogecoin (DOGE): legacy-only ---
class DogecoinMain(Network):
    SYMBOL = 'DOGE'
    NETWORK_NAME = 'dogecoin'
    SUBNET_NAME = 'main'
    P2PKH_PREFIX = b'\x1e'
    P2SH_PREFIX = b'\x16'
    SEGWIT = False  # as of 4/2018, at least; dogewit is a-comin', they say


class DogecoinTest(Network):
    SYMBOL = 'tDOGE'
    NETWORK_NAME = 'dogecoin'
    SUBNET_NAME = 'test'
    P2PKH_PREFIX = b'\x71'
    P2SH_PREFIX = b'\xc4'
    SEGWIT = False


class DogecoinRegtest(Network):
    '''I can detect no sign of a Doge reg network;
    for most coins, the reg values are the same as test'''
    SYMBOL = 'rDOGE'
    NETWORK_NAME = 'dogecoin'
    SUBNET_NAME = 'reg'
    P2PKH_PREFIX = b'\x71'
    P2SH_PREFIX = b'\xc4'
    SEGWIT = False
# --- Dash (DASH): legacy-only ---
class DashMain(Network):
    SYMBOL = 'DASH'
    NETWORK_NAME = 'dash'
    SUBNET_NAME = 'main'
    P2PKH_PREFIX = b'\x4c'
    P2SH_PREFIX = b'\x10'
    SEGWIT = False


class DashTest(Network):
    SYMBOL = 'tDASH'
    NETWORK_NAME = 'dash'
    SUBNET_NAME = 'test'
    P2PKH_PREFIX = b'\x8c'
    P2SH_PREFIX = b'\x13'
    SEGWIT = False


class DashRegtest(Network):
    SYMBOL = 'rDASH'
    NETWORK_NAME = 'dash'
    SUBNET_NAME = 'reg'
    P2PKH_PREFIX = b'\x8c'
    P2SH_PREFIX = b'\x13'
    SEGWIT = False
# --- Zcash (ZEC): one network triple per protocol era (sprout,
# overwinter, sapling); transparent address prefixes are two bytes ---
class ZcashSproutMain(Network):
    SYMBOL = 'ZEC'
    NETWORK_NAME = 'zcash_sprout'
    SUBNET_NAME = 'main'
    P2PKH_PREFIX = b'\x1c\xb8'
    P2SH_PREFIX = b'\x1c\xbd'
    SEGWIT = False


class ZcashSproutTest(Network):
    SYMBOL = 'tZEC'
    NETWORK_NAME = 'zcash_sprout'
    SUBNET_NAME = 'test'
    P2PKH_PREFIX = b'\x1d\x25'
    P2SH_PREFIX = b'\x1c\xba'
    SEGWIT = False


class ZcashSproutRegtest(Network):
    SYMBOL = 'rZEC'
    NETWORK_NAME = 'zcash_sprout'
    SUBNET_NAME = 'reg'
    P2PKH_PREFIX = b'\x1d\x25'
    P2SH_PREFIX = b'\x1c\xba'
    SEGWIT = False


class ZcashOverwinterMain(Network):
    SYMBOL = 'ZEC'
    NETWORK_NAME = 'zcash_overwinter'
    SUBNET_NAME = 'main'
    P2PKH_PREFIX = b'\x1c\xb8'
    P2SH_PREFIX = b'\x1c\xbd'
    SEGWIT = False


class ZcashOverwinterTest(Network):
    SYMBOL = 'tZEC'
    NETWORK_NAME = 'zcash_overwinter'
    SUBNET_NAME = 'test'
    P2PKH_PREFIX = b'\x1d\x25'
    P2SH_PREFIX = b'\x1c\xba'
    SEGWIT = False


class ZcashOverwinterRegtest(Network):
    SYMBOL = 'rZEC'
    NETWORK_NAME = 'zcash_overwinter'
    SUBNET_NAME = 'reg'
    P2PKH_PREFIX = b'\x1d\x25'
    P2SH_PREFIX = b'\x1c\xba'
    SEGWIT = False


class ZcashSaplingMain(Network):
    SYMBOL = 'ZEC'
    NETWORK_NAME = 'zcash_sapling'
    SUBNET_NAME = 'main'
    P2PKH_PREFIX = b'\x1c\xb8'
    P2SH_PREFIX = b'\x1c\xbd'
    SEGWIT = False


class ZcashSaplingTest(Network):
    SYMBOL = 'tZEC'
    NETWORK_NAME = 'zcash_sapling'
    SUBNET_NAME = 'test'
    P2PKH_PREFIX = b'\x1d\x25'
    P2SH_PREFIX = b'\x1c\xba'
    SEGWIT = False


class ZcashSaplingRegtest(Network):
    SYMBOL = 'rZEC'
    NETWORK_NAME = 'zcash_sapling'
    SUBNET_NAME = 'reg'
    P2PKH_PREFIX = b'\x1d\x25'
    P2SH_PREFIX = b'\x1c\xba'
    SEGWIT = False
# --- Decred (DCR): remaps opcode 168 to OP_BLAKE256 and moves
# OP_SHA256 to 192 via the opcode overwrite tables ---
class DecredMain(Network):
    SYMBOL = 'DCR'
    NETWORK_NAME = 'decred'
    SUBNET_NAME = 'main'
    P2PKH_PREFIX = b'\x07\x3f'
    P2PK_PREFIX = b'\x13\x86'
    P2SH_PREFIX = b'\x07\x1a'
    SEGWIT = False
    OPCODE_CHANGES = [
        ('OP_BLAKE256', 168),
        ('OP_SHA256', 192)
    ]
    CODE_TO_INT_OVERWRITE = dict(o for o in OPCODE_CHANGES)
    INT_TO_CODE_OVERWRITE = dict((o[1], o[0]) for o in OPCODE_CHANGES)


class DecredTest(Network):
    SYMBOL = 'DCRT'
    NETWORK_NAME = 'decred'
    SUBNET_NAME = 'test'
    P2PKH_PREFIX = b'\x0f\x21'
    P2PK_PREFIX = b'\x28\xf7'
    P2SH_PREFIX = b'\x0e\xfc'
    SEGWIT = False
    OPCODE_CHANGES = [
        ('OP_BLAKE256', 168),
        ('OP_SHA256', 192)
    ]
    CODE_TO_INT_OVERWRITE = dict(o for o in OPCODE_CHANGES)
    INT_TO_CODE_OVERWRITE = dict((o[1], o[0]) for o in OPCODE_CHANGES)


class DecredSimnet(Network):
    SYMBOL = 'DCRS'
    NETWORK_NAME = 'decred'
    SUBNET_NAME = 'simnet'
    P2PKH_PREFIX = b'\x28\xf7'
    P2SH_PREFIX = b'\x0e\xfc'
    SEGWIT = False
    OPCODE_CHANGES = [
        ('OP_BLAKE256', 168),
        ('OP_SHA256', 192)
    ]
    CODE_TO_INT_OVERWRITE = dict(o for o in OPCODE_CHANGES)
    INT_TO_CODE_OVERWRITE = dict((o[1], o[0]) for o in OPCODE_CHANGES)
# --- PIVX: legacy-only ---
class PivxMain(Network):
    SYMBOL = 'PIVX'
    NETWORK_NAME = 'pivx'
    SUBNET_NAME = 'main'
    P2PKH_PREFIX = b'\x1e'
    P2SH_PREFIX = b'\x0d'
    SEGWIT = False


class PivxTest(Network):
    SYMBOL = 'tPIVX'
    NETWORK_NAME = 'pivx'
    SUBNET_NAME = 'test'
    P2PKH_PREFIX = b'\x8b'
    P2SH_PREFIX = b'\x13'
    SEGWIT = False


class PivxRegtest(Network):
    SYMBOL = 'rPIVX'
    NETWORK_NAME = 'pivx'
    SUBNET_NAME = 'reg'
    P2PKH_PREFIX = b'\x8b'
    P2SH_PREFIX = b'\x13'
    SEGWIT = False
# --- Viacoin (VIA): segwit with bech32 addresses ---
class ViacoinMain(Network):
    SYMBOL = 'VIA'
    NETWORK_NAME = 'viacoin'
    SUBNET_NAME = 'main'
    P2PKH_PREFIX = b'\x47'
    P2SH_PREFIX = b'\x21'
    SEGWIT = True
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
    BECH32_HRP = 'via'


class ViacoinTest(Network):
    SYMBOL = 'tVIA'
    NETWORK_NAME = 'viacoin'
    SUBNET_NAME = 'test'
    P2PKH_PREFIX = b'\x7f'
    P2SH_PREFIX = b'\xc4'
    SEGWIT = True
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
    BECH32_HRP = 'tvia'


class ViacoinSimnet(Network):
    SYMBOL = 'sVIA'
    NETWORK_NAME = 'viacoin'
    SUBNET_NAME = 'simnet'
    P2PKH_PREFIX = b'\x6f'
    P2SH_PREFIX = b'\xc4'
    SEGWIT = True
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
    BECH32_HRP = 'svia'
# --- Feathercoin (FTC): segwit with bech32 addresses ---
class FeathercoinMain(Network):
    SYMBOL = 'FTC'
    NETWORK_NAME = 'feathercoin'
    SUBNET_NAME = 'main'
    P2PKH_PREFIX = b'\x0e'
    P2SH_PREFIX = b'\x60'
    SEGWIT = True
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
    BECH32_HRP = 'fc'


class FeathercoinTest(Network):
    SYMBOL = 'tFTC'
    NETWORK_NAME = 'feathercoin'
    SUBNET_NAME = 'test'
    P2PKH_PREFIX = b'\x6f'
    P2SH_PREFIX = b'\xc4'
    SEGWIT = True
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
    BECH32_HRP = 'tf'


class FeathercoinRegtest(Network):
    SYMBOL = 'rFTC'
    NETWORK_NAME = 'feathercoin'
    SUBNET_NAME = 'reg'
    P2PKH_PREFIX = b'\x6f'
    P2SH_PREFIX = b'\xc4'
    SEGWIT = True
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
    BECH32_HRP = 'fcrt'
# --- BitcoinDark (BTCD): legacy-only ---
class BitcoinDarkMain(Network):
    SYMBOL = 'BTCD'
    NETWORK_NAME = 'bitcoin_dark'
    SUBNET_NAME = 'main'
    P2PKH_PREFIX = b'\x3c'
    P2SH_PREFIX = b'\x55'
    SEGWIT = False


class BitcoinDarkTest(Network):
    SYMBOL = 'tBTCD'
    NETWORK_NAME = 'bitcoin_dark'
    SUBNET_NAME = 'test'
    P2PKH_PREFIX = b'\x6f'
    P2SH_PREFIX = b'\xc4'
    SEGWIT = False


class BitcoinDarkRegtest(Network):
    # like DOGE, I can find no BTCD regnet. Also the code is really old.
    SYMBOL = 'rBTCD'
    NETWORK_NAME = 'bitcoin_dark'
    SUBNET_NAME = 'reg'
    P2PKH_PREFIX = b'\x6f'
    P2SH_PREFIX = b'\xc4'
    SEGWIT = False
# --- Axe (AXE): legacy-only ---
class AxeMain(Network):
    SYMBOL = 'AXE'
    NETWORK_NAME = 'axe'
    SUBNET_NAME = 'main'
    P2PKH_PREFIX = b'\x37'
    P2SH_PREFIX = b'\x10'
    SEGWIT = False


class AxeTest(Network):
    SYMBOL = 'tAXE'
    NETWORK_NAME = 'axe'
    SUBNET_NAME = 'test'
    P2PKH_PREFIX = b'\x8c'
    P2SH_PREFIX = b'\x13'
    SEGWIT = False


class AxeRegtest(Network):
    SYMBOL = 'rAXE'
    NETWORK_NAME = 'axe'
    SUBNET_NAME = 'reg'
    P2PKH_PREFIX = b'\x8c'
    P2SH_PREFIX = b'\x13'
    SEGWIT = False
# --- Bitcore (BTX): segwit, but witness addresses use base58 encoding ---
class BitcoreMain(Network):
    SYMBOL = 'BTX'
    NETWORK_NAME = 'bitcore'
    SUBNET_NAME = 'main'
    P2PKH_PREFIX = b'\x00'
    P2SH_PREFIX = b'\x32'
    SEGWIT = True
    SEGWIT_ENCODER = base58
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'


class BitcoreTest(Network):
    SYMBOL = 'tBTX'
    NETWORK_NAME = 'bitcore'
    SUBNET_NAME = 'test'
    P2PKH_PREFIX = b'\x6f'
    P2SH_PREFIX = b'\x3a'
    SEGWIT = True
    SEGWIT_ENCODER = base58
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'


class BitcoreRegtest(Network):
    SYMBOL = 'rBTX'
    NETWORK_NAME = 'bitcore'
    SUBNET_NAME = 'reg'
    P2PKH_PREFIX = b'\x6f'
    P2SH_PREFIX = b'\x3a'
    SEGWIT = True
    SEGWIT_ENCODER = base58
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
# --- DigiByte (DGB): segwit with bech32 addresses ---
class DigibyteMain(Network):
    SYMBOL = 'DGB'
    NETWORK_NAME = 'digibyte'
    SUBNET_NAME = 'main'
    P2PKH_PREFIX = b'\x1e'
    P2SH_PREFIX = b'\x3f'
    SEGWIT = True
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
    BECH32_HRP = 'dgb'


class DigibyteTest(Network):
    SYMBOL = 'tDGB'
    NETWORK_NAME = 'digibyte'
    SUBNET_NAME = 'test'
    P2PKH_PREFIX = b'\x7e'
    P2SH_PREFIX = b'\x8c'
    SEGWIT = True
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
    BECH32_HRP = 'dgbt'


class DigibyteRegtest(Network):
    SYMBOL = 'rDGB'
    NETWORK_NAME = 'digibyte'
    SUBNET_NAME = 'reg'
    P2PKH_PREFIX = b'\x7e'
    P2SH_PREFIX = b'\x8c'
    SEGWIT = True
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
    BECH32_HRP = 'dgbrt'
# --- Groestlcoin (GRS): segwit; witness addresses use base58 encoding
# here even though bech32 HRPs are defined ---
class GroestlcoinMain(Network):
    SYMBOL = 'GRS'
    NETWORK_NAME = 'groestlcoin'
    SUBNET_NAME = 'main'
    P2PKH_PREFIX = b'\x24'
    P2SH_PREFIX = b'\x05'
    SEGWIT = True
    SEGWIT_ENCODER = base58
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
    BECH32_HRP = 'grs'
    # BECH32 & HRPs are active on groestlcoin as of version 2.16.0 May 13, 2018


class GroestlcoinTest(Network):
    SYMBOL = 'tGRS'
    NETWORK_NAME = 'groestlcoin'
    SUBNET_NAME = 'test'
    P2PKH_PREFIX = b'\x6f'
    P2SH_PREFIX = b'\xc4'
    SEGWIT = True
    SEGWIT_ENCODER = base58
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
    BECH32_HRP = 'tgrs'


class GroestlcoinRegtest(Network):
    SYMBOL = 'rGRS'
    NETWORK_NAME = 'groestlcoin'
    SUBNET_NAME = 'reg'
    P2PKH_PREFIX = b'\x6f'
    P2SH_PREFIX = b'\xc4'
    SEGWIT = True
    SEGWIT_ENCODER = base58
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
    BECH32_HRP = 'grsrt'
# --- Monacoin (MONA): segwit; witness addresses base58-encoded ---
class MonacoinMain(Network):
    SYMBOL = 'MONA'
    NETWORK_NAME = 'monacoin'
    SUBNET_NAME = 'main'
    P2PKH_PREFIX = b'\x32'
    P2SH_PREFIX = b'\x37'
    SEGWIT = True
    SEGWIT_ENCODER = base58
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
    # BECH32_HRP = 'mona'
    # bech32 isn't active yet but the team has chosen hrps.


class MonacoinTest(Network):
    SYMBOL = 'tMONA'
    NETWORK_NAME = 'monacoin'
    SUBNET_NAME = 'test'
    P2PKH_PREFIX = b'\x6f'
    P2SH_PREFIX = b'\xc4'
    SEGWIT = True
    SEGWIT_ENCODER = base58
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
    # BECH32_HRP = 'tmona'


class MonacoinRegtest(Network):
    SYMBOL = 'rMONA'
    NETWORK_NAME = 'monacoin'
    SUBNET_NAME = 'reg'
    P2PKH_PREFIX = b'\x6f'
    P2SH_PREFIX = b'\xc4'
    SEGWIT = True
    SEGWIT_ENCODER = base58
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
    # BECH32_HRP = 'tmona'
# --- Navcoin (NAV): segwit; witness addresses base58-encoded ---
class NavcoinMain(Network):
    SYMBOL = 'NAV'
    NETWORK_NAME = 'navcoin'
    SUBNET_NAME = 'main'
    P2PKH_PREFIX = b'\x35'
    P2SH_PREFIX = b'\x55'
    SEGWIT = True
    SEGWIT_ENCODER = base58
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
    # bech32 is not yet active on Navcoin


class NavcoinTest(Network):
    SYMBOL = 'tNAV'
    NETWORK_NAME = 'navcoin'
    SUBNET_NAME = 'test'
    P2PKH_PREFIX = b'\x36'
    P2SH_PREFIX = b'\x56'
    SEGWIT = True
    SEGWIT_ENCODER = base58
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'


class NavcoinRegtest(Network):
    SYMBOL = 'rNAV'
    NETWORK_NAME = 'navcoin'
    SUBNET_NAME = 'reg'
    # one of the only coins with different prefixes for reg and test
    P2PKH_PREFIX = b'\x14'
    P2SH_PREFIX = b'\x60'
    SEGWIT = True
    SEGWIT_ENCODER = base58
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
# --- Syscoin (SYS): segwit; witness addresses base58-encoded ---
class SyscoinMain(Network):
    SYMBOL = 'SYS'
    NETWORK_NAME = 'syscoin'
    SUBNET_NAME = 'main'
    P2PKH_PREFIX = b'\x3f'
    P2SH_PREFIX = b'\x05'
    SEGWIT = True
    SEGWIT_ENCODER = base58
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'


class SyscoinTest(Network):
    SYMBOL = 'tSYS'
    NETWORK_NAME = 'syscoin'
    SUBNET_NAME = 'test'
    P2PKH_PREFIX = b'\x41'
    P2SH_PREFIX = b'\xc4'
    SEGWIT = True
    SEGWIT_ENCODER = base58
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'


class SyscoinRegtest(Network):
    SYMBOL = 'rSYS'
    NETWORK_NAME = 'syscoin'
    SUBNET_NAME = 'reg'
    P2PKH_PREFIX = b'\x41'
    P2SH_PREFIX = b'\xc4'
    SEGWIT = True
    SEGWIT_ENCODER = base58
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
# --- Vertcoin (VTC): segwit with bech32 addresses ---
class VertcoinMain(Network):
    SYMBOL = 'VTC'
    NETWORK_NAME = 'vertcoin'
    SUBNET_NAME = 'main'
    P2PKH_PREFIX = b'\x47'
    P2SH_PREFIX = b'\x05'
    SEGWIT = True
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
    BECH32_HRP = 'vtc'


class VertcoinTest(Network):
    SYMBOL = 'tVTC'
    NETWORK_NAME = 'vertcoin'
    SUBNET_NAME = 'test'
    P2PKH_PREFIX = b'\x4a'
    P2SH_PREFIX = b'\xc4'
    SEGWIT = True
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
    BECH32_HRP = 'tvtc'


class VertcoinRegtest(Network):
    SYMBOL = 'rVTC'
    NETWORK_NAME = 'vertcoin'
    SUBNET_NAME = 'reg'
    P2PKH_PREFIX = b'\x6f'
    P2SH_PREFIX = b'\xc4'
    SEGWIT = True
    P2WSH_PREFIX = b'\x00\x20'
    P2WPKH_PREFIX = b'\x00\x14'
    BECH32_HRP = 'bcrt'  # That's the same as Bitcoin's reg.
# --- Bitcoin Private (BTCP) ---
# BUGFIX: main/reg SYMBOLs previously read 'BCTP'/'rBCTP' -- a
# transposition typo; the family's own test symbol ('tBTCP') and the
# project ticker are BTCP.
class BitcoinPrivateMain(Network):
    # Bitcoin Private can pay out to segwit/bech32 wallets,
    # but has no support beyond that. It is upcoming.
    SYMBOL = 'BTCP'
    NETWORK_NAME = 'bitcoin_private'
    SUBNET_NAME = 'main'
    P2PKH_PREFIX = b'\x13\x25'
    P2SH_PREFIX = b'\x13\xaf'
    SEGWIT = False
    FORKID = 42


class BitcoinPrivateTest(Network):
    SYMBOL = 'tBTCP'
    NETWORK_NAME = 'bitcoin_private'
    SUBNET_NAME = 'test'
    P2PKH_PREFIX = b'\x19\x57'
    P2SH_PREFIX = b'\x19\xe0'
    SEGWIT = False
    FORKID = 42


class BitcoinPrivateRegtest(Network):
    SYMBOL = 'rBTCP'
    NETWORK_NAME = 'bitcoin_private'
    SUBNET_NAME = 'reg'
    P2PKH_PREFIX = b'\x19\x57'
    P2SH_PREFIX = b'\x19\xe0'
    SEGWIT = False
    FORKID = 42
# --- Verge (XVG): legacy-only ---
class VergeMain(Network):
    SYMBOL = 'XVG'
    NETWORK_NAME = 'verge'
    SUBNET_NAME = 'main'
    P2PKH_PREFIX = b'\x1e'
    P2SH_PREFIX = b'\x21'
    SEGWIT = False


class VergeTest(Network):
    SYMBOL = 'tXVG'
    NETWORK_NAME = 'verge'
    SUBNET_NAME = 'test'
    P2PKH_PREFIX = b'\x73'
    P2SH_PREFIX = b'\xc6'
    SEGWIT = False


class VergeRegtest(Network):
    '''I can detect no sign of a Verge reg network;
    for most coins, the reg values are the same as test'''
    SYMBOL = 'rXVG'
    NETWORK_NAME = 'verge'
    SUBNET_NAME = 'reg'
    P2PKH_PREFIX = b'\x73'
    P2SH_PREFIX = b'\xc6'
    SEGWIT = False
# Well kids, that's a bundle.
# To add a new coin
# 1. define a class in networks.py
# 2. add it to SUPPORTED
from .networks import *
# Registry of every supported chain, keyed as '<network_name>_<subnet>'.
# get_network() below resolves names against this mapping.
SUPPORTED = {
    'bitcoin_main': BitcoinMain,
    'bitcoin_test': BitcoinTest,
    'bitcoin_reg': BitcoinRegtest,
    'litecoin_main': LitecoinMain,
    'litecoin_test': LitecoinTest,
    'litecoin_reg': LitecoinRegtest,
    'bitcoin_cash_main': BitcoinCashMain,
    'bitcoin_cash_test': BitcoinCashTest,
    'bitcoin_cash_reg': BitcoinCashRegtest,
    'bitcoin_gold_main': BitcoinGoldMain,
    'bitcoin_gold_test': BitcoinGoldTest,
    'bitcoin_gold_reg': BitcoinGoldRegtest,
    'dogecoin_main': DogecoinMain,
    'dogecoin_test': DogecoinTest,
    'dogecoin_reg': DogecoinRegtest,
    'dash_main': DashMain,
    'dash_test': DashTest,
    'dash_reg': DashRegtest,
    'zcash_sprout_main': ZcashSproutMain,
    'zcash_sprout_test': ZcashSproutTest,
    'zcash_sprout_reg': ZcashSproutRegtest,
    'zcash_overwinter_main': ZcashOverwinterMain,
    'zcash_overwinter_test': ZcashOverwinterTest,
    'zcash_overwinter_reg': ZcashOverwinterRegtest,
    'zcash_sapling_main': ZcashSaplingMain,
    'zcash_sapling_test': ZcashSaplingTest,
    'zcash_sapling_reg': ZcashSaplingRegtest,
    'decred_main': DecredMain,
    'decred_test': DecredTest,
    'decred_simnet': DecredSimnet,
    'pivx_main': PivxMain,
    'pivx_test': PivxTest,
    'pivx_reg': PivxRegtest,
    'viacoin_main': ViacoinMain,
    'viacoin_test': ViacoinTest,
    'viacoin_simnet': ViacoinSimnet,
    'feathercoin_main': FeathercoinMain,
    'feathercoin_test': FeathercoinTest,
    'feathercoin_reg': FeathercoinRegtest,
    'bitcoin_dark_main': BitcoinDarkMain,
    'bitcoin_dark_test': BitcoinDarkTest,
    'bitcoin_dark_reg': BitcoinDarkRegtest,
    'axe_main': AxeMain,
    'axe_test': AxeTest,
    'axe_reg': AxeRegtest,
    'bitcore_main': BitcoreMain,
    'bitcore_test': BitcoreTest,
    'bitcore_reg': BitcoreRegtest,
    'digibyte_main': DigibyteMain,
    'digibyte_test': DigibyteTest,
    'digibyte_reg': DigibyteRegtest,
    'groestlcoin_main': GroestlcoinMain,
    'groestlcoin_test': GroestlcoinTest,
    'groestlcoin_reg': GroestlcoinRegtest,
    'monacoin_main': MonacoinMain,
    'monacoin_test': MonacoinTest,
    'monacoin_reg': MonacoinRegtest,
    'navcoin_main': NavcoinMain,
    'navcoin_test': NavcoinTest,
    'navcoin_reg': NavcoinRegtest,
    'syscoin_main': SyscoinMain,
    'syscoin_test': SyscoinTest,
    'syscoin_reg': SyscoinRegtest,
    'vertcoin_main': VertcoinMain,
    'vertcoin_test': VertcoinTest,
    'vertcoin_reg': VertcoinRegtest,
    'bitcoin_private_main': BitcoinPrivateMain,
    'bitcoin_private_test': BitcoinPrivateTest,
    'bitcoin_private_reg': BitcoinPrivateRegtest,
    'verge_main': VergeMain,
    'verge_test': VergeTest,
    'verge_reg': VergeRegtest
}
def get_network(name):
    '''
    Check by name if network is supported. Then return the class.

    Args:
        name (str): the registry key, e.g. 'bitcoin_main'
    Returns:
        The Network subclass for that chain/subnet
    Raises:
        ValueError: if the name is not in SUPPORTED
    '''
    if name not in SUPPORTED:
        # NB: typo fixed in message ('specifed' -> 'specified')
        raise ValueError('Unknown chain specified: {}'.format(name))
    return SUPPORTED[name]
from riemann import tx
from riemann import utils as rutils
from riemann.encoding import addresses as addr
from riemann.script import serialization as ser
from riemann.script import opcodes
MAX_STANDARD_TX_WEIGHT = 400000
MIN_STANDARD_TX_NONWITNESS_SIZE = 82
def check_is_standard_tx(t: tx.Tx) -> bool:
    '''
    Analog of Bitcoin's IsStandardTx
    Args:
        t (tx.Tx): the transaction
    Returns:
        (bool): True for standard, false for non-standard
    '''
    # 'version'
    # NB: compare the full little-endian version integer; the original
    #     inspected only the low byte, so e.g. version 257 read as 1
    if rutils.le2i(t.version) not in (1, 2):
        return False
    # 'tx-size': weight = 3 * stripped size + total size
    if len(t.no_witness()) * 3 + len(t) > MAX_STANDARD_TX_WEIGHT:
        return False
    for tx_in in t.tx_ins:
        try:
            # 'scriptsig-size'
            # 'scriptsig-not-pushonly'
            if (len(tx_in.script_sig) > 1650
                    or not is_push_only(tx_in.script_sig)):
                return False
        except Exception:
            # an unparseable script_sig is treated as non-standard
            return False
    # 'scriptpubkey'
    # 'dust'
    # 'multi-op-return'
    if not check_is_standard(t):
        return False
    return True
def is_push_only(script_sig: bytes) -> bool:
    '''
    Determines whether a script is push-only
    Does this by parsing, and inspecting non-data elements
    Args:
        script_sig (bytes): the scriptSig
    Returns:
        (bool): True if Push Only, otherwise False
    '''
    script = ser.deserialize(script_sig)
    # NB: deserialize returns a human-readable string, so it must be split
    #     into tokens. Iterating the string itself (as the original did)
    #     yields single characters, which can never equal 'OP_', so every
    #     script was wrongly reported as push-only.
    non_data_opcodes = [t for t in script.split() if t[0:3] == 'OP_']
    for token in non_data_opcodes:
        integer_opcode = opcodes.CODE_TO_INT[token]
        # 79/80 and everything at/above 97 are treated as non-push here
        if (integer_opcode in [79, 80]
                or integer_opcode >= 97):
            return False
    return True
def check_is_standard(t: tx.Tx) -> bool:
    '''
    Analog of Bitcoin's IsStandard
    Args:
        t (tx.Tx): the transaction to check
    Returns:
        (bool): True for standard, false for non-standard
    '''
    for o in t.tx_outs:
        # 'scriptpubkey'
        if not is_standard_output_type(o):
            return False
        # 'dust': sub-550-sat outputs are dust unless they are p2wpkh
        if (rutils.le2i(o.value) < 550
                and o.output_script[:2] != b'\x00\x14'):
            return False
    # 'multi-op-return'
    # NB: count only the outputs that actually ARE op_returns. The
    #     original `len([is_op_return(o) for o in t.tx_outs])` counted
    #     every output, rejecting any tx with more than one output.
    if sum(1 for o in t.tx_outs if is_op_return(o)) > 1:
        return False
    return True
def is_op_return(o: tx.TxOut) -> bool:
    '''
    Checks whether a txout is standard TX_NULL_DATA op_return output
    Args:
        o (tx.TxOut): the output
    Returns:
        (bool): True if standard opreturn, otherwise false
    '''
    # deserialize to a human-readable script string, e.g. 'OP_RETURN deadbeef'
    script: str = ser.deserialize(o.output_script)
    split_script = script.split()
    # TX_NULL_DATA, up to 83 bytes (80 for safety)
    # NOTE(review): len(script) measures the human-readable string
    # (including the 'OP_RETURN ' prefix and hex expansion of the data),
    # not the serialized script bytes -- confirm this matches the intended
    # 80-byte cap
    if (rutils.le2i(o.value) == 0
            and split_script[0] == 'OP_RETURN'
            and len(script) < 80):
        return True
    return False
def is_standard_output_type(o: tx.TxOut) -> bool:
    '''
    Checks standardness of an output based on its value and output script
    Args:
        o (tx.TxOut): the output to check
    Returns:
        (bool): True if standard, False otherwise
    '''
    # TX_SCRIPTHASH
    # TX_WITNESS_V0_KEYHASH
    # TX_WITNESS_V0_SCRIPTHASH
    # TX_PUBKEYHASH
    # anything we can re-encode as an address is standard
    try:
        addr.from_output_script(o.output_script)
        return True
    except ValueError:
        pass
    script: str = ser.deserialize(o.output_script)
    split_script = script.split()
    # TX_PUBKEY: '<pubkey> OP_CHECKSIG'
    # NB: the pubkey is token 0. The original read split_script[1], which
    #     (with len == 2) is 'OP_CHECKSIG' itself, so bytes.fromhex raised
    #     an uncaught ValueError for every pay-to-pubkey script.
    if (split_script[-1] == 'OP_CHECKSIG'
            and len(split_script) == 2):
        try:
            return len(bytes.fromhex(split_script[0])) in [33, 65]
        except ValueError:
            return False
    # TX_MULTISIG, up to x-of-3: 'OP_m <pubkeys...> OP_n OP_CHECKMULTISIG'
    if (split_script[-1] == 'OP_CHECKMULTISIG'
            and split_script[-2] in ['OP_1', 'OP_2', 'OP_3']):
        num_pubkeys = int(split_script[-2][-1])
        num_sigs = int(split_script[0][-1])
        if (num_sigs > num_pubkeys  # 3-of-2, or 16-of-3, or something
                or len(split_script) != num_pubkeys + 3):  # some junk script
            return False
        for pubkey in split_script[1:-2]:
            try:
                if len(bytes.fromhex(pubkey)) not in [33, 65]:
                    return False
            except ValueError:
                # token between OP_m and OP_n is not even valid hex
                return False
        return True
    # TX_NONSTANDARD/TX_WITNESS_UNKNOWN
    return False
def check_tx_size_small(t: tx.Tx) -> bool:
    '''
    Checks that the non-witness serialization of a tx meets the minimum
    standard size (MIN_STANDARD_TX_NONWITNESS_SIZE, 82 bytes).
    Args:
        t (tx.Tx): the transaction
    Returns:
        (bool): True for standard, False for non-standard
    '''
    return len(t.no_witness()) >= MIN_STANDARD_TX_NONWITNESS_SIZE
def check_final(
        t: tx.Tx,
        best_height: int = 0,
        best_timestamp: int = 0) -> bool:
    '''
    Checks the absolute locktime of a transaction against the best known
    chain state. Locktimes of 500,000,000 or more are interpreted as unix
    timestamps; smaller values are block heights.
    Args:
        t (tx.Tx): the transaction
        best_height (int): best known Bitcoin height
        best_timestamp (int): best known Bitcoin timestamp
    Returns:
        (bool): True if the tx is final, False otherwise
    '''
    lock_value = rutils.le2i(t.lock_time)
    # choose the comparison target based on the locktime's interpretation
    limit = best_timestamp if lock_value >= 500_000_000 else best_height
    return lock_value <= limit
def check_bip68_final():
    '''TODO: check relative locktime (BIP68) finality. Not yet implemented.'''
    ...
def check_nonstandard_inputs():
    '''TODO: check inputs for non-standardness. Not yet implemented.'''
    ...
def check_witness_nonstandard():
    '''TODO: check witnesses for non-standardness. Not yet implemented.'''
    ...
def check_too_many_sigops():
    '''TODO: check the sigop count. Not yet implemented.'''
    ...
def check_non_mandatory_script():
    '''TODO: check non-mandatory script flags. Not yet implemented.'''
    ...
import riemann
CHARSET = 'qpzry9x8gf2tvdw0s3jn54khce6mua7l'
def encode(data: bytes) -> str:
    '''Convert bytes to cashaddr-bech32'''
    prefix = riemann.network.CASHADDR_PREFIX
    if prefix is None:
        raise ValueError('Network {} does not support cashaddresses.'
                         .format(riemann.get_current_network_name()))
    # regroup the payload into 5-bit values, then append the BCH checksum
    payload = convertbits(data, 8, 5)
    checksum = calculate_checksum(prefix, payload)
    return '{}:{}'.format(prefix, b32encode(payload + checksum))
def decode(data: str) -> bytes:
    '''Convert cashaddr-bech32 to bytes'''
    if riemann.network.CASHADDR_PREFIX is None:
        raise ValueError('Network {} does not support cashaddresses.'
                         .format(riemann.get_current_network_name()))
    if data.find(riemann.network.CASHADDR_PREFIX) != 0:
        raise ValueError('Malformed cashaddr. Cannot locate prefix: {}'
                         .format(riemann.network.CASHADDR_PREFIX))
    # everything after the colon is the base32 payload
    prefix, payload = data.split(':')
    decoded = b32decode(payload)
    if not verify_checksum(prefix, decoded):
        raise ValueError('Bad cash address checksum')
    # regroup into bytes and strip the checksum from the tail
    return bytes(convertbits(decoded, 5, 8)[:-6])
def polymod(values):
    '''BCH checksum polymod over 5-bit values, as used by cashaddr.'''
    generator = (
        (0x01, 0x98f2bc8e61),
        (0x02, 0x79b76d99e2),
        (0x04, 0xf33e5fb3c4),
        (0x08, 0xae2eabe2a8),
        (0x10, 0x1e4f43e470))
    chk = 1
    for value in values:
        top = chk >> 35
        chk = ((chk & 0x07ffffffff) << 5) ^ value
        for bit, constant in generator:
            if top & bit:
                chk ^= constant
    return chk ^ 1
def prefix_expand(prefix):
    '''Expand an address prefix for checksumming: low 5 bits of each char
    followed by a zero separator.'''
    expanded = [ord(ch) & 0x1f for ch in prefix]
    expanded.append(0)
    return expanded
def calculate_checksum(prefix, payload):
    '''Compute the 8-value BCH checksum for a prefix and 5-bit payload.'''
    poly = polymod(prefix_expand(prefix) + payload + [0] * 8)
    # unpack the 40-bit polymod into eight 5-bit values, high bits first
    return [(poly >> 5 * (7 - i)) & 0x1f for i in range(8)]
def verify_checksum(prefix, payload):
    '''A payload's checksum is valid iff its polymod is zero.'''
    combined = prefix_expand(prefix) + payload
    return polymod(combined) == 0
def b32decode(inputs):
    '''Map each base32 character to its 5-bit integer value.'''
    return [CHARSET.find(letter) for letter in inputs]
def b32encode(inputs):
    '''Map each 5-bit integer value to its base32 character.'''
    return ''.join(CHARSET[code] for code in inputs)
def convertbits(data, frombits, tobits, pad=True):
    '''General power-of-2 base conversion. Returns None on invalid input.'''
    acc = 0
    bits = 0
    out = []
    maxv = (1 << tobits) - 1
    acc_mask = (1 << (frombits + tobits - 1)) - 1
    for value in data:
        # reject negative values and values wider than frombits
        if value < 0 or value >> frombits:
            return None
        acc = ((acc << frombits) | value) & acc_mask
        bits += frombits
        while bits >= tobits:
            bits -= tobits
            out.append((acc >> bits) & maxv)
    if pad:
        # left-justify any remaining bits into one final group
        if bits:
            out.append((acc << (tobits - bits)) & maxv)
    elif bits >= frombits or ((acc << (tobits - bits)) & maxv):
        # without padding, leftover bits must be insignificant zeros
        return None
    return out
import riemann
CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
def encode(data: bytes) -> str:
    '''Convert bytes to bech32'''
    if riemann.network.BECH32_HRP is None:
        raise ValueError(
            'Network ({}) does not support bech32 encoding.'
            .format(riemann.get_current_network_name()))
    # data[0] is passed as the witness version; data[1] (presumably the
    # push-length byte, mirroring decode() below) is skipped, and data[2:]
    # is the witness program
    return segwit_encode(riemann.network.BECH32_HRP, data[0], data[2:])
def decode(bech: str) -> bytes:
    '''Convert bech32 to bytes'''
    if riemann.network.BECH32_HRP is None:
        raise ValueError(
            'Network ({}) does not support bech32 encoding.'
            .format(riemann.get_current_network_name()))
    witver, witprog = segwit_decode(riemann.network.BECH32_HRP, bech)
    # repack as: version byte, push-length byte, then the witness program
    out = bytearray()
    out.append(witver)
    out.append(len(witprog))
    out.extend(witprog)
    return bytes(out)
def segwit_decode(hrp, addr):
    '''Decode a segwit address. Returns (None, None) on any failure.'''
    hrpgot, data = bech32_decode(addr)
    if hrpgot != hrp:
        return (None, None)
    # data[0] is the witness version; the rest is the 5-bit program
    decoded = convertbits(data[1:], 5, 8, False)
    # witness programs must be 2-40 bytes
    if decoded is None or len(decoded) < 2 or len(decoded) > 40:
        return (None, None)
    # witness versions above 16 are invalid
    if data[0] > 16:
        return (None, None)
    # version 0 programs must be exactly 20 or 32 bytes
    if data[0] == 0 and len(decoded) != 20 and len(decoded) != 32:
        return (None, None)
    return (data[0], decoded)
def segwit_encode(hrp, witver, witprog):
    '''Encode a segwit address.'''
    candidate = bech32_encode(hrp, [witver] + convertbits(witprog, 8, 5))
    # round-trip check: only return addresses that decode back cleanly
    if segwit_decode(hrp, candidate) == (None, None):
        return None
    return candidate
def bech32_encode(hrp, data):
    '''Compute a Bech32 string given HRP and data values.'''
    combined = data + bech32_create_checksum(hrp, data)
    payload = ''.join(CHARSET[value] for value in combined)
    # '1' is the HRP/data separator
    return hrp + '1' + payload
def bech32_decode(bech):
    '''Validate a Bech32 string, and determine HRP and data.'''
    # reject non-printable-ASCII characters and mixed-case strings
    if ((any(ord(x) < 33 or ord(x) > 126 for x in bech))
            or (bech.lower() != bech and bech.upper() != bech)):
        return (None, None)
    bech = bech.lower()
    # the separator is the LAST '1'; the HRP must be non-empty, the data
    # part must hold at least the 6-char checksum, and total length <= 90
    pos = bech.rfind('1')
    if pos < 1 or pos + 7 > len(bech) or len(bech) > 90:
        return (None, None)
    if not all(x in CHARSET for x in bech[pos + 1:]):
        return (None, None)
    hrp = bech[:pos]
    data = [CHARSET.find(x) for x in bech[pos + 1:]]
    if not bech32_verify_checksum(hrp, data):
        return (None, None)
    # strip the 6 checksum values from the returned data
    return (hrp, data[:-6])
def bech32_polymod(values):
    '''Internal function that computes the Bech32 checksum.'''
    GEN = (0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3)
    chk = 1
    for value in values:
        top = chk >> 25
        chk = ((chk & 0x1ffffff) << 5) ^ value
        for shift, constant in enumerate(GEN):
            if (top >> shift) & 1:
                chk ^= constant
    return chk
def bech32_hrp_expand(hrp):
    '''Expand the HRP into values for checksum computation.'''
    high_bits = [ord(ch) >> 5 for ch in hrp]
    low_bits = [ord(ch) & 31 for ch in hrp]
    # high bits, a zero separator, then low bits
    return high_bits + [0] + low_bits
def bech32_verify_checksum(hrp, data):
    '''Verify a checksum given HRP and converted data characters.'''
    combined = bech32_hrp_expand(hrp) + data
    return bech32_polymod(combined) == 1
def bech32_create_checksum(hrp, data):
    '''Compute the checksum values given HRP and data.'''
    values = bech32_hrp_expand(hrp) + data + [0] * 6
    polymod = bech32_polymod(values) ^ 1
    # unpack the 30-bit polymod into six 5-bit values, high bits first
    return [(polymod >> 5 * (5 - i)) & 31 for i in range(6)]
def convertbits(data, frombits, tobits, pad=True):
    '''General power-of-2 base conversion.'''
    acc = 0
    bits = 0
    out = []
    maxv = (1 << tobits) - 1
    acc_mask = (1 << (frombits + tobits - 1)) - 1
    for value in data:
        # reject negative values and values wider than frombits
        if value < 0 or value >> frombits:
            return None
        acc = ((acc << frombits) | value) & acc_mask
        bits += frombits
        while bits >= tobits:
            bits -= tobits
            out.append((acc >> bits) & maxv)
    if pad:
        # left-justify any remaining bits into one final group
        if bits:
            out.append((acc << (tobits - bits)) & maxv)
    elif bits >= frombits or ((acc << (tobits - bits)) & maxv):
        # without padding, leftover bits must be insignificant zeros
        return None
    return out
from riemann import utils
from typing import Callable, Tuple
BASE58_ALPHABET = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
BASE58_BASE = len(BASE58_ALPHABET)
BASE58_LOOKUP = dict((c, i) for i, c in enumerate(BASE58_ALPHABET))
def encode(data: bytes, checksum: bool = True) -> str:
    '''Convert binary to base58 using BASE58_ALPHABET.'''
    # base58check appends a 4-byte double-SHA256 checksum
    payload = data + utils.hash256(data)[:4] if checksum else data
    v, prefix = to_long(256, lambda x: x, payload)
    encoded = from_long(v, prefix, BASE58_BASE, lambda i: BASE58_ALPHABET[i])
    return encoded.decode('utf8')
def decode(s: str, checksum: bool = True) -> bytes:
    '''Convert base58 to binary using BASE58_ALPHABET.'''
    v, prefix = to_long(
        BASE58_BASE, lambda c: BASE58_LOOKUP[c], s.encode('utf8'))
    raw = from_long(v, prefix, 256, lambda x: x)
    if not checksum:
        return raw
    # validate and strip the trailing 4-byte double-SHA256 checksum
    payload, the_hash = raw[:-4], raw[-4:]
    if utils.hash256(payload)[:4] != the_hash:
        raise ValueError("hashed base58 has bad checksum %s" % s)
    return payload
def encode_with_checksum(data: bytes) -> str:
    '''
    A "hashed_base58" structure is a base58 integer (which looks like a
    string) with four bytes of hash data at the end.
    This function turns data into its hashed_base58 equivalent.
    '''
    # checksum defaults to True
    return encode(data)
def decode_with_checksum(s: str) -> bytes:
    '''
    If the passed string is base58check, return the binary data.
    Otherwise raises a ValueError.
    '''
    # checksum defaults to True
    return decode(s)
def has_checksum(base58: str) -> bool:
    '''Return True if and only if base58 is valid hashed_base58.'''
    try:
        decode_with_checksum(base58)
        return True
    except ValueError:
        return False
def from_long(
        v: int,
        prefix: int,
        base: int,
        charset: Callable[..., int]) -> bytes:
    '''
    The inverse of to_long. Convert an integer to an arbitrary base.
    Args:
        v: the integer value to convert
        prefix: the number of prefixed 0s to include
        base: the new radix
        charset: an array indicating printable characters to use for each value
    Raises:
        ValueError: if charset cannot map some digit to a character
    '''
    ba = bytearray()
    while v > 0:
        v, mod = divmod(v, base)
        # NB: only the charset call is guarded. The original wrapped
        #     divmod too, so a ZeroDivisionError (base == 0) was caught
        #     and then crashed with NameError on the unbound `mod`.
        try:
            ba.append(charset(mod))
        except Exception:
            raise ValueError(
                "can't convert to character corresponding to %d" % mod)
    ba.extend([charset(0)] * prefix)
    ba.reverse()
    return bytes(ba)
def to_long(
        base: int,
        lookup_f: Callable[..., int],
        s: bytes) -> Tuple[int, int]:
    '''
    Convert an array to a (possibly bignum) integer, along with a prefix
    value counting how many leading zero digits there are.
    Args:
        base: the source radix
        lookup_f: a function to convert an element of s to a value between 0
                  and base-1.
        s: the value to convert
    '''
    prefix = 0
    v = 0
    for c in s:
        v *= base
        try:
            v += lookup_f(c)
        except Exception:
            raise ValueError(f"bad character {c!r} in string {s!r}")
        # a still-zero accumulator means another leading zero digit
        if v == 0:
            prefix += 1
    return v, prefix
import riemann
from riemann import utils
from riemann.script import serialization as script_ser
def _hash_to_sh_address(
        script_hash: bytes,
        witness: bool = False,
        cashaddr: bool = True) -> str:
    '''
    Encode a script hash as a script-hash address, preferring cashaddr
    over legacy encodings whenever the current network supports it.
    Args:
        script_hash: The 20 or 32 byte hash of the script
        witness: Pass True to generate a witness address if supported.
            Default False.
        cashaddr: Pass False to prefer legacy to cashaddr. Default True
    Returns:
        The encoded address
    '''
    net = riemann.network
    if cashaddr and net.CASHADDR_P2SH is not None:
        return net.CASHADDR_ENCODER.encode(net.CASHADDR_P2SH + script_hash)
    if witness:
        return net.SEGWIT_ENCODER.encode(net.P2WSH_PREFIX + script_hash)
    return net.LEGACY_ENCODER.encode(net.P2SH_PREFIX + script_hash)
def _ser_script_to_sh_address(
        script_bytes: bytes,
        witness: bool = False,
        cashaddr: bool = True) -> str:
    '''
    Hash a serialized script and encode it as a script-hash address.
    Prefers cashaddr to legacy addresses whenever supported.
    Args:
        script_bytes: The serialized script, as a bytestring
        witness: Pass True to generate a witness address if supported.
            Default False.
        cashaddr: Pass False to prefer legacy to cashaddr. Default True
    Returns:
        The encoded address
    '''
    # witness script hashes are sha256; legacy uses hash160
    digest = utils.sha256(script_bytes) if witness \
        else utils.hash160(script_bytes)
    return _hash_to_sh_address(digest, witness=witness, cashaddr=cashaddr)
def make_sh_address(
        script_string: str,
        witness: bool = False,
        cashaddr: bool = True) -> str:
    '''
    Serialize a human-readable script and encode it as a script-hash
    address. Prefers cashaddr to legacy addresses whenever supported.
    Args:
        script_string: The human-readable script
        witness: Pass True to generate a witness address if supported.
            Default False.
        cashaddr: Pass False to prefer legacy to cashaddr. Default True
    Returns:
        The encoded address
    '''
    return _ser_script_to_sh_address(
        script_ser.serialize(script_string),
        witness=witness,
        cashaddr=cashaddr)
def make_p2wsh_address(script_string: str) -> str:
    '''
    Encode a human-readable script as a p2wsh (witness) address.
    Args:
        script_string: The human-readable script
    Returns:
        The encoded address
    '''
    return make_sh_address(script_string, witness=True)
def make_p2sh_address(script_string: str) -> str:
    '''
    Encode a human-readable script as a p2sh address, cashaddr if possible.
    Args:
        script_string: The human-readable script
    Returns:
        The encoded address
    '''
    return make_sh_address(script_string, witness=False)
def make_legacy_p2sh_address(script_string: str) -> str:
    '''
    Encode a human-readable script as a non-cashaddr p2sh address.
    Args:
        script_string: The human-readable script
    Returns:
        The encoded address
    '''
    return make_sh_address(script_string, witness=False, cashaddr=False)
def _make_pkh_address(
        pubkey_hash: bytes,
        witness: bool = False,
        cashaddr: bool = True) -> str:
    '''
    Encode a pubkey hash as a pay-to-pubkey-hash address, preferring
    cashaddr over legacy encodings whenever the network supports it.
    Args:
        pubkey_hash: The 20 or 32 byte hash of the public key
        witness: Pass True to generate a witness address if supported.
            Default False.
        cashaddr: Pass False to prefer legacy to cashaddr. Default True
    Returns:
        The encoded address
    '''
    net = riemann.network
    if cashaddr and net.CASHADDR_P2PKH is not None:
        return net.CASHADDR_ENCODER.encode(net.CASHADDR_P2PKH + pubkey_hash)
    if witness:
        return net.SEGWIT_ENCODER.encode(net.P2WPKH_PREFIX + pubkey_hash)
    return net.LEGACY_ENCODER.encode(net.P2PKH_PREFIX + pubkey_hash)
def make_pkh_address(
        pubkey: bytes,
        witness: bool = False,
        cashaddr: bool = True) -> str:
    '''
    Hash a public key and encode it as an address. Prefers cashaddr to
    legacy addresses whenever supported.
    Args:
        pubkey: The 33 or 65 byte public key
        witness: Pass True to generate a witness address if supported.
            Default False.
        cashaddr: Pass False to prefer legacy to cashaddr. Default True
    Returns:
        The encoded address
    '''
    return _make_pkh_address(
        utils.hash160(pubkey), witness=witness, cashaddr=cashaddr)
def make_p2wpkh_address(pubkey: bytes) -> str:
    '''
    Encode a pubkey as a p2wpkh (witness) address.
    Args:
        pubkey: The 33 or 65 byte public key
    Returns:
        The encoded address
    '''
    return make_pkh_address(pubkey, witness=True)
def make_p2pkh_address(pubkey: bytes) -> str:
    '''
    Encode a pubkey as a p2pkh address, cashaddr if available.
    Args:
        pubkey: The 33 or 65 byte public key
    Returns:
        The encoded address
    '''
    return make_pkh_address(pubkey, witness=False)
def make_legacy_p2pkh_address(pubkey: bytes) -> str:
    '''
    Encode a pubkey as a legacy (non-cashaddr) p2pkh address.
    Args:
        pubkey: The 33 or 65 byte public key
    Returns:
        The encoded address
    '''
    return make_pkh_address(pubkey, witness=False, cashaddr=False)
def parse(address: str) -> bytes:
    '''
    Decode an address to the underlying raw bytes
    Args:
        address: The address to parse
    Returns:
        The raw bytestring encoded by the address
    Raises:
        ValueError: if no known encoder can decode the address
    '''
    # NB: returns immutable `bytes`, matching the annotation (the original
    #     returned a bytearray), and applies the same broad exception
    #     handling to all three decoders (the original caught only
    #     ValueError for the legacy decoder).
    for encoder in (riemann.network.LEGACY_ENCODER,
                    riemann.network.SEGWIT_ENCODER,
                    riemann.network.CASHADDR_ENCODER):
        try:
            return bytes(encoder.decode(address))
        except Exception:
            continue
    raise ValueError(
        'Unsupported address format. Got: {}'.format(address))
def to_output_script(address: str) -> bytes:
    '''
    Convert an address into its associated output script.
    Args:
        address: The address to parse
    Returns:
        The output script that corresponds to the address, suitable for
        inclusion in a TxOut
    Raises:
        ValueError: if no known address format matches
    '''
    parsed = parse(address)
    parsed_hash = b''
    prefix = b''
    suffix = b''
    # witness addresses decode directly to their output script.
    # TypeError guards cover networks whose witness prefixes are None.
    try:
        if (parsed.find(riemann.network.P2WPKH_PREFIX) == 0
                and len(parsed) == 22):
            return parsed
    except TypeError:
        pass
    try:
        if (parsed.find(riemann.network.P2WSH_PREFIX) == 0
                and len(parsed) == 34):
            return parsed
    except TypeError:
        pass
    try:
        if (parsed.find(riemann.network.CASHADDR_P2SH) == 0
                and len(parsed) == len(riemann.network.CASHADDR_P2SH) + 20):
            prefix = b'\xa9\x14'  # OP_HASH160, push 20 bytes (0x14)
            # NB: slice off the CASHADDR prefix; the original used the
            #     legacy P2SH_PREFIX length, which only worked when the
            #     two prefixes happened to have the same length
            parsed_hash = parsed[len(riemann.network.CASHADDR_P2SH):]
            suffix = b'\x87'  # OP_EQUAL
    except TypeError:
        pass
    try:
        if (parsed.find(riemann.network.CASHADDR_P2PKH) == 0
                and len(parsed) == len(riemann.network.CASHADDR_P2PKH) + 20):
            prefix = b'\x76\xa9\x14'  # OP_DUP OP_HASH160, push 20 bytes
            # NB: slice off the CASHADDR prefix (see note above)
            parsed_hash = parsed[len(riemann.network.CASHADDR_P2PKH):]
            suffix = b'\x88\xac'  # OP_EQUALVERIFY OP_CHECKSIG
    except TypeError:
        pass
    if (parsed.find(riemann.network.P2PKH_PREFIX) == 0
            and len(parsed) == len(riemann.network.P2PKH_PREFIX) + 20):
        prefix = b'\x76\xa9\x14'  # OP_DUP OP_HASH160, push 20 bytes
        parsed_hash = parsed[len(riemann.network.P2PKH_PREFIX):]
        suffix = b'\x88\xac'  # OP_EQUALVERIFY OP_CHECKSIG
    if (parsed.find(riemann.network.P2SH_PREFIX) == 0
            and len(parsed) == len(riemann.network.P2SH_PREFIX) + 20):
        prefix = b'\xa9\x14'  # OP_HASH160, push 20 bytes
        parsed_hash = parsed[len(riemann.network.P2SH_PREFIX):]
        suffix = b'\x87'  # OP_EQUAL
    if parsed_hash == b'':
        raise ValueError('Cannot parse output script from address.')
    output_script = prefix + parsed_hash + suffix
    return output_script
def from_output_script(output_script: bytes, cashaddr: bool = True) -> str:
    '''
    Convert an output script (the on-chain format) to an address
    Args:
        output_script: The output script to encode as an address
        cashaddr: Pass False to prefer legacy to cashaddr. Default True.
    Returns:
        The encoded address
    Raises:
        ValueError: if the script matches no known address format
    '''
    # TypeError guards cover networks whose witness prefixes are None
    try:
        if (len(output_script) == len(riemann.network.P2WSH_PREFIX) + 32
                and output_script.find(riemann.network.P2WSH_PREFIX) == 0):
            # Script hash is the last 32 bytes
            return _hash_to_sh_address(
                output_script[-32:], witness=True, cashaddr=cashaddr)
    except TypeError:
        pass
    try:
        if (len(output_script) == len(riemann.network.P2WPKH_PREFIX) + 20
                and output_script.find(riemann.network.P2WPKH_PREFIX) == 0):
            # PKH is the last 20 bytes
            return _make_pkh_address(
                output_script[-20:], witness=True, cashaddr=cashaddr)
    except TypeError:
        pass
    # legacy p2pkh: OP_DUP OP_HASH160 <20 bytes> OP_EQUALVERIFY OP_CHECKSIG
    if len(output_script) == 25 and output_script.find(b'\x76\xa9\x14') == 0:
        return _make_pkh_address(
            output_script[3:23], witness=False, cashaddr=cashaddr)
    # legacy p2sh: OP_HASH160 <20 bytes> OP_EQUAL
    elif len(output_script) == 23 and output_script.find(b'\xa9\x14') == 0:
        return _hash_to_sh_address(
            output_script[2:22], witness=False, cashaddr=cashaddr)
    raise ValueError('Cannot parse address from script.')
def parse_hash(address: str) -> bytes:
    '''
    Extract the pubkey or script hash encoded in an address
    Args:
        address: The address to parse
    Returns:
        The 20 or 32 byte hash represented by the address
    '''
    raw = parse(address)
    # Cash addresses. The TypeError guard covers networks whose cashaddr
    # prefixes are None.
    try:
        if address.find(riemann.network.CASHADDR_PREFIX) == 0:
            for prefix in (riemann.network.CASHADDR_P2SH,
                           riemann.network.CASHADDR_P2PKH):
                if raw.find(prefix) == 0:
                    return raw[len(prefix):]
    except TypeError:
        pass
    # Segwit addresses
    try:
        if address.find(riemann.network.BECH32_HRP) == 0:
            for prefix in (riemann.network.P2WSH_PREFIX,
                           riemann.network.P2WPKH_PREFIX):
                if raw.find(prefix) == 0:
                    return raw[len(prefix):]
    except TypeError:
        pass
    # Legacy Addresses
    for prefix in (riemann.network.P2SH_PREFIX,
                   riemann.network.P2PKH_PREFIX):
        if raw.find(prefix) == 0:
            return raw[len(prefix):]
    raise ValueError('Could not parse hash, unknown error')
import asyncio
from zeta import electrum, utils
from zeta.db import checkpoint, headers
from typing import cast, List, Optional, Union
from zeta.zeta_types import Header, ElectrumHeaderNotification, ElectrumHeader
async def sync(
        outq: Optional['asyncio.Queue[Header]'] = None,
        network: str = 'bitcoin_main') -> None:  # pragma: nocover
    '''
    Starts all header tracking processes
    1. subscribe to headers feed (track chain tip)
    2. catch up to the servers' view of the chain tip
    3. clean up any headers that didn't fit a chain when we found them

    Args:
        outq: optional queue that receives each newly seen Header
        network: name of the network whose checkpoints seed the sync
    '''
    utils.LOGGER.info('starting chain sync tasks')
    best_known_block_height = _initial_setup(network)
    utils.LOGGER.info('heaviest block at {}'.format(best_known_block_height))
    # NB: assume there hasn't been a 10 block reorg
    asyncio.ensure_future(_track_chain_tip(outq))
    asyncio.ensure_future(_catch_up(best_known_block_height))
    asyncio.ensure_future(_maintain_db())
def _initial_setup(network: str) -> int:  # pragma: nocover
    '''
    Ensures the database directory exists, and tables exist
    Then set the highest checkpoint, and return its height

    Args:
        network: name of the network whose checkpoint list to use
    Returns:
        height of the heaviest known header after seeding the checkpoint
    '''
    # Get the highest checkpoint
    # NB: normally it is bad to follow height,
    #     but this is an explicitly trusted source
    latest_checkpoint = max(
        checkpoint.CHECKPOINTS[network],
        key=lambda k: k['height'])
    headers.store_header(latest_checkpoint)
    return cast(int, headers.find_heaviest()[0]['height'])
async def _track_chain_tip(
        outq: Optional['asyncio.Queue[Header]'] = None) \
        -> None:  # pragma: nocover
    '''
    subscribes to headers, and starts the header queue handler

    Args:
        outq: optional queue that will receive each parsed Header
    '''
    # electrum notifications land on this internal queue
    q: asyncio.Queue[ElectrumHeaderNotification] = asyncio.Queue()
    await electrum.subscribe_to_headers(q)
    asyncio.ensure_future(_header_queue_handler(q, outq))
async def _header_queue_handler(
        inq: 'asyncio.Queue[ElectrumHeaderNotification]',
        outq: Optional['asyncio.Queue[Header]'] = None) \
        -> None:  # pragma: nocover
    '''
    Handles a queue of incoming headers. Ingests each individually
    Args:
        inq (asyncio.Queue): the queue of headers awaiting ingestion
        outq (asyncio.Queue): optional queue that receives each parsed Header
    '''
    header_dict: ElectrumHeader
    while True:
        electrum_response = await inq.get()
        # NB: the initial result and subsequent notifications are inconsistent
        # so we try to unwrap it from a list
        try:
            header_dict = cast(List[ElectrumHeader], electrum_response)[0]
        except Exception:
            header_dict = cast(ElectrumHeader, electrum_response)
        header = headers.parse_header(header_dict['hex'])
        utils.LOGGER.info('got new header: {}'.format(header['hash']))
        # store the header and send it to the outq
        headers.store_header(header)
        if outq is not None:
            await outq.put(header)
async def _catch_up(from_height: int) -> None:  # pragma: nocover
    '''
    Catches the chain tip up to latest by batch requesting headers
    Schedules itself at a new from_height if not complete yet
    Increments by 2014 to pad against the possibility of multiple off-by-ones
    Args:
        from_height (int): height we currently have, and want to start from
    '''
    # start 10 blocks back from our known height to paper over small reorgs
    electrum_response = await electrum.get_headers(
        start_height=max(from_height - 10, 0),
        count=2016)
    utils.LOGGER.info(
        'catch-up task got {} new headers'.format(electrum_response['count']))
    # NB: we requested 2016. If we got back 2016, it's likely there are more
    if electrum_response['count'] == 2016:
        asyncio.ensure_future(_catch_up(from_height - 10 + 2014))
    _process_header_batch(electrum_response['hex'])
async def _maintain_db() -> None:  # pragma: nocover
    '''
    Loop that runs some DB maintenance tasks
    Restoring them attempts to connect them to another known header

    Every 10 minutes: re-stores floating headers, re-marks the best chain,
    and fills any gaps found in the main chain.
    '''
    utils.LOGGER.info('starting chain DB maintenance')
    while True:
        utils.LOGGER.info('running chain DB maintenance tasks')
        asyncio.ensure_future(check_for_floating_headers())
        asyncio.ensure_future(mark_best_chain())
        asyncio.ensure_future(_check_for_main_chain_gaps())
        # TODO: run this on each new header instead
        await asyncio.sleep(600)
async def mark_best_chain() -> None:
    '''
    Marks headers in the best chain.
    We do this by finding the heaviest block we know of and then traversing
    recursively up its ancestors until we reach a common ancestor
    or a missing link (which is unexpected unless we hit our checkpoint)
    '''
    # find_heaviest returns a list; the first entry is the tip we follow
    tip = headers.find_heaviest()[0]
    headers.set_chain_tip()
    utils.LOGGER.debug(
        'marking best chain. current tip is {}'.format(tip['hash']))
    await mark_best_chain_from(tip)
async def _check_for_main_chain_gaps() -> None:
    '''Scan the main chain for gaps and schedule a fill task for each.'''
    for gap_height in headers.find_main_chain_gap_ends():
        asyncio.ensure_future(_fill_in_main_chain_gap(gap_height))
async def _fill_in_main_chain_gap(height: int) -> None:
    '''
    Fills in a main chain gap ending at `height`.

    Looks up the main-chain block at the gap end and its parent, then
    re-marks the best chain walking back from that parent. Returns
    silently if either lookup fails.
    Args:
        height: height at which the gap ends
    '''
    gap_end_or_none = headers.find_main_chain_block_at_height(height)
    if gap_end_or_none is None:
        return
    gap_end = cast(Header, gap_end_or_none)
    parent_or_none = headers.find_by_hash(gap_end['prev_block'])
    if parent_or_none is None:
        return
    parent = cast(Header, parent_or_none)
    utils.LOGGER.info('filling in best chain gap ending at {}'.format(height))
    await mark_best_chain_from(parent)
async def mark_best_chain_from(current: Header) -> None:
    '''
    Marks the best chain backwards from a known height

    Follows prev_block links, marking each header best at its height,
    until mark_best_at_height reports no change or a parent is missing
    from the DB.
    Args:
        current: the header to start walking back from
    '''
    count = 0
    while headers.mark_best_at_height(current):
        # yield to the event loop every ~300 headers so a long walk
        # doesn't starve other tasks
        if count > 300:
            count = 0
            await asyncio.sleep(2)  # prevent blocking everything
        utils.LOGGER.info('marking height {} best block {}'.format(
            current['height'], current['hash']))
        next_or_none = headers.find_by_hash(current['prev_block'])
        if next_or_none is None:
            utils.LOGGER.error('failed to find parent in best chain')
            break
        current = cast(Header, next_or_none)
        count += 1
async def check_for_floating_headers() -> None:
    '''
    Finds floating headers (stored at height 0, i.e. with no known
    parent) and re-stores them so their place in the chain can be found.
    '''
    # NB: 0 means no known parent
    floating = headers.find_by_height(0)
    if floating:
        utils.LOGGER.info(
            're-storing {} floating headers'.format(len(floating)))
        # NB: store_header will attempt to find the parent and fill in
        #     height/accdiff
        for floater in floating:
            headers.store_header(floater)
def _process_header_batch(electrum_hex: str) -> None:  # pragma: nocover
    '''
    Splits electrum's concatenated header blob into 80-byte headers and
    sends them to the DB for storage

    Args:
        electrum_hex (str): The 'hex' attribute of electrum's getheaders res
    '''
    # NB: this comes as a single hex string with all headers concatenated
    blob = bytes.fromhex(electrum_hex)
    header_list: List[Union[Header, str]] = [
        blob[start:start + 80].hex() for start in range(0, len(blob), 80)]
    utils.LOGGER.info('storing {} new headers'.format(len(header_list)))
    headers.batch_store_header(header_list)
import os
from typing import Any, Dict, List
null = None # NB: I copied the list from elsewhere
SERVERS: Dict[str, List[Dict[str, Any]]] = {
'bitcoin_test': [
# {
# "nickname": null,
# "hostname": "testnet.qtornado.com",
# "ip_addr": null,
# "ports": [
# "s51002"
# ],
# "version": "1.4",
# "pruning_limit": 0,
# "seen_at": 1533670768.8676639
# }
{
"nickname": null,
"hostname": "testnet.hsmiths.com",
"ip_addr": null,
"ports": [
"s53012"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.8676639
}
],
'bitcoin_main': [
{
"nickname": null,
"hostname": "104.250.141.242",
"ip_addr": null,
"ports": [
"s50002"
],
"version": "1.1",
"pruning_limit": 0,
"seen_at": 1533670768.8676639
},
{
"nickname": null,
"hostname": "134.119.179.55",
"ip_addr": null,
"ports": [
"s50002"
],
"version": "1.0",
"pruning_limit": 0,
"seen_at": 1533670768.731586
},
{
"nickname": null,
"hostname": "139.162.14.142",
"ip_addr": null,
"ports": [
"s50002",
"t50001"
],
"version": "1.1",
"pruning_limit": 0,
"seen_at": 1533670768.8676212
},
{
"nickname": null,
"hostname": "165.227.22.180",
"ip_addr": null,
"ports": [
"s50002",
"t50001"
],
"version": "1.1",
"pruning_limit": 0,
"seen_at": 1533670768.867455
},
{
"nickname": null,
"hostname": "3smoooajg7qqac2y.onion",
"ip_addr": null,
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.867671
},
{
"nickname": null,
"hostname": "3tm3fjg3ds5fcibw.onion",
"ip_addr": null,
"ports": [
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.867779
},
{
"nickname": "electroli",
"hostname": "46.166.165.18",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.945237
},
{
"nickname": null,
"hostname": "4cii7ryno5j3axe4.onion",
"ip_addr": null,
"ports": [
"t50001"
],
"version": "1.2",
"pruning_limit": 0,
"seen_at": 1533670768.86764
},
{
"nickname": null,
"hostname": "4yi77lkjgy4bwtj3.onion",
"ip_addr": null,
"ports": [
"s50002",
"t50001"
],
"version": "1.1",
"pruning_limit": 0,
"seen_at": 1533670768.86769
},
{
"nickname": null,
"hostname": "7jwtirwsaogb6jv2.onion",
"ip_addr": null,
"ports": [
"s50002",
"t50001"
],
"version": "1.2",
"pruning_limit": 0,
"seen_at": 1533670768.8675048
},
{
"nickname": null,
"hostname": "abc1.hsmiths.com",
"ip_addr": "76.174.26.91",
"ports": [
"s60002",
"t60001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.584984
},
{
"nickname": null,
"hostname": "alviss.coinjoined.com",
"ip_addr": "94.130.136.185",
"ports": [
"s50002",
"t50001"
],
"version": "1.1",
"pruning_limit": 0,
"seen_at": 1533670768.867656
},
{
"nickname": "antumbra",
"hostname": "antumbra.se",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.022753
},
{
"nickname": "j_fdk_b",
"hostname": "b.1209k.com",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.020474
},
{
"nickname": null,
"hostname": "bauerjda5hnedjam.onion",
"ip_addr": null,
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.867549
},
{
"nickname": null,
"hostname": "Bitcoin-node.nl",
"ip_addr": "82.217.214.215",
"ports": [
"s50002",
"t50001"
],
"version": "1.1",
"pruning_limit": 0,
"seen_at": 1533670768.867786
},
{
"nickname": null,
"hostname": "bitcoin.corgi.party",
"ip_addr": "176.223.139.65",
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.867529
},
{
"nickname": null,
"hostname": "bitcoin.grey.pw",
"ip_addr": "173.249.8.197",
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.8676748
},
{
"nickname": null,
"hostname": "bitcoin3nqy3db7c.onion",
"ip_addr": null,
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.867647
},
{
"nickname": null,
"hostname": "btc.cihar.com",
"ip_addr": "78.46.177.74",
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.759933
},
{
"nickname": null,
"hostname": "btc.gravitech.net",
"ip_addr": "37.187.167.132",
"ports": [
"s50002"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.86759
},
{
"nickname": "mustyoshi",
"hostname": "btc.mustyoshi.com",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.945437
},
{
"nickname": null,
"hostname": "btc.outoftime.co",
"ip_addr": "121.44.121.158",
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.867712
},
{
"nickname": "selavi",
"hostname": "btc.smsys.me",
"ip_addr": null,
"ports": [
"t110",
"s995"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.021612
},
{
"nickname": "cplus",
"hostname": "btc1.commerce-plus.com",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.96599
},
{
"nickname": "clueless",
"hostname": "cluelessperson.com",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.021714
},
{
"nickname": "condor1003",
"hostname": "condor1003.server4you.de",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.945338
},
{
"nickname": null,
"hostname": "currentlane.lovebitco.in",
"ip_addr": "88.198.91.74",
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.8676338
},
{
"nickname": null,
"hostname": "daedalus.bauerj.eu",
"ip_addr": "84.200.105.74",
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.8677042
},
{
"nickname": null,
"hostname": "dxm.no-ip.biz",
"ip_addr": "78.51.123.122",
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.628011
},
{
"nickname": null,
"hostname": "e-1.claudioboxx.com",
"ip_addr": "37.61.209.146",
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.8677719
},
{
"nickname": null,
"hostname": "e-2.claudioboxx.com",
"ip_addr": "37.61.209.147",
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.867769
},
{
"nickname": null,
"hostname": "e-4.claudioboxx.com",
"ip_addr": "134.119.179.67",
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.8678012
},
{
"nickname": null,
"hostname": "E-X.not.fyi",
"ip_addr": "170.130.28.174",
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.86766
},
{
"nickname": "ECDSA",
"hostname": "ecdsa.net",
"ip_addr": null,
"ports": [
"t",
"s110"
],
"version": "1.0",
"pruning_limit": 100,
"seen_at": 1465686119.02029
},
{
"nickname": "fydel",
"hostname": "ele.einfachmalnettsein.de",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.022509
},
{
"nickname": "Luggs",
"hostname": "elec.luggs.co",
"ip_addr": "95.211.185.14",
"ports": [
"s443"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.867761
},
{
"nickname": "Pielectrum",
"hostname": "ELEC.Pieh0.co.uk",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.022244
},
{
"nickname": "trouth_eu",
"hostname": "electrum-europe.trouth.net",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.040206
},
{
"nickname": null,
"hostname": "electrum-unlimited.criptolayer.net",
"ip_addr": "188.40.93.205",
"ports": [
"s50002"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.696258
},
{
"nickname": "molec",
"hostname": "electrum.0x0000.de",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.966419
},
{
"nickname": "anonymized1",
"hostname": "electrum.anonymized.io",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.020339
},
{
"nickname": null,
"hostname": "electrum.dk",
"ip_addr": "92.246.24.225",
"ports": [
"s50002",
"t50001"
],
"version": "1.1",
"pruning_limit": 0,
"seen_at": 1533670768.867573
},
{
"nickname": "DragonZone",
"hostname": "electrum.dragonzone.net",
"ip_addr": null,
"ports": [
"t",
"h",
"s",
"g"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.966315
},
{
"nickname": null,
"hostname": "electrum.eff.ro",
"ip_addr": "185.36.252.200",
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.867525
},
{
"nickname": null,
"hostname": "electrum.festivaldelhumor.org",
"ip_addr": "173.212.247.250",
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.8674629
},
{
"nickname": "hsmiths",
"hostname": "electrum.hsmiths.com",
"ip_addr": "76.174.26.91",
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.867747
},
{
"nickname": null,
"hostname": "electrum.infinitum-nihil.com",
"ip_addr": "192.30.120.110",
"ports": [
"s50002"
],
"version": "1.0",
"pruning_limit": 0,
"seen_at": 1533670768.73193
},
{
"nickname": "JWU42",
"hostname": "ELECTRUM.jdubya.info",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 1000,
"seen_at": 1465686119.022112
},
{
"nickname": null,
"hostname": "electrum.leblancnet.us",
"ip_addr": "69.27.173.238",
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.867794
},
{
"nickname": "RMevaere",
"hostname": "electrum.mevaere.fr",
"ip_addr": null,
"ports": [
"t0",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.945171
},
{
"nickname": "neocrypto",
"hostname": "electrum.neocrypto.io",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.021477
},
{
"nickname": "netpros",
"hostname": "electrum.netpros.co",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.020614
},
{
"nickname": "NOIP",
"hostname": "electrum.no-ip.org",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.020677
},
{
"nickname": "Online",
"hostname": "Electrum.Online",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.020231
},
{
"nickname": null,
"hostname": "electrum.qtornado.com",
"ip_addr": "88.99.162.199",
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.8676858
},
{
"nickname": "faro",
"hostname": "electrum.site2.me",
"ip_addr": null,
"ports": [
"t40001",
"s40002"
],
"version": "1.0",
"pruning_limit": 100,
"seen_at": 1465686119.020781
},
{
"nickname": "Snipa",
"hostname": "electrum.snipanet.com",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.021272
},
{
"nickname": "stoff-sammlung",
"hostname": "electrum.stoff-sammlung.de",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.966188
},
{
"nickname": "gORlECTRUM",
"hostname": "ELECTRUM.top-master.com",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.020941
},
{
"nickname": "trouth",
"hostname": "electrum.trouth.net",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.02263
},
{
"nickname": "dogydins",
"hostname": "electrum.villocq.com",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.040277
},
{
"nickname": "eniac",
"hostname": "electrum0.snel.it",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.94539
},
{
"nickname": null,
"hostname": "electrumx.bot.nu",
"ip_addr": "173.91.90.62",
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.867776
},
{
"nickname": null,
"hostname": "electrumx.nmdps.net",
"ip_addr": "109.61.102.5",
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.867459
},
{
"nickname": "Pielectrum_TOR",
"hostname": "electrumx67xeros.onion",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.944136
},
{
"nickname": null,
"hostname": "electrumxhqdsmlu.onion",
"ip_addr": null,
"ports": [
"t50001"
],
"version": "1.2",
"pruning_limit": 0,
"seen_at": 1533670768.628143
},
{
"nickname": "j_fdk_b_tor",
"hostname": "fdkbwjykvl2f3hup.onion",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.020525
},
{
"nickname": "j_fdk_h_tor",
"hostname": "fdkhv2bb7hqel2e7.onion",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.021149
},
{
"nickname": "electron",
"hostname": "gh05.geekhosters.com",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.945288
},
{
"nickname": "j_fdk_h",
"hostname": "h.1209k.com",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.020997
},
{
"nickname": null,
"hostname": "helicarrier.bauerj.eu",
"ip_addr": "178.32.88.133",
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.867583
},
{
"nickname": null,
"hostname": "hsmiths4fyqlw5xw.onion",
"ip_addr": null,
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.867569
},
{
"nickname": null,
"hostname": "hsmiths5mjk6uijs.onion",
"ip_addr": null,
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.86774
},
{
"nickname": "DEVV",
"hostname": "ilikehuskies.no-ip.org",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.022576
},
{
"nickname": null,
"hostname": "ip101.ip-54-37-91.eu",
"ip_addr": "54.37.91.101",
"ports": [
"s50002",
"t50001"
],
"version": "1.1",
"pruning_limit": 0,
"seen_at": 1533670768.696357
},
{
"nickname": null,
"hostname": "ip119.ip-54-37-91.eu",
"ip_addr": "54.37.91.119",
"ports": [
"s50002",
"t50001"
],
"version": "1.1",
"pruning_limit": 0,
"seen_at": 1533670768.867723
},
{
"nickname": null,
"hostname": "ip120.ip-54-37-91.eu",
"ip_addr": "54.37.91.120",
"ports": [
"s50002",
"t50001"
],
"version": "1.1",
"pruning_limit": 0,
"seen_at": 1533670768.867553
},
{
"nickname": null,
"hostname": "ip239.ip-54-36-234.eu",
"ip_addr": "54.36.234.239",
"ports": [
"s50002",
"t50001"
],
"version": "1.1",
"pruning_limit": 0,
"seen_at": 1533670768.867719
},
{
"nickname": "fydel_tor",
"hostname": "ixxdq23ewy77sau6.onion",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.02234
},
{
"nickname": null,
"hostname": "iy5jbpzok4spzetr.onion",
"ip_addr": null,
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.867765
},
{
"nickname": "JWU42[b]",
"hostname": "jwu42.hopto.org",
"ip_addr": null,
"ports": [
"t50003",
"s50004"
],
"version": "1.0",
"pruning_limit": 1000,
"seen_at": 1465686119.022186
},
{
"nickname": null,
"hostname": "kirsche.emzy.de",
"ip_addr": "78.47.61.83",
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.867726
},
{
"nickname": null,
"hostname": "liyqfqfsiewcsumb.onion",
"ip_addr": null,
"ports": [
"s50003",
"t50001"
],
"version": "1.2",
"pruning_limit": 0,
"seen_at": 1533670768.867557
},
{
"nickname": null,
"hostname": "luggscoqbymhvnkp.onion",
"ip_addr": null,
"ports": [
"t80"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.8674839
},
{
"nickname": "j_fdk_mash_tor",
"hostname": "mashtk6hmnysevfj.onion",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.021092
},
{
"nickname": null,
"hostname": "ndnd.selfhost.eu",
"ip_addr": "217.233.81.39",
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.86773
},
{
"nickname": null,
"hostname": "ndndword5lpb7eex.onion",
"ip_addr": null,
"ports": [
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.867682
},
{
"nickname": null,
"hostname": "orannis.com",
"ip_addr": "50.35.67.146",
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.867517
},
{
"nickname": "selavi_tor",
"hostname": "ozahtqwp25chjdjd.onion",
"ip_addr": null,
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.628438
},
{
"nickname": null,
"hostname": "qtornadoklbgdyww.onion",
"ip_addr": null,
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.628462
},
{
"nickname": null,
"hostname": "rbx.curalle.ovh",
"ip_addr": "176.31.252.219",
"ports": [
"s50002"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.867744
},
{
"nickname": "cplus_tor",
"hostname": "rvm6c7kj63mtztgn.onion",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.965912
},
{
"nickname": null,
"hostname": "ryba-btc.noip.pl",
"ip_addr": "109.199.70.182",
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.86748
},
{
"nickname": null,
"hostname": "rybabtcmltnlykbd.onion",
"ip_addr": null,
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.867521
},
{
"nickname": null,
"hostname": "s7clinmo4cazmhul.onion",
"ip_addr": null,
"ports": [
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.867476
},
{
"nickname": null,
"hostname": "such.ninja",
"ip_addr": "163.172.61.154",
"ports": [
"s50002",
"t50001"
],
"version": "1.1",
"pruning_limit": 0,
"seen_at": 1533670768.867708
},
{
"nickname": null,
"hostname": "tardis.bauerj.eu",
"ip_addr": "51.15.138.64",
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.867758
},
{
"nickname": "ulrichard",
"hostname": "ulrichard.ch",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 10000,
"seen_at": 1465686119.020178
},
{
"nickname": "ECO",
"hostname": "ultra-ecoelectrum.my-gateway.de",
"ip_addr": null,
"ports": [
"t",
"s"
],
"version": "1.0",
"pruning_limit": 100,
"seen_at": 1465686119.020727
},
{
"nickname": "US",
"hostname": "us.electrum.be",
"ip_addr": "208.110.73.107",
"ports": [
"s50002",
"t50001"
],
"version": "1.1",
"pruning_limit": 0,
"seen_at": 1533670768.494337
},
{
"nickname": "hsmiths2",
"hostname": "VPS.hsmiths.com",
"ip_addr": "51.15.77.78",
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.8675961
},
{
"nickname": null,
"hostname": "wsw6tua3xl24gsmi264zaep6seppjyrkyucpsmuxnjzyt3f3j6swshad.onion",
"ip_addr": null,
"ports": [
"s50002",
"t50001"
],
"version": "1.4",
"pruning_limit": 0,
"seen_at": 1533670768.86779
},
{
"nickname": null,
"hostname": "y4td57fxytoo5ki7.onion",
"ip_addr": null,
"ports": [
"s50002",
"t50001"
],
"version": "1.1",
"pruning_limit": 0,
"seen_at": 1533670768.867754
}
]
}
import sqlite3
from zeta import crypto
from zeta.db import connection
from riemann.encoding import addresses as addr
from typing import cast, Optional, List
from zeta.zeta_types import KeyEntry
def key_from_row(
        row: sqlite3.Row,
        secret_phrase: Optional[str] = None,
        get_priv: bool = False) -> KeyEntry:
    '''
    Converts a DB row into a KeyEntry dict. The privkey is decrypted only
    when explicitly requested AND a secret phrase is supplied; otherwise
    it is blanked so the encrypted blob never leaks out.
    '''
    entry = cast(KeyEntry, {key: row[key] for key in row.keys()})
    if get_priv and secret_phrase:
        entry['privkey'] = crypto.decode_aes(row['privkey'], secret_phrase)
    else:
        entry['privkey'] = b''
    return entry
def validate_key(k: KeyEntry) -> bool:
    '''
    Checks internal consistency of a key entry: required fields present,
    well-formed pubkey, privkey/pubkey agreement, and address derived
    from the pubkey
    '''
    # missing expected keys, prevents runtime errors later in this method
    if not {'pubkey', 'privkey', 'address'}.issubset(k.keys()):
        return False
    # pubkey is malformatted
    if not crypto.is_pubkey(k['pubkey']):
        return False
    # pubkey matches privkey (only checkable when a privkey is present)
    if k['privkey'] != b'':
        derived = crypto.to_pubkey(crypto.coerce_key(k['privkey'])).hex()
        if derived != k['pubkey']:
            return False
    # address matches pubkey
    return k['address'] == addr.make_p2wpkh_address(bytes.fromhex(k['pubkey']))
def store_key(key_entry: KeyEntry, secret_phrase: Optional[str]) -> bool:
    '''
    Validates and stores a key entry, encrypting its privkey under the
    secret phrase. Also inserts a placeholder `addresses` row for the
    key's address.

    Args:
        key_entry: the key to store; must pass validate_key
        secret_phrase: passphrase used to encrypt the privkey
            NOTE(review): a None phrase is cast to str and handed to
            encode_aes -- confirm encode_aes tolerates None
    Returns:
        True once stored and committed, False if validation failed
    '''
    if not validate_key(key_entry):
        return False
    # work on a copy so the caller's dict is not mutated by the
    # privkey encryption below
    k = key_entry.copy()  # type: ignore
    c = connection.get_cursor()
    try:
        k['privkey'] = crypto.encode_aes(
            message_bytes=k['privkey'],
            secret_phrase=cast(str, secret_phrase))
        # OR IGNORE: keep any existing address row untouched
        c.execute(
            '''
            INSERT OR IGNORE INTO addresses VALUES (
                :address,
                :script)
            ''',
            {'address': k['address'], 'script': b''})
        # OR REPLACE: the key row itself is always refreshed
        c.execute(
            '''
            INSERT OR REPLACE INTO keys VALUES (
                :pubkey,
                :privkey,
                :derivation,
                :chain,
                :address)
            ''',
            k)
        connection.commit()
        return True
    finally:
        c.close()
def find_by_address(
        address: str,
        secret_phrase: Optional[str] = None,
        get_priv: bool = False) -> Optional[KeyEntry]:
    '''
    finds a key by its primary address
    its primary address is the bech32 p2wpkh of its compressed pubkey
    '''
    c = connection.get_cursor()
    try:
        cursor = c.execute(
            '''
            SELECT * FROM keys
            WHERE address = :address
            ''',
            {'address': address})
        # at most one row can match the primary address
        row = cursor.fetchone()
        if row is None:
            return None
        return key_from_row(row, secret_phrase, get_priv)
    finally:
        c.close()
def find_by_pubkey(
        pubkey: str,
        secret_phrase: Optional[str] = None,
        get_priv: bool = False) -> List[KeyEntry]:
    '''
    finds all key entries matching a pubkey
    '''
    c = connection.get_cursor()
    try:
        rows = c.execute(
            '''
            SELECT * FROM keys
            WHERE pubkey = :pubkey
            ''',
            {'pubkey': pubkey})
        return [key_from_row(row, secret_phrase, get_priv) for row in rows]
    finally:
        c.close()
def find_by_script(
        script: bytes,
        secret_phrase: Optional[str] = None,
        get_priv: bool = False) -> List[KeyEntry]:
    '''
    Finds all KeyEntries whose pubkey appears in a certain script

    Args:
        script: the script to search for pubkeys in
        secret_phrase: passphrase used for privkey decryption
        get_priv: whether to decrypt and return privkeys
    Returns:
        the matching key entries
    '''
    c = connection.get_cursor()
    try:
        rows = c.execute(
            '''
            SELECT * FROM keys
            WHERE pubkey IN
                (SELECT pubkey FROM pubkey_to_script
                WHERE script = :script)
            ''',
            {'script': script})
        return [key_from_row(row, secret_phrase, get_priv) for row in rows]
    finally:
        c.close()
def count_keys() -> int:
    '''
    Returns the number of keys in the database

    Returns:
        (int): the key count
    '''
    c = connection.get_cursor()
    try:
        row = c.execute(
            '''
            SELECT COUNT(*) FROM keys
            ''').fetchone()
        return row[0]
    finally:
        c.close()
import math
import sqlite3
from riemann import utils as rutils
from zeta.db import connection
from zeta.zeta_types import Header
from typing import cast, List, Optional, Tuple, Union
def header_from_row(row: sqlite3.Row) -> Header:
    '''
    Converts a sqlite3 Row into a Header dict
    '''
    return cast(Header, {key: row[key] for key in row.keys()})
def check_work(header: Header) -> bool:
    '''
    Checks a header's proof-of-work against the target encoded in its
    own nbits field

    Args:
        header (Header): the header to check
    Returns:
        (bool): True if the hash meets the target, otherwise False
    '''
    target = make_target(bytes.fromhex(cast(str, header['nbits'])))
    return int(cast(str, header['hash']), 16) <= target
def make_target(nbits: bytes) -> int:
    '''
    converts an nbits from a header into the target threshold

    Args:
        nbits (bytes): the 4-byte nbits bytestring
    Returns:
        (int): the target threshold
    '''
    # last byte is the base-256 exponent; first 3 (LE) are the mantissa
    mantissa = rutils.le2i(nbits[:-1])
    exponent = rutils.be2i(nbits[-1:]) - 3
    return math.floor(mantissa * 0x100 ** exponent)
def parse_difficulty(nbits: bytes) -> int:
    '''
    converts an nbits from a header into the difficulty

    Args:
        nbits (bytes): the 4-byte nbits bytestring
    Returns:
        (int): the difficulty (no decimals)
    '''
    # difficulty is max_target / target, using the genesis nbits as max
    max_target = make_target(b'\xff\xff\x00\x1d')
    return max_target // make_target(nbits)
def parse_header(header: str) -> Header:
    '''
    Parses an 80-byte hex header into a Header dict

    Args:
        header (str): hex formatted 80 byte header
    Returns:
        dict:
            hash (str): the header hash 0000-first
            version (int): the block version as an int
            prev_block (str): the previous block hash 0000-first
            merkle_root (str): the block transaction merkle tree root
            timestamp (int): the block header timestamp
            nbits (str): the difficulty bits
            nonce (str): the nonce
            difficulty (int): the difficulty as an int
            hex (str): the full header as hex
            height (int): the block height (always 0; filled in later)
            accumulated_work (int): always 0; filled in later
    Raises:
        ValueError: if the input is not exactly 160 hex characters
    '''
    if len(header) != 160:
        raise ValueError('Invalid header received')
    raw = bytes.fromhex(header)
    nbits = raw[72:76]
    return {
        'hash': rutils.hash256(raw)[::-1].hex(),
        'version': rutils.le2i(raw[0:4]),
        'prev_block': raw[4:36][::-1].hex(),
        'merkle_root': raw[36:68].hex(),
        'timestamp': rutils.le2i(raw[68:72]),
        'nbits': nbits.hex(),
        'nonce': raw[76:80].hex(),
        'difficulty': parse_difficulty(nbits),
        'hex': header,
        'height': 0,
        'accumulated_work': 0
    }
def batch_store_header(h: List[Union[Header, str]]) -> bool:
    '''
    Stores a batch of headers in the database.

    Pipeline: normalize raw hex into Header dicts, drop headers that
    fail their own proof-of-work check, trim leading headers with no
    parent known to the DB, link intra-batch parents to fill in
    height/accumulated work, then insert everything.

    Args:
        header list(str or dict): parsed or unparsed header
    Returns:
        (bool): true if succesful, false if error
    '''
    # TODO: Refactor and improve
    c = connection.get_cursor()
    headers: List[Header] = [normalize_header(header) for header in h]
    # discard headers whose hash doesn't satisfy their nbits target
    headers = list(filter(check_work, headers))
    # assumes the batch is ordered oldest-first (see _trim_batch)
    headers = _trim_batch(headers)
    for header in headers:
        _find_parent_in_batch(header, headers)
    try:
        for header in headers:
            c.execute(
                '''
                INSERT OR REPLACE INTO headers VALUES (
                    :hash,
                    :version,
                    :prev_block,
                    :merkle_root,
                    :timestamp,
                    :nbits,
                    :nonce,
                    :difficulty,
                    :hex,
                    :height,
                    :accumulated_work)
                ''',
                (header))
        connection.commit()
        return True
    finally:
        c.close()
def normalize_header(header: Union[Header, str]) -> Header:
'''
Normalizes string header inputs to Header objects
Args:
header (str or Header): the string or object input
Returns:
(Header): the normalized header
'''
if isinstance(header, str):
parsed_header = parse_header(cast(str, header))
else:
parsed_header = cast(Header, header)
parsed_header['height'] = 0
parsed_header['accumulated_work'] = 0
return parsed_header
def _trim_batch(batch: List[Header]) -> List[Header]:
    '''
    Discards leading headers that cannot be linked to the DB.

    Finds the first header in the batch whose parent is already stored,
    fills in its height and accumulated work from that parent, and drops
    everything before it. Assumes the batch is ordered oldest-first.
    Returns the batch unchanged if no header has a known parent.

    Args:
        batch (List(Header)): the normalized header batch
    Returns:
        (List(Header)): the batch, trimmed to start at a linkable header
    '''
    for i in range(len(batch)):
        parent = find_by_hash(cast(str, batch[i]['prev_block']))
        if parent:
            batch[i]['height'] = parent['height'] + 1
            # NB: add this header's OWN difficulty. The previous code
            #     added batch[0]'s difficulty, which is wrong whenever
            #     i != 0 and disagrees with _find_parent_in_batch and
            #     try_to_associate_height_and_work.
            batch[i]['accumulated_work'] = (
                parent['accumulated_work']
                + batch[i]['difficulty'])
            return batch[i:]
    return batch
def _find_parent_in_batch(header: Header, batch: List[Header]) -> None:
    '''
    Finds a header's parent within the current batch and, when that
    parent already has a known height, fills in the header's height and
    accumulated work in place

    Args:
        header (Header): the header we care about
        batch (List(Header)): the current batch
    '''
    if header['height'] != 0:
        # already linked; nothing to do
        return
    matches = [candidate for candidate in batch
               if candidate['hash'] == header['prev_block']]
    if not matches or matches[0]['height'] == 0:
        # no in-batch parent, or the parent itself is still unlinked
        return
    parent = matches[0]
    header['height'] = parent['height'] + 1
    header['accumulated_work'] = (
        parent['accumulated_work'] + header['difficulty'])
def try_to_associate_height_and_work(header: Header) -> Header:
    '''
    Tries to associate height and work with a header based on a parent
    already in the db. Mutates the header in place and also returns it.

    Args:
        header (Header): the header we're looking for info on
    Returns:
        (Header): the modified header
    '''
    parent_height, parent_work = parent_height_and_work(header)
    if parent_height == 0:
        # no known parent: leave the header floating
        header['height'] = 0
        header['accumulated_work'] = 0
    else:
        header['height'] = parent_height + 1
        header['accumulated_work'] = parent_work + header['difficulty']
    return header
def parent_height_and_work(header: Header) -> Tuple[int, int]:
    '''
    Find the header's parent in the DB and return its height and work

    Args:
        header (Header): the child header
    Returns:
        (int, int): the parent's height and work, both 0 if not found
    '''
    found = find_by_hash(header['prev_block'])
    if found is None:
        return 0, 0
    parent = cast(Header, found)
    return parent['height'], parent['accumulated_work']
def store_header(header: Union[Header, str]) -> bool:
    '''
    Stores a single header in the database after verifying its work.
    Floating headers (height 0) get a parent lookup first, so storing a
    header whose parent has since arrived fills in height/work.

    Args:
        header (str or dict): parsed or unparsed header
    Returns:
        (bool): true if succesful, false if error
    Raises:
        ValueError: if the header fails its own proof-of-work check
    '''
    if isinstance(header, str):
        header = parse_header(header)
    if not check_work(header):
        raise ValueError('Invalid header')
    if header['height'] == 0:
        # mutates `header` in place when a parent is found in the DB
        try_to_associate_height_and_work(header)
    c = connection.get_cursor()
    try:
        c.execute(
            '''
            INSERT OR REPLACE INTO headers VALUES (
                :hash,
                :version,
                :prev_block,
                :merkle_root,
                :timestamp,
                :nbits,
                :nonce,
                :difficulty,
                :hex,
                :height,
                :accumulated_work)
            ''',
            (header))
        connection.commit()
        return True
    finally:
        c.close()
def find_by_height(height: int) -> List[Header]:
    '''
    Finds headers by blockheight. Can return more than 1 (competing
    forks, or height 0 which by convention means "parent unknown")

    Args:
        height (int): integer blockheight
    Returns:
        (List(Header)): every stored header at that height
    '''
    c = connection.get_cursor()
    try:
        rows = c.execute(
            '''
            SELECT * FROM headers
            WHERE height = :height
            ''',
            {'height': height})
        return [header_from_row(row) for row in rows]
    finally:
        c.close()
def find_by_hash(hash: str) -> Optional[Header]:
    '''
    Finds a header by its hash

    Args:
        hash (str): 0000-first header hash
    Returns:
        Optional(Header): the header if found, otherwise None
    '''
    c = connection.get_cursor()
    try:
        row = c.execute(
            '''
            SELECT * FROM headers
            WHERE hash = :hash
            ''',
            {'hash': hash}).fetchone()
        if row is None:
            return None
        return header_from_row(row)
    finally:
        c.close()
def find_highest() -> List[Header]:
    '''
    Finds the highest headers we know of.
    This is not very useful

    Returns:
        (List(Header)): the highest headers
    '''
    c = connection.get_cursor()
    try:
        rows = c.execute(
            '''
            SELECT * FROM headers
            WHERE height = (SELECT max(height) FROM headers)
            '''
        )
        return [header_from_row(row) for row in rows]
    finally:
        c.close()
def find_heaviest() -> List[Header]:
    '''
    Finds the heaviest blocks we know of.
    This returns a list, because between difficulty resets the blocks
    accumulate work at the same rate. Callers generally take entry 0.

    Returns:
        (List(Header)): the heaviest headers
    '''
    c = connection.get_cursor()
    try:
        rows = c.execute(
            '''
            SELECT * FROM headers
            WHERE accumulated_work =
                (SELECT max(accumulated_work) FROM headers)
            '''
        )
        return [header_from_row(row) for row in rows]
    finally:
        c.close()
def find_main_chain_block_at_height(height: int) -> Optional[Header]:
    '''
    Finds a block in the main chain at a specific height

    Args:
        height (int): the height we're looking for

    Returns:
        Optional(Header): the main chain block at that height or None
    '''
    cursor = connection.get_cursor()
    try:
        rows = cursor.execute(
            '''
            SELECT * FROM headers WHERE hash IN
            (SELECT hash FROM best_chain
            WHERE height = :height)
            ''', {'height': height})
        # return the first match, if any
        for row in rows:
            return header_from_row(row)
        return None
    finally:
        cursor.close()
def mark_best_at_height(best: Header) -> bool:
    '''
    Marks a header the best at its height by storing its hash in the
    best_chain table.

    Args:
        best (Header): the header we believe is best at the height

    Returns:
        (bool): True if the best_chain table was updated, False if the
            header was already marked best at its height
    '''
    # NB: do the no-op check BEFORE acquiring a cursor. The original
    #     acquired the cursor first and returned early without closing
    #     it, leaking a cursor on every redundant call.
    current_or_none = find_main_chain_block_at_height(best['height'])
    if current_or_none is not None:
        current = cast(Header, current_or_none)
        if current['hash'] == best['hash']:
            return False
    c = connection.get_cursor()
    try:
        c.execute(
            '''
            INSERT OR REPLACE INTO best_chain VALUES (
                :height,
                :hash)
            ''', {'height': best['height'], 'hash': best['hash']})
        connection.commit()
        return True
    finally:
        c.close()
def set_chain_tip() -> bool:
    '''
    Deletes best_chain entries above the height of the heaviest known
    header, effectively resetting the recorded chain tip to that header

    Returns:
        (bool): True on success
    '''
    c = connection.get_cursor()
    try:
        # NB: the subquery selects the height of a max-accumulated-work
        #     header; anything marked best above that height is stale
        c.execute('''
            DELETE FROM best_chain WHERE height >
            (SELECT height FROM HEADERS WHERE accumulated_work =
            (SELECT max(accumulated_work) FROM headers)
            LIMIT 1)
            ''')
        connection.commit()
        return True
    finally:
        c.close()
def find_main_chain_gap_ends() -> List[int]:
    '''
    Finds heights in the best_chain table whose immediate predecessor
    height is missing (i.e. the upper end of each gap)

    Returns:
        (List(int)): heights directly above a gap in best_chain
    '''
    cursor = connection.get_cursor()
    try:
        rows = cursor.execute('''
            SELECT a.height
            FROM best_chain a
            WHERE NOT EXISTS
            (SELECT b.height
            FROM best_chain b
            WHERE b.height = a.height-1)
            AND a.height >
            (SELECT MIN(c.height)
            FROM best_chain c)
            ''')
        return [row['height'] for row in rows]
    finally:
        cursor.close()
import sqlite3
from riemann import utils as rutils
from riemann.encoding import addresses as addr
from zeta import utils
from zeta.db import connection
from zeta.zeta_types import Outpoint, Prevout, PrevoutEntry, TransactionEntry
from typing import List, Optional
def prevout_from_row(row: sqlite3.Row) -> Prevout:
    '''Converts a sqlite3 row from the prevouts table into a Prevout dict.'''
    outpoint = Outpoint(
        tx_id=row['tx_id'],
        index=row['idx'])
    prevout: Prevout = {
        'outpoint': outpoint,
        'value': row['value'],
        'spent_at': row['spent_at'],
        'spent_by': row['spent_by'],
        'address': row['address']}
    return prevout
def _flatten_prevout(prevout: Prevout) -> PrevoutEntry:
    '''Converts a Prevout into the flat row shape stored in the DB.'''
    op = prevout['outpoint']
    # NB: the outpoint key is reversed-hex tx_id + 4-byte LE index, as hex
    flat_outpoint = '{tx_id}{index}'.format(
        tx_id=utils.reverse_hex(op['tx_id']),
        index=rutils.i2le_padded(op['index'], 4).hex())
    return {
        'outpoint': flat_outpoint,
        'tx_id': op['tx_id'],
        'idx': op['index'],
        'value': prevout['value'],
        'spent_at': prevout['spent_at'],
        'spent_by': prevout['spent_by'],
        'address': prevout['address']
    }
def validate_prevout(prevout: Prevout) -> bool:
    '''
    Validates the internal structure of a prevout

    Returns:
        (bool): True if the value is positive and the address parses,
            False otherwise
    '''
    try:
        if prevout['value'] <= 0:
            return False
        # raises if the address cannot be parsed
        addr.parse_hash(prevout['address'])
        return True
    except Exception:
        return False
def store_prevout(prevout: Prevout) -> bool:
    '''
    Stores a prevout in the database

    Args:
        prevout (dict): the prevout

    Returns:
        (bool): true if successful, false if error

    Raises:
        ValueError: if the prevout fails validation
    '''
    # NB: validate BEFORE acquiring a cursor. The original acquired the
    #     cursor first and raised without closing it, leaking a cursor
    #     on every invalid prevout.
    if not validate_prevout(prevout):
        raise ValueError('invalid prevout')
    c = connection.get_cursor()
    try:
        flattened = _flatten_prevout(prevout)
        c.execute(
            '''
            INSERT OR REPLACE INTO prevouts VALUES (
                :outpoint,
                :tx_id,
                :idx,
                :value,
                :spent_at,
                :spent_by,
                :address)
            ''',
            flattened)
        connection.commit()
        return True
    finally:
        c.close()
def batch_store_prevout(prevout_list: List[Prevout]) -> bool:
    '''
    Stores a batch of prevouts in the DB. Uses only one transaction

    Args:
        prevout_list (list(Prevout)): the prevouts to store

    Returns:
        (bool): True if prevouts were stored, false otherwise

    Raises:
        ValueError: if any prevout in the batch fails validation
    '''
    # NB: validate BEFORE acquiring a cursor. The original acquired the
    #     cursor first and raised without closing it, leaking a cursor
    #     whenever the batch contained an invalid prevout.
    for prevout in prevout_list:
        if not validate_prevout(prevout):
            raise ValueError('invalid prevout in batch')
    c = connection.get_cursor()
    try:
        flattened_list = [_flatten_prevout(p) for p in prevout_list]
        # executemany runs the insert loop in C instead of Python
        c.executemany(
            '''
            INSERT OR REPLACE INTO prevouts VALUES (
                :outpoint,
                :tx_id,
                :idx,
                :value,
                :spent_at,
                :spent_by,
                :address)
            ''',
            flattened_list)
        connection.commit()
        return True
    finally:
        c.close()
def find_by_address(address: str) -> List[Prevout]:
    '''
    Finds prevouts by associated address
    One address may have many prevouts

    Args:
        address (str): the address to look up

    Returns:
        (List(Prevout)): all prevouts paying that address
    '''
    cursor = connection.get_cursor()
    try:
        rows = cursor.execute(
            '''
            SELECT * FROM prevouts
            WHERE address = :address
            ''',
            {'address': address})
        return [prevout_from_row(row) for row in rows]
    finally:
        cursor.close()
def find_by_tx_id(tx_id: str) -> List[Prevout]:
    '''
    Finds prevouts created by the transaction with tx_id

    Args:
        tx_id (str): the creating transaction's id

    Returns:
        (List(Prevout)): the prevouts created by that tx
    '''
    cursor = connection.get_cursor()
    try:
        rows = cursor.execute(
            '''
            SELECT * from prevouts
            WHERE tx_id = :tx_id
            ''',
            {'tx_id': tx_id}
        )
        return [prevout_from_row(row) for row in rows]
    finally:
        cursor.close()
def find_by_outpoint(outpoint: Outpoint) -> Optional[Prevout]:
    '''
    Finds the prevout corresponding to an outpoint, if known

    Args:
        outpoint (Outpoint): the outpoint to look up

    Returns:
        Optional(Prevout): the prevout, or None if unknown
    '''
    cursor = connection.get_cursor()
    try:
        rows = cursor.execute(
            '''
            SELECT * from prevouts
            WHERE tx_id = :tx_id
            AND idx = :index
            ''',
            outpoint
        )
        # an outpoint identifies at most one prevout; first row wins
        for row in rows:
            return prevout_from_row(row)
        return None
    finally:
        cursor.close()
def find_all_unspents() -> List[Prevout]:
    '''
    Finds all prevouts not yet marked spent
    NB: a spent_at of -2 marks a prevout as unspent
        (-1 marks it as spent by an unconfirmed mempool tx)
    '''
    cursor = connection.get_cursor()
    try:
        rows = cursor.execute(
            '''
            SELECT * from prevouts
            WHERE spent_at = -2
            '''
        )
        return [prevout_from_row(row) for row in rows]
    finally:
        cursor.close()
def find_by_child(child_tx_id: str) -> List[Prevout]:
    '''
    Finds prevouts spent by a given (child) transaction

    Args:
        child_tx_id (str): the spending transaction's id

    Returns:
        (List(Prevout)): the prevouts that tx spends
    '''
    cursor = connection.get_cursor()
    try:
        rows = cursor.execute(
            '''
            SELECT * from prevouts
            WHERE spent_by = :child_tx_id
            ''',
            {'child_tx_id': child_tx_id}
        )
        return [prevout_from_row(row) for row in rows]
    finally:
        cursor.close()
def find_by_value_range(
        lower_value: int,
        upper_value: int,
        unspents_only: bool = True) -> List[Prevout]:
    '''
    Finds prevouts whose value falls within [lower_value, upper_value]

    Args:
        lower_value (int): minimum prevout value, inclusive
        upper_value (int): maximum prevout value, inclusive
        unspents_only (bool): if True, restrict results to unspent prevouts

    Returns:
        (List(Prevout)): the matching prevouts
    '''
    # NB: only the comparison operator is interpolated, never user data,
    #     so this string formatting is not an injection risk
    spent_operator = '==' if unspents_only else '!='
    query = '''
            SELECT * from prevouts
            WHERE value <= :upper_value
            AND value >= :lower_value
            AND spent_at {operator} -2
            '''.format(operator=spent_operator)
    cursor = connection.get_cursor()
    try:
        rows = cursor.execute(
            query,
            {'upper_value': upper_value,
             'lower_value': lower_value})
        return [prevout_from_row(row) for row in rows]
    finally:
        cursor.close()
def find_spent_by_mempool_tx() -> List[Prevout]:
    '''
    Finds prevouts that have been spent by a tx in the mempool
    Useful for checking if a tx can be replaced or has confirmed

    Returns:
        (List(Prevout)): prevouts spent by unconfirmed transactions
    '''
    c = connection.get_cursor()
    try:
        # NB: a spent_at of -1 encodes "spent by an unconfirmed
        #     (mempool) tx"; -2 encodes "unspent"
        res = [prevout_from_row(p) for p in c.execute(
            '''
            SELECT * from prevouts
            WHERE spent_at == -1
            ''')]
        return res
    finally:
        c.close()
def check_for_known_outpoints(
        outpoint_list: List[Outpoint]) -> List[Outpoint]:
    '''
    Finds all prevouts we know of from a list of outpoints
    Useful for checking whether the DB already knows about specific prevouts

    Args:
        outpoint_list (List(Outpoint)): the outpoints to look up

    Returns:
        (List(Outpoint)): the subset of outpoints already in the DB
    '''
    # NB: flatten each outpoint into the reversed-hex + LE-index key
    #     format used by the prevouts table
    flattened_list = [
        '{tx_id}{index}'.format(
            tx_id=utils.reverse_hex(o['tx_id']),
            index=rutils.i2le_padded(o['index'], 4).hex())
        for o in outpoint_list]
    cursor = connection.get_cursor()
    try:
        question_marks = ', '.join('?' for _ in flattened_list)
        rows = cursor.execute(
            '''
            SELECT tx_id, idx FROM prevouts
            WHERE outpoint IN ({question_marks})
            '''.format(question_marks=question_marks),
            flattened_list)
        return [Outpoint(tx_id=row['tx_id'], index=row['idx'])
                for row in rows]
    finally:
        cursor.close()
def find_all() -> List[Prevout]:
    '''
    Finds all prevouts

    Returns:
        (List(Prevout)): every prevout in the table
    '''
    cursor = connection.get_cursor()
    try:
        rows = cursor.execute(
            '''
            SELECT * FROM prevouts
            ''')
        return [prevout_from_row(row) for row in rows]
    finally:
        cursor.close()
def find_unconfirmed_creator() -> List[Prevout]:
    '''
    Finds outpoints where we have not confirmed their creator

    Returns:
        (List(Prevout)): prevouts whose creating tx is unknown or
            unconfirmed
    '''
    c = connection.get_cursor()
    try:
        # NB: two cases qualify:
        #     1) no tx_outs row exists for the outpoint at all, or
        #     2) a tx_outs row exists, but the tx that included it has
        #        not confirmed (confirmed_height < 0)
        res = [prevout_from_row(r) for r in c.execute(
            '''
            SELECT * FROM prevouts
            WHERE outpoint NOT IN (
                SELECT outpoint FROM tx_outs
            )
            OR outpoint IN (
                SELECT outpoint FROM tx_outs
                WHERE included_in IN (
                    SELECT tx_id FROM transactions
                    WHERE confirmed_height < 0))
            ''')]
        return res
    finally:
        c.close()
def delete_prevout(prevout: Prevout) -> bool:
    '''
    Deletes a prevout from the database

    Args:
        prevout (Prevout): the prevout to delete

    Returns:
        (bool): True on success
    '''
    flat_outpoint = '{tx_id}{index}'.format(
        tx_id=utils.reverse_hex(prevout['outpoint']['tx_id']),
        index=rutils.i2le_padded(prevout['outpoint']['index'], 4).hex())
    c = connection.get_cursor()
    try:
        c.execute(
            '''
            DELETE FROM prevouts WHERE outpoint = :outpoint
            ''', {'outpoint': flat_outpoint})
        # NB: commit was missing here; every other mutator in this module
        #     commits, and without it the delete may never be persisted
        connection.commit()
        return True
    finally:
        c.close()
def calculate_net_tx_value(t: TransactionEntry) -> int:
    '''calculate the change in value at tracked addresses from a tx'''
    cursor = connection.get_cursor()
    try:
        # every prevout the tx either created or spent
        touched = [prevout_from_row(row) for row in cursor.execute(
            '''
            SELECT * FROM prevouts
            WHERE tx_id == :tx_id
            OR spent_by == :tx_id
            ''', {'tx_id': t['tx_id']})]
        spent_total = sum(
            p['value'] for p in touched if p['spent_by'] == t['tx_id'])
        created_total = sum(
            p['value'] for p in touched
            if p['outpoint']['tx_id'] == t['tx_id'])
        return created_total - spent_total
    finally:
        cursor.close()
import os
from zeta.db import connection
from zeta.zeta_types import Header
from typing import Dict, List
# NB: network selection is read once at import time; defaults to mainnet
network: str = os.environ.get('ZETA_NETWORK', 'bitcoin_main')
# Hard-coded trusted headers, keyed by network name.
# Each entry is a fully-parsed Header dict. 'accumulated_work' starts at 0
# for every checkpoint -- presumably work is re-accumulated from the
# checkpoint forward; confirm against the header sync logic.
CHECKPOINTS: Dict[str, List[Header]] = {
    'bitcoin_main': [
        # the mainnet genesis block (height 0, all-zero prev_block)
        {
            'hash': '000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f',
            'version': 1,
            'prev_block': '0000000000000000000000000000000000000000000000000000000000000000',
            'merkle_root': '3ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4a',
            'timestamp': 1231006505,
            'nbits': 'ffff001d',
            'nonce': '1dac2b7c',
            'difficulty': 1,
            'hex': '0100000000000000000000000000000000000000000000000000000000000000000000003ba3edfd7a7b12b27ac72c3e67768f617fc81bc3888a51323a9fb8aa4b1e5e4a29ab5f49ffff001d1dac2b7c',
            'height': 0,
            'accumulated_work': 0
        },
        # mainnet block at height 552955
        {
            'hash': '00000000000000000029f5e855578d7a81f4501f38093c46cb88a47664bf3c0e',
            'version': 549453824,
            'prev_block': '0000000000000000001e6525727cc0a729b1e928dff16db10d789176b59dd3eb',
            'merkle_root': '19a0368be5061871be3929e11b0e13de2c5f34e45310ca2798ebe14783413252',
            'timestamp': 1544230162,
            'nbits': '7cd93117',
            'nonce': '5507350b',
            'difficulty': 5646403851534,
            'hex': '0000c020ebd39db57691780db16df1df28e9b129a7c07c7225651e00000000000000000019a0368be5061871be3929e11b0e13de2c5f34e45310ca2798ebe1478341325212150b5c7cd931175507350b',
            'height': 552955,
            'accumulated_work': 0
        },
        # mainnet block at height 420000
        {
            'hash': '000000000000000002cce816c0ab2c5c269cb081896b7dcb34b8422d6b74ffa1',
            'version': 536870912,
            'prev_block': '000000000000000003035bc31911d3eea46c8a23b36d6d558141d1d09cc960cf',
            'merkle_root': 'fa0f9ea6c329b99b6d17576b73bc781267e566430aee747205b0acbca5238302',
            'timestamp': 1468082773,
            'nbits': 'fd260518',
            'nonce': 'b432bd82',
            'difficulty': 213398925331,
            'hex': '00000020cf60c99cd0d14181556d6db3238a6ca4eed31119c35b03030000000000000000fa0f9ea6c329b99b6d17576b73bc781267e566430aee747205b0acbca5238302552a8157fd260518b432bd82',
            'height': 420000,
            'accumulated_work': 0
        }],
    'bitcoin_test': [
        # testnet block at height 1400000
        {
            'hash': '000000000000fce208da3e3b8afcc369835926caa44044e9c2f0caa48c8eba0f',
            'version': 536870912,
            'prev_block': '00000000000317883bdb2a052dc8370a43355aef82aec7ac88ec2bb300bb5896',
            'merkle_root': '32dfad3bd94b176f500f15bdf242b5a524d5faeb12b3431bbc0cd3980eb8975e',
            'timestamp': 1534969326,
            'nbits': '9c61031b',
            'nonce': '675abfd0',
            'difficulty': 19381,
            'hex': '000000209658bb00b32bec88acc7ae82ef5a35430a37c82d052adb3b881703000000000032dfad3bd94b176f500f15bdf242b5a524d5faeb12b3431bbc0cd3980eb8975eeec57d5b9c61031b675abfd0',
            'height': 1400000,
            'accumulated_work': 0
        }]
}
import sqlite3
from riemann import utils as rutils
from riemann.encoding import addresses as addr
from riemann.script import serialization as script_ser
from zeta import crypto
from zeta.db import connection
from zeta.zeta_types import AddressEntry
from typing import cast, List, Union, Optional
def address_from_row(row: sqlite3.Row) -> AddressEntry:
    '''
    Turns a row object into an AddressEntry dict
    '''
    # NB: pubkeys are re-derived from the stored script, not persisted
    entry: AddressEntry = {
        'address': row['address'],
        'script': row['script'],
        'script_pubkeys': pubkeys_from_script(row['script'])
    }
    return entry
def validate_address(address: AddressEntry) -> bool:
    '''
    Validates the address data structure

    Returns:
        (bool): True if the entry is internally consistent, else False
    '''
    try:
        address_hash = addr.parse_hash(address['address'])
        script = address['script']
        # an empty script is always acceptable
        if script == b'':
            return True
        # the stored pubkeys must match what the script actually contains
        if address['script_pubkeys'] != pubkeys_from_script(script):
            return False
        # the address hash must commit to the script
        if address_hash == rutils.sha256(script):  # p2wsh
            return True
        if address_hash == rutils.hash160(script):  # p2sh
            return True
    except (ValueError, TypeError, KeyError):
        pass
    return False
def pubkeys_from_script(script: bytes) -> List[str]:
    '''
    guess-parses pubkeys from a serialized bitcoin script
    '''
    deserialized = script_ser.deserialize(script)
    return [token for token in deserialized.split()
            if crypto.is_pubkey(token)]
def store_address(address: Union[str, AddressEntry]) -> bool:
    '''
    Stores an address in the db
    Accepts either a bare address string or a full AddressEntry

    Args:
        address (str or AddressEntry): the address to store

    Returns:
        (bool): True if successful

    Raises:
        ValueError: if the entry fails validation
    '''
    a: AddressEntry
    # NB: isinstance rather than `type(...) is str`, so str subclasses
    #     are wrapped correctly instead of falling through to the
    #     AddressEntry branch and failing validation
    if isinstance(address, str):
        a = {
            'address': address,
            'script': b'',
            'script_pubkeys': []
        }
    else:
        a = cast(AddressEntry, address)
    if not validate_address(a):
        raise ValueError('invalid address entry')
    c = connection.get_cursor()
    try:
        c.execute(
            '''
            INSERT OR REPLACE INTO addresses VALUES (
                :address,
                :script)
            ''',
            a)
        # NB: we track what pubkeys show up in what scripts so we can search
        for pubkey in a['script_pubkeys']:
            c.execute(
                '''
                INSERT OR REPLACE INTO pubkey_to_script VALUES (
                    :pubkey,
                    :script)
                ''',
                {'pubkey': pubkey, 'script': a['script']})
        connection.commit()
        return True
    finally:
        c.close()
def find_associated_pubkeys(script: bytes) -> List[str]:
    '''
    looks up pubkeys associated with a script
    somewhat redundant with pubkeys_from_script
    '''
    cursor = connection.get_cursor()
    try:
        rows = cursor.execute(
            '''
            SELECT pubkey FROM pubkey_to_script
            WHERE script = :script
            ''',
            {'script': script})
        return [row['pubkey'] for row in rows]
    finally:
        cursor.close()
def find_by_address(address: str) -> Optional[AddressEntry]:
    '''
    Finds an AddressEntry for the address if it exists, returns None otherwise
    '''
    cursor = connection.get_cursor()
    try:
        rows = cursor.execute(
            '''
            SELECT * from addresses
            WHERE address = :address
            ''',
            {'address': address})
        # address is unique in the table; first row wins
        for row in rows:
            return address_from_row(row)
        return None
    finally:
        cursor.close()
def find_by_script(script: bytes) -> List[AddressEntry]:
    '''
    Finds all AddressEntries with the corresponding Script
    '''
    cursor = connection.get_cursor()
    try:
        rows = cursor.execute(
            '''
            SELECT * FROM addresses
            WHERE script = :script
            ''',
            {'script': script})
        return [address_from_row(row) for row in rows]
    finally:
        cursor.close()
def find_by_pubkey(pubkey: str) -> List[AddressEntry]:
    '''
    Finds all AddressEntries whose script includes the specified pubkey
    '''
    cursor = connection.get_cursor()
    try:
        rows = cursor.execute(
            '''
            SELECT * FROM addresses
            WHERE script IN
                (SELECT script FROM pubkey_to_script
                WHERE pubkey = :pubkey)
            ''',
            {'pubkey': pubkey})
        return [address_from_row(row) for row in rows]
    finally:
        cursor.close()
def find_all_addresses() -> List[str]:
    '''
    Finds all addresses that we're tracking

    Returns:
        (List(str)): every address in the addresses table
    '''
    cursor = connection.get_cursor()
    try:
        rows = cursor.execute(
            '''
            SELECT address FROM addresses
            '''
        )
        return [row['address'] for row in rows]
    finally:
        cursor.close()
r"""
**Riemann**, a pure-Python package for computing :math:`n`-dimensional Riemann sums.
"""
from decimal import Decimal
import functools
import inspect
import itertools
from numbers import Number
import operator
import typing
@typing.runtime_checkable
class FunctionSRV(typing.Protocol):
    r"""
    Callable type that represents a function of several real variables.

    Inherits from :class:`typing.Protocol`.

    .. math::

        f: \mathbb{R}^{n} \rightarrow \mathbb{R}

    Instances of this class are analogous to the following function:

    .. code-block:: python

        >>> from numbers import Number
        >>> def function(*x: Number) -> Number: ...

    The callable object takes any number of :class:`numbers.Number` objects and returns a single
    :class:`numbers.Number` object.

    This class uses the :func:`typing.runtime_checkable` decorator, so :func:`isinstance` can be
    used to determine whether a callable object is an instance of this class:

    .. doctest:: python

        >>> from numbers import Number
        >>> from riemann import FunctionSRV
        >>> def function(*x: Number) -> Number: ...
        >>> isinstance(function, FunctionSRV)
        True
        >>> def f():
        ...     return 0
        >>> isinstance(f, FunctionSRV)
        True
        >>> def g(x):
        ...     return x
        >>> isinstance(g, FunctionSRV)
        True
        >>> def h(x, y):
        ...     return x * y
        >>> isinstance(h, FunctionSRV)
        True
        >>> def i(x, y, z):
        ...     return x ** 2 + y ** 2 + z ** 2
        >>> isinstance(i, FunctionSRV)
        True
    """
    def __call__(self, *args: Number) -> Number: ...
class RSumRule:
    r"""
    Base class for Riemann sum rules. A rule selects the sample point
    :math:`x_{i}^{*}` used within each partition of the interval.
    """

    @classmethod
    def value(cls, lower: Decimal, length: Decimal) -> Decimal:
        r"""
        Compute the sample point for one partition.

        :param lower: The lower bound of the interval of summation
        :param length: The length of each partition of the interval of summation
        :return: The value of :math:`x_{i}^{*}`
        :raise NotImplementedError: Always; subclasses must override
        """
        raise NotImplementedError
class Left(RSumRule):
    r"""
    Left rule: the sample point is the left endpoint of each partition.
    """

    @classmethod
    def value(cls, lower: Decimal, length: Decimal) -> Decimal:
        r"""
        .. math::

            x_{i}^{*} = x_{i-1} = a + i \Delta x

        :param lower: The lower bound of the interval of summation
        :param length: The length of each partition of the interval of summation
        :return: The value of :math:`x_{i}^{*}`
        """
        # the left endpoint is the partition's lower bound itself
        return lower
class Middle(RSumRule):
    r"""
    Midpoint rule: the sample point is the midpoint of each partition.
    """

    @classmethod
    def value(cls, lower: Decimal, length: Decimal) -> Decimal:
        r"""
        .. math::

            x_{i}^{*} = \frac{x_{i-1} + x_{i}}{2} = a + (i + \frac{1}{2}) \Delta x

        :param lower: The lower bound of the interval of summation
        :param length: The length of each partition of the interval of summation
        :return: The value of :math:`x_{i}^{*}`
        """
        half_length = length / 2
        return lower + half_length
class Right(RSumRule):
    r"""
    Right rule: the sample point is the right endpoint of each partition.
    """

    @classmethod
    def value(cls, lower: Decimal, length: Decimal) -> Decimal:
        r"""
        .. math::

            x_{i}^{*} = x_{i} = a + (i + 1) \Delta x

        :param lower: The lower bound of the interval of summation
        :param length: The length of each partition of the interval of summation
        :return: The value of :math:`x_{i}^{*}`
        """
        upper = lower + length
        return upper
class Interval:
    """
    Represents the closed interval over which a Riemann sum is computed.

    :param lower: The lower bound of the interval
    :param upper: The upper bound of the interval
    :param npartitions: The number of partitions dividing the interval
    """
    def __init__(self, lower: Number, upper: Number, npartitions: int):
        # NB: floats are routed through str() so Decimal captures the
        # printed value rather than the exact binary representation
        self.lower = Decimal(str(lower)) if isinstance(lower, float) else Decimal(lower)
        self.upper = Decimal(str(upper)) if isinstance(upper, float) else Decimal(upper)
        self.npartitions = npartitions
        self.length = (self.upper - self.lower) / self.npartitions

    def __repr__(self):
        name = type(self).__name__
        return f"{name}(lower={self.lower}, upper={self.upper}, npartitions={self.npartitions})"

    def partitions(self, rule: RSumRule) -> typing.Generator[Decimal, None, None]:
        """
        :param rule: The rule used to compute the Riemann sum
        :return: A generator of the values of each partition of the interval
        """
        left_edge = self.lower
        for _ in range(self.npartitions):
            yield rule.value(left_edge, self.length)
            left_edge += self.length
def riemann_sum(
    function: FunctionSRV,
    intervals: typing.Sequence[Interval],
    rules: typing.Sequence[typing.Type[RSumRule]]
):
    r"""
    Computes the Riemann sum of a function of several variables over a closed
    multidimensional interval using specified Riemann sum rules.

    The number of parameters of ``function``, the number of elements in
    ``intervals``, and the number of elements in ``rules`` must all be equal.
    The order of ``intervals`` and ``rules`` is significant: the first
    parameter of ``function`` corresponds to ``intervals[0]`` and ``rules[0]``,
    the second to ``intervals[1]`` and ``rules[1]``, etc.

    :param function: A callable object representing a function of several real variables
    :param intervals: The closed intervals over which the Riemann sum is calculated
    :param rules: The rules to use for calculating the Riemann sum
    :return: The value of the Riemann sum over the indicated intervals using the indicated rules
    :raise ValueError: The ``function`` parameter list, ``intervals``, and ``rules`` are not equal in length
    """
    ndimensions = len(inspect.signature(function).parameters)

    if len(intervals) != ndimensions:
        # NB: fixed typo 'funcion' in the original error message
        raise ValueError(
            "The length of 'intervals' must equal the length of the parameter list of 'function'"
        )
    if len(rules) != ndimensions:
        raise ValueError(
            "The length of 'rules' must equal the length of the parameter list of 'function'"
        )

    # n-dimensional volume of a single cell of the partition grid
    delta = functools.reduce(operator.mul, (x.length for x in intervals))
    # one generator of sample points per dimension
    values = (x.partitions(r) for x, r in zip(intervals, rules))

    return (sum(function(*v) for v in itertools.product(*values)) * delta).normalize()
def trapezoidal_rule(
    function: FunctionSRV, intervals: typing.Sequence[Interval]
):
    r"""
    Computes the Riemann sum of a function of several variables over a closed
    multidimensional interval using the trapezoidal rule, by averaging the
    Riemann sums taken at every combination of left/right endpoint rules.

    This function utilizes the functionality of :py:func:`riemann_sum` to compute the Riemann sum.

    :param function: A callable object representing a function of several real variables
    :param intervals: The closed intervals over which the Riemann sum is calculated
    :return: The value of the Riemann sum over the indicated intervals using the trapezoidal rule
    """
    dimension = len(intervals)
    corner_rules = itertools.product((Left, Right), repeat=dimension)
    total = sum(riemann_sum(function, intervals, r) for r in corner_rules)
    return (total / Decimal(2) ** dimension).normalize()
### how to enable ncnn vulkan capability
follow [the build and install instruction](https://github.com/Tencent/ncnn/blob/master/docs/how-to-build/how-to-build.md)
make sure you have installed vulkan sdk from [lunarg vulkan sdk website](https://vulkan.lunarg.com/sdk/home)
Usually, you can enable the vulkan compute inference feature by adding only one line of code to your application.
```cpp
// enable vulkan compute feature before loading
ncnn::Net net;
net.opt.use_vulkan_compute = 1;
```
### does my graphics device support vulkan
Some platforms have been tested and known working. In theory, if your platform support vulkan api, either 1.0 or 1.1, it shall work.
* Y = known work
* ? = shall work, not confirmed
* / = not applied
| |windows|linux|android|mac|ios|
|---|---|---|---|---|---|
|intel|Y|Y|?|?|/|
|amd|Y|Y|/|?|/|
|nvidia|Y|Y|?|/|/|
|qcom|/|/|Y|/|/|
|apple|/|/|/|Y|Y|
|arm|/|?|Y|/|/|
You can search [the vulkan database](https://vulkan.gpuinfo.org) to see if your device supports vulkan.
Some old buggy drivers may produce wrong result, that are blacklisted in ncnn and treated as non-vulkan capable device.
You could check if your device and driver have this issue with [my conformance test here](vulkan-conformance-test).
Most of these systems are android with version lower than 8.1.
### why using vulkan over cuda/opencl/metal
In the beginning, I had no GPGPU programming experience, and I had to learn one.
Vulkan is considered more portable: it is well supported by vendors and is the cross-platform low-overhead graphics API. By contrast, CUDA is only available on NVIDIA devices, Metal is only available on macOS and iOS, while loading the OpenCL library is banned on Android 7.0+ and does not work on iOS.
### I got errors like "vkCreateComputePipelines failed -1000012000" or random stalls or crashes
Upgrade your vulkan driver.
[intel https://downloadcenter.intel.com/product/80939/Graphics-Drivers](https://downloadcenter.intel.com/product/80939/Graphics-Drivers)
[amd https://www.amd.com/en/support](https://www.amd.com/en/support)
[nvidia https://www.nvidia.com/Download/index.aspx](https://www.nvidia.com/Download/index.aspx)
### how to use ncnn vulkan on android
minimum android ndk version: android-ndk-r18b
minimum sdk platform api version: android-24
link your jni project with libvulkan.so
[The squeezencnn example](https://github.com/Tencent/ncnn/tree/master/examples/squeezencnn) have equipped gpu inference, you could take it as reference.
### how to use ncnn vulkan on ios
setup vulkan sdk (https://vulkan.lunarg.com/sdk/home#mac)
metal only works on real device with arm64 cpu (iPhone 5s and later)
link your project with MoltenVK framework and Metal
### what about the layers without vulkan support
These layers have vulkan support currently
AbsVal, BatchNorm, BinaryOp, Cast, Clip, Concat, Convolution, ConvolutionDepthWise, Crop, Deconvolution, DeconvolutionDepthWise, Dropout, Eltwise, Flatten, HardSigmoid, InnerProduct, Interp, LRN, Packing, Padding, Permute, Pooling(pad SAME not supported), PReLU, PriorBox, ReLU, Reorg, Reshape, Scale, ShuffleChannel, Sigmoid, Softmax, TanH, UnaryOp
For these layers without vulkan support, ncnn inference engine will automatically fallback to cpu path.
Thus, it is usually not a serious issue if your network only has some special head layers like SSD or YOLO. All examples in ncnn are known working properly with vulkan enabled.
### my model runs slower on gpu than cpu
The current vulkan inference implementation is far from the preferred state. Many handful optimization techniques are planned, such as winograd convolution, operator fusion, fp16 storage and arithmetic etc.
It is common that your model runs slower on gpu than cpu on arm devices like mobile phones, since we have quite good arm optimization in ncnn ;)
### vulkan device not found / extra high cpu utility while vulkan is enabled on nvidia gpu
There are several reasons that could lead to this outcome. First, please check your driver status with `nvidia-smi`. If you have correctly installed your driver, you should see something like this:
```bash
$ nvidia-smi
Sat Mar 06 19:53:16 2021
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 451.48 Driver Version: 451.48 CUDA Version: 11.0 |
|-------------------------------+----------------------+----------------------+
| GPU Name TCC/WDDM | Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
|===============================+======================+======================|
| 0 GeForce GTX 1060 WDDM | 00000000:02:00.0 Off | N/A |
| N/A 31C P8 5W / N/A | 90MiB / 6144MiB | 0% Default |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=============================================================================|
| No running processes found |
+-----------------------------------------------------------------------------+
```
If `nvidia-smi` crashes or cannot be found, please reinstall your graphics driver.
If ncnn *is* utilizing the Tesla GPU, you can see your program in the `Processes` block at the bottom. In that case, it's likely some operators are not yet supported in Vulkan, and have fallbacked to the CPU, thus leading to a low utilization of the GPU.
If you *couldn't* find your process running, please check the active driver model, which can be found to the right of your device name. For GeForce and Titan GPUs, the default driver model is WDDM (Windows Desktop Driver Model), which supports both rendering graphics as well as computing. But for Tesla GPUs, without configuration, the driver model is defaulted to TCC ([Tesla Computing Cluster](https://docs.nvidia.com/gameworks/content/developertools/desktop/tesla_compute_cluster.htm)). NVIDIA's TCC driver does not support Vulkan, so you need to use the following command to set the driver model back to WDDM, to use Vulkan:
```bash
$ nvidia-smi -g 0 -dm 0
```
The number following `-g` is the GPU ID (which can be found to the left of your device name in `nvidia-smi` output); and `-dm` stands for driver model, 0 refers to WDDM and 1 means TCC.
| /rife-ncnn-vulkan-python-1.2.1.tar.gz/rife-ncnn-vulkan-python-1.2.1/rife_ncnn_vulkan_python/rife-ncnn-vulkan/src/ncnn/docs/how-to-use-and-FAQ/FAQ-ncnn-vulkan.md | 0.44553 | 0.794185 | FAQ-ncnn-vulkan.md | pypi |
### caffemodel should be row-major
`caffe2ncnn` tool assumes the caffemodel is row-major (produced by c++ caffe train command).
The kernel 3x3 weights should be stored as
```
a b c
d e f
g h i
```
However, matlab caffe produced col-major caffemodel.
You have to transpose all the kernel weights by yourself or re-training using c++ caffe train command.
Besides, you may interest in https://github.com/conanhujinming/matcaffe2caffe
### check input is RGB or BGR
If your caffemodel is trained using c++ caffe and opencv, then the input image should be BGR order.
If your model is trained using matlab caffe or pytorch or mxnet or tensorflow, the input image would probably be RGB order.
The channel order can be changed on-the-fly through proper pixel type enum
```
// construct RGB blob from rgb image
ncnn::Mat in_rgb = ncnn::Mat::from_pixels(rgb_data, ncnn::Mat::PIXEL_RGB, w, h);
// construct BGR blob from bgr image
ncnn::Mat in_bgr = ncnn::Mat::from_pixels(bgr_data, ncnn::Mat::PIXEL_BGR, w, h);
// construct BGR blob from rgb image
ncnn::Mat in_bgr = ncnn::Mat::from_pixels(rgb_data, ncnn::Mat::PIXEL_RGB2BGR, w, h);
// construct RGB blob from bgr image
ncnn::Mat in_rgb = ncnn::Mat::from_pixels(bgr_data, ncnn::Mat::PIXEL_BGR2RGB, w, h);
```
### image decoding
JPEG (`.jpg`, `.jpeg`) is lossy compression, so people may get different pixel values for the same image at the same position.
`.bmp` images are recommended instead.
### interpolation / resizing
There are several image resizing methods, which may generate different result for same input image.
Even we specify same interpolation method, different frameworks/libraries and their various versions may also introduce difference.
A good practice is to feed the same size image as the input layer expects, e.g. read a 224x224 bmp image when the input layer needs 224x224 size.
### Mat::from_pixels/from_pixels_resize assume that the pixel data is continuous
You shall pass continuous pixel buffer to from_pixels family.
If your image is an opencv submat from an image roi, call clone() to get a continuous one.
```
cv::Mat image;// the image
cv::Rect facerect;// the face rectangle
cv::Mat faceimage = image(facerect).clone();// get a continuous sub image
ncnn::Mat in = ncnn::Mat::from_pixels(faceimage.data, ncnn::Mat::PIXEL_BGR, faceimage.cols, faceimage.rows);
```
### pre process
Apply pre process according to your training configuration
Different model has different pre process config, you may find the following transform config in Data layer section
```
transform_param {
mean_value: 103.94
mean_value: 116.78
mean_value: 123.68
scale: 0.017
}
```
Then the corresponding code for ncnn pre process is
```cpp
const float mean_vals[3] = { 103.94f, 116.78f, 123.68f };
const float norm_vals[3] = { 0.017f, 0.017f, 0.017f };
in.substract_mean_normalize(mean_vals, norm_vals);
```
Mean file is not supported currently
So you have to pre process the input data by yourself (use opencv or something)
```
transform_param {
mean_file: "imagenet_mean.binaryproto"
}
```
For pytorch or mxnet-gluon
```python
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
```
Then the corresponding code for ncnn pre process is
```cpp
// R' = (R / 255 - 0.485) / 0.229 = (R - 0.485 * 255) / 0.229 / 255
// G' = (G / 255 - 0.456) / 0.224 = (G - 0.456 * 255) / 0.224 / 255
// B' = (B / 255 - 0.406) / 0.225 = (B - 0.406 * 255) / 0.225 / 255
const float mean_vals[3] = {0.485f*255.f, 0.456f*255.f, 0.406f*255.f};
const float norm_vals[3] = {1/0.229f/255.f, 1/0.224f/255.f, 1/0.225f/255.f};
in.substract_mean_normalize(mean_vals, norm_vals);
```
### use the desired blob
The blob names for input and extract differ among models.
For example, squeezenet v1.1 uses "data" as input blob and "prob" as output blob while mobilenet-ssd uses "data" as input blob and "detection_out" as output blob.
Some models may need multiple input or produce multiple output.
```cpp
ncnn::Extractor ex = net.create_extractor();
ex.input("data", in);// change "data" to yours
ex.input("mask", mask);// change "mask" to yours
ex.extract("output1", out1);// change "output1" to yours
ex.extract("output2", out2);// change "output2" to yours
```
### blob may have channel gap
Each channel pointer is aligned by 128bit in ncnn Mat structure.
blob may have gaps between channels if (width x height) cannot be divided exactly by 4
Prefer using ncnn::Mat::from_pixels or ncnn::Mat::from_pixels_resize for constructing input blob from image data
If you do need a continuous blob buffer, reshape the output.
```cpp
// out is the output blob extracted
ncnn::Mat flattened_out = out.reshape(out.w * out.h * out.c);
// plain array, C-H-W
const float* outptr = flattened_out;
```
### create new Extractor for each image
The `ncnn::Extractor` object is stateful; if you reuse it for different inputs, you will always get exactly the same result cached inside.
Always create new Extractor to process images in loop unless you do know how the stateful Extractor works.
```cpp
for (int i=0; i<count; i++)
{
// always create Extractor
// it's cheap and almost instantly !
ncnn::Extractor ex = net.create_extractor();
// use
ex.input(your_data[i]);
}
```
### use proper loading api
If you want to load plain param file buffer, you shall use Net::load_param_mem instead of Net::load_param.
For more information about the ncnn model load api, see [ncnn-load-model](ncnn-load-model)
```cpp
ncnn::Net net;
// param_buffer is the content buffer of XYZ.param file
net.load_param_mem(param_buffer);
```
| /rife-ncnn-vulkan-python-1.2.1.tar.gz/rife-ncnn-vulkan-python-1.2.1/rife_ncnn_vulkan_python/rife-ncnn-vulkan/src/ncnn/docs/how-to-use-and-FAQ/FAQ-ncnn-produce-wrong-result.md | 0.659624 | 0.885928 | FAQ-ncnn-produce-wrong-result.md | pypi |
# implement elementwise addition with/without broadcast using BinaryOp operation
* input must be fp32 storage without packing
* output is expected to be fp32 storage without packing
```cpp
void binary_add(const ncnn::Mat& a, const ncnn::Mat& b, ncnn::Mat& c)
{
ncnn::Option opt;
opt.num_threads = 2;
opt.use_fp16_storage = false;
opt.use_packing_layout = false;
ncnn::Layer* op = ncnn::create_layer("BinaryOp");
// set param
ncnn::ParamDict pd;
pd.set(0, 0);// op_type
op->load_param(pd);
op->create_pipeline(opt);
// forward
std::vector<ncnn::Mat> bottoms(2);
bottoms[0] = a;
bottoms[1] = b;
std::vector<ncnn::Mat> tops(1);
op->forward(bottoms, tops, opt);
c = tops[0];
op->destroy_pipeline(opt);
delete op;
}
```
# implement 3x3 box blur on three channel image using ConvolutionDepthWise operation
* input must be fp32 storage without packing
* output is expected to be fp32 storage without packing
```cpp
void convolution_3x3_boxblur_RGB(const ncnn::Mat& rgb, ncnn::Mat& out)
{
ncnn::Option opt;
opt.num_threads = 2;
opt.use_fp16_storage = false;
opt.use_packing_layout = false;
ncnn::Layer* op = ncnn::create_layer("ConvolutionDepthWise");
// set param
ncnn::ParamDict pd;
pd.set(0, 3);// num_output
pd.set(1, 3);// kernel_w
pd.set(5, 0);// bias_term
pd.set(6, 3*3*3);// weight_data_size
pd.set(7, 3);// group
op->load_param(pd);
// set weights
ncnn::Mat weights[1];
weights[0].create(3*3*3);// weight_data
for (int i=0; i<3*3*3; i++)
{
weights[0][i] = 1.f / 9;
}
op->load_model(ncnn::ModelBinFromMatArray(weights));
op->create_pipeline(opt);
// forward
op->forward(rgb, out, opt);
op->destroy_pipeline(opt);
delete op;
}
```
# transpose Mat, chw to cwh
* input must be fp32 storage with/without packing
* output is expected to be fp32 storage packed
```cpp
void transpose(const ncnn::Mat& in, ncnn::Mat& out)
{
ncnn::Option opt;
opt.num_threads = 2;
opt.use_fp16_storage = false;
opt.use_packing_layout = true;
ncnn::Layer* op = ncnn::create_layer("Permute");
// set param
ncnn::ParamDict pd;
pd.set(0, 1);// order_type
op->load_param(pd);
op->create_pipeline(opt);
ncnn::Mat in_packed = in;
{
// resolve dst_elempack
int dims = in.dims;
int elemcount = 0;
if (dims == 1) elemcount = in.elempack * in.w;
if (dims == 2) elemcount = in.elempack * in.h;
if (dims == 3) elemcount = in.elempack * in.c;
int dst_elempack = 1;
if (op->support_packing)
{
if (elemcount % 8 == 0 && (ncnn::cpu_support_x86_avx2() || ncnn::cpu_support_x86_avx()))
dst_elempack = 8;
else if (elemcount % 4 == 0)
dst_elempack = 4;
}
if (in.elempack != dst_elempack)
{
convert_packing(in, in_packed, dst_elempack, opt);
}
}
// forward
op->forward(in_packed, out, opt);
op->destroy_pipeline(opt);
delete op;
}
```
# apply instance normalization
// x = (x - mean) / sqrt(var)
* input can be fp32/fp16 storage with/without packing
* output is expected to be fp16 storage packed when supported, or fp32 storage packed otherwise
```cpp
void normalize(const ncnn::Mat& in, ncnn::Mat& out)
{
ncnn::Option opt;
opt.num_threads = 2;
opt.use_fp16_storage = true;
opt.use_packing_layout = true;
ncnn::Layer* op = ncnn::create_layer("InstanceNorm");
// set param
ncnn::ParamDict pd;
pd.set(0, in.c);// channels
pd.set(1, 0.f);// eps
op->load_param(pd);
// set weights
ncnn::Mat weights[2];
weights[0].create(in.c);// gamma_data
weights[1].create(in.c);// beta_data
weights[0].fill(1.f);
weights[1].fill(0.f);
op->load_model(ncnn::ModelBinFromMatArray(weights));
op->create_pipeline(opt);
ncnn::Mat in_fp16 = in;
if (in.elembits() == 32 && op->support_fp16_storage)
{
cast_float32_to_float16(in, in_fp16, opt);
}
if (in.elembits() == 16 && !op->support_fp16_storage)
{
cast_float16_to_float32(in, in_fp16, opt);
}
ncnn::Mat in_fp16_packed = in_fp16;
{
// resolve dst_elempack
int dims = in_fp16.dims;
int elemcount = 0;
if (dims == 1) elemcount = in_fp16.elempack * in_fp16.w;
if (dims == 2) elemcount = in_fp16.elempack * in_fp16.h;
if (dims == 3) elemcount = in_fp16.elempack * in_fp16.c;
int dst_elempack = 1;
if (op->support_packing)
{
if (elemcount % 8 == 0 && (ncnn::cpu_support_x86_avx2() || ncnn::cpu_support_x86_avx()))
dst_elempack = 8;
else if (elemcount % 4 == 0)
dst_elempack = 4;
}
if (in_fp16.elempack != dst_elempack)
{
convert_packing(in_fp16, in_fp16_packed, dst_elempack, opt);
}
}
// forward
op->forward(in_fp16_packed, out, opt);
op->destroy_pipeline(opt);
delete op;
}
```
# cpu -> gpu -> forward -> gpu -> cpu
```cpp
ncnn::VulkanDevice* vkdev = ncnn::get_gpu_device();
ncnn::VkAllocator* blob_vkallocator = vkdev->acquire_blob_allocator();
ncnn::VkAllocator* staging_vkallocator = vkdev->acquire_staging_allocator();
ncnn::VkWeightAllocator* weight_vkallocator = new ncnn::VkWeightAllocator(vkdev);
ncnn::VkWeightStagingAllocator* weight_staging_vkallocator = new ncnn::VkWeightStagingAllocator(vkdev);
// create layer
ncnn::Layer* convolution = ncnn::create_layer("Convolution");
convolution->vkdev = vkdev;
// set option
ncnn::Option opt;
opt.num_threads = 4;
opt.use_vulkan_compute = true;
opt.blob_vkallocator = blob_vkallocator;
opt.workspace_vkallocator = blob_vkallocator;
opt.staging_vkallocator = staging_vkallocator;
// load param
{
ncnn::ParamDict pd;
pd.set(0, outch);
pd.set(1, ksize);
pd.set(6, outch*inch*ksize*ksize);
pd.use_vulkan_compute = 1;
convolution->load_param(pd);
}
// load model
{
ncnn::Mat weights[2];
weights[0] = random_mat(outch*inch*ksize*ksize);
weights[1] = random_mat(outch);
ncnn::ModelBinFromMatArray mb(weights);
convolution->load_model(mb);
}
// create pipeline
convolution->create_pipeline(opt);
// upload model
{
ncnn::VkTransfer cmd(vkdev);
ncnn::Option opt_upload = opt;
opt_upload.blob_vkallocator = weight_vkallocator;
opt_upload.workspace_vkallocator = weight_vkallocator;
opt_upload.staging_vkallocator = weight_staging_vkallocator;
convolution->upload_model(cmd, opt_upload);
cmd.submit_and_wait();
}
ncnn::Mat bottom = random_mat(w, h, inch);
ncnn::Mat top;
// forward
{
ncnn::VkCompute cmd(vkdev);
ncnn::VkMat bottom_gpu;
cmd.record_upload(bottom, bottom_gpu, opt);
ncnn::VkMat top_gpu;
convolution->forward(bottom_gpu, top_gpu, cmd, opt);
cmd.record_download(top_gpu, top, opt);
cmd.submit_and_wait();
}
convolution->destroy_pipeline(opt);
delete convolution;
vkdev->reclaim_blob_allocator(blob_vkallocator);
vkdev->reclaim_staging_allocator(staging_vkallocator);
weight_vkallocator->clear();
weight_staging_vkallocator->clear();
delete weight_vkallocator;
delete weight_staging_vkallocator;
```
| /rife-ncnn-vulkan-python-1.2.1.tar.gz/rife-ncnn-vulkan-python-1.2.1/rife_ncnn_vulkan_python/rife-ncnn-vulkan/src/ncnn/docs/developer-guide/low-level-operation-api.md | 0.559892 | 0.655384 | low-level-operation-api.md | pypi |
|operation|param id|param phase|default value|weight order|
|:---:|:---:|:---:|:---:|:---:|
|AbsVal|||
|ArgMax|0|out_max_val|0|
||1|topk|1|
|BatchNorm|0|channels|0|slope mean variance bias|
||1|eps|0.f|
|Bias|0|bias_data_size|0|
|BinaryOp|0|op_type|0|
||1|with_scalar|0|
||2|b|0.f|
|BNLL|||
|Cast|0|type_from|0|
||1|type_to|0|
|Clip|0|min|-FLT_MAX|
||1|max|FLT_MAX|
|Concat|0|axis|0|
|Convolution|0|num_output|0|weight bias|
||1|kernel_w|0|
||2|dilation_w|1|
||3|stride_w|1|
||4|pad_left|0|
||5|bias_term|0|
||6|weight_data_size|0|
||8|int8_scale_term|0|
||9|activation_type|0|
||10|activation_params|[ ]|
||11|kernel_h|kernel_w|
||12|dilation_h|dilation_w|
||13|stride_h|stride_w|
||15|pad_right|pad_left|
||14|pad_top|pad_left|
||16|pad_bottom|pad_top|
||17|impl_type|0|
||18|pad_value|0.f|
|ConvolutionDepthWise|0|num_output|0|weight bias|
||1|kernel_w|0|
||2|dilation_w|1|
||3|stride_w|1|
||4|pad_left|0|
||5|bias_term|0|
||6|weight_data_size|0|
||7|group|1|
||8|int8_scale_term|0|
||9|activation_type|0|
||10|activation_params|[ ]|
||11|kernel_h|kernel_w|
||12|dilation_h|dilation_w|
||13|stride_h|stride_w|
||15|pad_right|pad_left|
||14|pad_top|pad_left|
||16|pad_bottom|pad_top|
||18|pad_value|0.f|
|Crop|0|woffset|0|
||1|hoffset|0|
||2|coffset|0|
||3|outw|0|
||4|outh|0|
||5|outc|0|
||6|woffset2|0|
||7|hoffset2|0|
||8|coffset2|0|
||9|starts|[ ]|
||10|ends|[ ]|
||11|axes|[ ]|
|Deconvolution|0|num_output|0|weight bias|
||1|kernel_w|0|
||2|dilation_w|1|
||3|stride_w|1|
||4|pad_left|0|
||5|bias_term|0|
||6|weight_data_size|0|
||9|activation_type|0|
||10|activation_params|[ ]|
||11|kernel_h|kernel_w|
||12|dilation_h|dilation_w|
||13|stride_h|stride_w|
||15|pad_right|pad_left|
||14|pad_top|pad_left|
||16|pad_bottom|pad_top|
||18|output_pad_right|0|
||19|output_pad_bottom|output_pad_right|
||20|output_w|0|
||21|output_h|output_w|
|DeconvolutionDepthWise|0|num_output|0|weight bias|
||1|kernel_w|0|
||2|dilation_w|1|
||3|stride_w|1|
||4|pad_left|0|
||5|bias_term|0|
||6|weight_data_size|0|
||7|group|1|
||9|activation_type|0|
||10|activation_params|[ ]|
||11|kernel_h|kernel_w|
||12|dilation_h|dilation_w|
||13|stride_h|stride_w|
||15|pad_right|pad_left|
||14|pad_top|pad_left|
||16|pad_bottom|pad_top|
||18|output_pad_right|0|
||19|output_pad_bottom|output_pad_right|
||20|output_w|0|
||21|output_h|output_w|
|Dequantize|0|scale|1.f|bias|
||1|bias_term|0|
||2|bias_data_size|0|
|DetectionOutput|0|num_class|0|
||1|nms_threshold|0.05f|
||2|nms_top_k|300|
||3|keep_top_k|100|
||4|confidence_threshold|0.5f|
||5|variances[0]|0.1f|
||6|variances[1]|0.1f|
||7|variances[2]|0.2f|
||8|variances[3]|0.2f|
|Dropout|0|scale|1.f|
|Eltwise|0|op_type|0|
||1|coeffs|[ ]|
|ELU|0|alpha|0.1f|
|Embed|0|num_output|0|weight bias|
||1|input_dim|0|
||2|bias_term|0|
||3|weight_data_size|0|
|Exp|0|base|-1.f|
||1|scale|1.f|
||2|shift|0.f|
|ExpandDims|0|expand_w|0|
||1|expand_h|0|
||2|expand_c|0|
||3|axes|[ ]|
|Flatten|||
|HardSigmoid|0|alpha|0.2f||
||1|beta|0.5f|
|HardSwish|0|alpha|0.2f||
||1|beta|0.5f|
|InnerProduct|0|num_output|0|weight bias|
||1|bias_term|0|
||2|weight_data_size|0|
||8|int8_scale_term|0|
||9|activation_type|0|
||10|activation_params|[ ]|
|Input|0|w|0|
||1|h|0|
||2|c|0|
|InstanceNorm|0|channels|0|gamma bias|
||1|eps|0.001f|
|Interp|0|resize_type|0|
||1|height_scale|1.f|
||2|width_scale|1.f|
||3|output_height|0|
||4|output_width|0|
|Log|0|base|-1.f|
||1|scale|1.f|
||2|shift|0.f|
|LRN|0|region_type|0|
||1|local_size|5|
||2|alpha|1.f|
||3|beta|0.75f|
||4|bias|1.f|
|LSTM|0|num_output|0|
||1|weight_data_size|1|
||2|direction|0|
|MemoryData|0|w|0|
||1|h|0|
||2|c|0|
|Mish|||
|MVN|0|normalize_variance|0|
||1|across_channels|0|
||2|eps|0.0001f|
|Noop|||
|Normalize|0|across_spatial|0|scale|
||4|across_channel|0|
||1|channel_shared|0|
||2|eps|0.0001f|
||9|eps_mode|0|
||3|scale_data_size|0|
|Packing|0|out_packing|1|
||1|use_padding|0|
||2|cast_type_from|0|
||3|cast_type_to|0|
||4|storage_type_from|0|
||5|storage_type_to|0|
|Padding|0|top|0|per_channel_pad_data|
||1|bottom|0|
||2|left|0|
||3|right|0|
||4|type|0|
||5|value|0.f|
||6|per_channel_pad_data_size|0|
||7|front|0|
||8|behind|0|
|Permute|0|order_type|0|
|PixelShuffle|0|upscale_factor|1|
|Pooling|0|pooling_type(0: max 1: avg)|0|
||1|kernel_w|0|
||11|kernel_h|kernel_w|
||2|stride_w|1|
||12|stride_h|stride_w|
||3|pad_left|0|
||14|pad_right|pad_left|
||13|pad_top|pad_left|
||15|pad_bottom|pad_top|
||4|global_pooling|0|
||5|pad_mode|0|
|Power|0|power|1.f|
||1|scale|1.f|
||2|shift|0.f|
|PReLU|0|num_slope|0|slope|
|PriorBox|0|min_sizes|[ ]|
||1|max_sizes|[ ]|
||2|aspect_ratios|[ ]|
||3|variances[0]|0.f|
||4|variances[1]|0.f|
||5|variances[2]|0.f|
||6|variances[3]|0.f|
||7|flip|1|
||8|clip|0|
||9|image_width|0|
||10|image_height|0|
||11|step_width|-233.f|
||12|step_height|-233.f|
||13|offset|0.f|
||14|step_mmdetection|0|
||15|center_mmdetection|0|
|Proposal|0|feat_stride|16|
||1|base_size|16|
||2|pre_nms_topN|6000|
||3|after_nms_topN|300|
||4|nms_thresh|0.7f|
||5|min_size|16|
|PSROIPooling|0|pooled_width|7|
||1|pooled_height|7|
||2|spatial_scale|0.0625f|
||3|output_dim|0|
|Quantize|0|scale|1.f|
|Reduction|0|operation|0|
||1|dim|0|
||2|coeff|1.f|
||3|axes|[ ]|
||4|keepdims|0|
|ReLU|0|slope|0.f|
|Reorg|0|stride|0|
|Requantize|0|scale_in|1.f|bias|
||1|scale_out|1.f|
||2|bias_term|0|
||3|bias_data_size|0|
||4|fusion_relu|0|
|Reshape|0|w|-233|
||1|h|-233|
||2|c|-233|
||3|permute|0|
|ROIAlign|0|pooled_width|0|
||1|pooled_height|0|
||2|spatial_scale|1.f|
||3|sampling_ratio|0|
||4|aligned|0|
||5|version|0|
|ROIPooling|0|pooled_width|0|
||1|pooled_height|0|
||2|spatial_scale|1.f|
|Scale|0|scale_data_size|0|scale bias|
||1|bias_term|0|
|SELU|0|alpha|1.67326324f||
||1|lambda|1.050700987f|
|ShuffleChannel|0|group|1|
|Sigmoid|||
|Slice|0|slices|[ ]|
||1|axis|0|
|Softmax|0|axis|0|
|Split|||
|SPP|0|pooling_type|0|
||1|pyramid_height|1|
|Squeeze|0|squeeze_w|0|
||1|squeeze_h|0|
||2|squeeze_c|0|
||3|axes|[ ]|
|StatisticsPooling|0|include_stddev|0|
|Swish|||
|TanH|||
|Threshold|0|threshold|0.f|
|Tile|0|dim|0|
||1|tiles|1|
|UnaryOp|0|op_type|0|
|YoloDetectionOutput|0|num_class|20|
||1|num_box|5|
||2|confidence_threshold|0.01f|
||3|nms_threshold|0.45f|
||4|biases|[]|
|Yolov3DetectionOutput|0|num_class|20|
||1|num_box|5|
||2|confidence_threshold|0.01f|
||3|nms_threshold|0.45f|
||4|biases|[]|
||5|mask|[]|
||6|anchors_scale|[]|
|RNN|0|num_output|0|
||1|weight_data_size|0|
||2|direction|0|
|MultiHeadAttention|0|embed_dim|0|
||1|num_head|1|
||2|weight_data_size|0|
| /rife-ncnn-vulkan-python-1.2.1.tar.gz/rife-ncnn-vulkan-python-1.2.1/rife_ncnn_vulkan_python/rife-ncnn-vulkan/src/ncnn/docs/developer-guide/operation-param-weight-table.md | 0.833596 | 0.776411 | operation-param-weight-table.md | pypi |
* [AbsVal](#absval)
* [ArgMax](#argmax)
* [BatchNorm](#batchnorm)
* [Bias](#bias)
* [BinaryOp](#binaryop)
* [BNLL](#bnll)
* [Cast](#cast)
* [Clip](#clip)
* [Concat](#concat)
* [Convolution](#convolution)
* [Convolution1D](#convolution1d)
* [Convolution3D](#convolution3d)
* [ConvolutionDepthWise](#convolutiondepthwise)
* [ConvolutionDepthWise1D](#convolutiondepthwise1d)
* [ConvolutionDepthWise3D](#convolutiondepthwise3d)
* [Crop](#crop)
* [Deconvolution](#deconvolution)
* [Deconvolution1D](#deconvolution1d)
* [Deconvolution3D](#deconvolution3d)
* [DeconvolutionDepthWise](#deconvolutiondepthwise)
* [DeconvolutionDepthWise1D](#deconvolutiondepthwise1d)
* [DeconvolutionDepthWise3D](#deconvolutiondepthwise3d)
* [DeformableConv2D](#deformableconv2d)
* [Dequantize](#dequantize)
* [Dropout](#dropout)
* [Eltwise](#eltwise)
* [ELU](#elu)
* [Exp](#exp)
* [Flatten](#flatten)
* [GELU](#gelu)
* [Gemm](#gemm)
* [GroupNorm](#groupnorm)
* [GRU](#gru)
* [HardSigmoid](#hardsigmoid)
* [HardSwish](#hardswish)
* [InnerProduct](#innerproduct)
* [Input](#input)
* [InstanceNorm](#instancenorm)
* [Interp](#interp)
* [LayerNorm](#layernorm)
* [Log](#log)
* [LRN](#lrn)
* [LSTM](#lstm)
* [MemoryData](#memorydata)
* [Mish](#mish)
* [MultiHeadAttention](#multiheadattention)
* [MVN](#mvn)
* [Noop](#noop)
* [Normalize](#normalize)
* [Packing](#packing)
* [Padding](#padding)
* [Permute](#permute)
* [PixelShuffle](#pixelshuffle)
* [Pooling](#pooling)
* [Pooling1D](#pooling1d)
* [Pooling3D](#pooling3d)
* [Power](#power)
* [PReLU](#prelu)
* [Quantize](#quantize)
* [Reduction](#reduction)
* [ReLU](#relu)
* [Reorg](#reorg)
* [Requantize](#requantize)
* [Reshape](#reshape)
* [RNN](#rnn)
* [Scale](#scale)
* [SELU](#selu)
* [ShuffleChannel](#shufflechannel)
* [Sigmoid](#sigmoid)
* [Slice](#slice)
* [Softmax](#softmax)
* [Softplus](#softplus)
* [Split](#split)
* [Swish](#swish)
* [TanH](#tanh)
* [Threshold](#threshold)
* [Tile](#tile)
* [UnaryOp](#unaryop)
# AbsVal
```
y = abs(x)
```
* one_blob_only
* support_inplace
# ArgMax
```
y = argmax(x, out_max_val, topk)
```
* one_blob_only
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | out_max_val | int | 0 | |
| 1 | topk | int | 1 | |
# BatchNorm
```
y = (x - mean) / sqrt(var + eps) * slope + bias
```
* one_blob_only
* support_inplace
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | channels | int | 0 | |
| 1 | eps | float | 0.f | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| slope_data | float | [channels] |
| mean_data | float | [channels] |
| var_data | float | [channels] |
| bias_data | float | [channels] |
# Bias
```
y = x + bias
```
* one_blob_only
* support_inplace
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | bias_data_size| int | 0 | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| bias_data | float | [channels] |
# BinaryOp
This operation is used for binary computation, and the calculation rule depends on the [broadcasting rule](https://github.com/Tencent/ncnn/wiki/binaryop-broadcasting).
```
C = binaryop(A, B)
```
if with_scalar = 1:
- one_blob_only
- support_inplace
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | op_type | int | 0 | Operation type as follows |
| 1 | with_scalar | int | 0 | with_scalar=0 B is a matrix, with_scalar=1 B is a scalar |
| 2 | b | float | 0.f | When B is a scalar, B = b |
Operation type:
- 0 = ADD
- 1 = SUB
- 2 = MUL
- 3 = DIV
- 4 = MAX
- 5 = MIN
- 6 = POW
- 7 = RSUB
- 8 = RDIV
# BNLL
```
y = x + log(1 + e^(-x)) , x > 0
y = log(1 + e^x), x < 0
```
* one_blob_only
* support_inplace
# Cast
```
y = cast(x)
```
* one_blob_only
* support_packing
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | type_from | int | 0 | |
| 1 | type_to | int | 0 | |
Element type:
- 0 = auto
- 1 = float32
- 2 = float16
- 3 = int8
- 4 = bfloat16
# Clip
```
y = clamp(x, min, max)
```
* one_blob_only
* support_inplace
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | min | float | -FLT_MAX | |
| 1 | max | float | FLT_MAX | |
# Concat
```
y = concat(x0, x1, x2, ...) by axis
```
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | axis | int | 0 | |
# Convolution
```
x2 = pad(x, pads, pad_value)
x3 = conv(x2, weight, kernel, stride, dilation) + bias
y = activation(x3, act_type, act_params)
```
* one_blob_only
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | num_output | int | 0 | |
| 1 | kernel_w | int | 0 | |
| 2 | dilation_w | int | 1 | |
| 3 | stride_w | int | 1 | |
| 4 | pad_left | int | 0 | |
| 5 | bias_term | int | 0 | |
| 6 | weight_data_size| int | 0 | |
| 8 | int8_scale_term| int | 0 | |
| 9 | activation_type| int | 0 | |
| 10 | activation_params| array | [ ] | |
| 11 | kernel_h | int | kernel_w | |
| 12 | dilation_h | int | dilation_w | |
| 13 | stride_h | int | stride_w | |
| 14 | pad_top | int | pad_left | |
| 15 | pad_right | int | pad_left | |
| 16 | pad_bottom | int | pad_top | |
| 18 | pad_value | float | 0.f | |
| 19 | dynamic_weight| int | 0 | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| weight_data | float/fp16/int8 | [kernel_w, kernel_h, num_input, num_output] |
| bias_data | float | [num_output] |
| weight_data_int8_scales| float | [num_output] |
| bottom_blob_int8_scales| float | [1] |
| top_blob_int8_scales| float | [1] |
# Convolution1D
```
x2 = pad(x, pads, pad_value)
x3 = conv1d(x2, weight, kernel, stride, dilation) + bias
y = activation(x3, act_type, act_params)
```
* one_blob_only
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | num_output | int | 0 | |
| 1 | kernel_w | int | 0 | |
| 2 | dilation_w | int | 1 | |
| 3 | stride_w | int | 1 | |
| 4 | pad_left | int | 0 | |
| 5 | bias_term | int | 0 | |
| 6 | weight_data_size| int | 0 | |
| 9 | activation_type| int | 0 | |
| 10 | activation_params| array | [ ] | |
| 15 | pad_right | int | pad_left | |
| 18 | pad_value | float | 0.f | |
| 19 | dynamic_weight| int | 0 | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| weight_data | float/fp16/int8 | [kernel_w, num_input, num_output] |
| bias_data | float | [num_output] |
# Convolution3D
```
x2 = pad(x, pads, pad_value)
x3 = conv3d(x2, weight, kernel, stride, dilation) + bias
y = activation(x3, act_type, act_params)
```
* one_blob_only
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | num_output | int | 0 | |
| 1 | kernel_w | int | 0 | |
| 2 | dilation_w | int | 1 | |
| 3 | stride_w | int | 1 | |
| 4 | pad_left | int | 0 | |
| 5 | bias_term | int | 0 | |
| 6 | weight_data_size| int | 0 | |
| 9 | activation_type| int | 0 | |
| 10 | activation_params| array | [ ] | |
| 11 | kernel_h | int | kernel_w | |
| 12 | dilation_h | int | dilation_w | |
| 13 | stride_h | int | stride_w | |
| 14 | pad_top | int | pad_left | |
| 15 | pad_right | int | pad_left | |
| 16 | pad_bottom | int | pad_top | |
| 17 | pad_behind | int | pad_front | |
| 18 | pad_value | float | 0.f | |
| 21 | kernel_d | int | kernel_w | |
| 22 | dilation_d | int | dilation_w | |
| 23 | stride_d | int | stride_w | |
| 24 | pad_front | int | pad_left | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| weight_data | float/fp16/int8 | [kernel_w, kernel_h, kernel_d, num_input, num_output] |
| bias_data | float | [num_output] |
# ConvolutionDepthWise
```
x2 = pad(x, pads, pad_value)
x3 = conv(x2, weight, kernel, stride, dilation, group) + bias
y = activation(x3, act_type, act_params)
```
* one_blob_only
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | num_output | int | 0 | |
| 1 | kernel_w | int | 0 | |
| 2 | dilation_w | int | 1 | |
| 3 | stride_w | int | 1 | |
| 4 | pad_left | int | 0 | |
| 5 | bias_term | int | 0 | |
| 6 | weight_data_size| int | 0 | |
| 7 | group | int | 1 | |
| 8 | int8_scale_term| int | 0 | |
| 9 | activation_type| int | 0 | |
| 10 | activation_params| array | [ ] | |
| 11 | kernel_h | int | kernel_w | |
| 12 | dilation_h | int | dilation_w | |
| 13 | stride_h | int | stride_w | |
| 14 | pad_top | int | pad_left | |
| 15 | pad_right | int | pad_left | |
| 16 | pad_bottom | int | pad_top | |
| 18 | pad_value | float | 0.f | |
| 19 | dynamic_weight| int | 0 | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| weight_data | float/fp16/int8 | [kernel_w, kernel_h, num_input / group, num_output / group, group] |
| bias_data | float | [num_output] |
| weight_data_int8_scales| float | [group] |
| bottom_blob_int8_scales| float | [1] |
| top_blob_int8_scales| float | [1] |
# ConvolutionDepthWise1D
```
x2 = pad(x, pads, pad_value)
x3 = conv1d(x2, weight, kernel, stride, dilation, group) + bias
y = activation(x3, act_type, act_params)
```
* one_blob_only
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | num_output | int | 0 | |
| 1 | kernel_w | int | 0 | |
| 2 | dilation_w | int | 1 | |
| 3 | stride_w | int | 1 | |
| 4 | pad_left | int | 0 | |
| 5 | bias_term | int | 0 | |
| 6 | weight_data_size| int | 0 | |
| 7 | group | int | 1 | |
| 9 | activation_type| int | 0 | |
| 10 | activation_params| array | [ ] | |
| 15 | pad_right | int | pad_left | |
| 18 | pad_value | float | 0.f | |
| 19 | dynamic_weight| int | 0 | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| weight_data | float/fp16/int8 | [kernel_w, num_input / group, num_output / group, group] |
| bias_data | float | [num_output] |
# ConvolutionDepthWise3D
```
x2 = pad(x, pads, pad_value)
x3 = conv3d(x2, weight, kernel, stride, dilation, group) + bias
y = activation(x3, act_type, act_params)
```
* one_blob_only
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | num_output | int | 0 | |
| 1 | kernel_w | int | 0 | |
| 2 | dilation_w | int | 1 | |
| 3 | stride_w | int | 1 | |
| 4 | pad_left | int | 0 | |
| 5 | bias_term | int | 0 | |
| 6 | weight_data_size| int | 0 | |
| 7 | group | int | 1 | |
| 9 | activation_type| int | 0 | |
| 10 | activation_params| array | [ ] | |
| 11 | kernel_h | int | kernel_w | |
| 12 | dilation_h | int | dilation_w | |
| 13 | stride_h | int | stride_w | |
| 14 | pad_top | int | pad_left | |
| 15 | pad_right | int | pad_left | |
| 16 | pad_bottom | int | pad_top | |
| 17 | pad_behind | int | pad_front | |
| 18 | pad_value | float | 0.f | |
| 21 | kernel_d | int | kernel_w | |
| 22 | dilation_d | int | dilation_w | |
| 23 | stride_d | int | stride_w | |
| 24 | pad_front | int | pad_left | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| weight_data | float/fp16/int8 | [kernel_w, kernel_h, kernel_d, num_input / group, num_output / group, group] |
| bias_data | float | [num_output] |
# Crop
```
y = crop(x)
```
* one_blob_only
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | woffset | int | 0 | |
| 1 | hoffset | int | 0 | |
| 2 | coffset | int | 0 | |
| 3 | outw | int | 0 | |
| 4 | outh | int | 0 | |
| 5 | outc | int | 0 | |
| 6 | woffset2 | int | 0 | |
| 7 | hoffset2 | int | 0 | |
| 8 | coffset2 | int | 0 | |
| 9 | starts | array | [ ] | |
| 10 | ends | array | [ ] | |
| 11 | axes | array | [ ] | |
# Deconvolution
```
x2 = deconv(x, weight, kernel, stride, dilation) + bias
x3 = depad(x2, pads, pad_value)
y = activation(x3, act_type, act_params)
```
* one_blob_only
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | num_output | int | 0 | |
| 1 | kernel_w | int | 0 | |
| 2 | dilation_w | int | 1 | |
| 3 | stride_w | int | 1 | |
| 4 | pad_left | int | 0 | |
| 5 | bias_term | int | 0 | |
| 6 | weight_data_size| int | 0 | |
| 9 | activation_type| int | 0 | |
| 10 | activation_params| array | [ ] | |
| 11 | kernel_h | int | kernel_w | |
| 12 | dilation_h | int | dilation_w | |
| 13 | stride_h | int | stride_w | |
| 14 | pad_top | int | pad_left | |
| 15 | pad_right | int | pad_left | |
| 16 | pad_bottom | int | pad_top | |
| 18 | output_pad_right| int | 0 | |
| 19 | output_pad_bottom| int | output_pad_right | |
| 20 | output_w | int | 0 | |
| 21 | output_h | int | output_w | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| weight_data | float/fp16 | [kernel_w, kernel_h, num_input, num_output] |
| bias_data | float | [num_output] |
# Deconvolution1D
```
x2 = deconv1d(x, weight, kernel, stride, dilation) + bias
x3 = depad(x2, pads, pad_value)
y = activation(x3, act_type, act_params)
```
* one_blob_only
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | num_output | int | 0 | |
| 1 | kernel_w | int | 0 | |
| 2 | dilation_w | int | 1 | |
| 3 | stride_w | int | 1 | |
| 4 | pad_left | int | 0 | |
| 5 | bias_term | int | 0 | |
| 6 | weight_data_size| int | 0 | |
| 9 | activation_type| int | 0 | |
| 10 | activation_params| array | [ ] | |
| 15 | pad_right | int | pad_left | |
| 18 | output_pad_right| int | 0 | |
| 20 | output_w | int | 0 | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| weight_data | float/fp16 | [kernel_w, num_input, num_output] |
| bias_data | float | [num_output] |
# Deconvolution3D
```
x2 = deconv3d(x, weight, kernel, stride, dilation) + bias
x3 = depad(x2, pads, pad_value)
y = activation(x3, act_type, act_params)
```
* one_blob_only
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | num_output | int | 0 | |
| 1 | kernel_w | int | 0 | |
| 2 | dilation_w | int | 1 | |
| 3 | stride_w | int | 1 | |
| 4 | pad_left | int | 0 | |
| 5 | bias_term | int | 0 | |
| 6 | weight_data_size| int | 0 | |
| 9 | activation_type| int | 0 | |
| 10 | activation_params| array | [ ] | |
| 11 | kernel_h | int | kernel_w | |
| 12 | dilation_h | int | dilation_w | |
| 13 | stride_h | int | stride_w | |
| 14 | pad_top | int | pad_left | |
| 15 | pad_right | int | pad_left | |
| 16 | pad_bottom | int | pad_top | |
| 17 | pad_behind | int | pad_front | |
| 18 | output_pad_right| int | 0 | |
| 19 | output_pad_bottom| int | output_pad_right | |
| 20 | output_pad_behind| int | output_pad_right | |
| 21 | kernel_d | int | kernel_w | |
| 22 | dilation_d | int | dilation_w | |
| 23 | stride_d | int | stride_w | |
| 24 | pad_front | int | pad_left | |
| 25 | output_w | int | 0 | |
| 26 | output_h | int | output_w | |
| 27 | output_d | int | output_w | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| weight_data | float/fp16 | [kernel_w, kernel_h, kernel_d, num_input, num_output] |
| bias_data | float | [num_output] |
# DeconvolutionDepthWise
```
x2 = deconv(x, weight, kernel, stride, dilation, group) + bias
x3 = depad(x2, pads, pad_value)
y = activation(x3, act_type, act_params)
```
* one_blob_only
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | num_output | int | 0 | |
| 1 | kernel_w | int | 0 | |
| 2 | dilation_w | int | 1 | |
| 3 | stride_w | int | 1 | |
| 4 | pad_left | int | 0 | |
| 5 | bias_term | int | 0 | |
| 6 | weight_data_size| int | 0 | |
| 7 | group | int | 1 | |
| 9 | activation_type| int | 0 | |
| 10 | activation_params| array | [ ] | |
| 11 | kernel_h | int | kernel_w | |
| 12 | dilation_h | int | dilation_w | |
| 13 | stride_h | int | stride_w | |
| 14 | pad_top | int | pad_left | |
| 15 | pad_right | int | pad_left | |
| 16 | pad_bottom | int | pad_top | |
| 18 | output_pad_right| int | 0 | |
| 19 | output_pad_bottom| int | output_pad_right | |
| 20 | output_w | int | 0 | |
| 21 | output_h | int | output_w | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| weight_data | float/fp16 | [kernel_w, kernel_h, num_input / group, num_output / group, group] |
| bias_data | float | [num_output] |
# DeconvolutionDepthWise1D
```
x2 = deconv1d(x, weight, kernel, stride, dilation, group) + bias
x3 = depad(x2, pads, pad_value)
y = activation(x3, act_type, act_params)
```
* one_blob_only
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | num_output | int | 0 | |
| 1 | kernel_w | int | 0 | |
| 2 | dilation_w | int | 1 | |
| 3 | stride_w | int | 1 | |
| 4 | pad_left | int | 0 | |
| 5 | bias_term | int | 0 | |
| 6 | weight_data_size| int | 0 | |
| 7 | group | int | 1 | |
| 9 | activation_type| int | 0 | |
| 10 | activation_params| array | [ ] | |
| 15 | pad_right | int | pad_left | |
| 18 | output_pad_right| int | 0 | |
| 20 | output_w | int | 0 | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| weight_data | float/fp16 | [kernel_w, num_input / group, num_output / group, group] |
| bias_data | float | [num_output] |
# DeconvolutionDepthWise3D
```
x2 = deconv3d(x, weight, kernel, stride, dilation, group) + bias
x3 = depad(x2, pads, pad_value)
y = activation(x3, act_type, act_params)
```
* one_blob_only
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | num_output | int | 0 | |
| 1 | kernel_w | int | 0 | |
| 2 | dilation_w | int | 1 | |
| 3 | stride_w | int | 1 | |
| 4 | pad_left | int | 0 | |
| 5 | bias_term | int | 0 | |
| 6 | weight_data_size| int | 0 | |
| 7 | group | int | 1 | |
| 9 | activation_type| int | 0 | |
| 10 | activation_params| array | [ ] | |
| 11 | kernel_h | int | kernel_w | |
| 12 | dilation_h | int | dilation_w | |
| 13 | stride_h | int | stride_w | |
| 14 | pad_top | int | pad_left | |
| 15 | pad_right | int | pad_left | |
| 16 | pad_bottom | int | pad_top | |
| 17 | pad_behind | int | pad_front | |
| 18 | output_pad_right| int | 0 | |
| 19 | output_pad_bottom| int | output_pad_right | |
| 20 | output_pad_behind| int | output_pad_right | |
| 21 | kernel_d | int | kernel_w | |
| 22 | dilation_d | int | dilation_w | |
| 23 | stride_d | int | stride_w | |
| 24 | pad_front | int | pad_left | |
| 25 | output_w | int | 0 | |
| 26 | output_h | int | output_w | |
| 27 | output_d | int | output_w | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| weight_data | float/fp16 | [kernel_w, kernel_h, kernel_d, num_input / group, num_output / group, group] |
| bias_data | float | [num_output] |
# DeformableConv2D
```
x2 = deformableconv2d(x, offset, mask, weight, kernel, stride, dilation) + bias
y = activation(x2, act_type, act_params)
```
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | num_output | int | 0 | |
| 1 | kernel_w | int | 0 | |
| 2 | dilation_w | int | 1 | |
| 3 | stride_w | int | 1 | |
| 4 | pad_left | int | 0 | |
| 5 | bias_term | int | 0 | |
| 6 | weight_data_size| int | 0 | |
| 9 | activation_type| int | 0 | |
| 10 | activation_params| array | [ ] | |
| 11 | kernel_h | int | kernel_w | |
| 12 | dilation_h | int | dilation_w | |
| 13 | stride_h | int | stride_w | |
| 14 | pad_top | int | pad_left | |
| 15 | pad_right | int | pad_left | |
| 16 | pad_bottom | int | pad_top | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| weight_data | float/fp16/int8 | [kernel_w, kernel_h, num_input, num_output] |
| bias_data | float | [num_output] |
# Dequantize
```
y = x * scale + bias
```
* one_blob_only
* support_inplace
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | scale_data_size| int | 1 | |
| 1 | bias_data_size| int | 0 | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| scale_data | float | [scale_data_size] |
| bias_data | float | [bias_data_size] |
# Dropout
```
y = x * scale
```
* one_blob_only
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | scale | float | 1.f | |
# Eltwise
```
y = elementwise_op(x0, x1, ...)
```
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | op_type | int | 0 | |
| 1 | coeffs | array | [ ] | |
Operation type:
- 0 = PROD
- 1 = SUM
- 2 = MAX
# ELU
```
if x < 0 y = (exp(x) - 1) * alpha
else y = x
```
* one_blob_only
* support_inplace
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | alpha | float | 0.1f | |
# Exp
```
if base == -1 y = exp(shift + x * scale)
else y = pow(base, (shift + x * scale))
```
* one_blob_only
* support_inplace
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | base | float | -1.f | |
| 1 | scale | float | 1.f | |
| 2 | shift | float | 0.f | |
# Flatten
Reshape blob to 1 dimension
* one_blob_only
# GELU
```
if fast_gelu == 1 y = 0.5 * x * (1 + tanh(0.79788452 * (x + 0.044715 * x * x * x)));
else y = 0.5 * x * erfc(-0.70710678 * x)
```
* one_blob_only
* support_inplace
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | fast_gelu | int | 0 | use approximation |
# Gemm
```
a = transA ? transpose(x0) : x0
b = transB ? transpose(x1) : x1
c = x2
y = gemm(a, b) * alpha + c * beta
```
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | alpha | float | 1.f | |
| 1 | beta | float | 1.f | |
| 2 | transA | int | 0 | |
| 3 | transB | int | 0 | |
# GroupNorm
```
split x along channel axis into group x0, x1 ...
l2 normalize for each group x0, x1 ...
y = x * gamma + beta
```
* one_blob_only
* support_inplace
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | group | int | 1 | |
| 1 | channels | int | 0 | |
| 2 | eps | float | 0.001f | x = x / sqrt(var + eps) |
| 3 | affine | int | 1 | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| gamma_data | float | [channels] |
| beta_data | float | [channels] |
# GRU
Apply a single-layer GRU to a feature sequence of `T` timesteps. The input blob shape is `[w=input_size, h=T]` and the output blob shape is `[w=num_output, h=T]`.
```
y = gru(x)
y0, hidden y1 = gru(x0, hidden x1)
```
* one_blob_only if bidirectional
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | num_output | int | 0 | hidden size of output |
| 1 | weight_data_size| int | 0 | total size of weight matrix |
| 2 | direction | int | 0 | 0=forward, 1=reverse, 2=bidirectional |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| weight_xc_data| float/fp16/int8 | [input_size, num_output * 3, num_directions] |
| bias_c_data | float/fp16/int8 | [num_output, 4, num_directions] |
| weight_hc_data| float/fp16/int8 | [num_output, num_output * 3, num_directions] |
Direction flag:
- 0 = forward only
- 1 = reverse only
- 2 = bidirectional
# HardSigmoid
```
y = clamp(x * alpha + beta, 0, 1)
```
* one_blob_only
* support_inplace
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | alpha | float | 0.2f | |
| 1 | beta | float | 0.5f | |
# HardSwish
```
y = x * clamp(x * alpha + beta, 0, 1)
```
* one_blob_only
* support_inplace
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | alpha | float | 0.2f | |
| 1 | beta | float | 0.5f | |
# InnerProduct
```
x2 = innerproduct(x, weight) + bias
y = activation(x2, act_type, act_params)
```
* one_blob_only
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | num_output | int | 0 | |
| 1 | bias_term | int | 0 | |
| 2 | weight_data_size| int | 0 | |
| 8 | int8_scale_term| int | 0 | |
| 9 | activation_type| int | 0 | |
| 10 | activation_params| array | [ ] | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| weight_data | float/fp16/int8 | [num_input, num_output] |
| bias_data | float | [num_output] |
| weight_data_int8_scales| float | [num_output] |
| bottom_blob_int8_scales| float | [1] |
# Input
```
y = input
```
* support_inplace
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | w | int | 0 | |
| 1 | h | int | 0 | |
| 11 | d | int | 0 | |
| 2 | c | int | 0 | |
# InstanceNorm
```
split x along channel axis into instance x0, x1 ...
l2 normalize for each channel instance x0, x1 ...
y = x * gamma + beta
```
* one_blob_only
* support_inplace
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | channels | int | 0 | |
| 1 | eps | float | 0.001f | x = x / sqrt(var + eps) |
| 2 | affine | int | 1 | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| gamma_data | float | [channels] |
| beta_data | float | [channels] |
# Interp
```
if dynamic_target_size == 0 y = resize(x) by fixed size or scale
else y = resize(x0, size(x1))
```
* one_blob_only if dynamic_target_size == 0
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | resize_type | int | 0 | |
| 1 | height_scale | float | 1.f | |
| 2 | width_scale | float | 1.f | |
| 3 | output_height | int | 0 | |
| 4 | output_width | int | 0 | |
| 5 | dynamic_target_size| int | 0 | |
| 6 | align_corner | int | 0 | |
Resize type:
- 1 = Nearest
- 2 = Bilinear
- 3 = Bicubic
# LayerNorm
```
split x along outmost axis into part x0, x1 ...
l2 normalize for each part x0, x1 ...
y = x * gamma + beta by elementwise
```
* one_blob_only
* support_inplace
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | affine_size | int | 0 | |
| 1 | eps | float | 0.001f | x = x / sqrt(var + eps) |
| 2 | affine | int | 1 | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| gamma_data | float | [affine_size] |
| beta_data | float | [affine_size] |
# Log
```
if base == -1 y = log(shift + x * scale)
else y = log(shift + x * scale) / log(base)
```
* one_blob_only
* support_inplace
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | base | float | -1.f | |
| 1 | scale | float | 1.f | |
| 2 | shift | float | 0.f | |
# LRN
```
if region_type == ACROSS_CHANNELS square_sum = sum of channel window of local_size
if region_type == WITHIN_CHANNEL square_sum = sum of spatial window of local_size
y = x * pow(bias + alpha * square_sum / (local_size * local_size), -beta)
```
* one_blob_only
* support_inplace
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | region_type | int | 0 | |
| 1 | local_size | int | 5 | |
| 2 | alpha | float | 1.f | |
| 3 | beta | float | 0.75f | |
| 4 | bias | float | 1.f | |
Region type:
- 0 = ACROSS_CHANNELS
- 1 = WITHIN_CHANNEL
# LSTM
Apply a single-layer LSTM to a feature sequence of `T` timesteps. The input blob shape is `[w=input_size, h=T]` and the output blob shape is `[w=num_output, h=T]`.
```
y = lstm(x)
y0, hidden y1, cell y2 = lstm(x0, hidden x1, cell x2)
```
* one_blob_only if bidirectional
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | num_output | int | 0 | hidden size of output |
| 1 | weight_data_size| int | 0 | total size of IFOG weight matrix |
| 2 | direction | int | 0 | 0=forward, 1=reverse, 2=bidirectional |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| weight_xc_data| float/fp16/int8 | [input_size, num_output * 4, num_directions] |
| bias_c_data | float/fp16/int8 | [num_output, 4, num_directions] |
| weight_hc_data| float/fp16/int8 | [num_output, num_output * 4, num_directions] |
Direction flag:
- 0 = forward only
- 1 = reverse only
- 2 = bidirectional
# MemoryData
```
y = data
```
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | w | int | 0 | |
| 1 | h | int | 0 | |
| 11 | d | int | 0 | |
| 2 | c | int | 0 | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| data | float | [w, h, d, c] |
# Mish
```
y = x * tanh(log(exp(x) + 1))
```
* one_blob_only
* support_inplace
# MultiHeadAttention
```
split q k v into num_head part q0, k0, v0, q1, k1, v1 ...
for each num_head part
xq = affine(q) / (embed_dim / num_head)
xk = affine(k)
xv = affine(v)
xqk = xq * xk
softmax_inplace(xqk)
xqkv = xqk * xv
merge xqkv to out
y = affine(out)
```
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | embed_dim | int | 0 | |
| 1 | num_head | int | 1 | |
| 2 | weight_data_size| int | 0 | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| q_weight_data | float/fp16/int8 | [weight_data_size] |
| q_bias_data | float | [embed_dim] |
| k_weight_data | float/fp16/int8 | [weight_data_size] |
| k_bias_data | float | [embed_dim] |
| v_weight_data | float/fp16/int8 | [weight_data_size] |
| v_bias_data | float | [embed_dim] |
| out_weight_data| float/fp16/int8 | [weight_data_size] |
| out_bias_data | float | [embed_dim] |
# MVN
```
if normalize_variance == 1 && across_channels == 1 y = (x - mean) / (sqrt(var) + eps) of whole blob
if normalize_variance == 1 && across_channels == 0 y = (x - mean) / (sqrt(var) + eps) of each channel
if normalize_variance == 0 && across_channels == 1 y = x - mean of whole blob
if normalize_variance == 0 && across_channels == 0 y = x - mean of each channel
```
* one_blob_only
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | normalize_variance| int | 0 | |
| 1 | across_channels| int | 0 | |
| 2 | eps | float | 0.0001f | x = x / (sqrt(var) + eps) |
# Noop
```
y = x
```
# Normalize
```
if across_spatial == 1 && across_channel == 1 x2 = normalize(x) of whole blob
if across_spatial == 1 && across_channel == 0 x2 = normalize(x) of each channel
if across_spatial == 0 && across_channel == 1 x2 = normalize(x) of each position
y = x2 * scale
```
* one_blob_only
* support_inplace
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | across_spatial| int | 0 | |
| 1 | channel_shared| int | 0 | |
| 2 | eps | float | 0.0001f | see eps mode |
| 3 | scale_data_size| int | 0 | |
| 4 | across_channel| int | 0 | |
| 9 | eps_mode | int | 0 | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| scale_data | float | [scale_data_size] |
Eps Mode:
- 0 = caffe/mxnet x = x / sqrt(var + eps)
- 1 = pytorch x = x / max(sqrt(var), eps)
- 2 = tensorflow x = x / sqrt(max(var, eps))
# Packing
```
y = wrap_packing(x)
```
* one_blob_only
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | out_elempack | int | 1 | |
| 1 | use_padding | int | 0 | |
| 2 | cast_type_from| int | 0 | |
| 3 | cast_type_to | int | 0 | |
| 4 | storage_type_from| int | 0 | |
| 5 | storage_type_to| int | 0 | |
# Padding
```
y = pad(x, pads)
```
| param id | name | type | default | description |
| --------- | ------------- | ---- | --------- | ----------------- |
| 0 | top | int | 0 | |
| 1 | bottom | int | 0 | |
| 2 | left | int | 0 | |
| 3 | right | int | 0 | |
| 4 | type | int | 0 | |
| 5 | value | float | 0 | |
| 6 | per_channel_pad_data_size| int | 0 | |
| 7 | front | int | 0 | |
| 8 | behind | int | 0 | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| per_channel_pad_data| float | [per_channel_pad_data_size] |
Padding type:
- 0 = CONSTANT
- 1 = REPLICATE
- 2 = REFLECT
# Permute
```
y = reorder(x)
```
| param id | name | type | default | description |
| --------- | ------------- | ---- | --------- | ----------------- |
| 0 | order_type | int | 0 | |
Order Type:
- 0 = WH WHC WHDC
- 1 = HW HWC HWDC
- 2 = WCH WDHC
- 3 = CWH DWHC
- 4 = HCW HDWC
- 5 = CHW DHWC
- 6 = WHCD
- 7 = HWCD
- 8 = WCHD
- 9 = CWHD
- 10 = HCWD
- 11 = CHWD
- 12 = WDCH
- 13 = DWCH
- 14 = WCDH
- 15 = CWDH
- 16 = DCWH
- 17 = CDWH
- 18 = HDCW
- 19 = DHCW
- 20 = HCDW
- 21 = CHDW
- 22 = DCHW
- 23 = CDHW
# PixelShuffle
```
if mode == 0 y = depth_to_space(x) where x channel order is sw-sh-outc
if mode == 1 y = depth_to_space(x) where x channel order is outc-sw-sh
```
* one_blob_only
| param id | name | type | default | description |
| --------- | ------------- | ---- | --------- | ----------------- |
| 0 | upscale_factor| int | 1 | |
| 1 | mode | int | 0 | |
# Pooling
```
x2 = pad(x, pads)
x3 = pooling(x2, kernel, stride)
```
| param id | name | type | default | description |
| --------- | --------------| ---- | --------- | ----------------- |
| 0 | pooling_type | int | 0 | |
| 1 | kernel_w | int | 0 | |
| 2 | stride_w | int | 1 | |
| 3 | pad_left | int | 0 | |
| 4 | global_pooling| int | 0 | |
| 5 | pad_mode | int | 0 | |
| 6 | avgpool_count_include_pad| int | 0 | |
| 7 | adaptive_pooling| int | 0 | |
| 8 | out_w | int | 0 | |
| 11 | kernel_h | int | kernel_w | |
| 12 | stride_h | int | stride_w | |
| 13 | pad_top | int | pad_left | |
| 14 | pad_right | int | pad_left | |
| 15 | pad_bottom | int | pad_top | |
| 18 | out_h | int | out_w | |
Pooling type:
- 0 = MAX
- 1 = AVG
Pad mode:
- 0 = full padding
- 1 = valid padding
- 2 = tensorflow padding=SAME or onnx padding=SAME_UPPER
- 3 = onnx padding=SAME_LOWER
# Pooling1D
```
x2 = pad(x, pads)
x3 = pooling1d(x2, kernel, stride)
```
| param id | name | type | default | description |
| --------- | --------------| ---- | --------- | ----------------- |
| 0 | pooling_type | int | 0 | |
| 1 | kernel_w | int | 0 | |
| 2 | stride_w | int | 1 | |
| 3 | pad_left | int | 0 | |
| 4 | global_pooling| int | 0 | |
| 5 | pad_mode | int | 0 | |
| 6 | avgpool_count_include_pad| int | 0 | |
| 7 | adaptive_pooling| int | 0 | |
| 8 | out_w | int | 0 | |
| 14 | pad_right | int | pad_left | |
Pooling type:
- 0 = MAX
- 1 = AVG
Pad mode:
- 0 = full padding
- 1 = valid padding
- 2 = tensorflow padding=SAME or onnx padding=SAME_UPPER
- 3 = onnx padding=SAME_LOWER
# Pooling3D
```
x2 = pad(x, pads)
x3 = pooling3d(x2, kernel, stride)
```
| param id | name | type | default | description |
| --------- | --------------| ---- | --------- | ----------------- |
| 0 | pooling_type | int | 0 | |
| 1 | kernel_w | int | 0 | |
| 2 | stride_w | int | 1 | |
| 3 | pad_left | int | 0 | |
| 4 | global_pooling| int | 0 | |
| 5 | pad_mode | int | 0 | |
| 6 | avgpool_count_include_pad| int | 0 | |
| 7 | adaptive_pooling| int | 0 | |
| 8 | out_w | int | 0 | |
| 11 | kernel_h | int | kernel_w | |
| 12 | stride_h | int | stride_w | |
| 13 | pad_top | int | pad_left | |
| 14 | pad_right | int | pad_left | |
| 15 | pad_bottom | int | pad_top | |
| 16 | pad_behind | int | pad_front | |
| 18 | out_h | int | out_w | |
| 21 | kernel_d | int | kernel_w | |
| 22 | stride_d | int | stride_w | |
| 23 | pad_front | int | pad_left | |
| 28 | out_d | int | out_w | |
Pooling type:
- 0 = MAX
- 1 = AVG
Pad mode:
- 0 = full padding
- 1 = valid padding
- 2 = tensorflow padding=SAME or onnx padding=SAME_UPPER
- 3 = onnx padding=SAME_LOWER
# Power
```
y = pow((shift + x * scale), power)
```
* one_blob_only
* support_inplace
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | power | float | 1.f | |
| 1 | scale | float | 1.f | |
| 2 | shift | float | 0.f | |
# PReLU
```
if x < 0 y = x * slope
else y = x
```
* one_blob_only
* support_inplace
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | num_slope | int | 0 | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| slope_data | float | [num_slope] |
# Quantize
```
y = float2int8(x * scale)
```
* one_blob_only
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | scale_data_size| int | 1 | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| scale_data | float | [scale_data_size] |
# Reduction
```
y = reduce_op(x * coeff)
```
* one_blob_only
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | operation | int | 0 | |
| 1 | reduce_all | int | 1 | |
| 2 | coeff | float | 1.f | |
| 3 | axes | array | [ ] | |
| 4 | keepdims | int | 0 | |
Operation type:
- 0 = SUM
- 1 = ASUM
- 2 = SUMSQ
- 3 = MEAN
- 4 = MAX
- 5 = MIN
- 6 = PROD
- 7 = L1
- 8 = L2
- 9 = LogSum
- 10 = LogSumExp
# ReLU
```
if x < 0 y = x * slope
else y = x
```
* one_blob_only
* support_inplace
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | slope | float | 0.f | |
# Reorg
```
if mode == 0 y = space_to_depth(x) where x channel order is sw-sh-outc
if mode == 1 y = space_to_depth(x) where x channel order is outc-sw-sh
```
* one_blob_only
| param id | name | type | default | description |
| --------- | ------------- | ---- | --------- | ----------------- |
| 0 | stride | int | 1 | |
| 1 | mode | int | 0 | |
# Requantize
```
x2 = x * scale_in + bias
x3 = activation(x2)
y = float2int8(x3 * scale_out)
```
* one_blob_only
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | scale_in_data_size| int | 1 | |
| 1 | scale_out_data_size| int | 1 | |
| 2 | bias_data_size| int | 0 | |
| 3 | activation_type| int | 0 | |
| 4 | activation_params| array | [ ] | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| scale_in_data | float | [scale_in_data_size] |
| scale_out_data| float | [scale_out_data_size] |
| bias_data | float | [bias_data_size] |
# Reshape
```
if permute == 1 y = hwc2chw(reshape(chw2hwc(x)))
else y = reshape(x)
```
* one_blob_only
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | w | int | -233 | |
| 1 | h | int | -233 | |
| 11 | d | int | -233 | |
| 2 | c | int | -233 | |
| 3 | permute | int | 0 | |
Reshape flag:
- 0 = copy from bottom
- -1 = remaining
- -233 = drop this dim(default)
# RNN
Apply a single-layer RNN to a feature sequence of `T` timesteps. The input blob shape is `[w=input_size, h=T]` and the output blob shape is `[w=num_output, h=T]`.
```
y = rnn(x)
y0, hidden y1 = rnn(x0, hidden x1)
```
* one_blob_only if bidirectional
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | num_output | int | 0 | hidden size of output |
| 1 | weight_data_size| int | 0 | total size of weight matrix |
| 2 | direction | int | 0 | 0=forward, 1=reverse, 2=bidirectional |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| weight_xc_data| float/fp16/int8 | [input_size, num_output, num_directions] |
| bias_c_data | float/fp16/int8 | [num_output, 1, num_directions] |
| weight_hc_data| float/fp16/int8 | [num_output, num_output, num_directions] |
Direction flag:
- 0 = forward only
- 1 = reverse only
- 2 = bidirectional
# Scale
```
if scale_data_size == -233 y = x0 * x1
else y = x * scale + bias
```
* one_blob_only if scale_data_size != -233
* support_inplace
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | scale_data_size| int | 0 | |
| 1 | bias_term | int | 0 | |
| weight | type | shape |
| ------------- | ----- | --------------------- |
| scale_data | float | [scale_data_size] |
| bias_data | float | [scale_data_size] |
# SELU
```
if x < 0 y = (exp(x) - 1.f) * alpha * lambda
else y = x * lambda
```
* one_blob_only
* support_inplace
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | alpha | float | 1.67326324f| |
| 1 | lambda | float | 1.050700987f| |
# ShuffleChannel
```
if reverse == 0 y = shufflechannel(x) by group
if reverse == 1 y = shufflechannel(x) by channel / group
```
* one_blob_only
| param id | name | type | default | description |
| --------- | ------------- | ---- | --------- | ----------------- |
| 0 | group | int | 1 | |
| 1 | reverse | int | 0 | |
# Sigmoid
```
y = 1 / (1 + exp(-x))
```
* one_blob_only
* support_inplace
# Slice
```
split x along axis into slices, each part slice size is based on slices array
```
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | slices | array | [ ] | |
| 1 | axis | int | 0 | |
# Softmax
```
softmax(x, axis)
```
* one_blob_only
* support_inplace
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | axis | int | 0 | |
| 1 | fixbug0 | int | 0 | hack for bug fix, should be 1 |
# Softplus
```
y = log(exp(x) + 1)
```
* one_blob_only
* support_inplace
# Split
```
y0, y1 ... = x
```
# Swish
```
y = x / (1 + exp(-x))
```
* one_blob_only
* support_inplace
# TanH
```
y = tanh(x)
```
* one_blob_only
* support_inplace
# Threshold
```
if x > threshold y = 1
else y = 0
```
* one_blob_only
* support_inplace
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | threshold | float | 0.f | |
# Tile
```
y = repeat tiles along axis for x
```
* one_blob_only
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | axis | int | 0 | |
| 1 | tiles | int | 1 | |
| 2 | repeats | array | [ ] | |
# UnaryOp
```
y = unaryop(x)
```
- one_blob_only
- support_inplace
| param id | name | type | default | description |
| --------- | ------------- | ----- | --------- | ----------------- |
| 0 | op_type | int | 0 | Operation type as follows |
Operation type:
- 0 = ABS
- 1 = NEG
- 2 = FLOOR
- 3 = CEIL
- 4 = SQUARE
- 5 = SQRT
- 6 = RSQ
- 7 = EXP
- 8 = LOG
- 9 = SIN
- 10 = COS
- 11 = TAN
- 12 = ASIN
- 13 = ACOS
- 14 = ATAN
- 15 = RECIPROCAL
- 16 = TANH
| /rife-ncnn-vulkan-python-1.2.1.tar.gz/rife-ncnn-vulkan-python-1.2.1/rife_ncnn_vulkan_python/rife-ncnn-vulkan/src/ncnn/docs/developer-guide/operators.md | 0.547464 | 0.975225 | operators.md | pypi |
.. figure:: https://github.com/pybind/pybind11/raw/master/docs/pybind11-logo.png
:alt: pybind11 logo
**pybind11 — Seamless operability between C++11 and Python**
|Latest Documentation Status| |Stable Documentation Status| |Gitter chat| |GitHub Discussions| |CI| |Build status|
|Repology| |PyPI package| |Conda-forge| |Python Versions|
`Setuptools example <https://github.com/pybind/python_example>`_
• `Scikit-build example <https://github.com/pybind/scikit_build_example>`_
• `CMake example <https://github.com/pybind/cmake_example>`_
.. start
**pybind11** is a lightweight header-only library that exposes C++ types
in Python and vice versa, mainly to create Python bindings of existing
C++ code. Its goals and syntax are similar to the excellent
`Boost.Python <http://www.boost.org/doc/libs/1_58_0/libs/python/doc/>`_
library by David Abrahams: to minimize boilerplate code in traditional
extension modules by inferring type information using compile-time
introspection.
The main issue with Boost.Python—and the reason for creating such a
similar project—is Boost. Boost is an enormously large and complex suite
of utility libraries that works with almost every C++ compiler in
existence. This compatibility has its cost: arcane template tricks and
workarounds are necessary to support the oldest and buggiest of compiler
specimens. Now that C++11-compatible compilers are widely available,
this heavy machinery has become an excessively large and unnecessary
dependency.
Think of this library as a tiny self-contained version of Boost.Python
with everything stripped away that isn’t relevant for binding
generation. Without comments, the core header files only require ~4K
lines of code and depend on Python (2.7 or 3.5+, or PyPy) and the C++
standard library. This compact implementation was possible thanks to
some of the new C++11 language features (specifically: tuples, lambda
functions and variadic templates). Since its creation, this library has
grown beyond Boost.Python in many ways, leading to dramatically simpler
binding code in many common situations.
Tutorial and reference documentation is provided at
`pybind11.readthedocs.io <https://pybind11.readthedocs.io/en/latest>`_.
A PDF version of the manual is available
`here <https://pybind11.readthedocs.io/_/downloads/en/latest/pdf/>`_.
And the source code is always available at
`github.com/pybind/pybind11 <https://github.com/pybind/pybind11>`_.
Core features
-------------
pybind11 can map the following core C++ features to Python:
- Functions accepting and returning custom data structures per value,
reference, or pointer
- Instance methods and static methods
- Overloaded functions
- Instance attributes and static attributes
- Arbitrary exception types
- Enumerations
- Callbacks
- Iterators and ranges
- Custom operators
- Single and multiple inheritance
- STL data structures
- Smart pointers with reference counting like ``std::shared_ptr``
- Internal references with correct reference counting
- C++ classes with virtual (and pure virtual) methods can be extended
in Python
Goodies
-------
In addition to the core functionality, pybind11 provides some extra
goodies:
- Python 2.7, 3.5+, and PyPy/PyPy3 7.3 are supported with an
implementation-agnostic interface.
- It is possible to bind C++11 lambda functions with captured
variables. The lambda capture data is stored inside the resulting
Python function object.
- pybind11 uses C++11 move constructors and move assignment operators
whenever possible to efficiently transfer custom data types.
- It’s easy to expose the internal storage of custom data types through
Pythons’ buffer protocols. This is handy e.g. for fast conversion
between C++ matrix classes like Eigen and NumPy without expensive
copy operations.
- pybind11 can automatically vectorize functions so that they are
transparently applied to all entries of one or more NumPy array
arguments.
- Python's slice-based access and assignment operations can be
supported with just a few lines of code.
- Everything is contained in just a few header files; there is no need
to link against any additional libraries.
- Binaries are generally smaller by a factor of at least 2 compared to
equivalent bindings generated by Boost.Python. A recent pybind11
conversion of PyRosetta, an enormous Boost.Python binding project,
`reported <https://graylab.jhu.edu/Sergey/2016.RosettaCon/PyRosetta-4.pdf>`_
a binary size reduction of **5.4x** and compile time reduction by
**5.8x**.
- Function signatures are precomputed at compile time (using
``constexpr``), leading to smaller binaries.
- With little extra effort, C++ types can be pickled and unpickled
similar to regular Python objects.
Supported compilers
-------------------
1. Clang/LLVM 3.3 or newer (for Apple Xcode’s clang, this is 5.0.0 or
newer)
2. GCC 4.8 or newer
3. Microsoft Visual Studio 2015 Update 3 or newer
4. Intel classic C++ compiler 18 or newer (ICC 20.2 tested in CI)
5. Cygwin/GCC (previously tested on 2.5.1)
6. NVCC (CUDA 11.0 tested in CI)
7. NVIDIA PGI (20.9 tested in CI)
About
-----
This project was created by `Wenzel
Jakob <http://rgl.epfl.ch/people/wjakob>`_. Significant features and/or
improvements to the code were contributed by Jonas Adler, Lori A. Burns,
Sylvain Corlay, Eric Cousineau, Aaron Gokaslan, Ralf Grosse-Kunstleve, Trent Houliston, Axel
Huebl, @hulucc, Yannick Jadoul, Sergey Lyskov, Johan Mabille, Tomasz Miąsko,
Dean Moldovan, Ben Pritchard, Jason Rhinelander, Boris Schäling, Pim
Schellart, Henry Schreiner, Ivan Smirnov, Boris Staletic, and Patrick Stewart.
We thank Google for a generous financial contribution to the continuous
integration infrastructure used by this project.
Contributing
~~~~~~~~~~~~
See the `contributing
guide <https://github.com/pybind/pybind11/blob/master/.github/CONTRIBUTING.md>`_
for information on building and contributing to pybind11.
License
~~~~~~~
pybind11 is provided under a BSD-style license that can be found in the
`LICENSE <https://github.com/pybind/pybind11/blob/master/LICENSE>`_
file. By using, distributing, or contributing to this project, you agree
to the terms and conditions of this license.
.. |Latest Documentation Status| image:: https://readthedocs.org/projects/pybind11/badge?version=latest
:target: http://pybind11.readthedocs.org/en/latest
.. |Stable Documentation Status| image:: https://img.shields.io/badge/docs-stable-blue.svg
:target: http://pybind11.readthedocs.org/en/stable
.. |Gitter chat| image:: https://img.shields.io/gitter/room/gitterHQ/gitter.svg
:target: https://gitter.im/pybind/Lobby
.. |CI| image:: https://github.com/pybind/pybind11/workflows/CI/badge.svg
:target: https://github.com/pybind/pybind11/actions
.. |Build status| image:: https://ci.appveyor.com/api/projects/status/riaj54pn4h08xy40?svg=true
:target: https://ci.appveyor.com/project/wjakob/pybind11
.. |PyPI package| image:: https://img.shields.io/pypi/v/pybind11.svg
:target: https://pypi.org/project/pybind11/
.. |Conda-forge| image:: https://img.shields.io/conda/vn/conda-forge/pybind11.svg
:target: https://github.com/conda-forge/pybind11-feedstock
.. |Repology| image:: https://repology.org/badge/latest-versions/python:pybind11.svg
:target: https://repology.org/project/python:pybind11/versions
.. |Python Versions| image:: https://img.shields.io/pypi/pyversions/pybind11.svg
:target: https://pypi.org/project/pybind11/
.. |GitHub Discussions| image:: https://img.shields.io/static/v1?label=Discussions&message=Ask&color=blue&logo=github
:target: https://github.com/pybind/pybind11/discussions
| /rife-ncnn-vulkan-python-1.2.1.tar.gz/rife-ncnn-vulkan-python-1.2.1/rife_ncnn_vulkan_python/rife-ncnn-vulkan/src/ncnn/python/pybind11/README.rst | 0.922478 | 0.822011 | README.rst | pypi |
import numpy as np
def xywh2xyxy(x):
    """Convert nx4 boxes from center form [x, y, w, h] to corner form
    [x1, y1, x2, y2], where xy1 is the top-left and xy2 the bottom-right."""
    out = np.zeros_like(x)
    half_w = x[:, 2] / 2
    half_h = x[:, 3] / 2
    out[:, 0] = x[:, 0] - half_w  # top left x
    out[:, 1] = x[:, 1] - half_h  # top left y
    out[:, 2] = x[:, 0] + half_w  # bottom right x
    out[:, 3] = x[:, 1] + half_h  # bottom right y
    return out
def xyxy2xywh(x):
    """Convert nx4 boxes from corner form [x1, y1, x2, y2] to center form
    [x, y, w, h], where xy1 is the top-left and xy2 the bottom-right."""
    out = np.zeros_like(x)
    x1, y1, x2, y2 = x[:, 0], x[:, 1], x[:, 2], x[:, 3]
    out[:, 0] = (x1 + x2) / 2  # x center
    out[:, 1] = (y1 + y2) / 2  # y center
    out[:, 2] = x2 - x1  # width
    out[:, 3] = y2 - y1  # height
    return out
def make_grid(nx=20, ny=20):
    """Build a (1, ny, nx, 2) float32 grid whose last axis holds the
    (x, y) integer coordinates of each cell."""
    xs, ys = np.meshgrid(np.arange(nx), np.arange(ny))
    grid = np.stack((xs, ys), axis=2)
    return grid.reshape((1, ny, nx, 2)).astype(np.float32)
def sigmoid(x):
    """Element-wise logistic sigmoid: 1 / (1 + e^-x)."""
    return np.reciprocal(1.0 + np.exp(-x))
def softmax(x):
    """Softmax over the last axis of *x*.

    The values are shifted by their row maximum before exponentiation for
    numerical stability.  Unlike the previous in-place version, this does
    not mutate the caller's array and supports any number of leading
    dimensions (the old reshape hard-coded 2-D input).

    Args:
        x: array-like of scores; softmax is taken over the last axis.
    Returns:
        float64 ndarray of the same shape, rows summing to 1.
    """
    x = np.asarray(x, dtype=np.float64)
    # Subtract the per-row max so np.exp cannot overflow.
    shifted = x - np.max(x, axis=-1, keepdims=True)
    exp = np.exp(shifted)
    return exp / np.sum(exp, axis=-1, keepdims=True)
def iou_of(boxes0, boxes1, eps=1e-5):
    """Return intersection-over-union (Jaccard index) of boxes.

    Args:
        boxes0 (N, 4): ground truth boxes in corner form.
        boxes1 (N or 1, 4): predicted boxes in corner form.
        eps: a small number to avoid 0 as denominator.
    Returns:
        iou (N): IoU values.
    """
    inter_lt = np.maximum(boxes0[..., :2], boxes1[..., :2])
    inter_rb = np.minimum(boxes0[..., 2:], boxes1[..., 2:])
    inter = area_of(inter_lt, inter_rb)
    union = (
        area_of(boxes0[..., :2], boxes0[..., 2:])
        + area_of(boxes1[..., :2], boxes1[..., 2:])
        - inter
    )
    return inter / (union + eps)
def area_of(left_top, right_bottom):
    """Compute the areas of rectangles given two corners.

    Degenerate boxes (right_bottom <= left_top on either axis) get area 0.

    Args:
        left_top (N, 2): left top corner.
        right_bottom (N, 2): right bottom corner.
    Returns:
        area (N): return the area.
    """
    extent = np.clip(right_bottom - left_top, a_min=0.0, a_max=None)
    return extent[..., 0] * extent[..., 1]
def nms(boxes, scores, iou_threshold, top_k=-1, candidate_size=200):
    """Greedy non-maximum suppression.

    Args:
        boxes (N, 4): boxes in corner form (x1, y1, x2, y2).
        scores (N,): confidence score per box.
        iou_threshold: IoU above which a lower-scoring box is suppressed.
        top_k: keep at most this many results; k <= 0 keeps all.
        candidate_size: only consider the candidates with the highest scores.
    Returns:
        picked: a list of indexes of the kept boxes.
    """
    picked = []
    # Ascending score order, truncated to the best `candidate_size` entries;
    # the current best candidate is always at the end.
    order = np.argsort(scores)[-candidate_size:]
    while len(order) > 0:
        best = order[-1]
        picked.append(best)
        if 0 < top_k == len(picked) or len(order) == 1:
            break
        best_box = boxes[best, :]
        order = order[:-1]
        ious = iou_of(
            boxes[order, :],
            np.expand_dims(best_box, axis=0),
        )
        # Drop everything overlapping the winner too much.
        order = order[ious <= iou_threshold]
    return picked
import os
import hashlib
import requests
from tqdm import tqdm
def check_sha1(filename, sha1_hash):
    """Check whether the sha1 hash of the file content matches the expected hash.

    Only the common prefix of the two hex digests is compared, so a
    truncated expected hash is accepted.

    Parameters
    ----------
    filename : str
        Path to the file.
    sha1_hash : str
        Expected sha1 hash in hexadecimal digits (may be truncated).

    Returns
    -------
    bool
        Whether the file content matches the expected hash.
    """
    hasher = hashlib.sha1()
    with open(filename, "rb") as f:
        # Hash incrementally in 1 MiB chunks to bound memory usage.
        for chunk in iter(lambda: f.read(1048576), b""):
            hasher.update(chunk)
    digest = hasher.hexdigest()
    prefix_len = min(len(digest), len(sha1_hash))
    return digest[:prefix_len] == sha1_hash[:prefix_len]
def download(url, path=None, overwrite=False, sha1_hash=None):
    """Download a given URL.

    Parameters
    ----------
    url : str
        URL to download
    path : str, optional
        Destination path to store downloaded file. By default stores to the
        current directory with same name as in url.
    overwrite : bool, optional
        Whether to overwrite destination file if already exists.
    sha1_hash : str, optional
        Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified
        but doesn't match.

    Returns
    -------
    str
        The file path of the downloaded file.
    """
    if path is None:
        # No destination given: save into the current directory under the
        # file name taken from the URL.
        fname = url.split("/")[-1]
    else:
        path = os.path.expanduser(path)
        if os.path.isdir(path):
            # A directory was given: keep the file name from the URL.
            fname = os.path.join(path, url.split("/")[-1])
        else:
            fname = path
    # Download only when forced, when the file is missing, or when the
    # existing file fails the requested hash check.
    if (
        overwrite
        or not os.path.exists(fname)
        or (sha1_hash and not check_sha1(fname, sha1_hash))
    ):
        dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        print("Downloading %s from %s..." % (fname, url))
        # Stream the response so large model files are never fully buffered
        # in memory.
        r = requests.get(url, stream=True)
        if r.status_code != 200:
            raise RuntimeError("Failed downloading url %s" % url)
        total_length = r.headers.get("content-length")
        with open(fname, "wb") as f:
            if total_length is None:  # no content length header
                for chunk in r.iter_content(chunk_size=1024):
                    if chunk:  # filter out keep-alive new chunks
                        f.write(chunk)
            else:
                # Known size: show a progress bar in 1 KB units.
                total_length = int(total_length)
                for chunk in tqdm(
                    r.iter_content(chunk_size=1024),
                    total=int(total_length / 1024.0 + 0.5),
                    unit="KB",
                    unit_scale=False,
                    dynamic_ncols=True,
                ):
                    f.write(chunk)
        if sha1_hash and not check_sha1(fname, sha1_hash):
            # NOTE(review): raising UserWarning (a Warning subclass) as an
            # exception is unusual; callers likely expect an error type here.
            raise UserWarning(
                "File {} is downloaded but the content hash does not match. "
                "The repo may be outdated or download may be incomplete. "
                'If the "repo_url" is overridden, consider switching to '
                "the default repo.".format(fname)
            )
    return fname
from math import sqrt
import numpy as np
import cv2
import ncnn
from .model_store import get_model_file
from ..utils.objects import Detect_Object
from ..utils.functional import sigmoid, nms
class Yolact:
    """YOLACT instance segmentation driven by ncnn.

    Loads the converted ``yolact`` model (550x550 input) and exposes
    ``__call__(img) -> (boxes, masks, classes, scores)`` for a BGR image.
    """

    def __init__(
        self,
        target_size=550,
        confidence_threshold=0.05,
        nms_threshold=0.5,
        keep_top_k=200,
        num_threads=1,
        use_gpu=False,
    ):
        self.target_size = target_size
        self.confidence_threshold = confidence_threshold
        self.nms_threshold = nms_threshold
        self.keep_top_k = keep_top_k
        self.num_threads = num_threads
        self.use_gpu = use_gpu
        # Per-channel mean / inverse-std normalization constants.
        self.mean_vals = [123.68, 116.78, 103.94]
        self.norm_vals = [1.0 / 58.40, 1.0 / 57.12, 1.0 / 57.38]
        self.net = ncnn.Net()
        self.net.opt.use_vulkan_compute = self.use_gpu
        self.net.opt.num_threads = self.num_threads
        # original model converted from https://github.com/dbolya/yolact
        # yolact_resnet50_54_800000.pth
        # the ncnn model https://github.com/nihui/ncnn-assets/tree/master/models
        self.net.load_param(get_model_file("yolact.param"))
        self.net.load_model(get_model_file("yolact.bin"))
        # Feature-map widths/heights and anchor scales for the 5 prior levels.
        self.conv_ws = [69, 35, 18, 9, 5]
        self.conv_hs = [69, 35, 18, 9, 5]
        self.aspect_ratios = [1, 0.5, 2]
        self.scales = [24, 48, 96, 192, 384]
        self.priors = None
        self.last_img_size = None
        self.make_priors()
        # COCO class names; index 0 is the background class.
        self.class_names = [
            "background",
            "person",
            "bicycle",
            "car",
            "motorcycle",
            "airplane",
            "bus",
            "train",
            "truck",
            "boat",
            "traffic light",
            "fire hydrant",
            "stop sign",
            "parking meter",
            "bench",
            "bird",
            "cat",
            "dog",
            "horse",
            "sheep",
            "cow",
            "elephant",
            "bear",
            "zebra",
            "giraffe",
            "backpack",
            "umbrella",
            "handbag",
            "tie",
            "suitcase",
            "frisbee",
            "skis",
            "snowboard",
            "sports ball",
            "kite",
            "baseball bat",
            "baseball glove",
            "skateboard",
            "surfboard",
            "tennis racket",
            "bottle",
            "wine glass",
            "cup",
            "fork",
            "knife",
            "spoon",
            "bowl",
            "banana",
            "apple",
            "sandwich",
            "orange",
            "broccoli",
            "carrot",
            "hot dog",
            "pizza",
            "donut",
            "cake",
            "chair",
            "couch",
            "potted plant",
            "bed",
            "dining table",
            "toilet",
            "tv",
            "laptop",
            "mouse",
            "remote",
            "keyboard",
            "cell phone",
            "microwave",
            "oven",
            "toaster",
            "sink",
            "refrigerator",
            "book",
            "clock",
            "vase",
            "scissors",
            "teddy bear",
            "hair drier",
            "toothbrush",
        ]

    def __del__(self):
        # Drop the reference so the ncnn network is released promptly.
        self.net = None

    def __call__(self, img):
        """Run detection + mask generation on a BGR image (HxWxC numpy array).

        Returns (boxes, masks, classes, scores); masks are boolean arrays
        at full input-image resolution.
        """
        img_h = img.shape[0]
        img_w = img.shape[1]
        mat_in = ncnn.Mat.from_pixels_resize(
            img,
            ncnn.Mat.PixelType.PIXEL_BGR2RGB,
            img_w,
            img_h,
            self.target_size,
            self.target_size,
        )
        mat_in.substract_mean_normalize(self.mean_vals, self.norm_vals)
        ex = self.net.create_extractor()
        ex.input("input.1", mat_in)
        # Four output heads: mask prototypes, box regressions, mask
        # coefficients and class confidences.
        ret1, proto_data = ex.extract("619")  # 138x138 x 32
        ret2, loc_data = ex.extract("816")  # 4 x 19248
        ret3, mask_data = ex.extract("818")  # maskdim 32 x 19248
        ret4, conf_data = ex.extract("820")  # 81 x 19248
        proto_data = np.array(proto_data)
        loc_data = np.array(loc_data)
        mask_data = np.array(mask_data)
        conf_data = np.array(conf_data)
        prior_data = self.make_priors()
        # decoded_boxes = self.decode(loc_data, prior_data)
        boxes, masks, classes, scores = self.detect(
            conf_data, loc_data, prior_data, mask_data, img_w, img_h
        )
        # generate mask: linearly combine prototype masks with each
        # detection's coefficients, then squash to [0, 1].
        masks = proto_data.transpose(1, 2, 0) @ masks.T
        masks = sigmoid(masks)
        # Scale masks up to the full image
        masks = cv2.resize(masks, (img_w, img_h), interpolation=cv2.INTER_LINEAR)
        # transpose into the correct output shape [num_dets, proto_h, proto_w]
        masks = masks.transpose(2, 0, 1)
        # Binarize at 0.5.
        masks = masks > 0.5
        return boxes, masks, classes, scores

    def make_priors(self):
        """ Note that priors are [x,y,width,height] where (x,y) is the center of the box. """
        # Priors depend only on the fixed input size, so build them once and
        # cache until the target size changes.
        if self.last_img_size != (self.target_size, self.target_size):
            prior_data = []
            for conv_w, conv_h, scale in zip(self.conv_ws, self.conv_hs, self.scales):
                for i in range(conv_h):
                    for j in range(conv_w):
                        # +0.5 because priors are in center-size notation
                        cx = (j + 0.5) / conv_w
                        cy = (i + 0.5) / conv_h
                        for ar in self.aspect_ratios:
                            ar = sqrt(ar)
                            w = scale * ar / self.target_size
                            h = scale / ar / self.target_size
                            # This is for backward compatibility with a bug where I made everything square by accident
                            h = w
                            prior_data += [cx, cy, w, h]
            self.priors = np.array(prior_data).reshape(-1, 4)
            self.last_img_size = (self.target_size, self.target_size)
        return self.priors

    def decode(self, loc, priors, img_w, img_h):
        """
        Decode predicted bbox coordinates using the same scheme
        employed by Yolov2: https://arxiv.org/pdf/1612.08242.pdf
            b_x = (sigmoid(pred_x) - .5) / conv_w + prior_x
            b_y = (sigmoid(pred_y) - .5) / conv_h + prior_y
            b_w = prior_w * exp(loc_w)
            b_h = prior_h * exp(loc_h)
        Note that loc is inputed as [(s(x)-.5)/conv_w, (s(y)-.5)/conv_h, w, h]
        while priors are inputed as [x, y, w, h] where each coordinate
        is relative to size of the image (even sigmoid(x)). We do this
        in the network by dividing by the 'cell size', which is just
        the size of the convouts.
        Also note that prior_x and prior_y are center coordinates which
        is why we have to subtract .5 from sigmoid(pred_x and pred_y).
        Args:
            - loc:    The predicted bounding boxes of size [num_priors, 4]
            - priors: The priorbox coords with size [num_priors, 4]
        Returns: A tensor of decoded relative coordinates in point form
                 form with size [num_priors, 4(x, y, w, h)]
        """
        variances = [0.1, 0.2]
        boxes = np.concatenate(
            (
                priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
                priors[:, 2:] * np.exp(loc[:, 2:] * variances[1]),
            ),
            1,
        )
        # Convert center coordinates to top-left corner; columns 2:4 stay w/h.
        boxes[:, :2] -= boxes[:, 2:] / 2
        # boxes[:, 2:] += boxes[:, :2]
        # crop
        # NOTE(review): np.where is not in-place and the return values are
        # discarded, so this "crop" to [0, 1] currently has no effect.
        np.where(boxes[:, 0] < 0, 0, boxes[:, 0])
        np.where(boxes[:, 1] < 0, 0, boxes[:, 1])
        np.where(boxes[:, 2] > 1, 1, boxes[:, 2])
        np.where(boxes[:, 3] > 1, 1, boxes[:, 3])
        # decode to img size
        boxes[:, 0] *= img_w
        boxes[:, 1] *= img_h
        boxes[:, 2] = boxes[:, 2] * img_w + 1
        boxes[:, 3] = boxes[:, 3] * img_h + 1
        return boxes

    def detect(self, conf_preds, loc_data, prior_data, mask_data, img_w, img_h):
        """ Perform nms for only the max scoring class that isn't background (class 0) """
        cur_scores = conf_preds[:, 1:]
        num_class = cur_scores.shape[1]
        # Highest-scoring non-background class per prior.
        classes = np.argmax(cur_scores, axis=1)
        conf_scores = cur_scores[range(cur_scores.shape[0]), classes]
        # filte by confidence_threshold
        keep = conf_scores > self.confidence_threshold
        conf_scores = conf_scores[keep]
        classes = classes[keep]
        loc_data = loc_data[keep, :]
        prior_data = prior_data[keep, :]
        masks = mask_data[keep, :]
        # decode x, y, w, h
        boxes = self.decode(loc_data, prior_data, img_w, img_h)
        # nms for every class
        boxes_result = []
        masks_result = []
        classes_result = []
        conf_scores_result = []
        for i in range(num_class):
            where = np.where(classes == i)
            # NOTE(review): np.where returns a tuple, so len(where) is always
            # 1; this was probably meant to be len(where[0]) == 0.
            if len(where) == 0:
                continue
            boxes_tmp = boxes[where]
            masks_tmp = masks[where]
            classes_tmp = classes[where]
            conf_scores_tmp = conf_scores[where]
            # Redundant with the global threshold filter above, kept as-is.
            score_mask = conf_scores_tmp > self.confidence_threshold
            boxes_tmp = boxes_tmp[score_mask]
            masks_tmp = masks_tmp[score_mask]
            classes_tmp = classes_tmp[score_mask]
            conf_scores_tmp = conf_scores_tmp[score_mask]
            indexes = nms(
                boxes_tmp,
                conf_scores_tmp,
                iou_threshold=self.nms_threshold,
                top_k=self.keep_top_k,
            )
            for index in indexes:
                boxes_result.append(boxes_tmp[index])
                masks_result.append(masks_tmp[index])
                # +1 to restore the background offset stripped above.
                classes_result.append(classes_tmp[index] + 1)
                conf_scores_result.append(conf_scores_tmp[index])
        # keep top k
        # NOTE(review): *_result are plain Python lists; indexing them with a
        # numpy index array would raise TypeError if this branch ever runs.
        if len(conf_scores_result) > self.keep_top_k:
            indexes = np.argsort(conf_scores_result)
            indexes = indexes[: self.keep_top_k]
            boxes_result = boxes_result[indexes]
            masks_result = masks_result[indexes]
            classes_result = classes_result[indexes]
            conf_scores_result = conf_scores_result[indexes]
        return (
            np.array(boxes_result),
            np.array(masks_result),
            np.array(classes_result),
            np.array(conf_scores_result),
        )
import ncnn
from .model_store import get_model_file
from ..utils.objects import Detect_Object
class YoloV4_Base:
    """Common YOLOv4 / YOLOv4-tiny object detector driven by ncnn.

    Subclasses select the model variant and its input resolution.  Calling
    an instance with a BGR image returns a list of ``Detect_Object`` with
    pixel-space rectangles.
    """

    def __init__(self, tiny, target_size, num_threads=1, use_gpu=False):
        """
        Parameters
        ----------
        tiny : bool
            Load the yolov4-tiny model instead of the full yolov4 model.
        target_size : int
            Square resolution the input image is resized to for inference.
        num_threads : int, default 1
            CPU threads used by ncnn.
        use_gpu : bool, default False
            Enable ncnn's Vulkan compute path.
        """
        self.target_size = target_size
        self.num_threads = num_threads
        self.use_gpu = use_gpu

        # Darknet-style preprocessing: no mean subtraction, scale to [0, 1].
        self.mean_vals = []
        self.norm_vals = [1 / 255.0, 1 / 255.0, 1 / 255.0]

        self.net = ncnn.Net()
        self.net.opt.use_vulkan_compute = self.use_gpu
        self.net.opt.num_threads = self.num_threads

        # original pretrained model from https://github.com/AlexeyAB/darknet
        # the ncnn model https://drive.google.com/drive/folders/1YzILvh0SKQPS_lrb33dmGNq7aVTKPWS0?usp=sharing
        # the ncnn model https://github.com/nihui/ncnn-assets/tree/master/models
        if tiny:  # truthiness test instead of the former `tiny == True`
            self.net.load_param(get_model_file("yolov4-tiny-opt.param"))
            self.net.load_model(get_model_file("yolov4-tiny-opt.bin"))
        else:
            self.net.load_param(get_model_file("yolov4-opt.param"))
            self.net.load_model(get_model_file("yolov4-opt.bin"))

        # COCO class names (darknet spellings); index 0 is background.
        self.class_names = [
            "background",
            "person",
            "bicycle",
            "car",
            "motorbike",
            "aeroplane",
            "bus",
            "train",
            "truck",
            "boat",
            "traffic light",
            "fire hydrant",
            "stop sign",
            "parking meter",
            "bench",
            "bird",
            "cat",
            "dog",
            "horse",
            "sheep",
            "cow",
            "elephant",
            "bear",
            "zebra",
            "giraffe",
            "backpack",
            "umbrella",
            "handbag",
            "tie",
            "suitcase",
            "frisbee",
            "skis",
            "snowboard",
            "sports ball",
            "kite",
            "baseball bat",
            "baseball glove",
            "skateboard",
            "surfboard",
            "tennis racket",
            "bottle",
            "wine glass",
            "cup",
            "fork",
            "knife",
            "spoon",
            "bowl",
            "banana",
            "apple",
            "sandwich",
            "orange",
            "broccoli",
            "carrot",
            "hot dog",
            "pizza",
            "donut",
            "cake",
            "chair",
            "sofa",
            "pottedplant",
            "bed",
            "diningtable",
            "toilet",
            "tvmonitor",
            "laptop",
            "mouse",
            "remote",
            "keyboard",
            "cell phone",
            "microwave",
            "oven",
            "toaster",
            "sink",
            "refrigerator",
            "book",
            "clock",
            "vase",
            "scissors",
            "teddy bear",
            "hair drier",
            "toothbrush",
        ]

    def __del__(self):
        # Drop the reference so the ncnn network is released promptly.
        self.net = None

    def __call__(self, img):
        """Detect objects in a BGR image; returns a list of Detect_Object."""
        img_h = img.shape[0]
        img_w = img.shape[1]

        mat_in = ncnn.Mat.from_pixels_resize(
            img,
            ncnn.Mat.PixelType.PIXEL_BGR2RGB,
            img.shape[1],
            img.shape[0],
            self.target_size,
            self.target_size,
        )
        mat_in.substract_mean_normalize(self.mean_vals, self.norm_vals)

        ex = self.net.create_extractor()
        ex.input("data", mat_in)

        ret, mat_out = ex.extract("output")

        objects = []

        # method 1, use ncnn.Mat.row to get the result, no memory copy
        for i in range(mat_out.h):
            values = mat_out.row(i)

            obj = Detect_Object()
            obj.label = values[0]
            obj.prob = values[1]
            # Outputs are normalized corner coordinates; convert to pixel
            # top-left x/y plus width/height.
            obj.rect.x = values[2] * img_w
            obj.rect.y = values[3] * img_h
            obj.rect.w = values[4] * img_w - obj.rect.x
            obj.rect.h = values[5] * img_h - obj.rect.y

            objects.append(obj)

        """
        #method 2, use ncnn.Mat->numpy.array to get the result, no memory copy too
        out = np.array(mat_out)
        for i in range(len(out)):
            values = out[i]
            obj = Detect_Object()
            obj.label = values[0]
            obj.prob = values[1]
            obj.x = values[2] * img_w
            obj.y = values[3] * img_h
            obj.w = values[4] * img_w - obj.x
            obj.h = values[5] * img_h - obj.y
            objects.append(obj)
        """

        return objects
class YoloV4_Tiny(YoloV4_Base):
    """YOLOv4-tiny preset: tiny model variant with 416x416 input."""

    def __init__(self, **kwargs):
        super().__init__(True, 416, **kwargs)
class YoloV4(YoloV4_Base):
    """Full YOLOv4 preset: standard model variant with 608x608 input."""

    def __init__(self, **kwargs):
        super().__init__(False, 608, **kwargs)
import numpy as np
import ncnn
from .model_store import get_model_file
from ..utils.objects import Detect_Object
def clamp(v, lo, hi):
    """Clamp *v* to the range [lo, hi]; *lo* wins when the bounds cross."""
    if v < lo:
        return lo
    if hi < v:
        return hi
    return v
class MobileNetV3_SSDLite:
    """MobileNetV3 SSDLite VOC object detector driven by ncnn.

    Calling an instance with a BGR image returns a list of Detect_Object
    with pixel-space rectangles.
    """

    def __init__(self, target_size=300, num_threads=1, use_gpu=False):
        self.target_size = target_size
        self.num_threads = num_threads
        self.use_gpu = use_gpu
        # Per-channel mean; norm_vals of 1.0 make the scaling step a no-op.
        self.mean_vals = [123.675, 116.28, 103.53]
        self.norm_vals = [1.0, 1.0, 1.0]
        self.net = ncnn.Net()
        self.net.opt.use_vulkan_compute = self.use_gpu
        # converted ncnn model from https://github.com/ujsyehao/mobilenetv3-ssd
        # the ncnn model https://github.com/nihui/ncnn-assets/tree/master/models
        self.net.load_param(get_model_file("mobilenetv3_ssdlite_voc.param"))
        self.net.load_model(get_model_file("mobilenetv3_ssdlite_voc.bin"))
        # Pascal VOC class names; index 0 is the background class.
        self.class_names = [
            "background",
            "aeroplane",
            "bicycle",
            "bird",
            "boat",
            "bottle",
            "bus",
            "car",
            "cat",
            "chair",
            "cow",
            "diningtable",
            "dog",
            "horse",
            "motorbike",
            "person",
            "pottedplant",
            "sheep",
            "sofa",
            "train",
            "tvmonitor",
        ]

    def __del__(self):
        # Drop the reference so the ncnn network is released promptly.
        self.net = None

    def __call__(self, img):
        """Detect objects in a BGR image; returns a list of Detect_Object."""
        img_h = img.shape[0]
        img_w = img.shape[1]
        mat_in = ncnn.Mat.from_pixels_resize(
            img,
            ncnn.Mat.PixelType.PIXEL_BGR2RGB,
            img.shape[1],
            img.shape[0],
            self.target_size,
            self.target_size,
        )
        # Two-step preprocessing: scale first (here 1.0, effectively no-op),
        # then subtract the per-channel mean.
        mat_in.substract_mean_normalize([], self.norm_vals)
        mat_in.substract_mean_normalize(self.mean_vals, [])
        ex = self.net.create_extractor()
        ex.set_light_mode(True)
        ex.set_num_threads(self.num_threads)
        ex.input("input", mat_in)
        ret, mat_out = ex.extract("detection_out")
        objects = []
        # printf("%d %d %d\n", mat_out.w, mat_out.h, mat_out.c)
        # method 1, use ncnn.Mat.row to get the result, no memory copy
        for i in range(mat_out.h):
            values = mat_out.row(i)
            obj = Detect_Object()
            obj.label = values[0]
            obj.prob = values[1]
            # Clamp normalized corners to the valid range, then scale back
            # to source-image pixels.
            x1 = (
                clamp(values[2] * self.target_size, 0.0, float(self.target_size - 1))
                / self.target_size
                * img_w
            )
            y1 = (
                clamp(values[3] * self.target_size, 0.0, float(self.target_size - 1))
                / self.target_size
                * img_h
            )
            x2 = (
                clamp(values[4] * self.target_size, 0.0, float(self.target_size - 1))
                / self.target_size
                * img_w
            )
            y2 = (
                clamp(values[5] * self.target_size, 0.0, float(self.target_size - 1))
                / self.target_size
                * img_h
            )
            # Skip detections with NaN coordinates.
            if np.isnan(x1) or np.isnan(y1) or np.isnan(x2) or np.isnan(y2):
                continue
            obj.rect.x = x1
            obj.rect.y = y1
            obj.rect.w = x2 - x1
            obj.rect.h = y2 - y1
            objects.append(obj)
        """
        #method 2, use ncnn.Mat->numpy.array to get the result, no memory copy too
        out = np.array(mat_out)
        for i in range(len(out)):
            values = out[i]
            obj = Detect_Object()
            obj.label = values[0]
            obj.prob = values[1]
            x1 = clamp(values[2] * self.img_width, 0.0, float(self.img_width - 1)) / self.img_width * img_w
            y1 = clamp(values[3] * self.img_height, 0.0, float(self.img_height - 1)) / self.img_height * img_h
            x2 = clamp(values[4] * self.img_width, 0.0, float(self.img_width - 1)) / self.img_width * img_w
            y2 = clamp(values[5] * self.img_height, 0.0, float(self.img_height - 1)) / self.img_height * img_h
            obj.rect.x = x1
            obj.rect.y = y1
            obj.rect.w = x2 - x1
            obj.rect.h = y2 - y1
            objects.append(obj)
        """
        return objects
import ncnn
from .model_store import get_model_file
from ..utils.objects import Detect_Object
class PeleeNet_SSD:
    """PeleeNet SSD detector (driving-scene classes) driven by ncnn.

    Calling an instance with a BGR image returns ``(objects, resized)``:
    detected objects plus the network's "sigmoid" map resized to the
    input image size (presumably a segmentation map -- confirm with the
    model definition).
    """

    def __init__(self, target_size=304, num_threads=1, use_gpu=False):
        self.target_size = target_size
        self.num_threads = num_threads
        self.use_gpu = use_gpu
        # Per-channel mean and inverse-std normalization constants.
        self.mean_vals = [103.9, 116.7, 123.6]
        self.norm_vals = [0.017, 0.017, 0.017]
        self.net = ncnn.Net()
        self.net.opt.use_vulkan_compute = self.use_gpu
        # model is converted from https://github.com/eric612/MobileNet-YOLO
        # and can be downloaded from https://drive.google.com/open?id=1Wt6jKv13sBRMHgrGAJYlOlRF-o80pC0g
        # the ncnn model https://github.com/nihui/ncnn-assets/tree/master/models
        self.net.load_param(get_model_file("pelee.param"))
        self.net.load_model(get_model_file("pelee.bin"))
        # Driving-scene class names; index 0 is the background class.
        self.class_names = [
            "background",
            "person",
            "rider",
            "car",
            "bus",
            "truck",
            "bike",
            "motor",
            "traffic light",
            "traffic sign",
            "train",
        ]

    def __del__(self):
        # Drop the reference so the ncnn network is released promptly.
        self.net = None

    def __call__(self, img):
        """Run detection on a BGR image; returns (objects, resized_map)."""
        img_h = img.shape[0]
        img_w = img.shape[1]
        mat_in = ncnn.Mat.from_pixels_resize(
            img,
            ncnn.Mat.PixelType.PIXEL_BGR,
            img.shape[1],
            img.shape[0],
            self.target_size,
            self.target_size,
        )
        mat_in.substract_mean_normalize(self.mean_vals, self.norm_vals)
        ex = self.net.create_extractor()
        ex.set_num_threads(self.num_threads)
        ex.input("data", mat_in)
        ret, mat_out = ex.extract("detection_out")
        objects = []
        # printf("%d %d %d\n", mat_out.w, mat_out.h, mat_out.c)
        # method 1, use ncnn.Mat.row to get the result, no memory copy
        for i in range(mat_out.h):
            values = mat_out.row(i)
            obj = Detect_Object()
            obj.label = values[0]
            obj.prob = values[1]
            # Outputs are normalized corners; convert to pixel x/y/w/h.
            obj.rect.x = values[2] * img_w
            obj.rect.y = values[3] * img_h
            obj.rect.w = values[4] * img_w - obj.rect.x
            obj.rect.h = values[5] * img_h - obj.rect.y
            objects.append(obj)
        """
        #method 2, use ncnn.Mat->numpy.array to get the result, no memory copy too
        out = np.array(mat_out)
        for i in range(len(out)):
            values = out[i]
            obj = Detect_Object()
            obj.label = values[0]
            obj.prob = values[1]
            obj.rect.x = values[2] * img_w
            obj.rect.y = values[3] * img_h
            obj.rect.w = values[4] * img_w - obj.rect.x
            obj.rect.h = values[5] * img_h - obj.rect.y
            objects.append(obj)
        """
        # Secondary output: upscale the "sigmoid" blob to input resolution.
        ret, seg_out = ex.extract("sigmoid")
        resized = ncnn.Mat()
        ncnn.resize_bilinear(seg_out, resized, img_w, img_h)
        return objects, resized
"""Model store which provides pretrained models."""
from __future__ import print_function
__all__ = ["get_model_file", "purge"]
import os
import zipfile
import logging
import portalocker
from ..utils import download, check_sha1
# Registry of known model files: maps file name -> expected sha1 hex digest
# of its content.  Used to validate downloads and detect stale cache copies.
_model_sha1 = {
    name: checksum
    for checksum, name in [
        ("4ff279e78cdb0b8bbc9363181df6f094ad46dc36", "mobilenet_yolo.param"),
        ("1528cf08b9823fc01aaebfc932ec8c8d4a3b1613", "mobilenet_yolo.bin"),
        ("3f5b78b0c982f8bdf3a2c3a27e6136d4d2680e96", "mobilenetv2_yolov3.param"),
        ("0705b0f8fe5a77718561b9b7d6ed4f33fcd3d455", "mobilenetv2_yolov3.bin"),
        ("de59186323ebad5650631e12a6cc66b526ec7df4", "yolov4-tiny-opt.param"),
        ("1765c3b251c041dd6ac59d2ec3ddf7b983fe9ee9", "yolov4-tiny-opt.bin"),
        ("e92d3a3a8ac5e6a6c08c433aa2252b0680124328", "yolov4-opt.param"),
        ("69d128b42b70fb790e9d3ccabcf1b6e8cc2859fe", "yolov4-opt.bin"),
        ("6fa8ccc8cabc0f5633ab3c6ffa268e6042b8888f", "yolov5s.param"),
        ("0cbab3664deb090480ea748c1305f6fe850b9ac4", "yolov5s.bin"),
        ("e65bae7052d9e9b9d45e1214a8d1b5fe6f64e8af", "yolact.param"),
        ("9bda99f50b1c14c98c5c6bbc08d4f782eed66548", "yolact.bin"),
        ("3723ce3e312db6a102cff1a5c39dae80e1de658e", "mobilenet_ssd_voc_ncnn.param"),
        ("8e2d2139550dcbee1ce5e200b7697b25aab29656", "mobilenet_ssd_voc_ncnn.bin"),
        ("52c669821dc32ef5b7ab30749fa71a3bc27786b8", "squeezenet_ssd_voc.param"),
        ("347e31d1cbe469259fa8305860a7c24a95039202", "squeezenet_ssd_voc.bin"),
        ("52dab628ecac8137e61ce3aea1a912f9c5a0a638", "mobilenetv2_ssdlite_voc.param"),
        ("9fea06f74f7c60d753cf703ea992f92e50a986d4", "mobilenetv2_ssdlite_voc.bin"),
        ("f36661eff1eda1e36185e7f2f28fc722ad8b66bb", "mobilenetv3_ssdlite_voc.param"),
        ("908f63ca9bff0061a499512664b9c533a0b7f485", "mobilenetv3_ssdlite_voc.bin"),
        ("a63d779a1f789af976bc4e2eae86fdd9b0bb6c2c", "squeezenet_v1.1.param"),
        ("262f0e33e37aeac69021b5a3556664be65fc0aeb", "squeezenet_v1.1.bin"),
        ("3ba57cccd1d4a583f6eb76eae25a2dbda7ce7f74", "ZF_faster_rcnn_final.param"),
        ("1095fbb5f846a1f311b40941add5fef691acaf8d", "ZF_faster_rcnn_final.bin"),
        ("3586ec3d663b1cc8ec8c662768caa9c7fbcf4fdc", "pelee.param"),
        ("2442ad483dc546940271591b86db0d9c8b1c7118", "pelee.bin"),
        ("6cfeda08d5494a1274199089fda77c421be1ecac", "mnet.25-opt.param"),
        ("3ff9a51dc81cdf506a87543dbf752071ffc50b8d", "mnet.25-opt.bin"),
        ("50acebff393c91468a73a7b7c604ef231429d068", "rfcn_end2end.param"),
        ("9a68cd937959b4dda9c5bf9c99181cb0e40f266b", "rfcn_end2end.bin"),
        ("d6b289cda068e9a9d8a171fb909352a05a39a494", "shufflenet_v2_x0.5.param"),
        ("2ccd631d04a1b7e05483cd8a8def76bca7d330a8", "shufflenet_v2_x0.5.bin"),
        ("7c8f8d72c60aab6802985423686b36c61be2f68c", "pose.param"),
        ("7f691540972715298c611a3e595b20c59c2147ce", "pose.bin"),
        ("979d09942881cf1207a93cbfa9853005a434469b", "nanodet_m.param"),
        ("51d868905361e4ba9c45bd12e8a5608e7aadd1bd", "nanodet_m.bin"),
    ]
}
# Model binaries hosted as N '.partNN' pieces on the asset repository;
# after download they are reassembled with merge_file().
_split_model_bins = {
    "ZF_faster_rcnn_final.bin": 3,
    "rfcn_end2end.bin": 2,
    "yolov4-opt.bin": 7,
}
# Default asset repository; overridable via the NCNN_REPO env var.
github_repo_url = "https://github.com/nihui/ncnn-assets/raw/master/models/"
_url_format = "{repo_url}{file_name}"
def merge_file(root, files_in, file_out, remove=True):
    """Concatenate part files into a single output file.

    Parameters
    ----------
    root : str
        Directory that contains the input part files.
    files_in : sequence of str
        Part file names (relative to *root*), concatenated in order.
    file_out : str
        Path of the merged output file.
    remove : bool, default True
        Delete each part file once it has been appended.
    """
    with open(file_out, "wb") as fd_out:
        for file_in in files_in:
            file = os.path.join(root, file_in)
            with open(file, "rb") as fd_in:
                # Copy in 1 MiB chunks instead of reading each whole part
                # into memory (model parts can be hundreds of megabytes).
                for chunk in iter(lambda: fd_in.read(1048576), b""):
                    fd_out.write(chunk)
            if remove:  # truthiness test instead of the former `remove == True`
                os.remove(file)
def short_hash(name):
    """Return the first 8 hex digits of the registered sha1 for *name*.

    Raises
    ------
    ValueError
        If *name* has no entry in the pretrained-model registry.
    """
    checksum = _model_sha1.get(name)
    if checksum is None:
        raise ValueError(
            "Pretrained model for {name} is not available.".format(name=name)
        )
    return checksum[:8]
def get_model_file(name, tag=None, root=os.path.join("~", ".ncnn", "models")):
    r"""Return location for the pretrained on local file system.

    This function will download from online model zoo when model cannot be found or has mismatch.
    The root directory will be created if it doesn't exist.

    Parameters
    ----------
    name : str
        Name of the model.
    tag : str, optional
        Explicit sha1 digest (or prefix) to validate against instead of the
        registry entry; also appended to the cached file name.
    root : str, default '~/.ncnn/models'
        Location for keeping the model parameters.

    Returns
    -------
    file_path
        Path to the requested pretrained model file.
    """
    if "NCNN_HOME" in os.environ:
        # NCNN_HOME overrides the default cache root.
        root = os.path.join(os.environ["NCNN_HOME"], "models")
    use_tag = isinstance(tag, str)
    if use_tag:
        file_name = "{name}-{short_hash}".format(name=name, short_hash=tag)
    else:
        file_name = "{name}".format(name=name)
    root = os.path.expanduser(root)
    params_path = os.path.join(root, file_name)
    lockfile = os.path.join(root, file_name + ".lock")
    if use_tag:
        sha1_hash = tag
    else:
        sha1_hash = _model_sha1[name]
    if not os.path.exists(root):
        os.makedirs(root)
    # Cross-process file lock so concurrent callers do not download the
    # same model on top of each other.
    with portalocker.Lock(
        lockfile, timeout=int(os.environ.get("NCNN_MODEL_LOCK_TIMEOUT", 300))
    ):
        if os.path.exists(params_path):
            if check_sha1(params_path, sha1_hash):
                # Cached copy is valid; use it.
                return params_path
            else:
                logging.warning(
                    "Hash mismatch in the content of model file '%s' detected. "
                    "Downloading again.",
                    params_path,
                )
        else:
            logging.info("Model file not found. Downloading.")
        zip_file_path = os.path.join(root, file_name)
        if file_name in _split_model_bins:
            # Large binaries are hosted as '.partNN' pieces; fetch each part
            # and stitch them back together with merge_file().
            file_name_parts = [
                "%s.part%02d" % (file_name, i + 1)
                for i in range(_split_model_bins[file_name])
            ]
            for file_name_part in file_name_parts:
                file_path = os.path.join(root, file_name_part)
                repo_url = os.environ.get("NCNN_REPO", github_repo_url)
                if repo_url[-1] != "/":
                    repo_url = repo_url + "/"
                download(
                    _url_format.format(repo_url=repo_url, file_name=file_name_part),
                    path=file_path,
                    overwrite=True,
                )
            merge_file(root, file_name_parts, zip_file_path)
        else:
            repo_url = os.environ.get("NCNN_REPO", github_repo_url)
            if repo_url[-1] != "/":
                repo_url = repo_url + "/"
            download(
                _url_format.format(repo_url=repo_url, file_name=file_name),
                path=zip_file_path,
                overwrite=True,
            )
        if zip_file_path.endswith(".zip"):
            # Zipped assets are extracted in place and the archive removed.
            with zipfile.ZipFile(zip_file_path) as zf:
                zf.extractall(root)
            os.remove(zip_file_path)
        # Make sure we write the model file on networked filesystems
        try:
            os.sync()
        except AttributeError:
            pass
        if check_sha1(params_path, sha1_hash):
            return params_path
        else:
            raise ValueError("Downloaded file has different hash. Please try again.")
def purge(root=os.path.join("~", ".ncnn", "models")):
    r"""Purge all pretrained model files in local file store.

    Parameters
    ----------
    root : str, default '~/.ncnn/models'
        Location for keeping the model parameters.
    """
    root = os.path.expanduser(root)
    if not os.path.isdir(root):
        # Nothing has been downloaded yet; nothing to purge (the previous
        # version raised FileNotFoundError here).
        return
    for f in os.listdir(root):
        # ncnn models are stored as '<name>.param' / '<name>.bin'; the old
        # '.params' suffix (inherited from MXNet) never matched anything,
        # so purge() was silently a no-op.
        if f.endswith((".param", ".bin")):
            os.remove(os.path.join(root, f))
import numpy as np
import ncnn
from .model_store import get_model_file
from ..utils.objects import Detect_Object
class Faster_RCNN:
    """Faster R-CNN (ZF backbone) VOC detector driven by ncnn.

    Two-stage pipeline: one extractor pass produces shared conv features and
    region proposals, then each proposal is classified and its box refined.
    """

    def __init__(
        self,
        img_width=600,
        img_height=600,
        num_threads=1,
        use_gpu=False,
        max_per_image=100,
        confidence_thresh=0.05,
        nms_threshold=0.3,
    ):
        self.img_width = img_width
        self.img_height = img_height
        self.num_threads = num_threads
        self.use_gpu = use_gpu
        # Per-channel BGR means; no scaling (empty norm_vals).
        self.mean_vals = [102.9801, 115.9465, 122.7717]
        self.norm_vals = []
        self.net = ncnn.Net()
        self.net.opt.use_vulkan_compute = self.use_gpu
        # original pretrained model from https://github.com/rbgirshick/py-faster-rcnn
        # py-faster-rcnn/models/pascal_voc/ZF/faster_rcnn_alt_opt/faster_rcnn_test.pt
        # https://dl.dropboxusercontent.com/s/o6ii098bu51d139/faster_rcnn_models.tgz?dl=0
        # ZF_faster_rcnn_final.caffemodel
        # the ncnn model https://github.com/nihui/ncnn-assets/tree/master/models
        self.net.load_param(get_model_file("ZF_faster_rcnn_final.param"))
        self.net.load_model(get_model_file("ZF_faster_rcnn_final.bin"))
        self.max_per_image = max_per_image
        self.confidence_thresh = confidence_thresh
        self.nms_threshold = nms_threshold
        # Pascal VOC class names; index 0 is the background class.
        self.class_names = [
            "background",
            "aeroplane",
            "bicycle",
            "bird",
            "boat",
            "bottle",
            "bus",
            "car",
            "cat",
            "chair",
            "cow",
            "diningtable",
            "dog",
            "horse",
            "motorbike",
            "person",
            "pottedplant",
            "sheep",
            "sofa",
            "train",
            "tvmonitor",
        ]

    def __del__(self):
        # Drop the reference so the ncnn network is released promptly.
        self.net = None

    def __call__(self, img):
        """Detect objects in a BGR image; returns a list of Detect_Object
        sorted by descending confidence (at most max_per_image entries)."""
        # scale to target detect size: resize the shorter side to the
        # configured dimension, keeping the aspect ratio.
        h = img.shape[0]
        w = img.shape[1]
        scale = 1.0
        if w < h:
            scale = float(self.img_width) / w
            w = self.img_width
            h = int(h * scale)
        else:
            scale = float(self.img_height) / h
            h = self.img_height
            w = int(w * scale)
        mat_in = ncnn.Mat.from_pixels_resize(
            img, ncnn.Mat.PixelType.PIXEL_BGR, img.shape[1], img.shape[0], w, h
        )
        mat_in.substract_mean_normalize(self.mean_vals, self.norm_vals)
        # method 1 use numpy to Mat interface
        # im_info = ncnn.Mat(np.array([h, w, scale], dtype=np.float32))
        # method 2 use ncnn.Mat interface
        im_info = ncnn.Mat(3)
        im_info[0] = h
        im_info[1] = w
        im_info[2] = scale
        # Stage 1: shared conv features + region proposals.
        ex1 = self.net.create_extractor()
        ex1.set_num_threads(self.num_threads)
        ex1.input("data", mat_in)
        ex1.input("im_info", im_info)
        ret1, conv5_relu5 = ex1.extract("conv5_relu5")
        ret2, rois = ex1.extract("rois")
        # Stage 2: classify and refine each proposal independently.
        class_candidates = []
        for i in range(rois.c):
            ex2 = self.net.create_extractor()
            roi = rois.channel(i)  # get single roi
            ex2.input("conv5_relu5", conv5_relu5)
            ex2.input("rois", roi)
            ret1, bbox_pred = ex2.extract("bbox_pred")
            ret2, cls_prob = ex2.extract("cls_prob")
            num_class = cls_prob.w
            while len(class_candidates) < num_class:
                class_candidates.append([])
            # find class id with highest score
            label = 0
            score = 0.0
            for j in range(num_class):
                class_score = cls_prob[j]
                if class_score > score:
                    label = j
                    score = class_score
            # ignore background or low score
            if label == 0 or score <= self.confidence_thresh:
                continue
            # fprintf(stderr, "%d = %f\n", label, score);
            # unscale to image size
            x1 = roi[0] / scale
            y1 = roi[1] / scale
            x2 = roi[2] / scale
            y2 = roi[3] / scale
            pb_w = x2 - x1 + 1
            pb_h = y2 - y1 + 1
            # apply bbox regression: offsets are predicted per class.
            dx = bbox_pred[label * 4]
            dy = bbox_pred[label * 4 + 1]
            dw = bbox_pred[label * 4 + 2]
            dh = bbox_pred[label * 4 + 3]
            cx = x1 + pb_w * 0.5
            cy = y1 + pb_h * 0.5
            obj_cx = cx + pb_w * dx
            obj_cy = cy + pb_h * dy
            obj_w = pb_w * np.exp(dw)
            obj_h = pb_h * np.exp(dh)
            obj_x1 = obj_cx - obj_w * 0.5
            obj_y1 = obj_cy - obj_h * 0.5
            obj_x2 = obj_cx + obj_w * 0.5
            obj_y2 = obj_cy + obj_h * 0.5
            # clip to the original image bounds
            obj_x1 = np.maximum(np.minimum(obj_x1, float(img.shape[1] - 1)), 0.0)
            obj_y1 = np.maximum(np.minimum(obj_y1, float(img.shape[0] - 1)), 0.0)
            obj_x2 = np.maximum(np.minimum(obj_x2, float(img.shape[1] - 1)), 0.0)
            obj_y2 = np.maximum(np.minimum(obj_y2, float(img.shape[0] - 1)), 0.0)
            # append object
            obj = Detect_Object()
            obj.rect.x = obj_x1
            obj.rect.y = obj_y1
            obj.rect.w = obj_x2 - obj_x1 + 1
            obj.rect.h = obj_y2 - obj_y1 + 1
            obj.label = label
            obj.prob = score
            class_candidates[label].append(obj)
        # post process: per-class NMS, then keep the best max_per_image.
        objects = []
        for candidates in class_candidates:
            if len(candidates) == 0:
                continue
            candidates.sort(key=lambda obj: obj.prob, reverse=True)
            picked = self.nms_sorted_bboxes(candidates, self.nms_threshold)
            for j in range(len(picked)):
                z = picked[j]
                objects.append(candidates[z])
        objects.sort(key=lambda obj: obj.prob, reverse=True)
        objects = objects[: self.max_per_image]
        return objects

    def nms_sorted_bboxes(self, objects, nms_threshold):
        """Greedy NMS over *objects* (must be sorted by descending prob);
        returns the indexes of the kept boxes."""
        picked = []
        n = len(objects)
        areas = np.zeros((n,), dtype=np.float32)
        for i in range(n):
            areas[i] = objects[i].rect.area()
        for i in range(n):
            a = objects[i]
            keep = True
            for j in range(len(picked)):
                b = objects[picked[j]]
                # intersection over union
                inter_area = a.rect.intersection_area(b.rect)
                union_area = areas[i] + areas[picked[j]] - inter_area
                # float IoU = inter_area / union_area
                if inter_area / union_area > nms_threshold:
                    keep = False
            if keep:
                picked.append(i)
        return picked
import ncnn
from .model_store import get_model_file
from ..utils.objects import Detect_Object
class MobileNet_SSD:
    """MobileNet-SSD VOC object detector (20 classes + background) on ncnn.

    Runs a fixed-size single-shot detector and maps the normalized output
    boxes back to pixel coordinates of the input image.
    """

    def __init__(self, target_size=300, num_threads=1, use_gpu=False):
        self.target_size = target_size
        self.num_threads = num_threads
        self.use_gpu = use_gpu

        # Caffe-style preprocessing: (pixel - mean) * norm.
        self.mean_vals = [127.5, 127.5, 127.5]
        self.norm_vals = [0.007843, 0.007843, 0.007843]

        self.net = ncnn.Net()
        self.net.opt.use_vulkan_compute = self.use_gpu

        # model is converted from https://github.com/chuanqi305/MobileNet-SSD
        # and can be downloaded from https://drive.google.com/open?id=0ByaKLD9QaPtucWk0Y0dha1VVY0U
        # the ncnn model https://github.com/nihui/ncnn-assets/tree/master/models
        self.net.load_param(get_model_file("mobilenet_ssd_voc_ncnn.param"))
        self.net.load_model(get_model_file("mobilenet_ssd_voc_ncnn.bin"))

        self.class_names = [
            "background",
            "aeroplane",
            "bicycle",
            "bird",
            "boat",
            "bottle",
            "bus",
            "car",
            "cat",
            "chair",
            "cow",
            "diningtable",
            "dog",
            "horse",
            "motorbike",
            "person",
            "pottedplant",
            "sheep",
            "sofa",
            "train",
            "tvmonitor",
        ]

    def __del__(self):
        # Drop the reference so ncnn can release its resources.
        self.net = None

    def __call__(self, img):
        """Detect objects in a BGR uint8 image; returns a list of Detect_Object."""
        height, width = img.shape[0], img.shape[1]

        mat_in = ncnn.Mat.from_pixels_resize(
            img,
            ncnn.Mat.PixelType.PIXEL_BGR,
            width,
            height,
            self.target_size,
            self.target_size,
        )
        mat_in.substract_mean_normalize(self.mean_vals, self.norm_vals)

        ex = self.net.create_extractor()
        ex.set_num_threads(self.num_threads)
        ex.input("data", mat_in)

        _, mat_out = ex.extract("detection_out")

        # Each output row is [label, prob, x0, y0, x1, y1] with coordinates
        # normalized to [0, 1]; ncnn.Mat.row avoids any memory copy.
        detections = []
        for row_idx in range(mat_out.h):
            row = mat_out.row(row_idx)
            det = Detect_Object()
            det.label = row[0]
            det.prob = row[1]
            det.rect.x = row[2] * width
            det.rect.y = row[3] * height
            det.rect.w = row[4] * width - det.rect.x
            det.rect.h = row[5] * height - det.rect.y
            detections.append(det)

        return detections
import ncnn
from .model_store import get_model_file
from ..utils.objects import Detect_Object
class SqueezeNet_SSD:
    """SqueezeNet-SSD VOC object detector (20 classes + background) on ncnn.

    Runs a fixed-size single-shot detector and maps the normalized output
    boxes back to pixel coordinates of the input image.
    """

    def __init__(self, target_size=300, num_threads=1, use_gpu=False):
        self.target_size = target_size
        self.num_threads = num_threads
        self.use_gpu = use_gpu

        # Caffe BGR pixel means; no scaling (norm_vals empty).
        self.mean_vals = [104.0, 117.0, 123.0]
        self.norm_vals = []

        self.net = ncnn.Net()
        self.net.opt.use_vulkan_compute = self.use_gpu

        # original pretrained model from https://github.com/chuanqi305/SqueezeNet-SSD
        # squeezenet_ssd_voc_deploy.prototxt
        # https://drive.google.com/open?id=0B3gersZ2cHIxdGpyZlZnbEQ5Snc
        # the ncnn model https://github.com/nihui/ncnn-assets/tree/master/models
        self.net.load_param(get_model_file("squeezenet_ssd_voc.param"))
        self.net.load_model(get_model_file("squeezenet_ssd_voc.bin"))

        self.class_names = [
            "background",
            "aeroplane",
            "bicycle",
            "bird",
            "boat",
            "bottle",
            "bus",
            "car",
            "cat",
            "chair",
            "cow",
            "diningtable",
            "dog",
            "horse",
            "motorbike",
            "person",
            "pottedplant",
            "sheep",
            "sofa",
            "train",
            "tvmonitor",
        ]

    def __del__(self):
        # Drop the reference so ncnn can release its resources.
        self.net = None

    def __call__(self, img):
        """Detect objects in a BGR uint8 image; returns a list of Detect_Object."""
        height, width = img.shape[0], img.shape[1]

        mat_in = ncnn.Mat.from_pixels_resize(
            img,
            ncnn.Mat.PixelType.PIXEL_BGR,
            width,
            height,
            self.target_size,
            self.target_size,
        )
        mat_in.substract_mean_normalize(self.mean_vals, self.norm_vals)

        ex = self.net.create_extractor()
        ex.set_num_threads(self.num_threads)
        ex.input("data", mat_in)

        _, mat_out = ex.extract("detection_out")

        # Each output row is [label, prob, x0, y0, x1, y1] with coordinates
        # normalized to [0, 1]; ncnn.Mat.row avoids any memory copy.
        detections = []
        for row_idx in range(mat_out.h):
            row = mat_out.row(row_idx)
            det = Detect_Object()
            det.label = row[0]
            det.prob = row[1]
            det.rect.x = row[2] * width
            det.rect.y = row[3] * height
            det.rect.w = row[4] * width - det.rect.x
            det.rect.h = row[5] * height - det.rect.y
            detections.append(det)

        return detections
import ncnn
from .model_store import get_model_file
from ..utils.objects import Detect_Object
class MobileNet_YoloV2:
    """MobileNet-YOLOv2 VOC object detector (20 classes + background) on ncnn."""

    def __init__(self, target_size=416, num_threads=1, use_gpu=False):
        self.target_size = target_size
        self.num_threads = num_threads
        self.use_gpu = use_gpu

        # Normalization happens in two passes in __call__: scale first,
        # then subtract the (post-scale) mean.
        self.mean_vals = [1.0, 1.0, 1.0]
        self.norm_vals = [0.007843, 0.007843, 0.007843]

        self.net = ncnn.Net()
        self.net.opt.use_vulkan_compute = self.use_gpu

        # original pretrained model from https://github.com/eric612/MobileNet-YOLO
        # https://github.com/eric612/MobileNet-YOLO/blob/master/models/yolov2/mobilenet_yolo_deploy.prototxt
        # https://github.com/eric612/MobileNet-YOLO/blob/master/models/yolov2/mobilenet_yolo_deploy_iter_80000.caffemodel
        # the ncnn model https://github.com/nihui/ncnn-assets/tree/master/models
        self.net.load_param(get_model_file("mobilenet_yolo.param"))
        self.net.load_model(get_model_file("mobilenet_yolo.bin"))

        self.class_names = [
            "background",
            "aeroplane",
            "bicycle",
            "bird",
            "boat",
            "bottle",
            "bus",
            "car",
            "cat",
            "chair",
            "cow",
            "diningtable",
            "dog",
            "horse",
            "motorbike",
            "person",
            "pottedplant",
            "sheep",
            "sofa",
            "train",
            "tvmonitor",
        ]

    def __del__(self):
        # Drop the reference so ncnn can release its resources.
        self.net = None

    def __call__(self, img):
        """Detect objects in a BGR uint8 image; returns a list of Detect_Object."""
        height, width = img.shape[0], img.shape[1]

        mat_in = ncnn.Mat.from_pixels_resize(
            img,
            ncnn.Mat.PixelType.PIXEL_BGR,
            width,
            height,
            self.target_size,
            self.target_size,
        )
        # Scale pixels first, then subtract the mean (two-pass; order matters).
        mat_in.substract_mean_normalize([], self.norm_vals)
        mat_in.substract_mean_normalize(self.mean_vals, [])

        ex = self.net.create_extractor()
        ex.set_num_threads(self.num_threads)
        ex.input("data", mat_in)

        _, mat_out = ex.extract("detection_out")

        # Each output row is [label, prob, x0, y0, x1, y1] with coordinates
        # normalized to [0, 1]; ncnn.Mat.row avoids any memory copy.
        detections = []
        for row_idx in range(mat_out.h):
            row = mat_out.row(row_idx)
            det = Detect_Object()
            det.label = row[0]
            det.prob = row[1]
            det.rect.x = row[2] * width
            det.rect.y = row[3] * height
            det.rect.w = row[4] * width - det.rect.x
            det.rect.h = row[5] * height - det.rect.y
            detections.append(det)

        return detections
import ncnn
from .model_store import get_model_file
from ..utils.objects import Detect_Object
class MobileNetV2_YoloV3:
    """MobileNetV2-YOLOv3 VOC object detector (20 classes + background) on ncnn."""

    def __init__(self, target_size=352, num_threads=1, use_gpu=False):
        self.target_size = target_size
        self.num_threads = num_threads
        self.use_gpu = use_gpu

        # Caffe-style preprocessing: (pixel - mean) * norm.
        self.mean_vals = [127.5, 127.5, 127.5]
        self.norm_vals = [0.007843, 0.007843, 0.007843]

        self.net = ncnn.Net()
        self.net.opt.use_vulkan_compute = self.use_gpu

        # original pretrained model from https://github.com/eric612/MobileNet-YOLO
        # param : https://drive.google.com/open?id=1V9oKHP6G6XvXZqhZbzNKL6FI_clRWdC-
        # bin : https://drive.google.com/open?id=1DBcuFCr-856z3FRQznWL_S5h-Aj3RawA
        # the ncnn model https://github.com/nihui/ncnn-assets/tree/master/models
        self.net.load_param(get_model_file("mobilenetv2_yolov3.param"))
        self.net.load_model(get_model_file("mobilenetv2_yolov3.bin"))

        self.class_names = [
            "background",
            "aeroplane",
            "bicycle",
            "bird",
            "boat",
            "bottle",
            "bus",
            "car",
            "cat",
            "chair",
            "cow",
            "diningtable",
            "dog",
            "horse",
            "motorbike",
            "person",
            "pottedplant",
            "sheep",
            "sofa",
            "train",
            "tvmonitor",
        ]

    def __del__(self):
        # Drop the reference so ncnn can release its resources.
        self.net = None

    def __call__(self, img):
        """Detect objects in a BGR uint8 image; returns a list of Detect_Object."""
        height, width = img.shape[0], img.shape[1]

        mat_in = ncnn.Mat.from_pixels_resize(
            img,
            ncnn.Mat.PixelType.PIXEL_BGR,
            width,
            height,
            self.target_size,
            self.target_size,
        )
        mat_in.substract_mean_normalize(self.mean_vals, self.norm_vals)

        ex = self.net.create_extractor()
        ex.set_num_threads(self.num_threads)
        ex.input("data", mat_in)

        _, mat_out = ex.extract("detection_out")

        # Each output row is [label, prob, x0, y0, x1, y1] with coordinates
        # normalized to [0, 1]; ncnn.Mat.row avoids any memory copy.
        detections = []
        for row_idx in range(mat_out.h):
            row = mat_out.row(row_idx)
            det = Detect_Object()
            det.label = row[0]
            det.prob = row[1]
            det.rect.x = row[2] * width
            det.rect.y = row[3] * height
            det.rect.w = row[4] * width - det.rect.x
            det.rect.h = row[5] * height - det.rect.y
            detections.append(det)

        return detections
import time
import numpy as np
import ncnn
from .model_store import get_model_file
from ..utils.objects import Detect_Object
from ..utils.functional import *
import cv2
class NanoDet:
    """NanoDet-m COCO detector (80 classes) on ncnn.

    Decodes the raw per-stride head outputs (class scores plus generalized
    focal-loss box distributions) into pixel-space boxes, then applies
    per-class NMS.
    """

    def __init__(
        self,
        target_size=320,
        prob_threshold=0.4,
        nms_threshold=0.3,
        num_threads=1,
        use_gpu=False,
    ):
        self.target_size = target_size
        self.prob_threshold = prob_threshold
        self.nms_threshold = nms_threshold
        self.num_threads = num_threads
        self.use_gpu = use_gpu

        # BGR channel means and 1/(std*255) scales for input normalization.
        self.mean_vals = [103.53, 116.28, 123.675]
        self.norm_vals = [0.017429, 0.017507, 0.017125]

        self.net = ncnn.Net()
        self.net.opt.use_vulkan_compute = self.use_gpu
        self.net.opt.num_threads = self.num_threads

        # original pretrained model from https://github.com/RangiLyu/nanodet
        # the ncnn model https://github.com/nihui/ncnn-assets/tree/master/models
        self.net.load_param(get_model_file("nanodet_m.param"))
        self.net.load_model(get_model_file("nanodet_m.bin"))

        self.reg_max = 7  # box head predicts reg_max + 1 bins per box side
        self.strides = [8, 16, 32]
        self.num_candidate = 1000  # proposals kept per stride before NMS
        self.top_k = -1  # -1: no per-class cap inside NMS

        self.class_names = [
            "person",
            "bicycle",
            "car",
            "motorcycle",
            "airplane",
            "bus",
            "train",
            "truck",
            "boat",
            "traffic light",
            "fire hydrant",
            "stop sign",
            "parking meter",
            "bench",
            "bird",
            "cat",
            "dog",
            "horse",
            "sheep",
            "cow",
            "elephant",
            "bear",
            "zebra",
            "giraffe",
            "backpack",
            "umbrella",
            "handbag",
            "tie",
            "suitcase",
            "frisbee",
            "skis",
            "snowboard",
            "sports ball",
            "kite",
            "baseball bat",
            "baseball glove",
            "skateboard",
            "surfboard",
            "tennis racket",
            "bottle",
            "wine glass",
            "cup",
            "fork",
            "knife",
            "spoon",
            "bowl",
            "banana",
            "apple",
            "sandwich",
            "orange",
            "broccoli",
            "carrot",
            "hot dog",
            "pizza",
            "donut",
            "cake",
            "chair",
            "couch",
            "potted plant",
            "bed",
            "dining table",
            "toilet",
            "tv",
            "laptop",
            "mouse",
            "remote",
            "keyboard",
            "cell phone",
            "microwave",
            "oven",
            "toaster",
            "sink",
            "refrigerator",
            "book",
            "clock",
            "vase",
            "scissors",
            "teddy bear",
            "hair drier",
            "toothbrush",
        ]

    def __del__(self):
        # Drop the reference so ncnn can release its resources.
        self.net = None

    def __call__(self, img):
        """Detect objects in a BGR uint8 image; returns a list of Detect_Object."""
        img_w = img.shape[1]
        img_h = img.shape[0]

        # Resize so the longer side equals target_size, keeping aspect ratio.
        w = img_w
        h = img_h
        scale = 1.0
        if w > h:
            scale = float(self.target_size) / w
            w = self.target_size
            h = int(h * scale)
        else:
            scale = float(self.target_size) / h
            h = self.target_size
            w = int(w * scale)

        mat_in = ncnn.Mat.from_pixels_resize(
            img, ncnn.Mat.PixelType.PIXEL_BGR, img_w, img_h, w, h
        )

        # pad to target_size rectangle (both sides up to a multiple of 32)
        wpad = (w + 31) // 32 * 32 - w
        hpad = (h + 31) // 32 * 32 - h
        mat_in_pad = ncnn.copy_make_border(
            mat_in,
            hpad // 2,
            hpad - hpad // 2,
            wpad // 2,
            wpad - wpad // 2,
            ncnn.BorderType.BORDER_CONSTANT,
            0,
        )

        mat_in_pad.substract_mean_normalize(self.mean_vals, self.norm_vals)

        ex = self.net.create_extractor()
        ex.input("input.1", mat_in_pad)

        # Per-stride class scores; blob names come from the exported model.
        score_out_name = ["792", "814", "836"]
        scores = [ex.extract(x)[1] for x in score_out_name]
        scores = [np.reshape(x, (-1, 80)) for x in scores]

        # Per-stride box distributions: 4 sides x (reg_max + 1) bins = 32.
        boxes_out_name = ["795", "817", "839"]
        raw_boxes = [ex.extract(x)[1] for x in boxes_out_name]
        raw_boxes = [np.reshape(x, (-1, 32)) for x in raw_boxes]

        # generate centers
        decode_boxes = []
        select_scores = []
        for stride, box_distribute, score in zip(self.strides, raw_boxes, scores):
            # Recover the feature-map grid for this stride. score has shape
            # (fm_h * fm_w, 80), so the cell count is always score.shape[0].
            if mat_in_pad.w > mat_in_pad.h:
                fm_w = mat_in_pad.w // stride
                fm_h = score.shape[0] // fm_w
            else:
                fm_h = mat_in_pad.h // stride
                # BUGFIX: was score.shape[1] // fm_h, but shape[1] is the
                # class count (80), not the cell count, which broke the grid
                # for portrait (h >= w) inputs.
                fm_w = score.shape[0] // fm_h

            # Cell-center coordinates in padded-image pixels, duplicated so
            # they can be added to (left, top, right, bottom) distances.
            h_range = np.arange(fm_h)
            w_range = np.arange(fm_w)
            ww, hh = np.meshgrid(w_range, h_range)
            ct_row = (hh.flatten() + 0.5) * stride
            ct_col = (ww.flatten() + 0.5) * stride
            center = np.stack((ct_col, ct_row, ct_col, ct_row), axis=1)

            # box distribution to distance: softmax over bins, expectation,
            # then scale to pixels.
            reg_range = np.arange(self.reg_max + 1)
            box_distance = box_distribute.reshape((-1, self.reg_max + 1))
            box_distance = softmax(box_distance)
            box_distance = box_distance * np.expand_dims(reg_range, axis=0)
            box_distance = np.sum(box_distance, axis=1).reshape((-1, 4))
            box_distance = box_distance * stride

            # top K candidate cells by best class score
            topk_idx = np.argsort(score.max(axis=1))[::-1]
            topk_idx = topk_idx[: self.num_candidate]
            center = center[topk_idx]
            score = score[topk_idx]
            box_distance = box_distance[topk_idx]

            # decode box: distances are (left, top, right, bottom) from center
            decode_box = center + [-1, -1, 1, 1] * box_distance

            select_scores.append(score)
            decode_boxes.append(decode_box)

        # per-class NMS over the proposals of all strides
        bboxes = np.concatenate(decode_boxes, axis=0)
        confidences = np.concatenate(select_scores, axis=0)
        picked_box = []
        picked_probs = []
        picked_labels = []
        for class_index in range(0, confidences.shape[1]):
            probs = confidences[:, class_index]
            mask = probs > self.prob_threshold
            probs = probs[mask]
            if probs.shape[0] == 0:
                continue
            subset_boxes = bboxes[mask, :]
            picked = nms(
                subset_boxes,
                probs,
                iou_threshold=self.nms_threshold,
                top_k=self.top_k,
            )
            picked_box.append(subset_boxes[picked])
            picked_probs.append(probs[picked])
            picked_labels.extend([class_index] * len(picked))

        if not picked_box:
            return []

        picked_box = np.concatenate(picked_box)
        picked_probs = np.concatenate(picked_probs)

        # Undo padding/scaling and clip the boxes to the original image.
        objects = [
            Detect_Object(
                label,
                score,
                (bbox[0] - wpad / 2) / scale if bbox[0] > 0 else 0,
                (bbox[1] - hpad / 2) / scale if bbox[1] > 0 else 0,
                (bbox[2] - bbox[0]) / scale
                if bbox[2] < mat_in_pad.w
                else (mat_in_pad.w - bbox[0]) / scale,
                (bbox[3] - bbox[1]) / scale
                if bbox[3] < mat_in_pad.h
                else (mat_in_pad.h - bbox[1]) / scale,
            )
            for label, score, bbox in zip(picked_labels, picked_probs, picked_box)
        ]

        return objects
import numpy as np
import ncnn
from .model_store import get_model_file
from ..utils.objects import Detect_Object
class RFCN:
    """R-FCN (ResNet-50) VOC object detector on ncnn.

    Two-pass pipeline: the first extractor run produces position-sensitive
    score maps and region proposals (RoIs); a second extractor run per RoI
    pools them into class scores and one bbox refinement. The loaded model
    uses class-agnostic bbox regression (see the ``test_agnostic.prototxt``
    link below), which is why ``bbox_pred`` is indexed at the fixed
    foreground offset 4 rather than ``label * 4``.
    """

    def __init__(
        self,
        target_size=224,
        max_per_image=100,
        confidence_thresh=0.6,
        nms_threshold=0.3,
        num_threads=1,
        use_gpu=False,
    ):
        self.target_size = target_size
        self.max_per_image = max_per_image
        self.confidence_thresh = confidence_thresh
        self.nms_threshold = nms_threshold
        self.num_threads = num_threads
        self.use_gpu = use_gpu

        # Caffe BGR pixel means; no scaling (norm_vals left empty).
        self.mean_vals = [102.9801, 115.9465, 122.7717]
        self.norm_vals = []

        self.net = ncnn.Net()
        self.net.opt.use_vulkan_compute = self.use_gpu

        # original pretrained model from https://github.com/YuwenXiong/py-R-FCN
        # https://github.com/YuwenXiong/py-R-FCN/blob/master/models/pascal_voc/ResNet-50/rfcn_end2end/test_agnostic.prototxt
        # https://1drv.ms/u/s!AoN7vygOjLIQqUWHpY67oaC7mopf
        # resnet50_rfcn_final.caffemodel
        # the ncnn model https://github.com/nihui/ncnn-assets/tree/master/models
        self.net.load_param(get_model_file("rfcn_end2end.param"))
        self.net.load_model(get_model_file("rfcn_end2end.bin"))

        self.class_names = [
            "background",
            "aeroplane",
            "bicycle",
            "bird",
            "boat",
            "bottle",
            "bus",
            "car",
            "cat",
            "chair",
            "cow",
            "diningtable",
            "dog",
            "horse",
            "motorbike",
            "person",
            "pottedplant",
            "sheep",
            "sofa",
            "train",
            "tvmonitor",
        ]

    def __del__(self):
        # Drop the reference so ncnn can release its resources.
        self.net = None

    def __call__(self, img):
        """Detect objects in a BGR uint8 image.

        Returns at most ``max_per_image`` Detect_Object instances sorted by
        descending probability, with pixel coordinates in the original image.
        """
        h = img.shape[0]
        w = img.shape[1]

        # Resize so the SHORTER side equals target_size, keeping aspect ratio.
        scale = 1.0
        if w < h:
            scale = float(self.target_size) / w
            w = self.target_size
            h = h * scale
        else:
            scale = float(self.target_size) / h
            h = self.target_size
            w = w * scale

        mat_in = ncnn.Mat.from_pixels_resize(
            img,
            ncnn.Mat.PixelType.PIXEL_BGR,
            img.shape[1],
            img.shape[0],
            int(w),
            int(h),
        )
        mat_in.substract_mean_normalize(self.mean_vals, self.norm_vals)

        # im_info = (resized height, resized width, scale) for the RPN.
        im_info = ncnn.Mat(3)
        im_info[0] = h
        im_info[1] = w
        im_info[2] = scale

        # step1, extract feature and all rois
        ex1 = self.net.create_extractor()
        ex1.set_num_threads(self.num_threads)

        ex1.input("data", mat_in)
        ex1.input("im_info", im_info)

        ret1, rfcn_cls = ex1.extract("rfcn_cls")
        ret2, rfcn_bbox = ex1.extract("rfcn_bbox")
        ret3, rois = ex1.extract("rois")  # all rois

        # step2, extract bbox and score for each roi
        class_candidates = []  # per-class lists of Detect_Object, index 0 = background
        for i in range(rois.c):
            ex2 = self.net.create_extractor()

            roi = rois.channel(i)  # get single roi
            ex2.input("rfcn_cls", rfcn_cls)
            ex2.input("rfcn_bbox", rfcn_bbox)
            ex2.input("rois", roi)

            ret1, bbox_pred = ex2.extract("bbox_pred")
            ret2, cls_prob = ex2.extract("cls_prob")

            num_class = cls_prob.w
            # Grow the per-class buckets lazily to the observed class count.
            while len(class_candidates) < num_class:
                class_candidates.append([])

            # find class id with highest score
            label = 0
            score = 0.0
            for j in range(num_class):
                class_score = cls_prob[j]
                if class_score > score:
                    label = j
                    score = class_score

            # ignore background or low score
            if label == 0 or score <= self.confidence_thresh:
                continue

            # fprintf(stderr, "%d = %f\n", label, score)

            # unscale to image size
            x1 = roi[0] / scale
            y1 = roi[1] / scale
            x2 = roi[2] / scale
            y2 = roi[3] / scale

            pb_w = x2 - x1 + 1
            pb_h = y2 - y1 + 1

            # apply bbox regression
            # Class-agnostic model: deltas 0..3 are background, 4..7 foreground.
            dx = bbox_pred[4]
            dy = bbox_pred[4 + 1]
            dw = bbox_pred[4 + 2]
            dh = bbox_pred[4 + 3]

            # Deltas are relative to the box center and log-scaled size.
            cx = x1 + pb_w * 0.5
            cy = y1 + pb_h * 0.5

            obj_cx = cx + pb_w * dx
            obj_cy = cy + pb_h * dy

            obj_w = pb_w * np.exp(dw)
            obj_h = pb_h * np.exp(dh)

            obj_x1 = obj_cx - obj_w * 0.5
            obj_y1 = obj_cy - obj_h * 0.5
            obj_x2 = obj_cx + obj_w * 0.5
            obj_y2 = obj_cy + obj_h * 0.5

            # clip
            obj_x1 = np.maximum(np.minimum(obj_x1, float(img.shape[1] - 1)), 0.0)
            obj_y1 = np.maximum(np.minimum(obj_y1, float(img.shape[0] - 1)), 0.0)
            obj_x2 = np.maximum(np.minimum(obj_x2, float(img.shape[1] - 1)), 0.0)
            obj_y2 = np.maximum(np.minimum(obj_y2, float(img.shape[0] - 1)), 0.0)

            # append object
            obj = Detect_Object()
            obj.rect.x = obj_x1
            obj.rect.y = obj_y1
            obj.rect.w = obj_x2 - obj_x1 + 1
            obj.rect.h = obj_y2 - obj_y1 + 1
            obj.label = label
            obj.prob = score

            class_candidates[label].append(obj)

        # post process: per-class NMS, then a global cap of max_per_image.
        objects = []
        for candidates in class_candidates:
            if len(candidates) == 0:
                continue

            candidates.sort(key=lambda obj: obj.prob, reverse=True)

            picked = self.nms_sorted_bboxes(candidates, self.nms_threshold)

            for j in range(len(picked)):
                z = picked[j]
                objects.append(candidates[z])

        objects.sort(key=lambda obj: obj.prob, reverse=True)

        objects = objects[: self.max_per_image]

        return objects

    def nms_sorted_bboxes(self, objects, nms_threshold):
        """Greedy NMS over detections pre-sorted by descending score.

        Returns the indices (into ``objects``) of the detections to keep.
        """
        picked = []

        n = len(objects)

        areas = np.zeros((n,), dtype=np.float32)
        for i in range(n):
            areas[i] = objects[i].rect.area()

        for i in range(n):
            a = objects[i]

            keep = True
            for j in range(len(picked)):
                b = objects[picked[j]]

                # intersection over union
                inter_area = a.rect.intersection_area(b.rect)
                union_area = areas[i] + areas[picked[j]] - inter_area
                # float IoU = inter_area / union_area
                if inter_area / union_area > nms_threshold:
                    keep = False

            if keep:
                picked.append(i)

        return picked
import numpy as np
import ncnn
from .model_store import get_model_file
from ..utils.objects import Point, Face_Object
class RetinaFace:
    """RetinaFace (mnet.25) face detector with 5-point landmarks on ncnn."""

    def __init__(
        self, prob_threshold=0.8, nms_threshold=0.4, num_threads=1, use_gpu=False
    ):
        self.prob_threshold = prob_threshold
        self.nms_threshold = nms_threshold
        self.num_threads = num_threads
        self.use_gpu = use_gpu

        self.net = ncnn.Net()
        self.net.opt.use_vulkan_compute = self.use_gpu

        # model is converted from
        # https://github.com/deepinsight/insightface/tree/master/RetinaFace#retinaface-pretrained-models
        # https://github.com/deepinsight/insightface/issues/669
        # the ncnn model https://github.com/nihui/ncnn-assets/tree/master/models
        self.net.load_param(get_model_file("mnet.25-opt.param"))
        self.net.load_model(get_model_file("mnet.25-opt.bin"))

    def __del__(self):
        # Drop the reference so ncnn can release its resources.
        self.net = None

    def __call__(self, img):
        """Detect faces in a BGR uint8 image; returns a list of Face_Object."""
        img_h = img.shape[0]
        img_w = img.shape[1]

        # No resizing: the network is fully convolutional over the input.
        mat_in = ncnn.Mat.from_pixels(
            img, ncnn.Mat.PixelType.PIXEL_BGR2RGB, img_w, img_h
        )

        ex = self.net.create_extractor()
        ex.set_num_threads(self.num_threads)

        ex.input("data", mat_in)

        faceobjects32 = self.detect_stride32(ex)
        faceobjects16 = self.detect_stride16(ex)
        faceobjects8 = self.detect_stride8(ex)

        faceproposals = [*faceobjects32, *faceobjects16, *faceobjects8]

        # sort all proposals by score from highest to lowest
        faceproposals.sort(key=lambda obj: obj.prob, reverse=True)

        # apply nms with nms_threshold
        picked = self.nms_sorted_bboxes(faceproposals, self.nms_threshold)

        face_count = len(picked)
        faceobjects = []
        for i in range(face_count):
            faceobjects.append(faceproposals[picked[i]])

            # clip to image size
            x0 = faceobjects[i].rect.x
            y0 = faceobjects[i].rect.y
            x1 = x0 + faceobjects[i].rect.w
            y1 = y0 + faceobjects[i].rect.h

            x0 = np.maximum(np.minimum(x0, float(img_w) - 1), 0.0)
            y0 = np.maximum(np.minimum(y0, float(img_h) - 1), 0.0)
            x1 = np.maximum(np.minimum(x1, float(img_w) - 1), 0.0)
            y1 = np.maximum(np.minimum(y1, float(img_h) - 1), 0.0)

            faceobjects[i].rect.x = x0
            faceobjects[i].rect.y = y0
            faceobjects[i].rect.w = x1 - x0
            faceobjects[i].rect.h = y1 - y0

        return faceobjects

    def detect_stride32(self, ex):
        """Decode the stride-32 head (largest anchors: scales 32 and 16)."""
        ret1, score_blob = ex.extract("face_rpn_cls_prob_reshape_stride32")
        ret2, bbox_blob = ex.extract("face_rpn_bbox_pred_stride32")
        ret3, landmark_blob = ex.extract("face_rpn_landmark_pred_stride32")

        base_size = 16
        feat_stride = 32

        ratios = ncnn.Mat(1)
        ratios[0] = 1.0
        scales = ncnn.Mat(2)
        scales[0] = 32.0
        scales[1] = 16.0
        anchors = self.generate_anchors(base_size, ratios, scales)

        faceobjects32 = self.generate_proposals(
            anchors,
            feat_stride,
            score_blob,
            bbox_blob,
            landmark_blob,
            self.prob_threshold,
        )

        return faceobjects32

    def detect_stride16(self, ex):
        """Decode the stride-16 head (medium anchors: scales 8 and 4)."""
        ret1, score_blob = ex.extract("face_rpn_cls_prob_reshape_stride16")
        ret2, bbox_blob = ex.extract("face_rpn_bbox_pred_stride16")
        ret3, landmark_blob = ex.extract("face_rpn_landmark_pred_stride16")

        base_size = 16
        feat_stride = 16

        ratios = ncnn.Mat(1)
        ratios[0] = 1.0
        scales = ncnn.Mat(2)
        scales[0] = 8.0
        scales[1] = 4.0
        anchors = self.generate_anchors(base_size, ratios, scales)

        faceobjects16 = self.generate_proposals(
            anchors,
            feat_stride,
            score_blob,
            bbox_blob,
            landmark_blob,
            self.prob_threshold,
        )

        return faceobjects16

    def detect_stride8(self, ex):
        """Decode the stride-8 head (smallest anchors: scales 2 and 1)."""
        ret1, score_blob = ex.extract("face_rpn_cls_prob_reshape_stride8")
        ret2, bbox_blob = ex.extract("face_rpn_bbox_pred_stride8")
        ret3, landmark_blob = ex.extract("face_rpn_landmark_pred_stride8")

        base_size = 16
        feat_stride = 8

        ratios = ncnn.Mat(1)
        ratios[0] = 1.0
        scales = ncnn.Mat(2)
        scales[0] = 2.0
        scales[1] = 1.0
        anchors = self.generate_anchors(base_size, ratios, scales)

        faceobjects8 = self.generate_proposals(
            anchors,
            feat_stride,
            score_blob,
            bbox_blob,
            landmark_blob,
            self.prob_threshold,
        )

        return faceobjects8

    def generate_anchors(self, base_size, ratios, scales):
        """Build base anchors for one feature level.

        Returns an ncnn.Mat of shape (num_ratio * num_scale, 4); each row is
        (x0, y0, x1, y1) of an anchor centred on a base_size cell at the
        origin.
        """
        num_ratio = ratios.w
        num_scale = scales.w

        # One anchor row per (ratio, scale) pair. This was hard-coded to
        # 2 rows, which only worked for the 1-ratio / 2-scale configuration
        # used by the detect_stride* callers; now it is generalized.
        anchors_np = np.zeros((num_ratio * num_scale, 4), dtype=np.float32)

        cx = base_size * 0.5
        cy = base_size * 0.5

        for i in range(num_ratio):
            ar = ratios[i]

            r_w = np.round(base_size / np.sqrt(ar))
            r_h = np.round(r_w * ar)  # round(base_size * np.sqrt(ar))

            for j in range(num_scale):
                scale = scales[j]

                rs_w = r_w * scale
                rs_h = r_h * scale

                anchor = anchors_np[i * num_scale + j]

                anchor[0] = cx - rs_w * 0.5
                anchor[1] = cy - rs_h * 0.5
                anchor[2] = cx + rs_w * 0.5
                anchor[3] = cy + rs_h * 0.5

        anchors = ncnn.Mat(anchors_np)

        return anchors

    def generate_proposals(
        self, anchors, feat_stride, score_blob, bbox_blob, landmark_blob, prob_threshold
    ):
        """Slide the base anchors over the feature map and decode proposals.

        For each cell whose face score reaches prob_threshold, applies the
        center-size bbox deltas and the 5 landmark offsets, and emits a
        Face_Object in input-image pixel coordinates.
        """
        faceobjects = []

        w = score_blob.w
        h = score_blob.h

        # generate face proposal from bbox deltas and shifted anchors
        num_anchors = anchors.h

        for q in range(num_anchors):
            anchor = anchors.row(q)

            # Channels [num_anchors:2*num_anchors] hold the face scores
            # (the first num_anchors channels are the background scores).
            score = score_blob.channel(q + num_anchors)
            bbox = bbox_blob.channel_range(q * 4, 4)
            landmark = landmark_blob.channel_range(q * 10, 10)

            # shifted anchor
            anchor_y = anchor[1]

            anchor_w = anchor[2] - anchor[0]
            anchor_h = anchor[3] - anchor[1]

            for i in range(h):
                anchor_x = anchor[0]

                for j in range(w):
                    index = i * w + j

                    prob = score[index]

                    if prob >= prob_threshold:
                        # apply center size: deltas are relative to the box
                        # center and log-scaled width/height.
                        dx = bbox.channel(0)[index]
                        dy = bbox.channel(1)[index]
                        dw = bbox.channel(2)[index]
                        dh = bbox.channel(3)[index]

                        cx = anchor_x + anchor_w * 0.5
                        cy = anchor_y + anchor_h * 0.5

                        pb_cx = cx + anchor_w * dx
                        pb_cy = cy + anchor_h * dy

                        pb_w = anchor_w * np.exp(dw)
                        pb_h = anchor_h * np.exp(dh)

                        x0 = pb_cx - pb_w * 0.5
                        y0 = pb_cy - pb_h * 0.5
                        x1 = pb_cx + pb_w * 0.5
                        y1 = pb_cy + pb_h * 0.5

                        obj = Face_Object()
                        obj.rect.x = x0
                        obj.rect.y = y0
                        obj.rect.w = x1 - x0 + 1
                        obj.rect.h = y1 - y0 + 1
                        obj.landmark = [Point(), Point(), Point(), Point(), Point()]
                        obj.landmark[0].x = (
                            cx + (anchor_w + 1) * landmark.channel(0)[index]
                        )
                        obj.landmark[0].y = (
                            cy + (anchor_h + 1) * landmark.channel(1)[index]
                        )
                        obj.landmark[1].x = (
                            cx + (anchor_w + 1) * landmark.channel(2)[index]
                        )
                        obj.landmark[1].y = (
                            cy + (anchor_h + 1) * landmark.channel(3)[index]
                        )
                        obj.landmark[2].x = (
                            cx + (anchor_w + 1) * landmark.channel(4)[index]
                        )
                        obj.landmark[2].y = (
                            cy + (anchor_h + 1) * landmark.channel(5)[index]
                        )
                        obj.landmark[3].x = (
                            cx + (anchor_w + 1) * landmark.channel(6)[index]
                        )
                        obj.landmark[3].y = (
                            cy + (anchor_h + 1) * landmark.channel(7)[index]
                        )
                        obj.landmark[4].x = (
                            cx + (anchor_w + 1) * landmark.channel(8)[index]
                        )
                        obj.landmark[4].y = (
                            cy + (anchor_h + 1) * landmark.channel(9)[index]
                        )
                        obj.prob = prob

                        faceobjects.append(obj)

                    anchor_x += feat_stride

                anchor_y += feat_stride

        return faceobjects

    def nms_sorted_bboxes(self, faceobjects, nms_threshold):
        """Greedy NMS over detections pre-sorted by descending score.

        Returns the indices (into ``faceobjects``) of the detections to keep.
        """
        picked = []

        n = len(faceobjects)

        areas = [faceobjects[i].rect.area() for i in range(n)]

        for i in range(n):
            a = faceobjects[i]

            keep = True
            for j in picked:
                b = faceobjects[j]

                # intersection over union
                inter_area = a.rect.intersection_area(b.rect)
                union_area = areas[i] + areas[j] - inter_area
                if inter_area / union_area > nms_threshold:
                    keep = False
                    break  # one strong overlap is enough to reject

            if keep:
                picked.append(i)

        return picked
import ncnn
from .model_store import get_model_file
from ..utils.objects import Detect_Object
class Noop(ncnn.Layer):
    """No-op custom ncnn layer.

    Presumably a stand-in for Caffe's "Silence" layer -- see the commented
    register_custom_layer("Silence", ...) call below; confirm before use.
    """

    pass
def Noop_layer_creator():
    """Factory for ncnn custom-layer registration; returns a fresh Noop."""
    return Noop()
class MobileNetV2_SSDLite:
    """MobileNetV2-SSDLite VOC object detector (20 classes + background) on ncnn."""

    def __init__(self, target_size=300, num_threads=1, use_gpu=False):
        self.target_size = target_size
        self.num_threads = num_threads
        self.use_gpu = use_gpu

        # Caffe-style preprocessing: (pixel - mean) * norm.
        self.mean_vals = [127.5, 127.5, 127.5]
        self.norm_vals = [0.007843, 0.007843, 0.007843]

        self.net = ncnn.Net()
        self.net.opt.use_vulkan_compute = self.use_gpu
        # self.net.register_custom_layer("Silence", Noop_layer_creator)

        # original pretrained model from https://github.com/chuanqi305/MobileNetv2-SSDLite
        # https://github.com/chuanqi305/MobileNetv2-SSDLite/blob/master/ssdlite/voc/deploy.prototxt
        # the ncnn model https://github.com/nihui/ncnn-assets/tree/master/models
        self.net.load_param(get_model_file("mobilenetv2_ssdlite_voc.param"))
        self.net.load_model(get_model_file("mobilenetv2_ssdlite_voc.bin"))

        self.class_names = [
            "background",
            "aeroplane",
            "bicycle",
            "bird",
            "boat",
            "bottle",
            "bus",
            "car",
            "cat",
            "chair",
            "cow",
            "diningtable",
            "dog",
            "horse",
            "motorbike",
            "person",
            "pottedplant",
            "sheep",
            "sofa",
            "train",
            "tvmonitor",
        ]

    def __del__(self):
        # Drop the reference so ncnn can release its resources.
        self.net = None

    def __call__(self, img):
        """Detect objects in a BGR uint8 image; returns a list of Detect_Object."""
        height, width = img.shape[0], img.shape[1]

        mat_in = ncnn.Mat.from_pixels_resize(
            img,
            ncnn.Mat.PixelType.PIXEL_BGR,
            width,
            height,
            self.target_size,
            self.target_size,
        )
        mat_in.substract_mean_normalize(self.mean_vals, self.norm_vals)

        ex = self.net.create_extractor()
        ex.set_light_mode(True)
        ex.set_num_threads(self.num_threads)
        ex.input("data", mat_in)

        _, mat_out = ex.extract("detection_out")

        # Each output row is [label, prob, x0, y0, x1, y1] with coordinates
        # normalized to [0, 1]; ncnn.Mat.row avoids any memory copy.
        detections = []
        for row_idx in range(mat_out.h):
            row = mat_out.row(row_idx)
            det = Detect_Object()
            det.label = row[0]
            det.prob = row[1]
            det.rect.x = row[2] * width
            det.rect.y = row[3] * height
            det.rect.w = row[4] * width - det.rect.x
            det.rect.h = row[5] * height - det.rect.y
            detections.append(det)

        return detections
import ncnn
from .model_store import get_model_file
from ..utils.objects import KeyPoint
class SimplePose:
    """Simple-baseline human pose estimator (gluon-cv export) on ncnn.

    Produces one KeyPoint per output heatmap channel: the location of the
    peak response mapped back to input-image coordinates, plus its score.
    """

    def __init__(
        self, target_width=192, target_height=256, num_threads=1, use_gpu=False
    ):
        self.target_width = target_width
        self.target_height = target_height
        self.num_threads = num_threads
        self.use_gpu = use_gpu

        # ImageNet RGB mean / std, expressed on the 0-255 pixel range.
        self.mean_vals = [0.485 * 255.0, 0.456 * 255.0, 0.406 * 255.0]
        self.norm_vals = [1 / 0.229 / 255.0, 1 / 0.224 / 255.0, 1 / 0.225 / 255.0]

        self.net = ncnn.Net()
        self.net.opt.use_vulkan_compute = self.use_gpu

        # the simple baseline human pose estimation from gluon-cv
        # https://gluon-cv.mxnet.io/build/examples_pose/demo_simple_pose.html
        # mxnet model exported via
        #     pose_net.hybridize()
        #     pose_net.export('pose')
        # then mxnet2ncnn
        # the ncnn model https://github.com/nihui/ncnn-assets/tree/master/models
        self.net.load_param(get_model_file("pose.param"))
        self.net.load_model(get_model_file("pose.bin"))

    def __del__(self):
        # Drop the reference so ncnn can release its resources.
        self.net = None

    def __call__(self, img):
        """Estimate keypoints on a BGR uint8 image; returns a list of KeyPoint."""
        height, width = img.shape[0], img.shape[1]

        mat_in = ncnn.Mat.from_pixels_resize(
            img,
            ncnn.Mat.PixelType.PIXEL_BGR2RGB,
            width,
            height,
            self.target_width,
            self.target_height,
        )
        mat_in.substract_mean_normalize(self.mean_vals, self.norm_vals)

        ex = self.net.create_extractor()
        ex.set_num_threads(self.num_threads)
        ex.input("data", mat_in)

        _, mat_out = ex.extract("conv3_fwd")

        keypoints = []
        for channel_idx in range(mat_out.c):
            heatmap = mat_out.channel(channel_idx)

            # Scan this channel's heatmap for its peak response.
            best_prob = 0.0
            best_x = 0
            best_y = 0
            for y in range(mat_out.h):
                row = heatmap.row(y)
                for x in range(mat_out.w):
                    value = row[x]
                    if value > best_prob:
                        best_prob = value
                        best_x = x
                        best_y = y

            # Map the heatmap peak back to original-image coordinates.
            keypoint = KeyPoint()
            keypoint.p.x = best_x * width / float(mat_out.w)
            keypoint.p.y = best_y * height / float(mat_out.h)
            keypoint.prob = best_prob
            keypoints.append(keypoint)

        return keypoints
# PNNX
PyTorch Neural Network eXchange (PNNX) is an open standard for PyTorch model interoperability. PNNX provides an open model format for PyTorch. It defines the computation graph as well as high-level operators that strictly match PyTorch.
# Rationale
PyTorch is currently one of the most popular machine learning frameworks. We need to deploy the trained AI model to various hardware and environments more conveniently and easily.
Before PNNX, we had the following methods:
1. export to ONNX, and deploy with ONNX-runtime
2. export to ONNX, and convert onnx to inference-framework specific format, and deploy with TensorRT/OpenVINO/ncnn/etc.
3. export to TorchScript, and deploy with libtorch
As far as we know, ONNX has the ability to express the PyTorch model and it is an open standard. People usually use ONNX as an intermediate representation between PyTorch and the inference platform. However, ONNX still has the following fatal problems, which makes the birth of PNNX necessary:
1. ONNX does not have a human-readable and editable file representation, making it difficult for users to easily modify the computation graph or add custom operators.
2. The operator definition of ONNX is not completely in accordance with PyTorch. When exporting some PyTorch operators, glue operators are often added passively by ONNX, which makes the computation graph inconsistent with PyTorch and may impact the inference efficiency.
3. There are a large number of additional parameters designed to be compatible with various ML frameworks in the operator definition in ONNX. These parameters increase the burden of inference implementation on hardware and software.
PNNX tries to define a set of operators and a simple, easy-to-use format that correspond one-to-one with the Python API of PyTorch, so that converting PyTorch models and interoperating with them becomes more convenient.
# Features
1. [Human readable and editable format](#the-pnnxparam-format)
2. [Plain model binary in storage zip](#the-pnnxbin-format)
3. [One-to-one mapping of PNNX operators and PyTorch python api](#pnnx-operator)
4. [Preserve math expression as one operator](#pnnx-expression-operator)
5. [Preserve torch function as one operator](#pnnx-torch-function-operator)
6. [Preserve miscellaneous module as one operator](#pnnx-module-operator)
7. [Inference via exported PyTorch python code](#pnnx-python-inference)
8. [Tensor shape propagation](#pnnx-shape-propagation)
9. [Model optimization](#pnnx-model-optimization)
10. [Custom operator support](#pnnx-custom-operator)
# Build TorchScript to PNNX converter
1. Install PyTorch and TorchVision c++ library
2. Build PNNX with cmake
# Usage
1. Export your model to TorchScript
```python
import torch
import torchvision.models as models
net = models.resnet18(pretrained=True)
net = net.eval()
x = torch.rand(1, 3, 224, 224)
# You could try disabling checking when tracing raises error
# mod = torch.jit.trace(net, x, check_trace=False)
mod = torch.jit.trace(net, x)
mod.save("resnet18.pt")
```
2. Convert TorchScript to PNNX
```shell
pnnx resnet18.pt inputshape=[1,3,224,224]
```
Normally, you will get six files
```resnet18.pnnx.param``` PNNX graph definition
```resnet18.pnnx.bin``` PNNX model weight
```resnet18_pnnx.py``` PyTorch script for inference, the python code for model construction and weight initialization
```resnet18.ncnn.param``` ncnn graph definition
```resnet18.ncnn.bin``` ncnn model weight
```resnet18_ncnn.py``` pyncnn script for inference
3. Visualize PNNX with Netron
Open https://netron.app/ in browser, and drag resnet18.pnnx.param into it.
4. PNNX command line options
```
Usage: pnnx [model.pt] [(key=value)...]
pnnxparam=model.pnnx.param
pnnxbin=model.pnnx.bin
pnnxpy=model_pnnx.py
ncnnparam=model.ncnn.param
ncnnbin=model.ncnn.bin
ncnnpy=model_ncnn.py
optlevel=2
device=cpu/gpu
inputshape=[1,3,224,224],...
inputshape2=[1,3,320,320],...
customop=/home/nihui/.cache/torch_extensions/fused/fused.so,...
moduleop=models.common.Focus,models.yolo.Detect,...
Sample usage: pnnx mobilenet_v2.pt inputshape=[1,3,224,224]
pnnx yolov5s.pt inputshape=[1,3,640,640] inputshape2=[1,3,320,320] device=gpu moduleop=models.common.Focus,models.yolo.Detect
```
Parameters:
`pnnxparam` (default="*.pnnx.param", * is the model name): PNNX graph definition file
`pnnxbin` (default="*.pnnx.bin"): PNNX model weight
`pnnxpy` (default="*_pnnx.py"): PyTorch script for inference, including model construction and weight initialization code
`ncnnparam` (default="*.ncnn.param"): ncnn graph definition
`ncnnbin` (default="*.ncnn.bin"): ncnn model weight
`ncnnpy` (default="*_ncnn.py"): pyncnn script for inference
`optlevel` (default=2): graph optimization level
| Option | Optimization level |
|--------|---------------------------------|
| 0 | do not apply optimization |
| 1 | optimization for inference |
| 2 | optimization more for inference |
`device` (default="cpu"): device type for the input in TorchScript model, cpu or gpu
`inputshape` (Optional): shapes of model inputs. It is used to resolve tensor shapes in model graph. for example, `[1,3,224,224]` for the model with only 1 input, `[1,3,224,224],[1,3,224,224]` for the model that have 2 inputs.
`inputshape2` (Optional): shapes of alternative model inputs, the format is identical to `inputshape`. Usually, it is used with `inputshape` to resolve dynamic shape (-1) in model graph.
`customop` (Optional): list of Torch extensions (dynamic library) for custom operators, separated by ",". For example, `/home/nihui/.cache/torch_extensions/fused/fused.so,...`
`moduleop` (Optional): list of modules to keep as one big operator, separated by ",". for example, `models.common.Focus,models.yolo.Detect`
# The pnnx.param format
### example
```
7767517
4 3
pnnx.Input input 0 1 0
nn.Conv2d conv_0 1 1 0 1 bias=1 dilation=(1,1) groups=1 in_channels=12 kernel_size=(3,3) out_channels=16 padding=(0,0) stride=(1,1) @bias=(16)f32 @weight=(16,12,3,3)f32
nn.Conv2d conv_1 1 1 1 2 bias=1 dilation=(1,1) groups=1 in_channels=16 kernel_size=(2,2) out_channels=20 padding=(2,2) stride=(2,2) @bias=(20)f32 @weight=(20,16,2,2)f32
pnnx.Output output 1 0 2
```
### overview
```
[magic]
```
* magic number : 7767517
```
[operator count] [operand count]
```
* operator count : count of the operator line follows
* operand count : count of all operands
### operator line
```
[type] [name] [input count] [output count] [input operands] [output operands] [operator params]
```
* type : type name, such as Conv2d ReLU etc
* name : name of this operator
* input count : count of the operands this operator needs as input
* output count : count of the operands this operator produces as output
* input operands : name list of all the input blob names, separated by space
* output operands : name list of all the output blob names, separated by space
* operator params : key=value pair list, separated by space, operator weights are prefixed by ```@``` symbol, tensor shapes are prefixed by ```#``` symbol, input parameter keys are prefixed by ```$```
# The pnnx.bin format
pnnx.bin file is a zip file with store-only mode(no compression)
weight binary file has its name composed by operator name and weight name
For example, ```nn.Conv2d conv_0 1 1 0 1 bias=1 dilation=(1,1) groups=1 in_channels=12 kernel_size=(3,3) out_channels=16 padding=(0,0) stride=(1,1) @bias=(16) @weight=(16,12,3,3)``` would pull conv_0.weight and conv_0.bias into pnnx.bin zip archive.
weight binaries can be listed or modified with any archive application eg. 7zip

# PNNX operator
PNNX always preserve operators from what PyTorch python api provides.
Here is the netron visualization comparison among ONNX, TorchScript and PNNX with the original PyTorch python code shown.
```python
import torch
import torch.nn as nn
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.attention = nn.MultiheadAttention(embed_dim=256, num_heads=32)
def forward(self, x):
x, _ = self.attention(x, x, x)
return x
```
|ONNX|TorchScript|PNNX|
|----|---|---|
||||
# PNNX expression operator
PNNX tries to preserve expressions as written in the PyTorch python code.
Here is the netron visualization comparison among ONNX, TorchScript and PNNX with the original PyTorch python code shown.
```python
import torch
def foo(x, y):
return torch.sqrt((2 * x + y) / 12)
```
|ONNX|TorchScript|PNNX|
|---|---|---|
||||
# PNNX torch function operator
PNNX tries to preserve torch functions and Tensor member functions as single operators, matching what the PyTorch python api provides.
Here is the netron visualization comparison among ONNX, TorchScript and PNNX with the original PyTorch python code shown.
```python
import torch
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
def forward(self, x):
x = F.normalize(x, eps=1e-3)
return x
```
|ONNX|TorchScript|PNNX|
|---|---|---|
||||
# PNNX module operator
Users could ask PNNX to keep module as one big operator when it has complex logic.
The process is optional and could be enabled via moduleop command line option.
After pass_level0, all modules will be listed in the terminal output; you can then pick the interesting ones as module operators.
```
############# pass_level0
inline module = models.common.Bottleneck
inline module = models.common.C3
inline module = models.common.Concat
inline module = models.common.Conv
inline module = models.common.Focus
inline module = models.common.SPP
inline module = models.yolo.Detect
inline module = utils.activations.SiLU
```
```bash
pnnx yolov5s.pt inputshape=[1,3,640,640] moduleop=models.common.Focus,models.yolo.Detect
```
Here is the netron visualization comparison among ONNX, TorchScript and PNNX with the original PyTorch python code shown.
```python
import torch
import torch.nn as nn
class Focus(nn.Module):
# Focus wh information into c-space
def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
super().__init__()
self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))
```
|ONNX|TorchScript|PNNX|PNNX with module operator|
|---|---|---|---|
|||||
# PNNX python inference
A python script will be generated by default when converting torchscript to pnnx.
This script is the python code representation of PNNX and can be used for model inference.
There are some utility functions for loading weight binary from pnnx.bin.
You can even export the model torchscript AGAIN from this generated code!
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.linear_0 = nn.Linear(in_features=128, out_features=256, bias=True)
self.linear_1 = nn.Linear(in_features=256, out_features=4, bias=True)
def forward(self, x):
x = self.linear_0(x)
x = F.leaky_relu(x, 0.15)
x = self.linear_1(x)
return x
```
```python
import os
import numpy as np
import tempfile, zipfile
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
self.linear_0 = nn.Linear(bias=True, in_features=128, out_features=256)
self.linear_1 = nn.Linear(bias=True, in_features=256, out_features=4)
archive = zipfile.ZipFile('../../function.pnnx.bin', 'r')
self.linear_0.bias = self.load_pnnx_bin_as_parameter(archive, 'linear_0.bias', (256), 'float32')
self.linear_0.weight = self.load_pnnx_bin_as_parameter(archive, 'linear_0.weight', (256,128), 'float32')
self.linear_1.bias = self.load_pnnx_bin_as_parameter(archive, 'linear_1.bias', (4), 'float32')
self.linear_1.weight = self.load_pnnx_bin_as_parameter(archive, 'linear_1.weight', (4,256), 'float32')
archive.close()
def load_pnnx_bin_as_parameter(self, archive, key, shape, dtype):
return nn.Parameter(self.load_pnnx_bin_as_tensor(archive, key, shape, dtype))
def load_pnnx_bin_as_tensor(self, archive, key, shape, dtype):
_, tmppath = tempfile.mkstemp()
tmpf = open(tmppath, 'wb')
with archive.open(key) as keyfile:
tmpf.write(keyfile.read())
tmpf.close()
m = np.memmap(tmppath, dtype=dtype, mode='r', shape=shape).copy()
os.remove(tmppath)
return torch.from_numpy(m)
def forward(self, v_x_1):
v_7 = self.linear_0(v_x_1)
v_input_1 = F.leaky_relu(input=v_7, negative_slope=0.150000)
v_12 = self.linear_1(v_input_1)
return v_12
```
# PNNX shape propagation
Users could ask PNNX to resolve all tensor shapes in model graph and constify some common expressions involved when tensor shapes are known.
The process is optional and could be enabled via inputshape command line option.
```bash
pnnx shufflenet_v2_x1_0.pt inputshape=[1,3,224,224]
```
```python
def channel_shuffle(x: Tensor, groups: int) -> Tensor:
batchsize, num_channels, height, width = x.size()
channels_per_group = num_channels // groups
# reshape
x = x.view(batchsize, groups, channels_per_group, height, width)
x = torch.transpose(x, 1, 2).contiguous()
# flatten
x = x.view(batchsize, -1, height, width)
return x
```
|without shape propagation|with shape propagation|
|---|---|
|||
# PNNX model optimization
|ONNX|TorchScript|PNNX without optimization|PNNX with optimization|
|---|---|---|---|
|||||
# PNNX custom operator
```python
import os
import torch
from torch.autograd import Function
from torch.utils.cpp_extension import load, _import_module_from_library
module_path = os.path.dirname(__file__)
upfirdn2d_op = load(
'upfirdn2d',
sources=[
os.path.join(module_path, 'upfirdn2d.cpp'),
os.path.join(module_path, 'upfirdn2d_kernel.cu'),
],
is_python_module=False
)
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
pad_x0 = pad[0]
pad_x1 = pad[1]
pad_y0 = pad[0]
pad_y1 = pad[1]
kernel_h, kernel_w = kernel.shape
batch, channel, in_h, in_w = input.shape
input = input.reshape(-1, in_h, in_w, 1)
out_h = (in_h * up + pad_y0 + pad_y1 - kernel_h) // down + 1
out_w = (in_w * up + pad_x0 + pad_x1 - kernel_w) // down + 1
out = torch.ops.upfirdn2d_op.upfirdn2d(input, kernel, up, up, down, down, pad_x0, pad_x1, pad_y0, pad_y1)
out = out.view(-1, channel, out_h, out_w)
return out
```
```cpp
#include <torch/extension.h>
torch::Tensor upfirdn2d(const torch::Tensor& input, const torch::Tensor& kernel,
int64_t up_x, int64_t up_y, int64_t down_x, int64_t down_y,
int64_t pad_x0, int64_t pad_x1, int64_t pad_y0, int64_t pad_y1) {
// operator body
}
TORCH_LIBRARY(upfirdn2d_op, m) {
m.def("upfirdn2d", upfirdn2d);
}
```
<img src="https://raw.githubusercontent.com/nihui/ncnn-assets/master/pnnx/customop.pnnx.png" width="400" />
# Supported PyTorch operator status
| torch.nn | Is Supported | Export to ncnn |
|---------------------------|----|---|
|nn.AdaptiveAvgPool1d | :heavy_check_mark: | :heavy_check_mark: |
|nn.AdaptiveAvgPool2d | :heavy_check_mark: | :heavy_check_mark: |
|nn.AdaptiveAvgPool3d | :heavy_check_mark: | :heavy_check_mark: |
|nn.AdaptiveMaxPool1d | :heavy_check_mark: | :heavy_check_mark: |
|nn.AdaptiveMaxPool2d | :heavy_check_mark: | :heavy_check_mark: |
|nn.AdaptiveMaxPool3d | :heavy_check_mark: | :heavy_check_mark: |
|nn.AlphaDropout | :heavy_check_mark: | :heavy_check_mark: |
|nn.AvgPool1d | :heavy_check_mark: | :heavy_check_mark:* |
|nn.AvgPool2d | :heavy_check_mark: | :heavy_check_mark:* |
|nn.AvgPool3d | :heavy_check_mark: | :heavy_check_mark:* |
|nn.BatchNorm1d | :heavy_check_mark: | :heavy_check_mark: |
|nn.BatchNorm2d | :heavy_check_mark: | :heavy_check_mark: |
|nn.BatchNorm3d | :heavy_check_mark: | :heavy_check_mark: |
|nn.Bilinear | |
|nn.CELU | :heavy_check_mark: |
|nn.ChannelShuffle | :heavy_check_mark: | :heavy_check_mark: |
|nn.ConstantPad1d | :heavy_check_mark: | :heavy_check_mark: |
|nn.ConstantPad2d | :heavy_check_mark: | :heavy_check_mark: |
|nn.ConstantPad3d | :heavy_check_mark: | :heavy_check_mark: |
|nn.Conv1d | :heavy_check_mark: | :heavy_check_mark: |
|nn.Conv2d | :heavy_check_mark: | :heavy_check_mark: |
|nn.Conv3d | :heavy_check_mark: | :heavy_check_mark: |
|nn.ConvTranspose1d | :heavy_check_mark: | :heavy_check_mark: |
|nn.ConvTranspose2d | :heavy_check_mark: | :heavy_check_mark: |
|nn.ConvTranspose3d | :heavy_check_mark: | :heavy_check_mark: |
|nn.CosineSimilarity | |
|nn.Dropout | :heavy_check_mark: | :heavy_check_mark: |
|nn.Dropout2d | :heavy_check_mark: | :heavy_check_mark: |
|nn.Dropout3d | :heavy_check_mark: | :heavy_check_mark: |
|nn.ELU | :heavy_check_mark: | :heavy_check_mark: |
|nn.Embedding | :heavy_check_mark: | :heavy_check_mark: |
|nn.EmbeddingBag | |
|nn.Flatten | :heavy_check_mark: |
|nn.Fold | |
|nn.FractionalMaxPool2d | |
|nn.FractionalMaxPool3d | |
|nn.GELU | :heavy_check_mark: | :heavy_check_mark: |
|nn.GroupNorm | :heavy_check_mark: | :heavy_check_mark: |
|nn.GRU | :heavy_check_mark: | :heavy_check_mark: |
|nn.GRUCell | |
|nn.Hardshrink | :heavy_check_mark: |
|nn.Hardsigmoid | :heavy_check_mark: | :heavy_check_mark: |
|nn.Hardswish | :heavy_check_mark: | :heavy_check_mark: |
|nn.Hardtanh | :heavy_check_mark: | :heavy_check_mark: |
|nn.Identity | |
|nn.InstanceNorm1d | :heavy_check_mark: |
|nn.InstanceNorm2d | :heavy_check_mark: | :heavy_check_mark: |
|nn.InstanceNorm3d | :heavy_check_mark: |
|nn.LayerNorm | :heavy_check_mark: | :heavy_check_mark: |
|nn.LazyBatchNorm1d | |
|nn.LazyBatchNorm2d | |
|nn.LazyBatchNorm3d | |
|nn.LazyConv1d | |
|nn.LazyConv2d | |
|nn.LazyConv3d | |
|nn.LazyConvTranspose1d | |
|nn.LazyConvTranspose2d | |
|nn.LazyConvTranspose3d | |
|nn.LazyLinear | |
|nn.LeakyReLU | :heavy_check_mark: | :heavy_check_mark: |
|nn.Linear | :heavy_check_mark: | :heavy_check_mark: |
|nn.LocalResponseNorm | :heavy_check_mark: | :heavy_check_mark: |
|nn.LogSigmoid | :heavy_check_mark: |
|nn.LogSoftmax | :heavy_check_mark: |
|nn.LPPool1d | :heavy_check_mark: |
|nn.LPPool2d | :heavy_check_mark: |
|nn.LSTM | :heavy_check_mark: | :heavy_check_mark: |
|nn.LSTMCell | |
|nn.MaxPool1d | :heavy_check_mark: | :heavy_check_mark: |
|nn.MaxPool2d | :heavy_check_mark: | :heavy_check_mark: |
|nn.MaxPool3d | :heavy_check_mark: | :heavy_check_mark: |
|nn.MaxUnpool1d | |
|nn.MaxUnpool2d | |
|nn.MaxUnpool3d | |
|nn.Mish | :heavy_check_mark: | :heavy_check_mark: |
|nn.MultiheadAttention | :heavy_check_mark: | :heavy_check_mark:* |
|nn.PairwiseDistance | |
|nn.PixelShuffle | :heavy_check_mark: | :heavy_check_mark: |
|nn.PixelUnshuffle | :heavy_check_mark: | :heavy_check_mark: |
|nn.PReLU | :heavy_check_mark: | :heavy_check_mark: |
|nn.ReflectionPad1d | :heavy_check_mark: | :heavy_check_mark: |
|nn.ReflectionPad2d | :heavy_check_mark: | :heavy_check_mark: |
|nn.ReLU | :heavy_check_mark: | :heavy_check_mark: |
|nn.ReLU6 | :heavy_check_mark: | :heavy_check_mark: |
|nn.ReplicationPad1d | :heavy_check_mark: | :heavy_check_mark: |
|nn.ReplicationPad2d | :heavy_check_mark: | :heavy_check_mark: |
|nn.ReplicationPad3d | :heavy_check_mark: |
|nn.RNN | :heavy_check_mark: | :heavy_check_mark:* |
|nn.RNNBase | |
|nn.RNNCell | |
|nn.RReLU | :heavy_check_mark: |
|nn.SELU | :heavy_check_mark: | :heavy_check_mark: |
|nn.Sigmoid | :heavy_check_mark: | :heavy_check_mark: |
|nn.SiLU | :heavy_check_mark: | :heavy_check_mark: |
|nn.Softmax | :heavy_check_mark: | :heavy_check_mark: |
|nn.Softmax2d | |
|nn.Softmin | :heavy_check_mark: |
|nn.Softplus | :heavy_check_mark: |
|nn.Softshrink | :heavy_check_mark: |
|nn.Softsign | :heavy_check_mark: |
|nn.SyncBatchNorm | |
|nn.Tanh | :heavy_check_mark: | :heavy_check_mark: |
|nn.Tanhshrink | :heavy_check_mark: |
|nn.Threshold | :heavy_check_mark: |
|nn.Transformer | |
|nn.TransformerDecoder | |
|nn.TransformerDecoderLayer | |
|nn.TransformerEncoder | |
|nn.TransformerEncoderLayer | |
|nn.Unflatten | |
|nn.Unfold | |
|nn.Upsample | :heavy_check_mark: | :heavy_check_mark: |
|nn.UpsamplingBilinear2d | :heavy_check_mark: | :heavy_check_mark: |
|nn.UpsamplingNearest2d | :heavy_check_mark: | :heavy_check_mark: |
|nn.ZeroPad2d | :heavy_check_mark: | :heavy_check_mark: |
| torch.nn.functional | Is Supported | Export to ncnn |
|---------------------------|----|----|
|F.adaptive_avg_pool1d | :heavy_check_mark: | :heavy_check_mark: |
|F.adaptive_avg_pool2d | :heavy_check_mark: | :heavy_check_mark: |
|F.adaptive_avg_pool3d | :heavy_check_mark: | :heavy_check_mark: |
|F.adaptive_max_pool1d | :heavy_check_mark: | :heavy_check_mark: |
|F.adaptive_max_pool2d | :heavy_check_mark: | :heavy_check_mark: |
|F.adaptive_max_pool3d | :heavy_check_mark: | :heavy_check_mark: |
|F.affine_grid | :heavy_check_mark: |
|F.alpha_dropout | :heavy_check_mark: | :heavy_check_mark: |
|F.avg_pool1d | :heavy_check_mark: | :heavy_check_mark:* |
|F.avg_pool2d | :heavy_check_mark: | :heavy_check_mark:* |
|F.avg_pool3d | :heavy_check_mark: | :heavy_check_mark:* |
|F.batch_norm | :heavy_check_mark: | :heavy_check_mark: |
|F.bilinear | |
|F.celu | :heavy_check_mark: |
|F.conv1d | :heavy_check_mark: | :heavy_check_mark: |
|F.conv2d | :heavy_check_mark: | :heavy_check_mark: |
|F.conv3d | :heavy_check_mark: | :heavy_check_mark: |
|F.conv_transpose1d | :heavy_check_mark: | :heavy_check_mark: |
|F.conv_transpose2d | :heavy_check_mark: | :heavy_check_mark: |
|F.conv_transpose3d | :heavy_check_mark: | :heavy_check_mark: |
|F.cosine_similarity | |
|F.dropout | :heavy_check_mark: | :heavy_check_mark: |
|F.dropout2d | :heavy_check_mark: | :heavy_check_mark: |
|F.dropout3d | :heavy_check_mark: | :heavy_check_mark: |
|F.elu | :heavy_check_mark: | :heavy_check_mark: |
|F.elu_ | :heavy_check_mark: | :heavy_check_mark: |
|F.embedding | :heavy_check_mark: | :heavy_check_mark: |
|F.embedding_bag | |
|F.feature_alpha_dropout | :heavy_check_mark: | :heavy_check_mark: |
|F.fold | |
|F.fractional_max_pool2d | |
|F.fractional_max_pool3d | |
|F.gelu | :heavy_check_mark: | :heavy_check_mark: |
|F.glu | |
|F.grid_sample | :heavy_check_mark: |
|F.group_norm | :heavy_check_mark: | :heavy_check_mark: |
|F.gumbel_softmax | |
|F.hardshrink | :heavy_check_mark: |
|F.hardsigmoid | :heavy_check_mark: | :heavy_check_mark: |
|F.hardswish | :heavy_check_mark: | :heavy_check_mark: |
|F.hardtanh | :heavy_check_mark: | :heavy_check_mark: |
|F.hardtanh_ | :heavy_check_mark: | :heavy_check_mark: |
|F.instance_norm | :heavy_check_mark: | :heavy_check_mark: |
|F.interpolate | :heavy_check_mark: | :heavy_check_mark: |
|F.layer_norm | :heavy_check_mark: | :heavy_check_mark: |
|F.leaky_relu | :heavy_check_mark: | :heavy_check_mark: |
|F.leaky_relu_ | :heavy_check_mark: | :heavy_check_mark: |
|F.linear | :heavy_check_mark: | :heavy_check_mark:* |
|F.local_response_norm | :heavy_check_mark: | :heavy_check_mark: |
|F.logsigmoid | :heavy_check_mark: |
|F.log_softmax | :heavy_check_mark: |
|F.lp_pool1d | :heavy_check_mark: |
|F.lp_pool2d | :heavy_check_mark: |
|F.max_pool1d | :heavy_check_mark: | :heavy_check_mark: |
|F.max_pool2d | :heavy_check_mark: | :heavy_check_mark: |
|F.max_pool3d | :heavy_check_mark: | :heavy_check_mark: |
|F.max_unpool1d | |
|F.max_unpool2d | |
|F.max_unpool3d | |
|F.mish | :heavy_check_mark: | :heavy_check_mark: |
|F.normalize | :heavy_check_mark: | :heavy_check_mark: |
|F.one_hot | |
|F.pad | :heavy_check_mark: | :heavy_check_mark: |
|F.pairwise_distance | |
|F.pdist | |
|F.pixel_shuffle | :heavy_check_mark: | :heavy_check_mark: |
|F.pixel_unshuffle | :heavy_check_mark: | :heavy_check_mark: |
|F.prelu | :heavy_check_mark: | :heavy_check_mark: |
|F.relu | :heavy_check_mark: | :heavy_check_mark: |
|F.relu_ | :heavy_check_mark: | :heavy_check_mark: |
|F.relu6 | :heavy_check_mark: | :heavy_check_mark: |
|F.rrelu | :heavy_check_mark: |
|F.rrelu_ | :heavy_check_mark: |
|F.selu | :heavy_check_mark: | :heavy_check_mark: |
|F.sigmoid | :heavy_check_mark: | :heavy_check_mark: |
|F.silu | :heavy_check_mark: | :heavy_check_mark: |
|F.softmax | :heavy_check_mark: | :heavy_check_mark: |
|F.softmin | :heavy_check_mark: |
|F.softplus | :heavy_check_mark: |
|F.softshrink | :heavy_check_mark: |
|F.softsign | :heavy_check_mark: |
|F.tanh | :heavy_check_mark: | :heavy_check_mark: |
|F.tanhshrink | :heavy_check_mark: |
|F.threshold | :heavy_check_mark: |
|F.threshold_ | :heavy_check_mark: |
|F.unfold | |
|F.upsample | :heavy_check_mark: | :heavy_check_mark: |
|F.upsample_bilinear | :heavy_check_mark: | :heavy_check_mark: |
|F.upsample_nearest | :heavy_check_mark: | :heavy_check_mark: |
| /rife-ncnn-vulkan-python-1.2.1.tar.gz/rife-ncnn-vulkan-python-1.2.1/rife_ncnn_vulkan_python/rife-ncnn-vulkan/src/ncnn/tools/pnnx/README.md | 0.809464 | 0.983769 | README.md | pypi |
import base64
from random import randbytes
from time import time
from rift.fift.fift import Fift
from rift.fift.types.cell import Cell
from rift.fift.types.factory import Factory
from rift.fift.types.null import Null
from rift.fift.types.tuple import Tuple
from rift.fift.types.util import create_entry
from rift.fift.utils import calc_method_id
from rift.types.addr import MsgAddress
# Type alias: values annotated `b64` are base64-encoded strings.
b64 = str
class C7Register:
    """Mirror of the TVM c7 "smart-contract information" register.

    Holds the values exposed to a contract at runtime (time, balance,
    own address, random seed, ...) and packs them into the tuple layout
    the Fift TVM runner expects (tag 0x076EF1EA followed by the fields).
    """

    unixtime: int
    balance: int
    myself: MsgAddress
    # Fixed: was annotated `randSeed`, but __init__/as_tuple use rand_seed.
    rand_seed: int
    actions: int
    messages_sent: int
    block_lt: int
    trans_lt: int
    global_config: Cell

    def __init__(self) -> None:
        self.unixtime = int(time())
        self.balance = 1000
        self.myself = MsgAddress.std(0, 1)
        # 256-bit pseudo-random seed (random module — fine for local
        # TVM emulation; not cryptographically secure).
        self.rand_seed = int.from_bytes(randbytes(32), byteorder="big")
        self.actions = 0
        self.messages_sent = 0
        # Current time doubles as a stand-in for the logical times.
        self.block_lt = self.unixtime
        self.trans_lt = self.unixtime
        self.global_config = Cell()

    def as_tuple(self) -> Tuple:
        """Pack the register into the TVM c7 tuple representation."""
        t = Tuple()
        # Balance is itself a tuple: (grams, extra-currency dict or null).
        balance = Tuple()
        balance.append(self.balance)
        balance.append(Null())
        t.append(
            0x076EF1EA,  # SmartContractInfo tag
            self.actions,
            self.messages_sent,
            self.unixtime,
            self.block_lt,
            self.trans_lt,
            self.rand_seed,
            balance,
            self.myself,
            self.global_config,
        )
        return t
class TVMConfig:
    """Input configuration for a single TVM execution."""

    debug: bool = True
    code: b64 = ""  # base64-encoded code cell (BoC)
    data: b64 = ""  # base64-encoded data cell (BoC)
    selector: int = 0  # function selector / method id
    stack: list | None = None  # initial stack values; None means empty
    c7: C7Register

    def __init__(self, c7=None) -> None:
        # Build a default c7 register unless the caller supplies one.
        if c7 is None:
            c7 = C7Register()
        self.c7 = c7

    def __entry__(self) -> dict:
        """Serialize into the dict layout the Fift TVM runner expects."""
        t = Tuple()
        t.append(self.c7.as_tuple())
        return {
            "debug": self.debug,
            "code": self.code,
            "data": self.data,
            "function_selector": self.selector,
            # `stack` defaults to None; treat that as an empty stack
            # instead of raising TypeError on iteration.
            "init_stack": [create_entry(i) for i in (self.stack or [])],
            "c7_register": t.__stack_entry__(),
        }
class TVMResult:
    """Successful TVM run: resulting state, gas usage, logs and stack."""

    data: Cell  # resulting data cell
    actions: Cell  # action list cell
    logs: str  # decoded VM log output
    gas: int  # gas consumed by the run
    stack: list  # resulting stack entries as loaded by Factory
class TVMError:
    """TVM terminated with a non-zero exit code."""

    exit_code: int  # TVM exit code
    logs: str  # decoded VM log output up to the failure
class ExecutionError:
    """The runner failed outside the VM (no exit code was produced)."""

    error: str  # error message reported by the Fift runner
class TVM:
    """Thin wrapper around the Fift-based TVM runner."""

    @classmethod
    def exec(cls, config: TVMConfig):
        """Run the VM with `config`.

        Returns a TVMResult on success, a TVMError when the VM exits
        with a code, or an ExecutionError for runner-level failures.
        """
        result = Fift.tvm(config.__entry__())

        if not result["ok"]:
            if "exit_code" in result:
                # The VM started but terminated abnormally.
                failure = TVMError()
                failure.exit_code = result["exit_code"]
                failure.logs = base64.b64decode(result["logs"]).decode("utf-8")
                return failure
            if "error" in result:
                # Runner-level failure: the VM produced no state at all.
                failure = ExecutionError()
                failure.error = result["error"]
                return failure
            # Neither success nor a recognized error shape.
            raise RuntimeError("Unexpected result: ", result)

        res = TVMResult()
        res.data = Factory.load("cell", result["data_cell"])
        res.actions = Factory.load("cell", result["action_list_cell"])
        res.logs = base64.b64decode(result["logs"]).decode("utf-8")
        res.gas = result["gas_consumed"]
        res.stack = [
            Factory.load(entry["type"], entry.get("value", None))
            for entry in result["stack"]
        ]
        return res

    @classmethod
    def get_method(cls, code: str, data: str, method: str | int, *stack):
        """Execute a get-method (by name or numeric id) on (code, data)."""
        config = TVMConfig()
        config.code = code
        config.data = data
        # Named methods are resolved to their numeric method id.
        config.selector = (
            calc_method_id(method) if isinstance(method, str) else method
        )
        config.stack = list(stack)
        return cls.exec(config)
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from rift.fift.types.cell import Cell
from rift.fift.types.tuple import Tuple
from rift.fift.types._fift_base import _FiftBaseType
from rift.fift.types.factory import Factory
from rift.util import type_id
class Slice(_FiftBaseType):
    """Fift-backed read cursor over a cell ("cell_slice" on the Fift stack).

    Convention: methods ending in `_` consume data and advance this slice
    in place; their underscore-less twins read without advancing. Several
    methods are still unimplemented stubs (`pass`) and return None.
    """

    __type_id__ = type_id("Slice")

    def __init__(self, __value__: str = None, __factory__: bool = False):
        # When constructed by Factory (__factory__=True) the value is
        # injected later; otherwise load the serialized value now.
        if not __factory__ and __value__:
            self.__load_data__(__value__)

    @classmethod
    def __type__(cls) -> str:
        """Fift stack-entry type name for this class."""
        return "cell_slice"

    def coin_(self) -> int:
        """Read a Gram (coin) amount and advance (Fift `Gram@+`)."""
        r: int
        s: Slice
        r, s = self.cmd("Gram@+", self)
        self.value = s.value
        return r

    def coin(self) -> int:
        """Read a Gram (coin) amount without advancing (Fift `Gram@`)."""
        r: int = self.cmd("Gram@", self)[0]
        return r

    def uint_(self, bits: int) -> int:
        """Read an unsigned `bits`-bit integer and advance (Fift `u@+`)."""
        r: int
        s: Slice
        r, s = self.cmd("u@+", self, bits)
        self.value = s.value
        return r

    def uint(self, bits: int) -> int:
        """Read an unsigned `bits`-bit integer without advancing."""
        r: int = self.cmd("u@", self, bits)[0]
        return r

    def sint_(self, bits: int) -> int:
        """Read a signed `bits`-bit integer and advance (Fift `i@+`)."""
        r: int
        s: Slice
        r, s = self.cmd("i@+", self, bits)
        self.value = s.value
        return r

    def sint(self, bits: int) -> int:
        """Read a signed `bits`-bit integer without advancing."""
        r: int = self.cmd("i@", self, bits)[0]
        return r

    def hash(self) -> int:
        """Return the slice hash (Fift `shash`) as a 256-bit integer."""
        from rift.fift.types.bytes import Bytes
        r: Bytes = self.cmd("shash", self)[0]
        return r.read_int(256)

    # --- not implemented yet ---
    def string_hash(self) -> int:
        pass

    def check_signature(self, signature: "Slice", public_key: int) -> int:
        pass

    def compute_data_size(self, max_cells: int) -> tuple[int, int, int]:
        pass

    def end_parse(self) -> None:
        pass

    def ref_(self) -> "Cell":
        """Load the next reference cell and advance (Fift `ref@+`)."""
        r: "Cell"
        s: Slice
        s, r = self.cmd("ref@+", self)
        self.value = s.value
        return r

    def ref(self) -> "Cell":
        """Load the next reference cell without advancing (Fift `ref@`)."""
        r: "Cell" = self.cmd("ref@", self)[0]
        return r

    # --- not implemented yet ---
    def bits_(self, len_: int) -> "Slice":
        pass

    def bits(self, len_: int) -> "Slice":
        pass

    def skip_n(self, len_: int) -> None:
        pass

    def skip_n_(self, len_: int) -> None:
        pass

    def first_bits(self, len_: int) -> "Slice":
        pass

    def skip_last_n(self, len_: int) -> None:
        pass

    def skip_last_n_(self, len_: int) -> None:
        pass

    def slice_last(self, len_: int) -> "Slice":
        pass

    def ldict_(self) -> "Cell":
        """Load a Maybe-^Cell dict and advance; None if the presence bit is 0."""
        x = self.uint_(1)
        if x == 1:
            return self.ref_()
        return None

    def ldict(self) -> "Cell":
        # NOTE(review): unlike ldict_, this peeks only the ref without
        # consuming/checking the presence bit — confirm this is intended.
        return self.ref()

    # --- not implemented yet ---
    def skip_dict(self) -> None:
        pass

    def maybe_ref_(self) -> "Cell":
        pass

    def maybe_ref(self) -> "Cell":
        pass

    def refs_n(self) -> int:
        """Number of remaining references (Fift `srefs`)."""
        return self.cmd("srefs", self)[0]

    def bits_n(self) -> int:
        """Number of remaining data bits (Fift `sbits`)."""
        return self.cmd("sbits", self)[0]

    # --- not implemented yet ---
    def bits_refs_n(self) -> tuple[int, int]:
        pass

    def is_empty(self) -> int:
        pass

    def is_data_empty(self) -> int:
        pass

    def are_refs_empty(self) -> int:
        pass

    def depth(self) -> int:
        pass

    def addr_(self) -> "Slice":
        """Consume a MsgAddress; currently always returns None.

        Reads the 2-bit address tag; for tag 2 (addr_std) it also
        consumes 9 + 256 bits but discards the parsed fields.
        """
        x = self.uint_(2)
        if x == 0:
            return None
        if x == 2:
            # TODO: return proper address object
            w = self.uint_(9)  # presumably anycast flag + workchain — confirm
            a = self.uint_(256)  # account id
            return None
        return None

    # --- not implemented yet ---
    def parse_addr(self) -> "Tuple":
        pass

    def parse_std_addr(self) -> tuple[int, int]:
        pass

    def parse_var_addr(self) -> tuple[int, "Slice"]:
        pass

    def is_equal(self, b: "Slice") -> int:
        pass
pass
Factory.register(Slice.__type__(), Slice) | /rift_framework-1.0.0rc1-py3-none-any.whl/rift/fift/types/slice.py | 0.68784 | 0.348922 | slice.py | pypi |
import base64
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from rift.fift.types.slice import Slice
from rift.fift.types.builder import Builder
from rift.fift.types.int import Int
from rift.fift.types.bytes import Bytes
from rift.fift.types._fift_base import _FiftBaseType
from rift.fift.types.factory import Factory
from rift.network.v2_network import Network
from rift.util import type_id
class Cell(_FiftBaseType):
    """Fift-backed TON cell ("cell" on the Fift stack).

    `value` holds the serialized cell, base64-encoded. Unless constructed
    by Factory, an empty cell is built through Fift (`<b b>`), optionally
    overwritten by a supplied serialized value.
    """

    __type_id__ = type_id("Cell")

    def __init__(self, __factory__: bool = False, __value__: str = None):
        if not __factory__:
            # Build an empty cell through Fift: empty builder -> cell.
            c: Cell = self.cmd("<b b>")[0]
            self.value = c.value
        if __value__ is not None:
            self.__load_data__(__value__)

    @classmethod
    def __type__(cls) -> str:
        """Fift stack-entry type name for this class."""
        return "cell"

    def parse(self) -> "Slice":
        """Open this cell for reading as a slice (Fift `<s`)."""
        return self.cmd("<s", self)[0]

    def hash(self) -> "Int":
        """Return the cell hash as an unsigned integer (Fift `hashu`)."""
        return self.cmd("hashu", self)[0]

    def hashB(self) -> "Bytes":
        """Return the cell hash as raw bytes (Fift `hashB`)."""
        return self.cmd("hashB", self)[0]

    @classmethod
    def __serialize__(
        cls,
        to: "Builder",
        value: "_FiftBaseType",
    ) -> "Builder":
        """Append `value`'s contents to the builder; no-op when None."""
        if value is None:
            return to
        s = value.parse()
        return to.slice(s)

    @classmethod
    def __deserialize__(
        cls,
        from_: "Slice",
        inplace: bool = True,
        **kwargs,
    ):
        """Deserialize from a slice; currently returns the slice itself."""
        return from_

    def __eq__(self, __o: object) -> bool:
        # Compare by serialized value. Return NotImplemented for non-Cell
        # operands instead of raising AttributeError on `.value`.
        if not isinstance(__o, Cell):
            return NotImplemented
        return __o.value == self.value

    def __hash__(self) -> int:
        # Defining __eq__ disables the inherited __hash__; restore
        # hashability consistently with equality (equal value, equal hash).
        return hash(self.value)

    def __bytes__(self) -> bytes:
        """Decode the base64 `value` into the raw serialized bytes."""
        return base64.b64decode(self.value)

    def as_ref(self):
        """Wrap this cell as a Ref[Cell]."""
        from rift.types.ref import Ref

        return Ref[Cell](self)

    def send(self, testnet=True):
        """Broadcast this cell's bytes as a BoC via the network layer."""
        n = Network(testnet=testnet)
        data = bytes(self)
        return n.send_boc(data)

    @classmethod
    def load_from(cls, file: str) -> "Cell":
        """Load a serialized cell from `file` on disk."""
        with open(file, "rb") as f:
            buf = f.read()
        data = base64.b64encode(buf).decode("utf-8")
        return Cell(__factory__=False, __value__=data)
# Register the Cell type with the factory so generic code can resolve it
# by its FunC type name.  (Fixed: trailing extraction residue corrupted
# this line.)
Factory.register(Cell.__type__(), Cell)
from typing import TYPE_CHECKING, Union
from rift.fift.types.dict import Dict
if TYPE_CHECKING:
from rift.fift.types.slice import Slice
from rift.fift.types.builder import Builder
from rift.fift.types.int import Int
class GDict(Dict):
    """Generic dictionary parameterised by a Fift word prefix.

    ``__g_id__`` selects the concrete dictionary family (e.g. ``"u"``
    for udict, ``"i"`` for idict) and is spliced into every Fift word
    executed below (``udict!``, ``idict@``, ...).  The command strings
    and the exact stack argument order are load-bearing.
    """

    def __init__(self, __value__=None, __g_id__=None):
        super().__init__(__value__=__value__)
        self.__g_id__ = __g_id__

    def set(
        self,
        n_bits: "Int",
        x: "Int",
        value: Union["Slice", "Builder"],
        exists_ok=True,
    ):
        """Return ``(new_dict, ok)`` with ``value`` stored under key ``x``.

        Slices use ``{g}dict!``; builders use ``b>{g}dict!``.  With
        ``exists_ok=False`` the add-only variant (``...!+``) is used,
        which fails when the key is already present.
        """
        from rift.fift.types.slice import Slice
        g = self.generic_identifier()
        cmd = f"{g}dict!" if isinstance(value, Slice) else f"b>{g}dict!"
        if not exists_ok:
            cmd += "+"
        new_d, ok = self.cmd(cmd, value, x, self, n_bits)
        new_d = GDict(__value__=new_d.value, __g_id__=g)
        return new_d, ok

    def set_(
        self,
        n_bits: "Int",
        x: "Int",
        value: Union["Slice", "Builder"],
        exists_ok=True,
    ):
        """In-place ``set``; raises RuntimeError when the store fails."""
        new_d, ok = self.set(n_bits, x, value, exists_ok)
        if not ok:
            raise RuntimeError()
        self.value = new_d.value

    def get(self, n_bits: "Int", x: "Int"):
        """Look up key ``x``; returns the value slice, or None on miss.

        A miss leaves a single item (the failure flag) on the stack.
        """
        g = self.generic_identifier()
        stack_out = self.cmd(f"{g}dict@", x, self, n_bits)
        if len(stack_out) == 1:
            return None
        else:
            return stack_out[0]

    def remove(self, n_bits: "Int", x: "Int"):
        """Return ``(new_dict, ok)`` with key ``x`` deleted."""
        g = self.generic_identifier()
        new_d, ok = self.cmd(f"{g}dict-", x, self, n_bits)
        new_d = GDict(__value__=new_d.value, __g_id__=g)
        return new_d, ok

    def remove_(self, n_bits: "Int", x: "Int"):
        """In-place ``remove``; raises RuntimeError when the key is absent."""
        new_d, ok = self.remove(n_bits, x)
        if not ok:
            raise RuntimeError()
        self.value = new_d.value

    def pop(self, n_bits: "Int", x: "Int"):
        """Delete key ``x`` and return ``(new_dict, value, ok)``.

        Uses the same Fift word as ``remove``; a hit leaves three items
        on the stack (dict', value, ok), a miss only two.
        """
        g = self.generic_identifier()
        stack_out = self.cmd(f"{g}dict-", x, self, n_bits)
        if len(stack_out) == 3:
            new_d, v, ok = stack_out
        else:
            new_d, ok = stack_out
            v = None
        new_d = GDict(__value__=new_d.value, __g_id__=g)
        return new_d, v, ok

    def pop_(self, n_bits: "Int", x: "Int"):
        """In-place ``pop``; returns the removed value, raises on miss."""
        new_d, v, ok = self.pop(n_bits, x)
        if not ok:
            raise RuntimeError()
        self.value = new_d.value
        return v

    def __setitem__(self, key, value):
        # d[k, n] = v         -> set (overwrite allowed)
        # d[k, n, True] = v   -> add-only (fail if the key exists)
        k, n, *left = key
        if len(left) != 0:
            exist_nok_flag = left[0]
        else:
            exist_nok_flag = False
        self.set_(n, k, value, exists_ok=not exist_nok_flag)

    def __getitem__(self, item):
        # d[k, n] -> value slice or None
        k, n = item
        return self.get(n, k)

    def __delitem__(self, key):
        # del d[k, n] -> raises if the key is absent
        k, n = key
        self.remove_(n, k)

    def generic_identifier(self):
        """The Fift word prefix for this dictionary family."""
        return self.__g_id__
from rift.ast.ref_table import ReferenceTable
from rift.ast.types import (
AsmMethod,
Contract,
IfFlow,
Method,
Node,
Statement,
WhileLoop,
)
class CallStacks(object):
    """Class responsible for tracking the calls.

    Global registry that accumulates AST nodes (statements, flow
    constructs) into the contract currently being traced.  All state is
    class-level: one contract is "current" at a time.
    """

    # All contracts seen so far, keyed by name.
    contracts = {}
    current_contract: Contract = None
    current_contract_name: str = None

    @classmethod
    def declare_contract(cls, name):
        """Start recording a new contract and make it current."""
        cls.current_contract = Contract(name)
        cls.current_contract_name = name
        cls.contracts[name] = cls.current_contract

    @classmethod
    def get_contract(cls, name):
        """Previously declared contract by name."""
        return cls.contracts[name]

    @classmethod
    def declare_method(cls, name, args, annotations):
        """Open a method on the current contract and its reference scope."""
        m = Method(name, args, annotations)
        scope = f"{cls.current_contract_name}_{name}"
        ReferenceTable.init_scope(scope)
        cls.current_contract.add_method(m)

    @classmethod
    def declare_asm(cls, name, args, annotations, asm_annoations):
        """Register a raw-asm method on the current contract."""
        m = AsmMethod(name, args, annotations, asm_annoations)
        cls.current_contract.add_method(m)

    @classmethod
    def add_raw_statement(cls, raw):
        """Append a pre-rendered statement verbatim."""
        cls.current_contract.add_statement(raw)

    @classmethod
    def add_statement(cls, type, *args):
        """Append a typed statement; returns its node id for later lookup."""
        s = Statement(type, args)
        s._scope = ReferenceTable.current_scope
        cls.current_contract.add_statement(s)
        return s.node_id()

    @classmethod
    def break_(cls):
        # TODO: Add when FunC supports or we move to TVM Engine
        # cls.add_statement(Statement.BREAK)
        raise Exception("FunC doesn't support break keyword!")

    @classmethod
    def return_(cls, entity):
        """Record a return statement; marks the entity as used."""
        ReferenceTable.mark(entity)
        cls.add_statement(Statement.RETURN, entity)
        return entity

    @classmethod
    def end_method(cls, method):
        """Close the given method on the current contract."""
        cls.current_contract.end_method(method)

    @classmethod
    def hide_method(cls, method):
        """Exclude the given method from code generation."""
        cls.current_contract.hide_method(method)

    @classmethod
    def begin_if(cls, cond):
        """Open an if-flow on ``cond``; returns its node id."""
        ReferenceTable.mark(cond)
        nif = IfFlow()
        cls.current_contract.add_statement(nif)
        nif.iff(cond)
        return nif.node_id()

    @classmethod
    def else_if(cls, node_id, cond=None):
        """Add an elif branch (``cond`` given) or the else branch (None)."""
        ReferenceTable.mark(cond)
        nif: IfFlow = Node.find(node_id)
        if cond:
            nif.iff(cond)
        else:
            nif.else_()

    @classmethod
    def end_if(cls, node_id):
        """Close the if-flow identified by ``node_id``."""
        nif: IfFlow = Node.find(node_id)
        cls.current_contract.current_method.end_statement(nif)

    @classmethod
    def begin_while(cls, cond):
        """Open a while-loop on ``cond``; returns its node id."""
        ReferenceTable.mark(cond)
        nif = WhileLoop(cond)
        cls.current_contract.add_statement(nif)
        return nif.node_id()

    @classmethod
    def end_while(cls, node_id):
        """Close the while-loop identified by ``node_id``."""
        nif: WhileLoop = Node.find(node_id)
        cls.current_contract.current_method.end_statement(nif)

    @classmethod
    def assign(cls, name, value):
        """Record ``name = value``."""
        return cls.add_statement(Statement.ASSIGN, name, value)

    @classmethod
    def multi_assign(cls, names, values):
        """Record a tuple assignment ``a, b, ... = values``."""
        return cls.add_statement(Statement.M_ASSIGN, names, values)

    @classmethod
    def expression(cls, expr):
        """Record a bare expression statement."""
        return cls.add_statement(Statement.EXPR, expr)

    @classmethod
    def call_(cls, name, *args, operand=None):
        """Record a function call, or a method call when ``operand`` is set."""
        ReferenceTable.mark(*args)
        if operand:
            cls.add_statement(
                Statement.METHOD_CALL,
                name,
                operand,
                *args,
            )
        else:
            cls.add_statement(Statement.FUNC_CALL, name, *args)
from rift.ast.ref_table import ReferenceTable
class Expr:
    """An expression node in the FunC-generation AST.

    Each node carries a kind tag (one of the ``EXPR_*`` constants), the
    positional operands in ``args``, and optional type ``annotations``.
    ``repr(node)`` renders the node as FunC source text.
    """

    EXPR_AR2 = 0
    EXPR_CALL = 1
    EXPR_FUNC = 2
    EXPR_AR1 = 3
    EXPR_VAR = 4
    EXPR_CONST = 5

    def __init__(self, type, *args, annotations=None):
        self.type = type
        self.args = args
        # Defensive copy so later mutation of the caller's dict cannot
        # leak into this node.
        self.annotations = dict(annotations) if annotations else annotations

    @staticmethod
    def call_expr(operand, method, *args, annotations=None):
        """Method call: ``operand.method(args)`` / ``operand~method(args)``."""
        ReferenceTable.mark(operand, *args)
        return Expr(
            Expr.EXPR_CALL,
            operand,
            method,
            *args,
            annotations=annotations,
        )

    @staticmethod
    def call_func(method, *args, annotations=None):
        """Free function call: ``method(args)``."""
        ReferenceTable.mark(*args)
        return Expr(Expr.EXPR_FUNC, method, *args, annotations=annotations)

    @staticmethod
    def binary_op(op, op1, op2, type_):
        """Binary arithmetic/logic expression ``op1 op op2``."""
        ReferenceTable.mark(op1, op2)
        return Expr(Expr.EXPR_AR2, op, op1, op2, annotations={"return": type_})

    @staticmethod
    def unary_op(op, operand, type_):
        """Unary expression ``op (operand)``."""
        ReferenceTable.mark(operand)
        return Expr(Expr.EXPR_AR1, op, operand, annotations={"return": type_})

    @staticmethod
    def variable(x, type_=None):
        """Reference to an existing variable ``x``."""
        ReferenceTable.ref(x)
        return Expr(Expr.EXPR_VAR, x, annotations={"return": type_})

    @staticmethod
    def const(x):
        """Literal constant; integer literals get an ``int`` return type."""
        ReferenceTable.mark(x)
        node = Expr(Expr.EXPR_CONST, x)
        if isinstance(x, int):
            node.annotations = {"return": int}
        return node

    def __repr__(self):
        def quote(v):
            # String arguments render as FunC string literals.
            if isinstance(v, str):
                return '"%s"' % v
            return str(v)

        kind = self.type
        if kind == Expr.EXPR_AR2:
            op, lhs, rhs = self.args[0], self.args[1], self.args[2]
            if op == "&" or op == "|":
                # FunC requires parenthesised operands around & and |.
                return "({lhs}) {op} ({rhs})".format(lhs=lhs, op=op, rhs=rhs)
            return "{lhs} {op} {rhs}".format(lhs=lhs, op=op, rhs=rhs)
        if kind == Expr.EXPR_AR1:
            return "{op} ({operand})".format(
                op=self.args[0],
                operand=self.args[1],
            )
        if kind == Expr.EXPR_CALL:
            target, name = self.args[0], self.args[1]
            # A trailing underscore marks the modifying (~) call form.
            sep = "~" if name.endswith("_") else "."
            return "{t}{sep}{n}({a})".format(
                t=target,
                sep=sep,
                n=name.removesuffix("_"),
                a=", ".join(quote(a) for a in self.args[2:]),
            )
        if kind == Expr.EXPR_FUNC:
            name = self.args[0]
            prefix = "~" if name.endswith("_") else ""
            return "{p}{n}({a})".format(
                p=prefix,
                n=name.removesuffix("_"),
                a=", ".join(quote(a) for a in self.args[1:]),
            )
        if kind == Expr.EXPR_VAR:
            return str(self.args[0])
        if kind == Expr.EXPR_CONST:
            return repr(self.args[0])
from rift.ast.printer import Printer
from rift.ast.types.block import Block
from rift.ast.types.node import Node
from rift.ast.types.statement import Statement
from rift.ast.utils import _type_name
class Method(Node):
    """A FunC function definition: name, typed args, and a statement body.

    ``active_statement`` is a stack of currently-open flow constructs
    (if/while); new statements attach to the innermost one.
    """

    # NOTE(review): class-level mutable default; every instance rebinds
    # it in __init__, so it effectively only serves as an annotation.
    active_statement: list[Statement] = []
    block: Block

    def __init__(self, name, args, annotations):
        super().__init__()
        self.name = name
        self.args = args
        self.annotations = annotations
        self.active_statement = []
        self.block = Block()

    def add_statement(self, statement):
        """Append a statement to the innermost open flow, or to the body."""
        if self.active_statement:
            self.active_statement[0].add_statement(statement)
        else:
            self.block.add_statement(statement)
        # Flow constructs become the new innermost attachment target.
        if statement.activates():
            self.active_statement.insert(0, statement)

    def end_statement(self, statement):
        """Close a flow construct previously opened by add_statement."""
        self.active_statement.remove(statement)

    def _get_specs(self):
        """Render the FunC specifier suffix (impure/inline/method_id ...)."""
        spec = self.annotations.get("_method")
        if not spec:
            return ""
        parts = []
        if spec["impure"]:
            parts.append("impure")
        if spec["inline"]:
            parts.append("inline")
        elif spec["inline_ref"]:
            parts.append("inline_ref")
        elif spec["method_id"]:
            if spec["method_id_v"]:
                parts.append("method_id(%d)" % spec["method_id_v"])
            else:
                parts.append("method_id")
        if not parts:
            return ""
        return " ".join(parts) + " "

    def print_func(self, printer: Printer):
        """Emit the FunC source of this method through ``printer``."""
        arg_defs = [
            "{type} {name}".format(
                type=_type_name(self.annotations[arg]),
                name=arg,
            )
            for arg in self.args
        ]
        printer.print(
            "{output} {name}({args}) {specs}{{",
            output=_type_name(self.annotations["return"]),
            name=self.name,
            args=", ".join(arg_defs),
            specs=self._get_specs(),
            o="{",
        )
        printer.incr_indent()
        self.block.print_func(printer)
        printer.decr_indent()
        printer.print("}}")
import ast
from typing import Any
class IfPatcher(ast.NodeTransformer):
_counter = 0
"""Transforms the AST to handle conditions."""
def visit_If(self, node: ast.If) -> Any:
# WHY?: This causes visitor to visit all children too,
# otherwise we had to visit manually
self.generic_visit(node)
if_data = []
current = node
if_data.append((current.test, current.body))
el_ = current.orelse
if len(el_) != 0:
if_data.append((None, el_))
head = IfPatcher._counter
IfPatcher._counter += 1
with_item = ast.withitem(
context_expr=ast.Call(
func=ast.Attribute(
value=ast.Name(id="helpers", ctx=ast.Load()),
attr="_cond",
ctx=ast.Load(),
),
args=[],
keywords=[],
),
optional_vars=ast.Name(id=f"c{head}", ctx=ast.Store()),
)
with_body = []
for if_test, if_body in if_data:
if if_test:
expr = ast.Expr(
value=ast.Call(
func=ast.Attribute(
value=ast.Name(id=f"c{head}", ctx=ast.Load()),
attr="match",
ctx=ast.Load(),
),
args=[if_test],
keywords=[],
),
)
else:
expr = ast.Expr(
value=ast.Call(
func=ast.Attribute(
value=ast.Name(id=f"c{head}", ctx=ast.Load()),
attr="otherwise",
ctx=ast.Load(),
),
args=[],
keywords=[],
),
)
with_body.append(expr)
with_body.extend(if_body)
with_ = ast.With(items=[with_item], body=with_body)
ast.fix_missing_locations(with_)
return with_ | /rift_framework-1.0.0rc1-py3-none-any.whl/rift/ast/patchers/if_patcher.py | 0.657868 | 0.197832 | if_patcher.py | pypi |
import ast
from copy import deepcopy
from typing import Any
class AssignPatcher(ast.NodeTransformer):
    """Transforms the AST to capture assignments.

    Rewrites every ``Assign`` so the rift runtime observes name binding:

    * ``a, b = expr`` is staged through ``__tmp__`` with an optional
      ``__prep_unpack__`` probe and a ``helpers._m_assign`` notification.
    * ``x = expr`` / ``obj.attr = expr`` is preceded by ``__rem__ = <name>``
      and followed by ``target = target.__assign__(__rem__)``.
    """

    def visit_Assign(self, node):
        tg = node.targets[0]
        if isinstance(tg, ast.Tuple):
            # --- tuple target: a, b = expr ------------------------------
            # Stage the RHS in __tmp__ so it is evaluated exactly once.
            node.targets = [
                ast.Name(
                    id="__tmp__",
                    ctx=ast.Store(),
                ),
            ]
            vars = [v.id for v in tg.dims]
            # e_expr: `a, b, ... = __tmp__` -- the actual unpacking.
            e_expr = ast.Assign(
                targets=[
                    ast.Tuple(
                        elts=[
                            ast.Name(
                                id=v,
                                ctx=ast.Store(),
                            )
                            for v in vars
                        ],
                        ctx=ast.Store(),
                    ),
                ],
                value=ast.Name(
                    id="__tmp__",
                    ctx=ast.Load(),
                ),
            )
            # l_expr / r_expr: guarded probe
            # `hasattr(__tmp__, "__prep_unpack__") and __tmp__.__prep_unpack__(N)`
            l_expr = ast.Call(
                func=ast.Name(id="hasattr", ctx=ast.Load()),
                args=[
                    ast.Name(id="__tmp__", ctx=ast.Load()),
                    ast.Constant(value="__prep_unpack__", kind=None),
                ],
                keywords=[],
            )
            r_expr = ast.Call(
                func=ast.Attribute(
                    value=ast.Name(id="__tmp__", ctx=ast.Load()),
                    attr="__prep_unpack__",
                    ctx=ast.Load(),
                ),
                args=[ast.Constant(value=len(vars), kind=None)],
                keywords=[],
            )
            p_expr = ast.Expr(
                value=ast.BoolOp(op=ast.And(), values=[l_expr, r_expr]),
            )
            # a_expr: `helpers._m_assign(__tmp__, ["a", "b"], [a, b])`
            # notifies the tracer of the multi-assignment.
            a_expr = ast.Expr(
                value=ast.Call(
                    func=ast.Attribute(
                        value=ast.Name(id="helpers", ctx=ast.Load()),
                        attr="_m_assign",
                        ctx=ast.Load(),
                    ),
                    args=[
                        ast.Name(id="__tmp__", ctx=ast.Load()),
                        ast.List(
                            elts=[
                                ast.Constant(value=str(v), kind=None)
                                for v in vars
                            ],
                            ctx=ast.Load(),
                        ),
                        ast.List(
                            elts=[
                                ast.Name(id=str(v), ctx=ast.Load())
                                for v in vars
                            ],
                            ctx=ast.Load(),
                        ),
                    ],
                    keywords=[],
                ),
            )
            nodes = [node, p_expr, e_expr, a_expr]
        else:
            # --- single target: Name or Attribute -----------------------
            target = node.targets[0]
            if hasattr(target, "id"):
                tg = target.id
                # Let's not patch some specific vars
                if tg.startswith("__") and tg.endswith("__"):
                    return node
            else:
                tg = None
            nodes = [node]
            # Wrap bare int literals (but not bools) so they become
            # framework Int instances: helpers.factory_("int", <lit>).
            if isinstance(node.value, ast.Constant):
                if isinstance(node.value.value, int) and type(
                    node.value.value,
                ) != type(False):
                    node.value = ast.Call(
                        func=ast.Attribute(
                            value=ast.Name(id="helpers", ctx=ast.Load()),
                            attr="factory_",
                            ctx=ast.Load(),
                        ),
                        args=[ast.Constant("int"), node.value],
                        keywords=[],
                    )
            # Load-context copy of the target for use on the RHS below.
            nt = deepcopy(target)
            nt.ctx = ast.Load()
            # __rem__ holds the target's name: via __rem_name__() for
            # attributes, or as a plain string constant for names.
            # NOTE(review): for any other target kind (e.g. Subscript)
            # neither branch runs and `r_expr` is unbound, so the insert
            # below would raise NameError -- confirm subscript targets
            # can never reach this transformer.
            if isinstance(target, ast.Attribute):
                rem_expr = ast.Call(
                    func=ast.Attribute(
                        value=nt,
                        attr="__rem_name__",
                        ctx=ast.Load(),
                    ),
                    args=[],
                    keywords=[],
                )
                r_expr = ast.Assign(
                    targets=[ast.Name(id="__rem__", ctx=ast.Store())],
                    value=rem_expr,
                )
            elif isinstance(target, ast.Name):
                r_expr = ast.Assign(
                    targets=[ast.Name(id="__rem__", ctx=ast.Store())],
                    value=ast.Constant(value=target.id, kind=None),
                )
            nodes.insert(0, r_expr)
            # c_expr: `target.__assign__(__rem__)` -- lets the value
            # record the name it was bound to.
            c_expr = ast.Call(
                func=ast.Attribute(
                    value=nt,
                    attr="__assign__",
                    ctx=ast.Load(),
                ),
                args=[ast.Name(id="__rem__", ctx=ast.Load())],
                keywords=[],
            )
            # NOTE(review): both branches below are identical; presumably
            # a distinction was planned (see the commented alternative).
            if isinstance(node.value, ast.Name):
                a_expr = ast.Assign(
                    targets=[target],
                    value=c_expr,
                )
            else:
                a_expr = ast.Assign(
                    targets=[target],
                    value=c_expr,
                )
            # a_expr = ast.Expr(
            #     value=c_expr,
            # )
            nodes.append(a_expr)
        # Synthesized nodes need line/col info before compilation.
        for g_node in nodes:
            ast.fix_missing_locations(g_node)
        return nodes
import re
from os import getcwd, path
from tomlkit import parse
class ContractConfig:
name: str | None
contract: str
tests: list[str]
deploy: str | None
def get_file_name(self) -> str:
name = self.name
if name is None:
name = self.contract
# CamelCase -> snake_case
name = re.sub(r"(?<!^)(?=[A-Z])", "_", name).lower()
return name
@classmethod
def load(cls, data: dict) -> "ContractConfig":
config = ContractConfig()
config.name = data.get("name", None)
config.contract = data["contract"]
config.tests = data.get("tests", [])
config.deploy = data.get("deploy", None)
return config
class ProjectConfig:
    """Parsed ``project.toml`` of a rift workspace."""

    name: str
    contracts: dict[str, ContractConfig]

    def get_contract(self, name: str) -> ContractConfig:
        """Config entry whose ``contract`` field matches ``name``; a
        minimal default entry is synthesized when none is configured."""
        for entry in self.contracts.values():
            if entry.contract == name:
                return entry
        return ContractConfig.load({"contract": name})

    def get_contract_file_name(self, contract: type | str):
        """Output file stem for a contract class or class name."""
        key = contract.__name__ if isinstance(contract, type) else contract
        return self.get_contract(key).get_file_name()

    @classmethod
    def load(cls, path_: str) -> "ProjectConfig":
        """Parse the TOML document at ``path_``."""
        with open(path_) as fh:
            doc = parse(fh.read())
        config = ProjectConfig()
        config.name = doc["name"]
        config.contracts = {
            key: ContractConfig.load(doc["contracts"][key])
            for key in doc["contracts"]
        }
        return config

    @classmethod
    def working(cls) -> "ProjectConfig | None":
        """Loads config from the working directory
        Returns None if not a project
        """
        config_file = path.join(getcwd(), "project.toml")
        if not path.exists(config_file):
            return None
        return cls.load(config_file)
from typing import Type, TypeVar
from rift.core.annots import impure, is_method, method
from rift.core.invokable import InvokableFunc
from rift.fift.contract import ExecutableContract
from rift.fift.types.cell import Cell as FiftCell
from rift.func.meta_contract import ContractMeta
from rift.func.types.types import Cell, Slice
from rift.func.util import cls_attrs
from rift.network.inetwork import INetwork
from rift.types.bases.cell import Cell as GeneralCell
from rift.types.model import Model
from rift.types.msg import (
ExternalMessage,
InternalMessage,
MsgAddress,
StateInit,
)
from rift.types.payload import Payload
T = TypeVar("T", bound="Contract")
class Contract(metaclass=ContractMeta):
    """Base class for a rift smart contract.

    Subclasses override ``internal_receive`` / ``external_receive``;
    ``ContractMeta`` drives FunC code generation from the decorated
    methods.  NOTE: method annotations below feed the code generator --
    do not change them for cosmetic reasons.
    """

    # Compiled FunC code cell (populated by the build pipeline).
    __fc_code__ = None
    # Marks interface-only contracts that are not compiled themselves.
    __interface__ = False
    # Sentinel exit value returned by the default receive handlers.
    NOT_IMPLEMENTED = 0x91AC43

    def __init__(self):
        pass

    @impure
    @method()
    def recv_internal(
        self,
        balance: int,
        msg_value: int,
        in_msg_full: Cell,
        in_msg_body: Slice,
    ) -> None:
        """TVM entry point for internal messages.

        Stores the call context on ``self`` and delegates to
        ``internal_receive``.
        """
        self.balance = balance
        self.in_value = msg_value
        self.message = InternalMessage(in_msg_full.parse())
        self.body = in_msg_body
        return self.internal_receive()

    @impure
    @method()
    def recv_external(
        self,
        in_msg_full: Cell,
        in_msg_body: Slice,
    ) -> None:
        """TVM entry point for external messages."""
        self.body = in_msg_body
        self.message = ExternalMessage(in_msg_full.parse())
        return self.external_receive()

    def internal_receive(
        self,
    ) -> None:
        # Default handler: signal "not implemented"; subclasses override.
        return self.NOT_IMPLEMENTED

    def external_receive(
        self,
    ) -> None:
        return self.NOT_IMPLEMENTED

    def __getattr__(self, item):
        # Any unknown attribute resolves to an invokable on-chain method.
        return InvokableFunc(item)

    def connect(
        self,
        network: INetwork,
        addr: str,
        use_code=False,
        use_data=True,
    ):
        """Bind this instance to the deployed account at ``addr``.

        Returns ``(ok, account)``; ``ok`` is False when the account is
        not active.  With ``use_data`` the on-chain data cell is parsed
        (through the subclass's nested ``Data`` model when present);
        with ``use_code`` the on-chain code replaces ``__code_cell__``.
        """
        self._addr = addr
        acc = network.get_account(self._addr)
        if acc.state != acc.state.ACTIVE:
            return False, acc
        if use_data:
            d_slice = FiftCell(__value__=acc.data).parse()
            if hasattr(type(self), "Data"):
                Data = type(self).Data
                self.data = Data.from_slice(d_slice)
            else:
                self.data = d_slice
        if use_code:
            self.__code_cell__ = FiftCell(__value__=acc.code)
        return True, acc

    @classmethod
    def address(
        cls,
        data: GeneralCell | Model | Payload,
        code: GeneralCell = None,
        pretty=False,
        return_state=False,
    ) -> Slice | str | tuple[Slice | str, GeneralCell]:
        """Compute the contract address from its initial (code, data).

        Builds the StateInit, hashes it, and wraps the hash as a
        workchain-0 MsgAddress.  ``pretty`` yields the human-readable
        form; ``return_state`` additionally returns the StateInit cell.
        """
        if isinstance(data, Model) or isinstance(data, Payload):
            data = data.as_cell()
        if code is None:
            code = cls.__code_cell__
        s = StateInit(
            split_depth=None,
            special=None,
            code=code,
            data=data,
            library=None,
        )
        s = s.as_cell()
        s_hash = s.hash()
        address = MsgAddress.std(0, s_hash)
        if pretty:
            address = MsgAddress.human_readable(address)
        if return_state:
            return address, s
        return address

    @classmethod
    def deploy(
        cls,
        data: GeneralCell | Model | Payload,
        code: GeneralCell | str = None,
        body: GeneralCell = None,
        amount=10**7,
        independent: bool = False,
        ref_state=True,
    ) -> GeneralCell:
        """Build a deployment message carrying the StateInit.

        NOTE(review): actually returns ``(message_cell, address)`` --
        the annotation understates the return; left as-is since
        annotations may be consumed elsewhere.  ``independent`` selects
        an external message instead of an internal one.
        """
        if isinstance(data, Model) or isinstance(data, Payload):
            data = data.as_cell()
        if code is None:
            code = cls.__code_cell__
        address, s = cls.address(data, code, pretty=False, return_state=True)
        if ref_state:
            s = s.as_ref()
        if body is None:
            body = GeneralCell()
        if not independent:
            msg = InternalMessage.build(
                address,
                state_init=s,
                body=body,
                amount=amount,
            )
        else:
            msg = ExternalMessage.build(
                address,
                state_init=s,
                body=body,
            )
        return msg.as_cell(), address

    @classmethod
    def code(cls) -> Cell:
        """The contract's compiled code cell."""
        return cls.__code_cell__

    @classmethod
    def instantiate(cls: Type[T], data: Cell) -> T:
        """Create an executable (test) instance with the given data cell,
        exposing every ``@method``-decorated member."""
        attrs = cls_attrs(cls)
        methods = list(filter(lambda x: is_method(x[1]), attrs.items()))
        methods = [x[0] for x in methods]
        return ExecutableContract.create(cls.code(), data, methods)
from rift.ast.types import Expr
from rift.core import Entity
from rift.core.factory import Factory
from rift.core.utils import init_abstract_type
from rift.func.types.builder_base import _BuilderBase
from rift.func.types.cell_base import _CellBase
from rift.func.types.cont_base import _ContBase
from rift.func.types.dict_base import _DictBase
from rift.func.types.idict_base import _IDictBase
from rift.func.types.int_base import _IntBase
from rift.func.types.pfxdict_base import _PfxDictBase
from rift.func.types.slice_base import _SliceBase
from rift.func.types.string_base import _StringBase
from rift.func.types.udict_base import _UDictBase
from rift.util import type_id
class Int(_IntBase):
    """FunC ``int`` type wrapping a concrete Python integer literal."""

    __type_id__ = type_id("Int")

    def __init__(self, value, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.value = value
        # Without an explicit AST node, this literal is its own expression.
        if "data" not in kwargs:
            self.data = Expr.const(value)

    @classmethod
    def abstract_init(cls, *args, **kwargs) -> "Int":
        """Placeholder instance (value 0) for abstract/lazy construction."""
        return cls(0, *args, **kwargs)

    @classmethod
    def type_name(cls) -> str:
        return "int"


class HexInt(_IntBase):
    """Same as ``Int`` but renders its literal in hexadecimal.

    NOTE(review): duplicates ``Int`` except for ``_repr_`` -- could
    subclass ``Int`` instead.
    """

    __type_id__ = type_id("HexInt")

    def __init__(self, value, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.value = value
        if "data" not in kwargs:
            self.data = Expr.const(value)

    @classmethod
    def abstract_init(cls, *args, **kwargs) -> "Int":
        # NOTE(review): returns a HexInt; annotation says "Int".
        return cls(0, *args, **kwargs)

    def _repr_(self):
        # Hexadecimal rendering used during code generation.
        return hex(self.value)

    @classmethod
    def type_name(cls) -> str:
        return "int"
class Slice(_SliceBase):
    """FunC ``slice`` type."""

    __type_id__ = type_id("Slice")

    @classmethod
    def type_name(cls) -> str:
        return "slice"

    def __mod__(self, other):
        # `s % SomeType` casts the slice into the given type.
        assert isinstance(other, type)
        return other(self)

    def __rshift__(self, other):
        # `s >> SomeType` deserializes the type from the slice.
        return other.__deserialize__(self)


class Cont(_ContBase):
    """FunC ``cont`` (continuation) type."""

    __type_id__ = type_id("Cont")

    @classmethod
    def type_name(cls) -> str:
        return "cont"


class String(_StringBase):
    """FunC string type (slice-backed)."""

    __type_id__ = type_id("String")

    @classmethod
    def type_name(cls) -> str:
        return "string"


class Cell(_CellBase):
    """FunC ``cell`` type."""

    __type_id__ = type_id("Cell")

    @classmethod
    def type_name(cls) -> str:
        return "cell"

    def as_ref(self):
        """Wrap as ``Ref[Cell]`` so it serializes as a reference."""
        from rift.types.ref import Ref

        return Ref[Cell](self)


class Dict(_DictBase):
    """Generic dictionary; represented as a ``cell`` in FunC."""

    __type_id__ = type_id("Dict")

    @classmethod
    def type_name(cls) -> str:
        return "cell"


class UDict(_UDictBase):
    """Dictionary with unsigned-integer keys."""

    __type_id__ = type_id("UDict")
    pass


class IDict(_IDictBase):
    """Dictionary with signed-integer keys."""

    __type_id__ = type_id("IDict")
    pass


class PfxDict(_PfxDictBase, Cell):
    """Prefix-code dictionary; represented as a ``cell`` in FunC."""

    __type_id__ = type_id("PfxDict")

    @classmethod
    def type_name(cls) -> str:
        return "cell"


class Builder(_BuilderBase):
    """FunC ``builder`` type."""

    __type_id__ = type_id("Builder")

    @classmethod
    def type_name(cls) -> str:
        return "builder"
class Tensor(Entity, tuple):
    """A FunC tensor value: an Entity that is also a Python tuple.

    When ``type_`` (a generic alias) is set and the value is flagged
    unpackable, iteration yields freshly-initialized abstract instances
    of each element type instead of the stored items.
    """

    __type_id__ = type_id("Tensor")

    def __new__(cls, *args, **kwargs):
        # tuple.__new__ only accepts the positional payload.
        return super().__new__(cls, *args)

    def __init__(self, *args, **kwargs):
        name = kwargs.pop("name", None)
        data = kwargs.pop("data", None)
        self.type_ = kwargs.pop("type_", None)
        super().__init__(data=data, name=name)

    def __iter__(self):
        # BUG FIX (review): the previous implementation mixed
        # ``return super().__iter__()`` with ``yield`` in one body, which
        # made the whole method a generator -- the un-typed path therefore
        # yielded *nothing* instead of the tuple's items.  It also guarded
        # with ``hasattr(self, "__unpackable")`` but then read
        # ``self.__unpackable``, which name-mangles to
        # ``_Tensor__unpackable`` and so could never match the guard.
        if not self.type_:
            return super().__iter__()

        def _abstract_items():
            # Unpackable typed tensors expand into abstract instances of
            # their declared element types; otherwise iteration is empty
            # (matching the prior behavior for the non-unpackable case).
            if getattr(self, "__unpackable", False):
                for _, tp in zip(
                    range(self._unpack_len),
                    self.type_.__args__,
                ):
                    yield init_abstract_type(tp)

        return _abstract_items()
class Tuple(Entity):
    """FunC ``tuple`` value type."""

    __type_id__ = type_id("Tuple")

    @classmethod
    def type_name(cls) -> str:
        return "tuple"


# Register every concrete FunC type with the factory so generic code can
# resolve instances by type name.
Factory.register("Tensor", Tensor)
Factory.register("Tuple", Tuple)
Factory.register("Builder", Builder)
Factory.register("Dict", Dict)
Factory.register("UDict", UDict)
Factory.register("IDict", IDict)
Factory.register("PfxDict", PfxDict)
Factory.register("Slice", Slice)
Factory.register("Cell", Cell)
Factory.register("String", String)
Factory.register("Cont", Cont)
Factory.register("Int", Int)
Factory.register("HexInt", HexInt)
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from rift.func.types.types import Slice, Cell, UDict, Builder
from rift.core.invokable import typed_invokable
from rift.func.types.dict_base import _DictBase
class _UDictBase(_DictBase):
    """Unsigned-integer-keyed dictionary primitives.

    Every method is a thin binding to the matching FunC stdlib function
    (declared via ``typed_invokable``); bodies are intentionally empty.
    Conventions: a trailing ``_`` marks the in-place (modifying) form;
    a ``_check`` suffix marks the ``?`` form returning a success flag.
    The annotations drive code generation -- treat them as code.
    """

    # -- reference-valued entries ------------------------------------
    @typed_invokable(name="udict_set_ref")
    def set_ref(self, key_len: int, index: int, value: "Cell") -> "UDict":
        pass
    @typed_invokable(name="udict_set_ref_")
    def set_ref_(self, key_len: int, index: int, value: "Cell") -> None:
        pass
    @typed_invokable(name="udict_get_ref")
    def get_ref(self, key_len: int, index: int) -> "Cell":
        pass
    @typed_invokable(name="udict_get_ref?")
    def get_ref_check(self, key_len: int, index: int) -> tuple["UDict", int]:
        # NOTE(review): FunC `udict_get_ref?` returns (cell, found-flag);
        # the first element annotated as "UDict" looks like it should be
        # "Cell" -- confirm against the code generator before changing.
        pass
    @typed_invokable(name="udict_set_get_ref")
    def set_get_ref(
        self,
        key_len: int,
        index: int,
        value: "Cell",
    ) -> tuple["UDict", "Cell"]:
        pass
    @typed_invokable(name="udict_set_get_ref_")
    def set_get_ref_(self, key_len: int, index: int, value: "Cell") -> "Cell":
        pass

    # -- deletion / lookup -------------------------------------------
    @typed_invokable(name="udict_delete?")
    def delete_check(self, key_len: int, index: int) -> tuple["UDict", int]:
        pass
    @typed_invokable(name="udict_delete?_")
    def delete_check_(self, key_len: int, index: int) -> int:
        pass
    @typed_invokable(name="udict_get?")
    def get_check(self, key_len: int, index: int) -> tuple["Slice", int]:
        pass
    @typed_invokable(name="udict_delete_get?")
    def delete_get_check(
        self,
        key_len: int,
        index: int,
    ) -> tuple["UDict", "Slice", int]:
        pass
    @typed_invokable(name="udict_delete_get?_")
    def delete_get_check_(
        self,
        key_len: int,
        index: int,
    ) -> tuple["Slice", int]:
        pass

    # -- slice-valued stores -----------------------------------------
    @typed_invokable(name="udict_set")
    def set(self, key_len: int, index: int, value: "Slice") -> "UDict":
        pass
    @typed_invokable(name="udict_set_")
    def set_(self, key_len: int, index: int, value: "Slice") -> None:
        pass
    @typed_invokable(name="udict_add?")
    def add_check(
        self,
        key_len: int,
        index: int,
        value: "Slice",
    ) -> tuple["UDict", int]:
        pass
    @typed_invokable(name="udict_add?_")
    def add_check_(self, key_len: int, index: int, value: "Slice") -> int:
        pass
    @typed_invokable(name="udict_replace?")
    def replace_check(
        self,
        key_len: int,
        index: int,
        value: "Slice",
    ) -> tuple["UDict", int]:
        pass
    @typed_invokable(name="udict_replace?_")
    def replace_check_(self, key_len: int, index: int, value: "Slice") -> int:
        pass

    # -- builder-valued stores ---------------------------------------
    @typed_invokable(name="udict_set_builder")
    def set_builder(
        self,
        key_len: int,
        index: int,
        value: "Builder",
    ) -> "UDict":
        pass
    @typed_invokable(name="udict_set_builder_")
    def set_builder_(
        self,
        key_len: int,
        index: int,
        value: "Builder",
    ) -> None:
        pass
    @typed_invokable(name="udict_add_builder?")
    def add_builder_check(
        self,
        key_len: int,
        index: int,
        value: "Builder",
    ) -> tuple["UDict", int]:
        pass
    @typed_invokable(name="udict_replace_builder?")
    def replace_builder_check(
        self,
        key_len: int,
        index: int,
        value: "Builder",
    ) -> tuple["UDict", int]:
        pass
    @typed_invokable(name="udict_add_builder?_")
    def add_builder_check_(
        self,
        key_len: int,
        index: int,
        value: "Builder",
    ) -> int:
        pass
    @typed_invokable(name="udict_replace_builder?_")
    def replace_builder_check_(
        self,
        key_len: int,
        index: int,
        value: "Builder",
    ) -> int:
        pass

    # -- ordered traversal (min/max/neighbors) -----------------------
    @typed_invokable(name="udict_delete_get_min")
    def delete_get_min(
        self,
        key_len: int,
    ) -> tuple["UDict", int, "Slice", int]:
        pass
    @typed_invokable(name="udict::delete_get_min_")
    def delete_get_min_(self, key_len: int) -> tuple[int, "Slice", int]:
        pass
    @typed_invokable(name="udict_delete_get_max")
    def delete_get_max(
        self,
        key_len: int,
    ) -> tuple["UDict", int, "Slice", int]:
        pass
    @typed_invokable(name="udict::delete_get_max_")
    def delete_get_max_(self, key_len: int) -> tuple[int, "Slice", int]:
        pass
    @typed_invokable(name="udict_get_min?")
    def get_min_check(self, key_len: int) -> tuple[int, "Slice", int]:
        pass
    @typed_invokable(name="udict_get_max?")
    def get_max_check(self, key_len: int) -> tuple[int, "Slice", int]:
        pass
    @typed_invokable(name="udict_get_min_ref?")
    def get_min_ref_check(self, key_len: int) -> tuple[int, "Cell", int]:
        pass
    @typed_invokable(name="udict_get_max_ref?")
    def get_max_ref_check(self, key_len: int) -> tuple[int, "Cell", int]:
        pass
    @typed_invokable(name="udict_get_next?")
    def get_next_check(
        self,
        key_len: int,
        pivot: int,
    ) -> tuple[int, "Slice", int]:
        pass
    @typed_invokable(name="udict_get_nexteq?")
    def get_nexteq_check(
        self,
        key_len: int,
        pivot: int,
    ) -> tuple[int, "Slice", int]:
        pass
    @typed_invokable(name="udict_get_prev?")
    def get_prev_check(
        self,
        key_len: int,
        pivot: int,
    ) -> tuple[int, "Slice", int]:
        pass
    @typed_invokable(name="udict_get_preveq?")
    def get_preveq_check(
        self,
        key_len: int,
        pivot: int,
    ) -> tuple[int, "Slice", int]:
        pass

    @classmethod
    def __deserialize__(
        cls,
        from_: "Slice",
        name: str = None,
        inplace: bool = True,
        lazy: bool = True,
        **kwargs,
    ):
        """Read a udict from a slice, mutating it when ``inplace``.

        NOTE(review): the ``lazy`` flag is currently unused here.
        """
        if inplace:
            v = from_.udict_()
        else:
            v = from_.udict()
        if name is not None:
            v.__assign__(name)
        return v
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from rift.func.types.types import (
Slice,
Cont,
Cell,
Tuple,
Dict,
IDict,
UDict,
PfxDict,
)
from rift.core.invokable import typed_invokable
from rift.func.types.entity_base import _EntityBase
class _SliceBase(_EntityBase):
@typed_invokable(name="load_coins_")
def coin_(self) -> int:
pass
@typed_invokable(name="load_uint_")
def uint_(self, bits: int) -> int:
pass
@typed_invokable(name="preload_uint")
def uint(self, bits: int) -> int:
pass
@typed_invokable(name="load_int_")
def sint_(self, bits: int) -> int:
pass
@typed_invokable(name="preload_int")
def sint(self, bits: int) -> int:
pass
@typed_invokable(name="slice_hash")
def hash(self) -> int:
pass
@typed_invokable(name="string_hash")
def string_hash(self) -> int:
pass
@typed_invokable(name="check_data_signature")
def check_signature(self, signature: "Slice", public_key: int) -> int:
pass
@typed_invokable(name="slice_compute_data_size")
def compute_data_size(self, max_cells: int) -> tuple[int, int, int]:
pass
@typed_invokable(name="bless")
def bless(self) -> "Cont":
pass
# --- Slice parsing bindings ------------------------------------------------
# Every method below is a typed binding: `typed_invokable` maps the Python
# call onto the named FunC/Fift invokable, so the bodies are intentionally
# empty (`pass`) — no Python-side logic runs.  Plain names are the
# non-consuming "preload" forms; a trailing underscore marks the variant
# that consumes bits from this slice.

@typed_invokable(name="end_parse")
def end_parse(self) -> None:
    pass

@typed_invokable(name="load_ref_")
def ref_(self) -> "Cell":
    pass

@typed_invokable(name="preload_ref")
def ref(self) -> "Cell":
    pass

@typed_invokable(name="load_bits_")
def bits_(self, len_: int) -> "Slice":
    pass

@typed_invokable(name="preload_bits")
def bits(self, len_: int) -> "Slice":
    pass

@typed_invokable(name="skip_bits")
def skip_n(self, len_: int) -> None:
    pass

# NOTE(review): `skip_n_` binds the same invokable name as `skip_n`
# ("skip_bits"); the stdlib defines a distinct in-place form "~skip_bits".
# Presumably typed_invokable disambiguates via the trailing underscore /
# return shape — confirm, else this should read name="~skip_bits".
@typed_invokable(name="skip_bits")
def skip_n_(self, len_: int) -> None:
    pass

@typed_invokable(name="first_bits")
def first_bits(self, len_: int) -> "Slice":
    pass

@typed_invokable(name="skip_last_bits")
def skip_last_n(self, len_: int) -> None:
    pass

# NOTE(review): same duplicate-name concern as `skip_n_` above.
@typed_invokable(name="skip_last_bits")
def skip_last_n_(self, len_: int) -> None:
    pass

@typed_invokable(name="slice_last")
def slice_last(self, len_: int) -> "Slice":
    pass

# --- Dictionary loaders ----------------------------------------------------
# The same two invokables ("load_dict_" / "preload_dict") are exposed under
# four return types so callers receive the typed dictionary view they need.

@typed_invokable(name="load_dict_")
def ldict_(self) -> "Dict":
    pass

@typed_invokable(name="preload_dict")
def ldict(self) -> "Dict":
    pass

@typed_invokable(name="load_dict_")
def idict_(self) -> "IDict":
    pass

@typed_invokable(name="preload_dict")
def idict(self) -> "IDict":
    pass

@typed_invokable(name="load_dict_")
def udict_(self) -> "UDict":
    pass

@typed_invokable(name="preload_dict")
def udict(self) -> "UDict":
    pass

@typed_invokable(name="load_dict_")
def pdict_(self) -> "PfxDict":
    pass

@typed_invokable(name="preload_dict")
def pdict(self) -> "PfxDict":
    pass

@typed_invokable(name="skip_dict")
def skip_dict(self) -> None:
    pass

@typed_invokable(name="load_maybe_ref_")
def maybe_ref_(self) -> "Cell":
    pass

@typed_invokable(name="preload_maybe_ref")
def maybe_ref(self) -> "Cell":
    pass

# --- Introspection ---------------------------------------------------------

@typed_invokable(name="slice_refs")
def refs_n(self) -> int:
    pass

@typed_invokable(name="slice_bits")
def bits_n(self) -> int:
    pass

@typed_invokable(name="slice_bits_refs")
def bits_refs_n(self) -> tuple[int, int]:
    pass

@typed_invokable(name="slice_empty?")
def is_empty(self) -> int:
    pass

@typed_invokable(name="slice_data_empty?")
def is_data_empty(self) -> int:
    pass

@typed_invokable(name="slice_refs_empty?")
def are_refs_empty(self) -> int:
    pass

@typed_invokable(name="slice_depth")
def depth(self) -> int:
    pass

# --- Address helpers -------------------------------------------------------

@typed_invokable(name="load_msg_addr_")
def addr_(self) -> "Slice":
    pass

@typed_invokable(name="parse_addr")
def parse_addr(self) -> "Tuple":
    pass

@typed_invokable(name="parse_std_addr")
def parse_std_addr(self) -> tuple[int, int]:
    pass

@typed_invokable(name="parse_var_addr")
def parse_var_addr(self) -> tuple[int, "Slice"]:
    pass
@typed_invokable(name="equal_slices")
def is_equal(self, b: "Slice") -> int:
    """Compare this slice's data bits with *b*; returns a TVM boolean int.

    Typed binding onto the `equal_slices` invokable — the body is
    intentionally empty.
    """
    pass
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from rift.core import Entity
from rift.func.types.types import Slice, Dict, Builder
from rift.core.invokable import typed_invokable
from rift.func.types.cell_base import _CellBase
class _DictBase(_CellBase):
    # Generic (slice-keyed) TVM hashmap view.  Every method is a typed
    # binding onto the corresponding stdlib dict invokable; bodies are
    # intentionally empty (`pass`).  Plain names return the updated dict;
    # a trailing underscore marks the in-place variant applied to `self`.

    @typed_invokable(name="dict_set")
    def dict_set(
        self,
        key_len: int,
        index: "Slice",
        value: "Slice",
    ) -> "Dict":
        pass

    # NOTE(review): binds the same invokable name as `dict_set`, while the
    # builder twin below spells its in-place form "~dict_set_builder"
    # explicitly.  Presumably this should be "~dict_set" — confirm how
    # typed_invokable resolves in-place variants before changing.
    @typed_invokable(name="dict_set")
    def dict_set_(self, key_len: int, index: "Slice", value: "Slice") -> None:
        pass

    @typed_invokable(name="dict_set_builder")
    def dict_set_builder(
        self,
        key_len: int,
        index: "Slice",
        value: "Builder",
    ) -> "Dict":
        pass

    @typed_invokable(name="~dict_set_builder")
    def dict_set_builder_(
        self,
        key_len: int,
        index: "Slice",
        value: "Builder",
    ) -> None:
        pass

    # NOTE(review): the in-place form below binds "dict::delete_get_min"
    # whereas the stdlib spells it "~dict::delete_get_min" — confirm the
    # resolution rule (same applies to the max variant).
    @typed_invokable(name="dict_delete_get_min")
    def dict_delete_get_min(
        self,
        key_len: int,
    ) -> tuple["Dict", "Slice", "Slice", int]:
        pass

    @typed_invokable(name="dict::delete_get_min")
    def dict_delete_get_min_(
        self,
        key_len: int,
    ) -> tuple["Slice", "Slice", int]:
        pass

    @typed_invokable(name="dict_delete_get_max")
    def dict_delete_get_max(
        self,
        key_len: int,
    ) -> tuple["Dict", "Slice", "Slice", int]:
        pass

    @typed_invokable(name="dict::delete_get_max")
    def dict_delete_get_max_(
        self,
        key_len: int,
    ) -> tuple["Slice", "Slice", int]:
        pass

    @typed_invokable(name="dict_empty?")
    def dict_empty_check(self) -> int:
        pass

    @classmethod
    def __serialize__(cls, to: "Builder", value: "Entity") -> "Builder":
        # A missing dict serializes as a single 0 bit; otherwise defer to
        # the builder's dict-store helper.
        if value is None:
            return to.uint(0, 1)
        return to.dict(value)
@classmethod
def __deserialize__(
    cls,
    from_: "Slice",
    name: "str | None" = None,
    inplace: bool = True,
    lazy: bool = True,
    **kwargs,
):
    """Read a dictionary from *from_*.

    Args:
        from_: source slice.
        name: target-language name to bind the result to, if any.
        inplace: consume bits from *from_* (``ldict_``) instead of only
            peeking (``ldict``).
        lazy: accepted for signature compatibility with the other
            ``__deserialize__`` hooks; unused here.

    Returns:
        The loaded dictionary entity (bound to *name* when given).
    """
    v = from_.ldict_() if inplace else from_.ldict()
    if name is not None:
        # Short-circuit guard: only bind when the loader produced a
        # truthy result.
        v and v.__assign__(name)
    return v
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from rift.func.types.types import Slice, Cell, IDict, Builder
from rift.core.invokable import typed_invokable
from rift.func.types.dict_base import _DictBase
class _IDictBase(_DictBase):
    # Signed-integer-keyed dictionary view.  Every method is a typed
    # binding onto the corresponding `idict_*` stdlib invokable; bodies are
    # intentionally empty.  Plain names return the updated dict; a trailing
    # underscore marks the in-place variant applied to `self`; a `_check`
    # suffix maps the stdlib `?` forms that also return a success flag.

    @typed_invokable(name="idict_set_ref")
    def set_ref(self, key_len: int, index: int, value: "Cell") -> "IDict":
        pass

    @typed_invokable(name="idict_set_ref")
    def set_ref_(self, key_len: int, index: int, value: "Cell") -> None:
        pass

    @typed_invokable(name="idict_get_ref")
    def get_ref(self, key_len: int, index: int) -> "Cell":
        pass

    @typed_invokable(name="idict_get_ref?")
    def get_ref_check(self, key_len: int, index: int) -> tuple["IDict", int]:
        pass

    @typed_invokable(name="idict_set_get_ref")
    def set_get_ref(
        self,
        key_len: int,
        index: int,
        value: "Cell",
    ) -> tuple["IDict", "Cell"]:
        pass

    @typed_invokable(name="idict_set_get_ref")
    def set_get_ref_(self, key_len: int, index: int, value: "Cell") -> "Cell":
        pass

    @typed_invokable(name="idict_delete?")
    def delete_check(self, key_len: int, index: int) -> tuple["IDict", int]:
        pass

    @typed_invokable(name="idict_delete?")
    def delete_check_(self, key_len: int, index: int) -> int:
        pass

    @typed_invokable(name="idict_get?")
    def get_check(self, key_len: int, index: int) -> tuple["Slice", int]:
        pass

    @typed_invokable(name="idict_delete_get?")
    def delete_get_check(
        self,
        key_len: int,
        index: int,
    ) -> tuple["IDict", "Slice", int]:
        pass

    @typed_invokable(name="idict_delete_get?")
    def delete_get_check_(
        self,
        key_len: int,
        index: int,
    ) -> tuple["Slice", int]:
        pass

    @typed_invokable(name="idict_set")
    def set(self, key_len: int, index: int, value: "Slice") -> "IDict":
        pass

    @typed_invokable(name="idict_set")
    def set_(self, key_len: int, index: int, value: "Slice") -> None:
        pass

    @typed_invokable(name="idict_add?")
    def add_check(
        self,
        key_len: int,
        index: int,
        value: "Slice",
    ) -> tuple["IDict", int]:
        pass

    @typed_invokable(name="idict_add?")
    def add_check_(self, key_len: int, index: int, value: "Slice") -> int:
        pass

    @typed_invokable(name="idict_replace?")
    def replace_check(
        self,
        key_len: int,
        index: int,
        value: "Slice",
    ) -> tuple["IDict", int]:
        pass

    @typed_invokable(name="idict_replace?")
    def replace_check_(self, key_len: int, index: int, value: "Slice") -> int:
        pass

    @typed_invokable(name="idict_set_builder")
    def set_builder(
        self,
        key_len: int,
        index: int,
        value: "Builder",
    ) -> "IDict":
        pass

    @typed_invokable(name="idict_set_builder")
    def set_builder_(
        self,
        key_len: int,
        index: int,
        value: "Builder",
    ) -> None:
        pass

    @typed_invokable(name="idict_add_builder?")
    def add_builder_check(
        self,
        key_len: int,
        index: int,
        value: "Builder",
    ) -> tuple["IDict", int]:
        pass

    @typed_invokable(name="idict_replace_builder?")
    def replace_builder_check(
        self,
        key_len: int,
        index: int,
        value: "Builder",
    ) -> tuple["IDict", int]:
        pass

    @typed_invokable(name="idict_add_builder?")
    def add_builder_check_(
        self,
        key_len: int,
        index: int,
        value: "Builder",
    ) -> int:
        pass

    @typed_invokable(name="idict_replace_builder?")
    def replace_builder_check_(
        self,
        key_len: int,
        index: int,
        value: "Builder",
    ) -> int:
        pass

    # --- min/max extraction -------------------------------------------

    @typed_invokable(name="idict_delete_get_min")
    def delete_get_min(
        self,
        key_len: int,
    ) -> tuple["IDict", int, "Slice", int]:
        pass

    @typed_invokable(name="idict::delete_get_min")
    def delete_get_min_(self, key_len: int) -> tuple[int, "Slice", int]:
        pass

    @typed_invokable(name="idict_delete_get_max")
    def delete_get_max(
        self,
        key_len: int,
    ) -> tuple["IDict", int, "Slice", int]:
        pass

    @typed_invokable(name="idict::delete_get_max")
    def delete_get_max_(self, key_len: int) -> tuple[int, "Slice", int]:
        pass

    @typed_invokable(name="idict_get_min?")
    def get_min_check(self, key_len: int) -> tuple[int, "Slice", int]:
        pass

    @typed_invokable(name="idict_get_max?")
    def get_max_check(self, key_len: int) -> tuple[int, "Slice", int]:
        pass

    @typed_invokable(name="idict_get_min_ref?")
    def get_min_ref_check(self, key_len: int) -> tuple[int, "Cell", int]:
        pass

    @typed_invokable(name="idict_get_max_ref?")
    def get_max_ref_check(self, key_len: int) -> tuple[int, "Cell", int]:
        pass

    # --- ordered traversal --------------------------------------------

    @typed_invokable(name="idict_get_next?")
    def get_next_check(
        self,
        key_len: int,
        pivot: int,
    ) -> tuple[int, "Slice", int]:
        pass

    @typed_invokable(name="idict_get_nexteq?")
    def get_nexteq_check(
        self,
        key_len: int,
        pivot: int,
    ) -> tuple[int, "Slice", int]:
        pass

    @typed_invokable(name="idict_get_prev?")
    def get_prev_check(
        self,
        key_len: int,
        pivot: int,
    ) -> tuple[int, "Slice", int]:
        pass

    @typed_invokable(name="idict_get_preveq?")
    def get_preveq_check(
        self,
        key_len: int,
        pivot: int,
    ) -> tuple[int, "Slice", int]:
        pass
@classmethod
def __deserialize__(
    cls,
    from_: "Slice",
    name: "str | None" = None,
    inplace: bool = True,
    lazy: bool = True,
    **kwargs,
):
    """Read a signed-key dictionary from *from_*.

    Args:
        from_: source slice.
        name: target-language name to bind the result to, if any.
        inplace: consume bits from *from_* (``idict_``) instead of only
            peeking (``idict``).
        lazy: accepted for signature compatibility with the other
            ``__deserialize__`` hooks; unused here.

    Returns:
        The loaded dictionary entity (bound to *name* when given).
    """
    v = from_.idict_() if inplace else from_.idict()
    if name is not None:
        # NOTE(review): unlike _DictBase.__deserialize__, there is no
        # `v and ...` falsiness guard here — confirm a falsy loader
        # result cannot reach this point.
        v.__assign__(name)
    return v
from rift.core.annots import asm, impure
from rift.core.entity import Entity
from rift.func.library import Library
from rift.func.types.types import Builder, Cell, Cont, Slice, Tuple
# noinspection PyTypeChecker,SpellCheckingInspection,PyUnusedLocal
class Stdlib(Library):
__ignore__ = True
@asm(hide=True)
def null(self) -> Entity:
return "NULL"
@asm()
def now(self) -> int:
return "NOW"
@asm()
def my_address(self) -> Slice:
return "MYADDR"
@asm(hide=True)
def get_balance(self) -> Tuple:
return "BALANCE"
@asm()
def cur_lt(self) -> int:
return "LTIME"
@asm()
def block_lt(self) -> int:
return "BLOCKLT"
@asm()
def cell_hash(self, c: Cell) -> int:
return "HASHCU"
@asm()
def slice_hash(self, s: Slice) -> int:
return "HASHSU"
@asm()
def string_hash(self, s: Slice) -> int:
return "SHA256U"
@asm()
def check_signature(
self,
hash_: int,
signature: Slice,
public_key: int,
) -> int:
return "CHKSIGNU"
@asm()
def check_data_signature(
self,
data: Slice,
signature: Slice,
public_key: int,
) -> int:
return "CHKSIGNS"
@impure
@asm()
def compute_data_size(
self,
c: Cell,
max_cells: int,
) -> tuple[int, int, int]:
return "CDATASIZE"
@impure
@asm()
def slice_compute_data_size(
self,
s: Slice,
max_cells: int,
) -> tuple[int, int, int]:
return "SDATASIZE"
@asm(name="compute_data_size?")
def compute_data_size_check(
self,
c: Cell,
max_cells: int,
) -> tuple[int, int, int, int]:
return "CDATASIZEQ NULLSWAPIFNOT2 NULLSWAPIFNOT"
@asm(name="slice_compute_data_size?")
def slice_compute_data_size_check(
self,
c: Cell,
max_cells: int,
) -> tuple[int, int, int, int]:
return "SDATASIZEQ NULLSWAPIFNOT2 NULLSWAPIFNOT"
@impure
@asm(hide=True)
def throw_if(self, excno: int, cond: int) -> None:
return "THROWARGIF"
@impure
@asm()
def dump_stack(self) -> None:
return "DUMPSTK"
@asm()
def get_data(self) -> Cell:
return "c4 PUSH"
@impure
@asm()
def set_data(self, c: Cell) -> None:
return "c4 POP"
@impure
@asm()
def get_c3(self) -> Cont:
return "c3 PUSH"
@impure
@asm()
def set_c3(self, c: Cont) -> None:
return "c3 POP"
@impure
@asm()
def bless(self, s: Slice) -> Cont:
return "BLESS"
@impure
@asm()
def accept_message(self) -> None:
return "ACCEPT"
@impure
@asm()
def set_gas_limit(self, limit: int) -> None:
return "SETGASLIMIT"
@impure
@asm()
def commit(self) -> None:
return "COMMIT"
@impure
@asm()
def buy_gas(self, gram: int) -> None:
return "BUYGAS"
@asm()
def min(self, x: int, y: int) -> int:
return "MIN"
@asm()
def max(self, x: int, y: int) -> int:
return "MAX"
@asm()
def minmax(self, x: int, y: int) -> tuple[int, int]:
return "MINMAX"
@asm()
def abs(self, x: int) -> int:
return "ABS"
@asm()
def begin_parse(self, c: Cell) -> Slice:
return "CTOS"
@impure
@asm()
def end_parse(self, s: Slice) -> None:
return "ENDS"
@asm(out_order=(1, 0))
def load_ref(self, s: Slice) -> tuple[Slice, Cell]:
return "LDREF"
@asm()
def preload_ref(self, s: Slice) -> Cell:
return "PLDREF"
@asm(
input_order=("s", "len_"),
out_order=(1, 0),
name="~load_int",
hide=True,
)
def load_int_(self, s: Slice, len_: int) -> tuple[Slice, int]:
return "LDIX"
@asm(out_order=(1, 0), name="~load_uint", hide=True)
def load_uint_(self, s: Slice, len_: int) -> tuple[Slice, int]:
return "LDUX"
@asm(hide=True)
def preload_int(self, s: Slice, len_: int) -> int:
return "PLDIX"
@asm(hide=True)
def preload_uint(self, s: Slice, len_: int) -> int:
return "PLDUX"
@asm(input_order=("s", "len_"), out_order=(1, 0), hide=True)
def load_bits(self, s: Slice, len_: int) -> tuple[Slice, Slice]:
return "LDSLICEX"
@asm(hide=True)
def preload_bits(self, s: Slice, len_: int) -> Slice:
return "PLDSLICEX"
@asm(out_order=(1, 0))
def load_grams(self, s: Slice) -> tuple[Slice, int]:
return "LDGRAMS"
@asm()
def skip_bits(self, s: Slice, len_: int) -> Slice:
return "SDSKIPFIRST"
@asm(name="~skip_bits")
def skip_bits_(self, s: Slice, len_: int) -> tuple[Slice, None]:
return "SDSKIPFIRST"
@asm()
def first_bits(self, s: Slice, len_: int) -> Slice:
return "SDCUTFIRST"
@asm()
def skip_last_bits(self, s: Slice, len_: int) -> Slice:
return "SDSKIPLAST"
@asm(name="~skip_last_bits")
def skip_last_bits_(self, s: Slice, len_: int) -> tuple[Slice, None]:
return "SDSKIPLAST"
@asm()
def slice_last(self, s: Slice, len_: int) -> Slice:
return "SDCUTLAST"
@asm(out_order=(1, 0))
def load_dict(self, s: Slice) -> tuple[Slice, Cell]:
return "LDDICT"
@asm()
def preload_dict(self, s: Slice) -> Cell:
return "PLDDICT"
@asm()
def skip_dict(self, s: Slice) -> Slice:
return "SKIPDICT"
@asm(out_order=(1, 0))
def load_maybe_ref(self, s: Slice) -> tuple[Slice, Cell]:
return "LDOPTREF"
@asm()
def preload_maybe_ref(self, s: Slice) -> Cell:
return "PLDOPTREF"
@asm(input_order=("c", "b"))
def store_maybe_ref(self, b: Builder, c: Cell) -> Builder:
return "STOPTREF"
@asm()
def cell_depth(self, c: Cell) -> int:
return "CDEPTH"
@asm()
def slice_refs(self, s: Slice) -> int:
return "SREFS"
@asm()
def slice_bits(self, s: Slice) -> int:
return "SBITS"
@asm()
def slice_bits_refs(self, s: Slice) -> tuple[int, int]:
return "SBITREFS"
@asm(name="slice_empty?")
def slice_empty_check(self, s: Slice) -> int:
return "SEMPTY"
@asm(name="slice_data_empty?")
def slice_data_empty_check(self, s: Slice) -> int:
return "SDEMPTY"
@asm(name="slice_refs_empty?")
def slice_refs_empty_check(self, s: Slice) -> int:
return "SREMPTY"
@asm()
def slice_depth(self, s: Slice) -> int:
return "SDEPTH"
@asm()
def builder_refs(self, b: Builder) -> int:
return "BREFS"
@asm()
def builder_bits(self, b: Builder) -> int:
return "BBITS"
@asm()
def builder_depth(self, b: Builder) -> int:
return "BDEPTH"
@asm()
def begin_cell(self) -> Builder:
return "NEWC"
@asm()
def end_cell(self, b: Builder) -> Cell:
return "ENDC"
@asm(input_order=("c", "b"))
def store_ref(self, b: Builder, c: Cell) -> Builder:
return "STREF"
@asm(input_order=("x", "b", "len_"), hide=True)
def store_uint(self, b: Builder, x: int, len_: int) -> Builder:
return "STUX"
@asm(input_order=("x", "b", "len_"), hide=True)
def store_int(self, b: Builder, x: int, len_: int) -> Builder:
return "STIX"
@asm()
def store_slice(self, b: Builder, s: Slice) -> Builder:
return "STSLICER"
@asm()
def store_grams(self, b: Builder, x: int) -> Builder:
return "STGRAMS"
@asm(input_order=("c", "b"))
def store_dict(self, b: Builder, c: Cell) -> Builder:
return "STDICT"
@asm(out_order=(1, 0))
def load_msg_addr(self, s: Slice) -> tuple[Slice, Slice]:
return "LDMSGADDR"
@asm()
def parse_addr(self, s: Slice) -> Tuple:
return "PARSEMSGADDR"
@asm()
def parse_std_addr(self, s: Slice) -> tuple[int, int]:
return "REWRITESTDADDR"
@asm()
def parse_var_addr(self, s: Slice) -> tuple[int, Slice]:
return "REWRITEVARADDR"
@asm(input_order=("value", "index", "dict_", "key_len"))
def idict_set_ref(
self,
dict_: Cell,
key_len: int,
index: int,
value: Cell,
) -> Cell:
return "DICTISETREF"
@asm(
input_order=("value", "index", "dict_", "key_len"),
name="~idict_set_ref",
)
def idict_set_ref_(
self,
dict_: Cell,
key_len: int,
index: int,
value: Cell,
) -> tuple[Cell, None]:
return "DICTISETREF"
@asm(input_order=("value", "index", "dict_", "key_len"))
def udict_set_ref(
self,
dict_: Cell,
key_len: int,
index: int,
value: Cell,
) -> Cell:
return "DICTUSETREF"
@asm(
input_order=("value", "index", "dict_", "key_len"),
name="~udict_set_ref",
)
def udict_set_ref_(
self,
dict_: Cell,
key_len: int,
index: int,
value: Cell,
) -> tuple[Cell, None]:
return "DICTUSETREF"
@asm(input_order=("index", "dict_", "key_len"), hide=True)
def idict_get_ref(self, dict_: Cell, key_len: int, index: int) -> Cell:
return "DICTIGETOPTREF"
@asm(
    input_order=("index", "dict_", "key_len"),
    name="idict_get_ref?",
    hide=True,
)
def idict_get_ref_check(
    self,
    dict_: Cell,
    key_len: int,
    index: int,
) -> tuple[Cell, int]:
    """Look up *index* in a signed-key dict; returns ``(cell, found)``.

    Fix: append ``NULLSWAPIFNOT`` so a missing key still leaves a
    ``(null, 0)`` pair on the stack — mirroring ``udict_get_ref_check``
    below and the FunC stdlib definition of ``idict_get_ref?``; without
    it the miss path returns only the status flag and desyncs the stack.
    """
    return "DICTIGETREF", "NULLSWAPIFNOT"
@asm(input_order=("index", "dict_", "key_len"), name="udict_get_ref?")
def udict_get_ref_check(
self,
dict_: Cell,
key_len: int,
index: int,
) -> tuple[Cell, int]:
return "DICTUGETREF", "NULLSWAPIFNOT"
@asm(input_order=("value", "index", "dict_", "key_len"))
def idict_set_get_ref(
self,
dict_: Cell,
key_len: int,
index: int,
value: Cell,
) -> tuple[Cell, Cell]:
return "DICTISETGETOPTREF"
@asm(input_order=("value", "index", "dict_", "key_len"))
def udict_set_get_ref(
self,
dict_: Cell,
key_len: int,
index: int,
value: Cell,
) -> tuple[Cell, Cell]:
return "DICTUSETGETOPTREF"
@asm(input_order=("index", "dict_", "key_len"), name="idict_delete?")
def idict_delete_check(
self,
dict_: Cell,
key_len: int,
index: int,
) -> tuple[Cell, int]:
return "DICTIDEL"
@asm(input_order=("index", "dict_", "key_len"), name="udict_delete?")
def udict_delete_check(
self,
dict_: Cell,
key_len: int,
index: int,
) -> tuple[Cell, int]:
return "DICTUDEL"
@asm(input_order=("index", "dict_", "key_len"), name="idict_get?")
def idict_get_check(
self,
dict_: Cell,
key_len: int,
index: int,
) -> tuple[Slice, int]:
return "DICTIGET", "NULLSWAPIFNOT"
@asm(input_order=("index", "dict_", "key_len"), name="udict_get?")
def udict_get_check(
self,
dict_: Cell,
key_len: int,
index: int,
) -> tuple[Slice, int]:
return "DICTUGET", "NULLSWAPIFNOT"
@asm(input_order=("index", "dict_", "key_len"), name="idict_delete_get?")
def idict_delete_get_check(
self,
dict_: Cell,
key_len: int,
index: int,
) -> tuple[Cell, Slice, int]:
return "DICTIDELGET", "NULLSWAPIFNOT"
@asm(input_order=("index", "dict_", "key_len"), name="udict_delete_get?")
def udict_delete_get_check(
self,
dict_: Cell,
key_len: int,
index: int,
) -> tuple[Cell, Slice, int]:
return "DICTUDELGET", "NULLSWAPIFNOT"
@asm(input_order=("index", "dict_", "key_len"), name="~idict_delete_get?")
def idict_delete_get_check_(
self,
dict_: Cell,
key_len: int,
index: int,
) -> tuple[Cell, tuple[Slice, int]]:
return "DICTIDELGET", "NULLSWAPIFNOT"
@asm(input_order=("index", "dict_", "key_len"), name="~udict_delete_get?")
def udict_delete_get_check_(
self,
dict_: Cell,
key_len: int,
index: int,
) -> tuple[Cell, tuple[Slice, int]]:
return "DICTUDELGET", "NULLSWAPIFNOT"
@asm(input_order=("value", "index", "dict_", "key_len"))
def udict_set(
self,
dict_: Cell,
key_len: int,
index: int,
value: Slice,
) -> Cell:
return "DICTUSET"
@asm(
input_order=("value", "index", "dict_", "key_len"),
name="~udict_set",
)
def udict_set_(
self,
dict_: Cell,
key_len: int,
index: int,
value: Slice,
) -> tuple[Cell, None]:
return "DICTUSET"
@asm(input_order=("value", "index", "dict_", "key_len"))
def idict_set(
self,
dict_: Cell,
key_len: int,
index: int,
value: Slice,
) -> Cell:
return "DICTISET"
@asm(
input_order=("value", "index", "dict_", "key_len"),
name="~idict_set",
)
def idict_set_(
self,
dict_: Cell,
key_len: int,
index: int,
value: Slice,
) -> tuple[Cell, None]:
return "DICTISET"
@asm(input_order=("value", "index", "dict_", "key_len"))
def dict_set(
self,
dict_: Cell,
key_len: int,
index: Slice,
value: Slice,
) -> Cell:
return "DICTSET"
@asm(input_order=("value", "index", "dict_", "key_len"), name="~dict_set")
def dict_set_(
self,
dict_: Cell,
key_len: int,
index: Slice,
value: Slice,
) -> tuple[Cell, None]:
return "DICTSET"
@asm(
input_order=("value", "index", "dict_", "key_len"),
name="udict_add?",
)
def udict_add_check(
self,
dict_: Cell,
key_len: int,
index: int,
value: Slice,
) -> tuple[Cell, int]:
return "DICTUADD"
@asm(
input_order=("value", "index", "dict_", "key_len"),
name="udict_replace?",
)
def udict_replace_check(
self,
dict_: Cell,
key_len: int,
index: int,
value: Slice,
) -> tuple[Cell, int]:
return "DICTUREPLACE"
@asm(
input_order=("value", "index", "dict_", "key_len"),
name="idict_add?",
)
def idict_add_check(
self,
dict_: Cell,
key_len: int,
index: int,
value: Slice,
) -> tuple[Cell, int]:
return "DICTIADD"
@asm(
input_order=("value", "index", "dict_", "key_len"),
name="idict_replace?",
)
def idict_replace_check(
self,
dict_: Cell,
key_len: int,
index: int,
value: Slice,
) -> tuple[Cell, int]:
return "DICTIREPLACE"
@asm(input_order=("value", "index", "dict_", "key_len"))
def udict_set_builder(
self,
dict_: Cell,
key_len: int,
index: int,
value: Builder,
) -> Cell:
return "DICTUSETB"
@asm(
input_order=("value", "index", "dict_", "key_len"),
name="~udict_set_builder",
)
def udict_set_builder_(
self,
dict_: Cell,
key_len: int,
index: int,
value: Builder,
) -> tuple[Cell, None]:
return "DICTUSETB"
@asm(input_order=("value", "index", "dict_", "key_len"))
def idict_set_builder(
self,
dict_: Cell,
key_len: int,
index: int,
value: Builder,
) -> Cell:
return "DICTISETB"
@asm(
input_order=("value", "index", "dict_", "key_len"),
name="~idict_set_builder",
)
def idict_set_builder_(
self,
dict_: Cell,
key_len: int,
index: int,
value: Builder,
) -> tuple[Cell, None]:
return "DICTISETB"
@asm(input_order=("value", "index", "dict_", "key_len"))
def dict_set_builder(
self,
dict_: Cell,
key_len: int,
index: Slice,
value: Builder,
) -> Cell:
return "DICTSETB"
@asm(
input_order=("value", "index", "dict_", "key_len"),
name="~dict_set_builder",
)
def dict_set_builder_(
self,
dict_: Cell,
key_len: int,
index: Slice,
value: Builder,
) -> tuple[Cell, None]:
return "DICTSETB"
@asm(
input_order=("value", "index", "dict_", "key_len"),
name="udict_add_builder?",
)
def udict_add_builder_check(
self,
dict_: Cell,
key_len: int,
index: int,
value: Builder,
) -> tuple[Cell, int]:
return "DICTUADDB"
@asm(
input_order=("value", "index", "dict_", "key_len"),
name="udict_replace_builder?",
)
def udict_replace_builder_check(
self,
dict_: Cell,
key_len: int,
index: int,
value: Builder,
) -> tuple[Cell, int]:
return "DICTUREPLACEB"
@asm(
input_order=("value", "index", "dict_", "key_len"),
name="idict_add_builder?",
)
def idict_add_builder_check(
self,
dict_: Cell,
key_len: int,
index: int,
value: Builder,
) -> tuple[Cell, int]:
return "DICTIADDB"
@asm(
input_order=("value", "index", "dict_", "key_len"),
name="idict_replace_builder?",
)
def idict_replace_builder_check(
self,
dict_: Cell,
key_len: int,
index: int,
value: Builder,
) -> tuple[Cell, int]:
return "DICTIREPLACEB"
@asm(out_order=(0, 2, 1, 3))
def udict_delete_get_min(
self,
dict_: Cell,
key_len: int,
) -> tuple[Cell, int, Slice, int]:
return "DICTUREMMIN", "NULLSWAPIFNOT2"
@asm(out_order=(0, 2, 1, 3), name="~udict::delete_get_min")
def udict_delete_get_min_(
self,
dict_: Cell,
key_len: int,
) -> tuple[Cell, tuple[int, Slice, int]]:
return "DICTUREMMIN", "NULLSWAPIFNOT2"
@asm(out_order=(0, 2, 1, 3))
def idict_delete_get_min(
self,
dict_: Cell,
key_len: int,
) -> tuple[Cell, int, Slice, int]:
return "DICTIREMMIN", "NULLSWAPIFNOT2"
@asm(out_order=(0, 2, 1, 3), name="~idict::delete_get_min")
def idict_delete_get_min_(
self,
dict_: Cell,
key_len: int,
) -> tuple[Cell, tuple[int, Slice, int]]:
return "DICTIREMMIN", "NULLSWAPIFNOT2"
@asm(out_order=(0, 2, 1, 3))
def dict_delete_get_min(
self,
dict_: Cell,
key_len: int,
) -> tuple[Cell, Slice, Slice, int]:
return "DICTREMMIN", "NULLSWAPIFNOT2"
@asm(out_order=(0, 2, 1, 3), name="~dict::delete_get_min")
def dict_delete_get_min_(
self,
dict_: Cell,
key_len: int,
) -> tuple[Cell, tuple[Slice, Slice, int]]:
return "DICTREMMIN", "NULLSWAPIFNOT2"
@asm(out_order=(0, 2, 1, 3))
def udict_delete_get_max(
self,
dict_: Cell,
key_len: int,
) -> tuple[Cell, int, Slice, int]:
return "DICTUREMMAX", "NULLSWAPIFNOT2"
@asm(out_order=(0, 2, 1, 3), name="~udict::delete_get_max")
def udict_delete_get_max_(
self,
dict_: Cell,
key_len: int,
) -> tuple[Cell, tuple[int, Slice, int]]:
return "DICTUREMMAX", "NULLSWAPIFNOT2"
@asm(out_order=(0, 2, 1, 3))
def idict_delete_get_max(
self,
dict_: Cell,
key_len: int,
) -> tuple[Cell, int, Slice, int]:
return "DICTIREMMAX", "NULLSWAPIFNOT2"
@asm(out_order=(0, 2, 1, 3), name="~idict::delete_get_max")
def idict_delete_get_max_(
self,
dict_: Cell,
key_len: int,
) -> tuple[Cell, tuple[int, Slice, int]]:
return "DICTIREMMAX", "NULLSWAPIFNOT2"
@asm(out_order=(0, 2, 1, 3))
def dict_delete_get_max(
self,
dict_: Cell,
key_len: int,
) -> tuple[Cell, Slice, Slice, int]:
return "DICTREMMAX", "NULLSWAPIFNOT2"
@asm(out_order=(0, 2, 1, 3), name="~dict::delete_get_max")
def dict_delete_get_max_(
self,
dict_: Cell,
key_len: int,
) -> tuple[Cell, tuple[Slice, Slice, int]]:
return "DICTREMMAX", "NULLSWAPIFNOT2"
@asm(out_order=(1, 0, 2), name="udict_get_min?")
def udict_get_min_check(
self,
dict_: Cell,
key_len: int,
) -> tuple[int, Slice, int]:
return "DICTUMIN", "NULLSWAPIFNOT2"
@asm(out_order=(1, 0, 2), name="udict_get_max?")
def udict_get_max_check(
self,
dict_: Cell,
key_len: int,
) -> tuple[int, Slice, int]:
return "DICTUMAX", "NULLSWAPIFNOT2"
@asm(out_order=(1, 0, 2), name="udict_get_min_ref?")
def udict_get_min_ref_check(
self,
dict_: Cell,
key_len: int,
) -> tuple[int, Cell, int]:
return "DICTUMINREF", "NULLSWAPIFNOT2"
@asm(out_order=(1, 0, 2), name="udict_get_max_ref?")
def udict_get_max_ref_check(
self,
dict_: Cell,
key_len: int,
) -> tuple[int, Cell, int]:
return "DICTUMAXREF", "NULLSWAPIFNOT2"
@asm(out_order=(1, 0, 2), name="idict_get_min?")
def idict_get_min_check(
self,
dict_: Cell,
key_len: int,
) -> tuple[int, Slice, int]:
return "DICTIMIN", "NULLSWAPIFNOT2"
@asm(out_order=(1, 0, 2), name="idict_get_max?")
def idict_get_max_check(
self,
dict_: Cell,
key_len: int,
) -> tuple[int, Slice, int]:
return "DICTIMAX", "NULLSWAPIFNOT2"
@asm(out_order=(1, 0, 2), name="idict_get_min_ref?")
def idict_get_min_ref_check(
self,
dict_: Cell,
key_len: int,
) -> tuple[int, Cell, int]:
return "DICTIMINREF", "NULLSWAPIFNOT2"
@asm(out_order=(1, 0, 2), name="idict_get_max_ref?")
def idict_get_max_ref_check(
self,
dict_: Cell,
key_len: int,
) -> tuple[int, Cell, int]:
return "DICTIMAXREF", "NULLSWAPIFNOT2"
@asm(
input_order=("pivot", "dict_", "key_len"),
out_order=(1, 0, 2),
name="udict_get_next?",
)
def udict_get_next_check(
self,
dict_: Cell,
key_len: int,
pivot: int,
) -> tuple[int, Slice, int]:
return "DICTUGETNEXT", "NULLSWAPIFNOT2"
@asm(
input_order=("pivot", "dict_", "key_len"),
out_order=(1, 0, 2),
name="udict_get_nexteq?",
)
def udict_get_nexteq_check(
self,
dict_: Cell,
key_len: int,
pivot: int,
) -> tuple[int, Slice, int]:
return "DICTUGETNEXTEQ", "NULLSWAPIFNOT2"
@asm(
input_order=("pivot", "dict_", "key_len"),
out_order=(1, 0, 2),
name="udict_get_prev?",
)
def udict_get_prev_check(
self,
dict_: Cell,
key_len: int,
pivot: int,
) -> tuple[int, Slice, int]:
return "DICTUGETPREV", "NULLSWAPIFNOT2"
@asm(
input_order=("pivot", "dict_", "key_len"),
out_order=(1, 0, 2),
name="udict_get_preveq?",
)
def udict_get_preveq_check(
self,
dict_: Cell,
key_len: int,
pivot: int,
) -> tuple[int, Slice, int]:
return "DICTUGETPREVEQ", "NULLSWAPIFNOT2"
@asm(
input_order=("pivot", "dict_", "key_len"),
out_order=(1, 0, 2),
name="idict_get_next?",
)
def idict_get_next_check(
self,
dict_: Cell,
key_len: int,
pivot: int,
) -> tuple[int, Slice, int]:
return "DICTIGETNEXT", "NULLSWAPIFNOT2"
@asm(
input_order=("pivot", "dict_", "key_len"),
out_order=(1, 0, 2),
name="idict_get_nexteq?",
)
def idict_get_nexteq_check(
self,
dict_: Cell,
key_len: int,
pivot: int,
) -> tuple[int, Slice, int]:
return "DICTIGETNEXTEQ", "NULLSWAPIFNOT2"
@asm(
input_order=("pivot", "dict_", "key_len"),
out_order=(1, 0, 2),
name="idict_get_prev?",
)
def idict_get_prev_check(
self,
dict_: Cell,
key_len: int,
pivot: int,
) -> tuple[int, Slice, int]:
return "DICTIGETPREV", "NULLSWAPIFNOT2"
@asm(
input_order=("pivot", "dict_", "key_len"),
out_order=(1, 0, 2),
name="idict_get_preveq?",
)
def idict_get_preveq_check(
self,
dict_: Cell,
key_len: int,
pivot: int,
) -> tuple[int, Slice, int]:
return "DICTIGETPREVEQ", "NULLSWAPIFNOT2"
@asm()
def new_dict(self) -> Cell:
return "NEWDICT"
@asm(name="dict_empty?")
def dict_empty_check(self, c: Cell) -> int:
return "DICTEMPTY"
@asm(input_order=("key", "dict_", "key_len"), name="pfxdict_get?")
def pfxdict_get_check(
self,
dict_: Cell,
key_len: int,
key: Slice,
) -> tuple[Slice, Slice, Slice, int]:
return "PFXDICTGETQ", "NULLSWAPIFNOT2"
@asm(
input_order=("value", "key", "dict_", "key_len"),
name="pfxdict_set?",
)
def pfxdict_set_check(
self,
dict_: Cell,
key_len: int,
key: Slice,
value: Slice,
) -> tuple[Cell, int]:
return "PFXDICTSET"
@asm(input_order=("key", "dict_", "key_len"), name="pfxdict_delete?")
def pfxdict_delete_check(
self,
dict_: Cell,
key_len: int,
key: Slice,
) -> tuple[Cell, int]:
return "PFXDICTDEL"
@asm()
def config_param(self, x: int) -> Cell:
return "CONFIGOPTPARAM"
@asm(name="cell_null?")
def cell_null_check(self, c: Cell) -> int:
return "ISNULL"
@impure
@asm()
def raw_reserve(self, amount: int, mode: int) -> None:
return "RAWRESERVE"
@impure
@asm()
def raw_reserve_extra(
self,
amount: int,
extra_amount: Cell,
mode: int,
) -> None:
return "RAWRESERVEX"
@impure
@asm()
def send_raw_message(self, msg: Cell, mode: int) -> None:
return "SENDRAWMSG"
@impure
@asm()
def set_code(self, new_code: Cell) -> None:
return "SETCODE"
@impure
@asm()
def random(self) -> int:
return "RANDU256"
@impure
@asm()
def rand(self, range_: int) -> int:
return "RAND"
@impure
@asm()
def get_seed(self) -> int:
return "RANDSEED"
@impure
@asm()
def set_seed(self) -> int:
    # NOTE(review): TVM's SETRAND consumes a seed value from the stack,
    # yet this binding declares no parameter and an int result.
    # Presumably it should be `set_seed(self, seed: int) -> None` —
    # confirm against the FunC stdlib before relying on this wrapper.
    return "SETRAND"
@impure
@asm()
def randomize(self, x: int) -> None:
return "ADDRAND"
@impure
@asm()
def randomize_lt(self) -> None:
return "LTIME", "ADDRAND"
@asm(hide=True)
def store_coins(self, b: Builder, x: int) -> Builder:
return "STVARUINT16"
@asm(out_order=(1, 0), hide=True)
def load_coins(self, s: Slice) -> tuple[Slice, int]:
return "LDVARUINT16"
@asm()
def equal_slices(self, a: Slice, b: Slice) -> int:
return "SDEQ"
@asm(name="builder_null?")
def builder_null_check(self, b: Builder) -> int:
return "ISNULL"
@asm()
def store_builder(self, to: Builder, from_: Builder) -> Builder:
return "STBR"
std = Stdlib() | /rift_framework-1.0.0rc1-py3-none-any.whl/rift/library/std.py | 0.631026 | 0.296193 | std.py | pypi |
from rift.ast import CallStacks
from rift.ast.types import Expr, Node, Statement
from rift.core.factory import Factory
from rift.core.invokable import Invokable
from rift.core.mark import mark
class Entity(Node):
__magic__ = 0x050794
N_ID = 0
def __init__(self, data=None, name=None) -> None:
super().__init__()
if data is None:
data = {}
self.data = data
self.NAMED = False
if name is not None:
self.NAMED = True
self.name = name
Entity.N_ID += 1
self.id = Entity.N_ID
self.assigned = False
self.has_expr = False
self.__used__ = False
def _binary(self, op, other, r=False):
mark(self, other)
e = Entity(
Expr.binary_op(
op,
other if r else self,
self if r else other,
type(self),
),
)
return e
def _unary(self, op):
mark(self)
e = Entity(Expr.unary_op(op, self, type(self)))
return e
def __eq__(self, other):
return self._binary("==", other)
def __neg__(self):
return self._unary("-")
def __ne__(self, other):
return self._binary("!=", other)
def __le__(self, other):
return self._binary("<=", other)
def __lt__(self, other):
return self._binary("<", other)
def __gt__(self, other):
return self._binary(">", other)
def __ge__(self, other):
return self._binary(">=", other)
def __add__(self, other):
return self._binary("+", other)
def __radd__(self, other):
return self._binary("+", other, r=True)
def __sub__(self, other):
return self._binary("-", other)
def __rsub__(self, other):
return self._binary("-", other, r=True)
def __truediv__(self, other):
return self._binary("/", other)
def __rtruediv__(self, other):
return self._binary("/", other, r=True)
def __mul__(self, other):
return self._binary("*", other)
def __rmul__(self, other):
return self._binary("*", other, r=True)
def __or__(self, other):
return self._binary("|", other)
def __ror__(self, other):
return self._binary("|", other, r=True)
def __and__(self, other):
return self._binary("&", other)
def __rand__(self, other):
return self._binary("&", other, r=True)
def __iadd__(self, other):
x = self._binary("+", other)
if self.NAMED:
x.__assign__(self.name)
return x
def __isub__(self, other):
x = self._binary("-", other)
if self.NAMED:
x.__assign__(self.name)
return x
def __imul__(self, other):
x = self._binary("*", other)
if self.NAMED:
x.__assign__(self.name)
return x
def __idiv__(self, other):
x = self._binary("/", other)
if self.NAMED:
x.__assign__(self.name)
return x
def __invert__(self):
return self._unary("~")
def __getattr__(self, item):
mark(self)
return Invokable(item, self)
def __str__(self):
return repr(self)
def __repr__(self):
if self.NAMED:
return self.name
return self._repr_()
def _repr_(self):
return repr(self.data)
def __assign__(self, v):
if self.NAMED:
t = type(self)
CallStacks.assign(v, Expr.variable(self.name, type_=t))
return t.abstract_init(name=v)
if self.has_expr:
_x = getattr(self, "__expr")
s: Statement = Node.find(_x)
s.args = (v, s.args[0])
s.type = Statement.ASSIGN
s.refresh()
else:
# TODO: Most likely this never occurs (cleanup)
CallStacks.assign(v, self.data)
self.NAMED = True
self.name = v
return self
    def __massign__(self, vs, xs):
        """Multi-assignment: bind the names ``vs`` to the result entities
        ``xs`` produced by this (tuple-valued) entity."""
        if self.has_expr:
            _x = getattr(self, "__expr")
            s: Statement = Node.find(_x)
            if s.type == s.EXPR:
                # Rewrite the recorded statement into `v1, v2, ... = <expr>`.
                s.args = (vs, s.args[0])
                s.type = Statement.M_ASSIGN
                s.refresh()
        else:
            CallStacks.multi_assign(vs, self.data)
        for x, v in zip(xs, vs):
            x.NAMED = True
            x.name = v
    def __prep_unpack__(self, l_):
        # Remember how many items a subsequent tuple-unpack expects.
        self._unpack_len = l_

    def __iter__(self):
        # Yields one fresh abstract Entity per expected unpack slot.
        # NOTE(review): `self.__unpackable` is name-mangled by Python to
        # `_Entity__unpackable` while the hasattr() check uses the unmangled
        # string — the two can never refer to the same attribute.  Because
        # __getattr__ above returns a truthy Invokable for unknown names,
        # the condition is effectively always true (with a mark() side
        # effect); confirm intent before "fixing".
        if hasattr(self, "__unpackable") and self.__unpackable:
            for _ in range(self._unpack_len):
                yield Entity()

    def __rem_name__(self):
        # Return this entity's bound name, or None when unnamed.
        if self.NAMED:
            return self.name
        return None
    @classmethod
    def type_name(cls) -> str:
        # Human-readable type tag used by the code generator.
        return "var"

    @classmethod
    def abstract_init(cls, *args, **kwargs) -> "Entity":
        # Alternate-constructor hook; subclasses may override to customize
        # how abstract (placeholder) instances are created.
        return cls(*args, **kwargs)
Factory.register("Entity", Entity)
from typing import TYPE_CHECKING
from rift.core import Entity
from rift.fift.types._fift_base import _FiftBaseType
from rift.logging import log_system
from rift.runtime.config import Config
from rift.types.bases import Builder, Cell, Slice
from rift.types.utils import CachingSubscriptable
from rift.util.type_id import type_id
if TYPE_CHECKING:
from rift.types.payload import Payload
class Ref(metaclass=CachingSubscriptable):
    """Marker type for values (de)serialized as a child-cell reference.

    ``Ref[T]`` builds (and caches, via the metaclass) a subclass whose
    ``__basex__`` is ``T``; serialization then stores ``T`` inside a
    referenced cell instead of inline.
    """

    bound: "Entity"  # the wrapped value when Ref is used as an instance

    def __init__(self, bound) -> None:
        self.bound = bound

    @classmethod
    def __serialize__(cls, to: "Builder", value: "Entity") -> "Builder":
        # Instance path: unwrap the Ref and store its bound value as a ref.
        if isinstance(value, Ref):
            value = value.bound
            if isinstance(value, Entity) or isinstance(value, _FiftBaseType):
                b = to.ref(value)
            elif hasattr(value, "__magic__") and value.__magic__ == 0xA935E5:
                # 0xA935E5 marks Payload-like objects; pack via as_cell().
                p: "Payload" = value
                c = p.as_cell()
                b = to.ref(c)
            # NOTE(review): if neither branch above matched, `b` is unbound
            # here (NameError) — presumably unreachable for valid inputs.
            return b
        # Class path: serialize a raw value according to the bound base type.
        base = cls.__basex__
        if value is None:
            # NOTE: Is this ideal behavior for a None ref ?! I think so
            return to
        if base == Cell:
            b = to.ref(value)
        elif hasattr(base, "__magic__") and base.__magic__ == 0xA935E5:
            p: "Payload" = value
            c = p.as_cell()
            b = to.ref(c)
        return b

    @classmethod
    def __deserialize__(
        cls,
        from_: "Slice",
        name: str = None,
        inplace: bool = True,
        lazy: bool = True,
        **kwargs,
    ):
        """Load a referenced cell from ``from_`` and decode it as the bound
        base type (``inplace`` advances the source slice)."""
        base = cls.__basex__
        if Config.mode.is_fift():
            log_system(
                "DE",
                "[{name}] loading ref=>{base} [{lazy}] from=>{frm}",
                name=name,
                lazy=lazy,
                base=base.__name__,
                frm=from_._init_hash(),
            )
        if inplace:
            v = from_.ref_()
        else:
            v = from_.ref()
        if base == Cell:
            if name is not None:
                v.__assign__(name)
        if hasattr(base, "__magic__") and base.__magic__ == 0xA935E5:
            # Payload-like base: parse the cell and decode its fields.
            v = v.parse()
            v = base.__deserialize__(v, name=name, lazy=lazy)
        return v

    @classmethod
    def __predefine__(
        cls,
        name: str = None,
        lazy: bool = True,
        **kwargs,
    ):
        # Pre-declare the variable(s) this ref will bind, when named.
        if name is None:
            return
        base = cls.__basex__
        if base == Cell:
            Cell.__predefine__(name)
        elif hasattr(base, "__magic__") and base.__magic__ == 0xA935E5:
            base.__predefine__(name)

    @classmethod
    def type_name(cls) -> str:
        # A ref reports its base type's name (the ref-ness is structural).
        base = cls.__basex__
        return base.type_name()

    @classmethod
    def __build_type__(cls, item):
        # Called by the metaclass for Ref[item]: create the specialized
        # subclass carrying the base type.
        t = type(
            "Ref_%s" % item.__name__,
            (cls,),
            {
                "__basex__": item,
            },
        )
        t.__type_id__ = type_id(t.__name__)
        return t
from rift.core import Entity
from rift.fift.fift import Fift
from rift.library import std
from rift.logging import log_system
from rift.runtime.config import Config
from rift.types.bases import Builder, Cell, Int, Slice, String
from rift.types.int_aliases import int8, integer, uint256
from rift.types.maybe import Maybe
from rift.types.payload import Payload
class MsgAddress(Slice):
    """A TON message address slice, plus helpers to build, parse and render
    standard (workchain + 256-bit hash) addresses."""

    class Std(Payload):
        # Standard address layout: tag $10, optional anycast,
        # 8-bit workchain, 256-bit account hash.
        __tag__ = "$10"
        anycast: Maybe[Cell]
        workchain: int8
        address: uint256

    @classmethod
    def __serialize__(cls, to: "Builder", value: "Entity") -> "Builder":
        if isinstance(value, Int) or isinstance(value, integer):
            b = type(value).__serialize__(to, value)
        elif isinstance(value, str) and Config.mode.is_fift():
            # Human-readable address string: parse with Fift's $>smca word.
            wc, addr, _, ok = Fift.exec("$>smca", value)
            if not ok:
                raise RuntimeError("Invalid addr!")
            s = cls.std(wc, addr)
            b = to.slice(s)
        else:
            b = to.slice(value)
        return b

    @classmethod
    def __deserialize__(
        cls,
        from_: "Slice",
        name: str = None,
        inplace: bool = True,
        lazy: bool = True,
        **kwargs,
    ):
        log_system(
            "DE", "[{name}] loading address [{lazy}]", name=name, lazy=lazy
        )
        # TODO: HANDLE INPLACE STUFF
        v = from_.addr_()
        if name is not None:
            v and v.__assign__(name)
        return v

    @classmethod
    def std(cls, workchain: int, addr: uint256) -> Slice:
        """Build a standard (no-anycast) address slice from its parts."""
        return (
            cls.Std(
                anycast=None,
                workchain=workchain,
                address=addr,
            )
            .as_cell()
            .parse()
        )

    @classmethod
    def human_readable(cls, addr: Slice, flags=0) -> str:
        """Render a standard address as its user-facing string form."""
        # We assume this is an standard one
        # TODO: Copy stuff
        s = Slice(__value__=addr.value)
        s.uint_(3)  # skip the 2 tag bits + the anycast maybe-bit
        wc = s.uint_(8)
        hash_ = s.uint_(256)
        hr: String
        (hr,) = s.cmd("smca>$", wc, hash_, flags)
        return hr.value

    @classmethod
    def empty(cls) -> Slice:
        """Return the empty address (addr_none, two zero bits)."""
        if Config.mode.is_fift():
            b = Builder()
        else:
            b = std.begin_cell()
        b = b.uint(0, 2)
        return b.end().parse()
from copy import deepcopy
from rift.types.addr import MsgAddress
from rift.types.bases import Cell, Dict
from rift.types.bool import Bool
from rift.types.coin import Coin
from rift.types.either import Either
from rift.types.either_ref import EitherRef
from rift.types.int_aliases import uint5, uint32, uint64
from rift.types.maybe import Maybe
from rift.types.payload import Payload
from rift.types.ref import Ref
class TickTock(Payload):
    """Tick/tock flags of a special (system) contract's StateInit."""
    tick: Bool
    tock: Bool
class SimpleLib(Payload):
    """A library entry: public flag plus the library's root cell."""
    public: Bool
    root: Ref[Cell]
    pass
class StateInit(Payload):
    """Contract StateInit: optional split depth and tick-tock flags, plus
    the contract's code, data and library dictionary."""
    split_depth: Maybe[uint5]
    special: Maybe[TickTock]
    code: Maybe[Ref[Cell]]
    data: Maybe[Ref[Cell]]
    library: Dict
    pass
class CurrencyCollection(Payload):
    """Attached value: base-coin amount plus a dictionary of extra currencies."""
    amount: Coin
    other: Dict
class InboundExternalMsgInfo(Payload):
    """Header of an inbound external message (tag $10): source, destination
    and the import fee."""
    __tag__ = "$10"
    src: MsgAddress
    dest: MsgAddress
    import_fee: Coin

    @classmethod
    def build(
        cls,
        dest: MsgAddress,
        src: MsgAddress = None,
        import_fee: Coin = 0,
    ) -> "InboundExternalMsgInfo":
        # NOTE: return annotation corrected — this builds an
        # InboundExternalMsgInfo, not an InternalMsgInfo.
        if src is None:
            src = MsgAddress.empty()
        info = InboundExternalMsgInfo()
        info.dest = dest
        info.src = src
        info.import_fee = import_fee
        return info
class InternalMsgInfo(Payload):
    """Header of an internal message (tag $0): routing flags, source and
    destination addresses, attached value, fees and creation metadata."""
    __tag__ = "$0"
    ihr_disabled: Bool
    bounce: Bool
    bounced: Bool
    src: MsgAddress
    dest: MsgAddress
    value: CurrencyCollection
    ihr_fee: Coin
    fwd_fee: Coin
    created_lt: uint64
    created_at: uint32

    @classmethod
    def build(
        cls,
        dest: MsgAddress,
        ihr_disabled: Bool = True,
        bounce: Bool = True,
        bounced: Bool = False,
        src: MsgAddress = None,
        amount: Coin = 0,
        extra_currency: Dict = None,
        ihr_fee: Coin = 0,
        fwd_fee: Coin = 0,
        created_lt: uint64 = 0,
        created_at: uint32 = 0,
    ) -> "InternalMsgInfo":
        """Create a header addressed to ``dest``; ``src`` defaults to the
        empty address."""
        if src is None:
            src = MsgAddress.empty()
        info = InternalMsgInfo()
        info.dest = dest
        info.ihr_disabled = ihr_disabled
        info.bounce = bounce
        info.bounced = bounced
        info.src = src
        info.value = CurrencyCollection()
        info.value.amount = amount
        info.value.other = extra_currency
        info.ihr_fee = ihr_fee
        info.fwd_fee = fwd_fee
        info.created_lt = created_lt
        info.created_at = created_at
        return info
class InboundExtMsgInfo(Payload):
    # NOTE(review): appears to duplicate InboundExternalMsgInfo above
    # (same tag and fields, no build helper); kept for compatibility.
    __tag__ = "$10"
    src: MsgAddress
    dest: MsgAddress
    import_fee: Coin
class InternalMessage(Payload):
    """A full internal message: ``InternalMsgInfo`` header, optional
    StateInit and a body stored either inline or as a ref."""
    info: InternalMsgInfo
    init: Maybe[EitherRef[StateInit]]
    body: EitherRef[Cell]

    @classmethod
    def __build_type__(cls, item):
        """Specialize the message type so ``body`` is ``EitherRef[item]``.

        BUG FIX: the previous implementation used ``deepcopy(cls)``, but
        ``copy.deepcopy`` treats classes as atomic and returns the very
        same class object — mutating its ``__annotations__`` rewrote
        ``InternalMessage`` itself for every subsequent use.  Build a real
        subclass with its own annotations dict instead.
        """
        annotations = {**cls.__annotations__, "body": EitherRef[item]}
        return type(cls.__name__, (cls,), {"__annotations__": annotations})

    @classmethod
    def build(
        cls,
        dest: MsgAddress,
        state_init: Maybe[EitherRef[StateInit]] = None,
        body: EitherRef[Cell] = None,
        ihr_disabled: Bool = 1,
        bounce: Bool = 1,
        bounced: Bool = 0,
        src: MsgAddress = None,
        amount: Coin = 0,
        extra_currency: Dict = None,
        ihr_fee: Coin = 0,
        fwd_fee: Coin = 0,
        created_lt: uint64 = 0,
        created_at: uint32 = 0,
    ) -> "InternalMessage":
        """Construct a ready-to-send internal message to ``dest``."""
        if src is None:
            src = MsgAddress.empty()
        msg = InternalMessage()
        msg.info = InternalMsgInfo.build(
            dest=dest,
            ihr_disabled=ihr_disabled,
            bounce=bounce,
            bounced=bounced,
            src=src,
            amount=amount,
            extra_currency=extra_currency,
            ihr_fee=ihr_fee,
            fwd_fee=fwd_fee,
            created_lt=created_lt,
            created_at=created_at,
        )
        msg.init = state_init
        msg.body = body
        return msg

    def send(self, mode: int = 0, flags: int = 0):
        """Serialize this message and emit it with the given mode + flags."""
        c = self.as_cell()
        c.send_raw_message(mode + flags)
class ExternalMessage(Payload):
    """A full inbound external message: ``InboundExternalMsgInfo`` header,
    optional StateInit and a body stored either inline or as a ref."""
    info: InboundExternalMsgInfo
    init: Maybe[EitherRef[StateInit]]
    body: EitherRef[Cell]

    @classmethod
    def __build_type__(cls, item):
        """Specialize the message type so ``body`` is ``EitherRef[item]``.

        BUG FIX: ``deepcopy(cls)`` returns the same class object (classes
        are copied atomically), so the old code mutated ``ExternalMessage``'s
        own ``__annotations__`` globally.  Build a proper subclass instead.
        """
        annotations = {**cls.__annotations__, "body": EitherRef[item]}
        return type(cls.__name__, (cls,), {"__annotations__": annotations})

    @classmethod
    def build(
        cls,
        dest: MsgAddress,
        src: MsgAddress = None,
        state_init: Maybe[EitherRef[StateInit]] = None,
        body: EitherRef[Cell] = None,
        import_fee: Coin = 0,
    ) -> "ExternalMessage":
        """Construct an external message addressed to ``dest``."""
        if src is None:
            src = MsgAddress.empty()
        msg = ExternalMessage()
        msg.info = InboundExternalMsgInfo.build(
            dest=dest,
            import_fee=import_fee,
            src=src,
        )
        msg.init = state_init
        msg.body = body
        return msg

    def send(self, mode: int = 0, flags: int = 0):
        """Serialize this message and emit it with the given mode + flags."""
        c = self.as_cell()
        c.send_raw_message(mode + flags)
class MessageMode:
    """Base send modes for raw message sending."""
    ORDINARY = 0
    CARRY_REM_VALUE = 64       # carry the remaining value of the inbound message
    CARRY_ALL_BALANCE = 128    # carry the whole contract balance
class MessageFlag:
    """Flags OR-able with a MessageMode when sending raw messages."""
    # NOTE(review): "SEPERATE" is a misspelling of "SEPARATE"; the name is
    # public API so it is kept as-is.
    FLAG_SEPERATE_FEE = 1
    FLAG_IGNORE_ACTION_ERR = 2
    FLAG_DESTROY_CONTRACT_ON_ZERO = 32
from rift.core.entity import Entity
from rift.library.std import std
from rift.runtime.config import Config
from rift.types.bases import Builder
class Model:
    """Declarative, lazily-loaded view over a contract's persistent data.

    Annotated class attributes define the storage layout in order; fields
    are deserialized on first attribute access, with fields positioned
    before the requested one parsed and parked in ``_skipped_ones``.
    """

    __magic__ = 0xBB10C0  # marks Model-like classes for the framework
    _pointer: int  # index of the next not-yet-deserialized field
    _skipped_ones: dict[str, Entity]  # fields parsed while seeking another

    def __init__(self, **kwargs):
        self.annotations = self.__annotations__
        self._items = list(self.annotations.keys())
        self._lazy = True
        self._skipped_ones = {}
        self._pointer = 0
        self._has_data = False
        self._build = False
        if len(kwargs) != 0:
            # Build mode: the instance is populated from kwargs instead of
            # being lazily read from contract storage.
            self._build = True
            for k in self.annotations:
                setattr(self, k, None)
            for k in kwargs:
                if k in self.annotations:
                    setattr(self, k, kwargs[k])

    @classmethod
    def from_slice(cls, data):
        """Wrap an already-available data slice (instead of get_data())."""
        d = cls()
        d.__data__ = data
        d._has_data = True
        d._pointer = -1  # flags "data present but cursor not initialized"
        return d

    def __getattr__(self, item):
        # This gets called whenever item doesn't exist in data model
        # So we'll check whether it's really from fields or not
        # Purpose => Lazy Loading
        if item not in self.annotations:
            raise AttributeError(item)
        if item in self._skipped_ones:
            # Already parsed while seeking a later field: just name it now.
            n = self._skipped_ones[item]
            name = f"data_{item}"
            return n.__assign__(name)
        if not self._has_data and not self._build:
            self._init_data()
        if self._pointer == -1:
            self._pointer = 0
        # Strategy => Skip if not present
        targets = self._items[self._pointer :]
        for t in targets:
            self._pointer += 1
            v = self.annotations[t]
            is_ = t == item
            name = None
            if is_:
                name = f"data_{t}"
            n = v.__deserialize__(
                self.__data__,
                name=name,
                inplace=True,
                lazy=True,
            )
            if is_:
                setattr(self, t, n)
                return n
            else:
                self._skipped_ones[t] = n

    def load(self):
        """Eagerly deserialize every field from contract storage."""
        self.__predefine__("data")
        data = std.get_data().parse()
        data.__assign__("data")
        for k, v in self.annotations.items():
            name = f"data_{k}"
            n = v.__deserialize__(data, name=name, inplace=True)
            setattr(self, k, n)

    def _init_data(self):
        # Fetch and name the storage slice once (lazy-load entry point).
        self.__data__ = std.get_data().parse()
        self.__data__.__assign__("data")
        self._has_data = True

    def is_empty(self):
        """True when the storage slice carries no data bits."""
        if not self._has_data:
            self._init_data()
        return self.__data__.bits_n() == 0

    def as_cell(self):
        """Serialize all fields, in declaration order, into one cell."""
        if Config.mode.is_fift():
            builder = Builder()
        else:
            builder = std.begin_cell()
        for k, v in self.annotations.items():
            c_v = getattr(self, k)
            builder = v.__serialize__(builder, c_v)
        cell = builder.end()
        return cell

    def save(self):
        """Serialize the model and write it back as the contract data."""
        cell = self.as_cell()
        cell.set_data()

    def get(self, key):
        """Deserialize storage up to ``key`` and return that field.

        NOTE(review): ``res`` is unbound if ``key`` is not an annotated
        field, and the target field is read with ``inplace=False`` —
        confirm both are intended.
        """
        data = std.get_data().parse()
        data.__assign__("data")
        for k, v in self.annotations.items():
            target = k == key
            res = v.__deserialize__(data, inplace=not target)
            if target:
                break
        return res

    def copy(self, reset=False):
        """Shallow-copy this model; with ``reset`` return a blank instance."""
        # TODO: Better copy
        cp = type(self)()
        if not reset:
            cp.__dict__ = {**self.__dict__}
            cp._skipped_ones = {**cp._skipped_ones}
        return cp

    def __predefine__(
        self,
        name: str = None,
        lazy: bool = True,
        **kwargs,
    ):
        # Pre-declare field variables; with lazy + an explicit target only
        # that one field is predeclared.
        if name is None:
            return
        if lazy and "target" in kwargs:
            tg = kwargs["target"]
            targets = {tg: self.annotations[tg]}
        else:
            targets = self.annotations
        for k, v in targets.items():
            v_name = f"{name}_{k}"
            v.__predefine__(name=v_name, lazy=lazy, **kwargs)
from rift.core import Entity
from rift.core.condition import Cond
from rift.library import std
from rift.logging import log_system
from rift.runtime.config import Config
from rift.types.bases import Builder, Cell, Slice
from rift.types.ref import Ref
from rift.types.type_helper import type_matches
from rift.types.utils import CachingSubscriptable
from rift.util.type_id import type_id
class EitherRef(metaclass=CachingSubscriptable):
    """``Either T ^T``: a value stored either inline or in a referenced
    cell, discriminated by a leading bit (0 = inline, 1 = ref).

    ``EitherRef[T]`` builds a cached subclass with ``__base1__ = T``.
    """

    which: Entity  # the discriminator bit (1 => stored as a ref)
    bound: Entity  # the decoded/wrapped value

    def __getattr__(self, item):
        # Delegate everything else to the bound value.
        return getattr(self.bound, item)

    def __assign__(self, name):
        # Naming is handled per-part during (de)serialization; no-op here.
        return self

    def is_ref(self):
        return self.which == 1

    @classmethod
    def __serialize__(cls, to: "Builder", value: "EitherRef") -> "Builder":
        base1 = cls.__base1__
        if value is None:
            # NOTE(review): a None value is written as a single 0 bit
            # (inline marker with no payload) — confirm intent.
            b = to.uint(0, 1)
            return b
        if not isinstance(value, EitherRef):
            # Raw value: infer the discriminator from its type.
            if type_matches(base1, type(value)):
                v = 0
            elif type_matches(Ref[base1], type(value)):
                v = 1
            elif type_matches(Cell, type(value)):
                # NOTE: Is this a good approach?
                v = 0
            elif type_matches(Slice, type(value)):
                # NOTE: Is this a good approach?
                v = 0
            elif type_matches(Ref[Cell], type(value)):
                v = 1
            else:
                msg = "got {current} expected {e1} or {e2}"
                msg = msg.format(current=type(value), e1=base1, e2=Ref[base1])
                raise RuntimeError("Couldn't match either types; " + msg)
            to = to.uint(v, 1)
            return type(value).__serialize__(to, value)
        # EitherRef instance: branch at generated-code level on `which`.
        to.__assign__("_b_tmp_")
        with Cond() as c:
            c.match(value.which)
            # ref branch: 1-bit marker then the value packed in a child cell
            b = to.uint(1, 1)
            nb = std.begin_cell()
            nb = base1.__serialize__(nb, value.bound)
            nc = nb.end()
            b = b.ref(nc)
            b.__assign__("_b_tmp_")
            c.otherwise()
            # inline branch: 0-bit marker then the value serialized in place
            b = to.uint(0, 1)
            b = base1.__serialize__(b, value.bound)
            b.__assign__("_b_tmp_")
        return b

    @classmethod
    def __deserialize__(
        cls,
        from_: "Slice",
        name: str = None,
        inplace: bool = True,
        lazy: bool = True,
        **kwargs,
    ):
        base1 = cls.__base1__
        if inplace:
            i = from_.uint_(1)
        else:
            i = from_.uint(1)
        m = EitherRef()
        m.which = i
        if Config.mode.is_func():
            # FunC codegen: both branches are emitted; the slice to decode
            # from is chosen at runtime by the discriminator bit.
            m.which.__assign__(f"{name}_which")
            Slice.__predefine__(f"{name}_slice")
            first_name = from_.name
            with Cond() as c:
                c.match(i)
                v = Ref[Cell].__deserialize__(from_)
                x = v.parse().__assign__(f"{name}_slice")
                c.otherwise()
                x = from_.__assign__(f"{name}_slice")
            d = base1.__deserialize__(
                x,
                name=name,
                inplace=inplace,
                lazy=lazy,
            )
            with Cond() as c:
                c.match(i == 0)
                # Inline case consumed from the original slice: restore its
                # name so later reads continue from the right cursor.
                x.__assign__(first_name)
            m.bound = d
        elif Config.mode.is_fift():
            # Fift/test mode: the bit value is concrete, so branch in Python.
            log_system(
                "DE",
                "[{name}] loading either ref={ref} base={base} [{lazy}] from={frm}",
                name=name,
                lazy=lazy,
                ref=m.which != 0,
                base=base1.__name__,
                frm=from_._init_hash(),
            )
            if m.which == 0:
                n = from_
            else:
                n = from_.ref_().parse()
            d = base1.__deserialize__(
                n, name=name, inplace=inplace, lazy=lazy
            )
            return d
        return m

    @classmethod
    def __predefine__(
        cls,
        name: str = None,
        lazy: bool = True,
        **kwargs,
    ):
        base1 = cls.__base1__
        base1.__predefine__(name=name)

    @classmethod
    def __build_type__(cls, item):
        # Called by the metaclass for EitherRef[item].
        base1 = item
        t = type(
            "EitherRef_%s" % (base1.__name__),
            (cls,),
            {
                "__base1__": base1,
            },
        )
        t.__type_id__ = type_id(t.__name__)
        return t
from typing import TYPE_CHECKING
from rift.fift.types.slice import Slice as FiftSlice
from rift.func.types.types import Slice as FunCSlice
from rift.meta.behaviors import stub
if TYPE_CHECKING:
from rift.func.types.types import Cont, Tuple
from rift.types.bases.cell import Cell
class Slice(FunCSlice + FiftSlice):
    """Unified Slice type combining the FunC and Fift backends.

    Every method is a ``@stub``: the decorator wires the call to whichever
    backend is active, so the bodies here are intentionally empty.
    Convention: a trailing underscore (``uint_``, ``ref_``, ...) means the
    in-place/modifying variant that advances this slice's cursor.
    """

    # --- scalar loads ---------------------------------------------------
    @stub
    def coin(self) -> int:
        pass

    @stub
    def uint_(self, bits: int) -> int:
        pass

    @stub
    def uint(self, bits: int) -> int:
        pass

    @stub
    def sint_(self, bits: int) -> int:
        pass

    @stub
    def sint(self, bits: int) -> int:
        pass

    # --- hashing / signatures / sizing ----------------------------------
    @stub
    def hash(self) -> int:
        pass

    @stub
    def string_hash(self) -> int:
        pass

    @stub
    def check_signature(self, signature: "Slice", public_key: int) -> int:
        pass

    @stub
    def compute_data_size(self, max_cells: int) -> tuple[int, int, int]:
        pass

    @stub
    def bless(self) -> "Cont":
        pass

    @stub
    def end_parse(self) -> None:
        pass

    # --- refs and sub-slices --------------------------------------------
    @stub
    def ref_(self) -> "Cell":
        pass

    @stub
    def ref(self) -> "Cell":
        pass

    @stub
    def bits_(self, len_: int) -> "Slice":
        pass

    @stub
    def bits(self, len_: int) -> "Slice":
        pass

    @stub
    def skip_n(self, len_: int) -> None:
        pass

    @stub
    def skip_n_(self, len_: int) -> None:
        pass

    @stub
    def first_bits(self, len_: int) -> "Slice":
        pass

    @stub
    def skip_last_n(self, len_: int) -> None:
        pass

    @stub
    def skip_last_n_(self, len_: int) -> None:
        pass

    @stub
    def slice_last(self, len_: int) -> "Slice":
        pass

    # --- dictionaries ---------------------------------------------------
    @stub
    def ldict_(self) -> "Cell":
        pass

    @stub
    def ldict(self) -> "Cell":
        pass

    @stub
    def skip_dict(self) -> None:
        pass

    @stub
    def maybe_ref_(self) -> "Cell":
        pass

    @stub
    def maybe_ref(self) -> "Cell":
        pass

    # --- introspection --------------------------------------------------
    @stub
    def refs_n(self) -> int:
        pass

    @stub
    def bits_n(self) -> int:
        pass

    @stub
    def bits_refs_n(self) -> tuple[int, int]:
        pass

    @stub
    def is_empty(self) -> int:
        pass

    @stub
    def is_data_empty(self) -> int:
        pass

    @stub
    def are_refs_empty(self) -> int:
        pass

    @stub
    def depth(self) -> int:
        pass

    # --- addresses / comparison -----------------------------------------
    @stub
    def addr_(self) -> "Slice":
        pass

    @stub
    def parse_addr(self) -> "Tuple":
        pass

    @stub
    def parse_std_addr(self) -> tuple[int, int]:
        pass

    @stub
    def parse_var_addr(self) -> tuple[int, "Slice"]:
        pass

    @stub
    def is_equal(self, b: "Slice") -> int:
        pass
words = [
"abandon",
"ability",
"able",
"about",
"above",
"absent",
"absorb",
"abstract",
"absurd",
"abuse",
"access",
"accident",
"account",
"accuse",
"achieve",
"acid",
"acoustic",
"acquire",
"across",
"act",
"action",
"actor",
"actress",
"actual",
"adapt",
"add",
"addict",
"address",
"adjust",
"admit",
"adult",
"advance",
"advice",
"aerobic",
"affair",
"afford",
"afraid",
"again",
"age",
"agent",
"agree",
"ahead",
"aim",
"air",
"airport",
"aisle",
"alarm",
"album",
"alcohol",
"alert",
"alien",
"all",
"alley",
"allow",
"almost",
"alone",
"alpha",
"already",
"also",
"alter",
"always",
"amateur",
"amazing",
"among",
"amount",
"amused",
"analyst",
"anchor",
"ancient",
"anger",
"angle",
"angry",
"animal",
"ankle",
"announce",
"annual",
"another",
"answer",
"antenna",
"antique",
"anxiety",
"any",
"apart",
"apology",
"appear",
"apple",
"approve",
"april",
"arch",
"arctic",
"area",
"arena",
"argue",
"arm",
"armed",
"armor",
"army",
"around",
"arrange",
"arrest",
"arrive",
"arrow",
"art",
"artefact",
"artist",
"artwork",
"ask",
"aspect",
"assault",
"asset",
"assist",
"assume",
"asthma",
"athlete",
"atom",
"attack",
"attend",
"attitude",
"attract",
"auction",
"audit",
"august",
"aunt",
"author",
"auto",
"autumn",
"average",
"avocado",
"avoid",
"awake",
"aware",
"away",
"awesome",
"awful",
"awkward",
"axis",
"baby",
"bachelor",
"bacon",
"badge",
"bag",
"balance",
"balcony",
"ball",
"bamboo",
"banana",
"banner",
"bar",
"barely",
"bargain",
"barrel",
"base",
"basic",
"basket",
"battle",
"beach",
"bean",
"beauty",
"because",
"become",
"beef",
"before",
"begin",
"behave",
"behind",
"believe",
"below",
"belt",
"bench",
"benefit",
"best",
"betray",
"better",
"between",
"beyond",
"bicycle",
"bid",
"bike",
"bind",
"biology",
"bird",
"birth",
"bitter",
"black",
"blade",
"blame",
"blanket",
"blast",
"bleak",
"bless",
"blind",
"blood",
"blossom",
"blouse",
"blue",
"blur",
"blush",
"board",
"boat",
"body",
"boil",
"bomb",
"bone",
"bonus",
"book",
"boost",
"border",
"boring",
"borrow",
"boss",
"bottom",
"bounce",
"box",
"boy",
"bracket",
"brain",
"brand",
"brass",
"brave",
"bread",
"breeze",
"brick",
"bridge",
"brief",
"bright",
"bring",
"brisk",
"broccoli",
"broken",
"bronze",
"broom",
"brother",
"brown",
"brush",
"bubble",
"buddy",
"budget",
"buffalo",
"build",
"bulb",
"bulk",
"bullet",
"bundle",
"bunker",
"burden",
"burger",
"burst",
"bus",
"business",
"busy",
"butter",
"buyer",
"buzz",
"cabbage",
"cabin",
"cable",
"cactus",
"cage",
"cake",
"call",
"calm",
"camera",
"camp",
"can",
"canal",
"cancel",
"candy",
"cannon",
"canoe",
"canvas",
"canyon",
"capable",
"capital",
"captain",
"car",
"carbon",
"card",
"cargo",
"carpet",
"carry",
"cart",
"case",
"cash",
"casino",
"castle",
"casual",
"cat",
"catalog",
"catch",
"category",
"cattle",
"caught",
"cause",
"caution",
"cave",
"ceiling",
"celery",
"cement",
"census",
"century",
"cereal",
"certain",
"chair",
"chalk",
"champion",
"change",
"chaos",
"chapter",
"charge",
"chase",
"chat",
"cheap",
"check",
"cheese",
"chef",
"cherry",
"chest",
"chicken",
"chief",
"child",
"chimney",
"choice",
"choose",
"chronic",
"chuckle",
"chunk",
"churn",
"cigar",
"cinnamon",
"circle",
"citizen",
"city",
"civil",
"claim",
"clap",
"clarify",
"claw",
"clay",
"clean",
"clerk",
"clever",
"click",
"client",
"cliff",
"climb",
"clinic",
"clip",
"clock",
"clog",
"close",
"cloth",
"cloud",
"clown",
"club",
"clump",
"cluster",
"clutch",
"coach",
"coast",
"coconut",
"code",
"coffee",
"coil",
"coin",
"collect",
"color",
"column",
"combine",
"come",
"comfort",
"comic",
"common",
"company",
"concert",
"conduct",
"confirm",
"congress",
"connect",
"consider",
"control",
"convince",
"cook",
"cool",
"copper",
"copy",
"coral",
"core",
"corn",
"correct",
"cost",
"cotton",
"couch",
"country",
"couple",
"course",
"cousin",
"cover",
"coyote",
"crack",
"cradle",
"craft",
"cram",
"crane",
"crash",
"crater",
"crawl",
"crazy",
"cream",
"credit",
"creek",
"crew",
"cricket",
"crime",
"crisp",
"critic",
"crop",
"cross",
"crouch",
"crowd",
"crucial",
"cruel",
"cruise",
"crumble",
"crunch",
"crush",
"cry",
"crystal",
"cube",
"culture",
"cup",
"cupboard",
"curious",
"current",
"curtain",
"curve",
"cushion",
"custom",
"cute",
"cycle",
"dad",
"damage",
"damp",
"dance",
"danger",
"daring",
"dash",
"daughter",
"dawn",
"day",
"deal",
"debate",
"debris",
"decade",
"december",
"decide",
"decline",
"decorate",
"decrease",
"deer",
"defense",
"define",
"defy",
"degree",
"delay",
"deliver",
"demand",
"demise",
"denial",
"dentist",
"deny",
"depart",
"depend",
"deposit",
"depth",
"deputy",
"derive",
"describe",
"desert",
"design",
"desk",
"despair",
"destroy",
"detail",
"detect",
"develop",
"device",
"devote",
"diagram",
"dial",
"diamond",
"diary",
"dice",
"diesel",
"diet",
"differ",
"digital",
"dignity",
"dilemma",
"dinner",
"dinosaur",
"direct",
"dirt",
"disagree",
"discover",
"disease",
"dish",
"dismiss",
"disorder",
"display",
"distance",
"divert",
"divide",
"divorce",
"dizzy",
"doctor",
"document",
"dog",
"doll",
"dolphin",
"domain",
"donate",
"donkey",
"donor",
"door",
"dose",
"double",
"dove",
"draft",
"dragon",
"drama",
"drastic",
"draw",
"dream",
"dress",
"drift",
"drill",
"drink",
"drip",
"drive",
"drop",
"drum",
"dry",
"duck",
"dumb",
"dune",
"during",
"dust",
"dutch",
"duty",
"dwarf",
"dynamic",
"eager",
"eagle",
"early",
"earn",
"earth",
"easily",
"east",
"easy",
"echo",
"ecology",
"economy",
"edge",
"edit",
"educate",
"effort",
"egg",
"eight",
"either",
"elbow",
"elder",
"electric",
"elegant",
"element",
"elephant",
"elevator",
"elite",
"else",
"embark",
"embody",
"embrace",
"emerge",
"emotion",
"employ",
"empower",
"empty",
"enable",
"enact",
"end",
"endless",
"endorse",
"enemy",
"energy",
"enforce",
"engage",
"engine",
"enhance",
"enjoy",
"enlist",
"enough",
"enrich",
"enroll",
"ensure",
"enter",
"entire",
"entry",
"envelope",
"episode",
"equal",
"equip",
"era",
"erase",
"erode",
"erosion",
"error",
"erupt",
"escape",
"essay",
"essence",
"estate",
"eternal",
"ethics",
"evidence",
"evil",
"evoke",
"evolve",
"exact",
"example",
"excess",
"exchange",
"excite",
"exclude",
"excuse",
"execute",
"exercise",
"exhaust",
"exhibit",
"exile",
"exist",
"exit",
"exotic",
"expand",
"expect",
"expire",
"explain",
"expose",
"express",
"extend",
"extra",
"eye",
"eyebrow",
"fabric",
"face",
"faculty",
"fade",
"faint",
"faith",
"fall",
"false",
"fame",
"family",
"famous",
"fan",
"fancy",
"fantasy",
"farm",
"fashion",
"fat",
"fatal",
"father",
"fatigue",
"fault",
"favorite",
"feature",
"february",
"federal",
"fee",
"feed",
"feel",
"female",
"fence",
"festival",
"fetch",
"fever",
"few",
"fiber",
"fiction",
"field",
"figure",
"file",
"film",
"filter",
"final",
"find",
"fine",
"finger",
"finish",
"fire",
"firm",
"first",
"fiscal",
"fish",
"fit",
"fitness",
"fix",
"flag",
"flame",
"flash",
"flat",
"flavor",
"flee",
"flight",
"flip",
"float",
"flock",
"floor",
"flower",
"fluid",
"flush",
"fly",
"foam",
"focus",
"fog",
"foil",
"fold",
"follow",
"food",
"foot",
"force",
"forest",
"forget",
"fork",
"fortune",
"forum",
"forward",
"fossil",
"foster",
"found",
"fox",
"fragile",
"frame",
"frequent",
"fresh",
"friend",
"fringe",
"frog",
"front",
"frost",
"frown",
"frozen",
"fruit",
"fuel",
"fun",
"funny",
"furnace",
"fury",
"future",
"gadget",
"gain",
"galaxy",
"gallery",
"game",
"gap",
"garage",
"garbage",
"garden",
"garlic",
"garment",
"gas",
"gasp",
"gate",
"gather",
"gauge",
"gaze",
"general",
"genius",
"genre",
"gentle",
"genuine",
"gesture",
"ghost",
"giant",
"gift",
"giggle",
"ginger",
"giraffe",
"girl",
"give",
"glad",
"glance",
"glare",
"glass",
"glide",
"glimpse",
"globe",
"gloom",
"glory",
"glove",
"glow",
"glue",
"goat",
"goddess",
"gold",
"good",
"goose",
"gorilla",
"gospel",
"gossip",
"govern",
"gown",
"grab",
"grace",
"grain",
"grant",
"grape",
"grass",
"gravity",
"great",
"green",
"grid",
"grief",
"grit",
"grocery",
"group",
"grow",
"grunt",
"guard",
"guess",
"guide",
"guilt",
"guitar",
"gun",
"gym",
"habit",
"hair",
"half",
"hammer",
"hamster",
"hand",
"happy",
"harbor",
"hard",
"harsh",
"harvest",
"hat",
"have",
"hawk",
"hazard",
"head",
"health",
"heart",
"heavy",
"hedgehog",
"height",
"hello",
"helmet",
"help",
"hen",
"hero",
"hidden",
"high",
"hill",
"hint",
"hip",
"hire",
"history",
"hobby",
"hockey",
"hold",
"hole",
"holiday",
"hollow",
"home",
"honey",
"hood",
"hope",
"horn",
"horror",
"horse",
"hospital",
"host",
"hotel",
"hour",
"hover",
"hub",
"huge",
"human",
"humble",
"humor",
"hundred",
"hungry",
"hunt",
"hurdle",
"hurry",
"hurt",
"husband",
"hybrid",
"ice",
"icon",
"idea",
"identify",
"idle",
"ignore",
"ill",
"illegal",
"illness",
"image",
"imitate",
"immense",
"immune",
"impact",
"impose",
"improve",
"impulse",
"inch",
"include",
"income",
"increase",
"index",
"indicate",
"indoor",
"industry",
"infant",
"inflict",
"inform",
"inhale",
"inherit",
"initial",
"inject",
"injury",
"inmate",
"inner",
"innocent",
"input",
"inquiry",
"insane",
"insect",
"inside",
"inspire",
"install",
"intact",
"interest",
"into",
"invest",
"invite",
"involve",
"iron",
"island",
"isolate",
"issue",
"item",
"ivory",
"jacket",
"jaguar",
"jar",
"jazz",
"jealous",
"jeans",
"jelly",
"jewel",
"job",
"join",
"joke",
"journey",
"joy",
"judge",
"juice",
"jump",
"jungle",
"junior",
"junk",
"just",
"kangaroo",
"keen",
"keep",
"ketchup",
"key",
"kick",
"kid",
"kidney",
"kind",
"kingdom",
"kiss",
"kit",
"kitchen",
"kite",
"kitten",
"kiwi",
"knee",
"knife",
"knock",
"know",
"lab",
"label",
"labor",
"ladder",
"lady",
"lake",
"lamp",
"language",
"laptop",
"large",
"later",
"latin",
"laugh",
"laundry",
"lava",
"law",
"lawn",
"lawsuit",
"layer",
"lazy",
"leader",
"leaf",
"learn",
"leave",
"lecture",
"left",
"leg",
"legal",
"legend",
"leisure",
"lemon",
"lend",
"length",
"lens",
"leopard",
"lesson",
"letter",
"level",
"liar",
"liberty",
"library",
"license",
"life",
"lift",
"light",
"like",
"limb",
"limit",
"link",
"lion",
"liquid",
"list",
"little",
"live",
"lizard",
"load",
"loan",
"lobster",
"local",
"lock",
"logic",
"lonely",
"long",
"loop",
"lottery",
"loud",
"lounge",
"love",
"loyal",
"lucky",
"luggage",
"lumber",
"lunar",
"lunch",
"luxury",
"lyrics",
"machine",
"mad",
"magic",
"magnet",
"maid",
"mail",
"main",
"major",
"make",
"mammal",
"man",
"manage",
"mandate",
"mango",
"mansion",
"manual",
"maple",
"marble",
"march",
"margin",
"marine",
"market",
"marriage",
"mask",
"mass",
"master",
"match",
"material",
"math",
"matrix",
"matter",
"maximum",
"maze",
"meadow",
"mean",
"measure",
"meat",
"mechanic",
"medal",
"media",
"melody",
"melt",
"member",
"memory",
"mention",
"menu",
"mercy",
"merge",
"merit",
"merry",
"mesh",
"message",
"metal",
"method",
"middle",
"midnight",
"milk",
"million",
"mimic",
"mind",
"minimum",
"minor",
"minute",
"miracle",
"mirror",
"misery",
"miss",
"mistake",
"mix",
"mixed",
"mixture",
"mobile",
"model",
"modify",
"mom",
"moment",
"monitor",
"monkey",
"monster",
"month",
"moon",
"moral",
"more",
"morning",
"mosquito",
"mother",
"motion",
"motor",
"mountain",
"mouse",
"move",
"movie",
"much",
"muffin",
"mule",
"multiply",
"muscle",
"museum",
"mushroom",
"music",
"must",
"mutual",
"myself",
"mystery",
"myth",
"naive",
"name",
"napkin",
"narrow",
"nasty",
"nation",
"nature",
"near",
"neck",
"need",
"negative",
"neglect",
"neither",
"nephew",
"nerve",
"nest",
"net",
"network",
"neutral",
"never",
"news",
"next",
"nice",
"night",
"noble",
"noise",
"nominee",
"noodle",
"normal",
"north",
"nose",
"notable",
"note",
"nothing",
"notice",
"novel",
"now",
"nuclear",
"number",
"nurse",
"nut",
"oak",
"obey",
"object",
"oblige",
"obscure",
"observe",
"obtain",
"obvious",
"occur",
"ocean",
"october",
"odor",
"off",
"offer",
"office",
"often",
"oil",
"okay",
"old",
"olive",
"olympic",
"omit",
"once",
"one",
"onion",
"online",
"only",
"open",
"opera",
"opinion",
"oppose",
"option",
"orange",
"orbit",
"orchard",
"order",
"ordinary",
"organ",
"orient",
"original",
"orphan",
"ostrich",
"other",
"outdoor",
"outer",
"output",
"outside",
"oval",
"oven",
"over",
"own",
"owner",
"oxygen",
"oyster",
"ozone",
"pact",
"paddle",
"page",
"pair",
"palace",
"palm",
"panda",
"panel",
"panic",
"panther",
"paper",
"parade",
"parent",
"park",
"parrot",
"party",
"pass",
"patch",
"path",
"patient",
"patrol",
"pattern",
"pause",
"pave",
"payment",
"peace",
"peanut",
"pear",
"peasant",
"pelican",
"pen",
"penalty",
"pencil",
"people",
"pepper",
"perfect",
"permit",
"person",
"pet",
"phone",
"photo",
"phrase",
"physical",
"piano",
"picnic",
"picture",
"piece",
"pig",
"pigeon",
"pill",
"pilot",
"pink",
"pioneer",
"pipe",
"pistol",
"pitch",
"pizza",
"place",
"planet",
"plastic",
"plate",
"play",
"please",
"pledge",
"pluck",
"plug",
"plunge",
"poem",
"poet",
"point",
"polar",
"pole",
"police",
"pond",
"pony",
"pool",
"popular",
"portion",
"position",
"possible",
"post",
"potato",
"pottery",
"poverty",
"powder",
"power",
"practice",
"praise",
"predict",
"prefer",
"prepare",
"present",
"pretty",
"prevent",
"price",
"pride",
"primary",
"print",
"priority",
"prison",
"private",
"prize",
"problem",
"process",
"produce",
"profit",
"program",
"project",
"promote",
"proof",
"property",
"prosper",
"protect",
"proud",
"provide",
"public",
"pudding",
"pull",
"pulp",
"pulse",
"pumpkin",
"punch",
"pupil",
"puppy",
"purchase",
"purity",
"purpose",
"purse",
"push",
"put",
"puzzle",
"pyramid",
"quality",
"quantum",
"quarter",
"question",
"quick",
"quit",
"quiz",
"quote",
"rabbit",
"raccoon",
"race",
"rack",
"radar",
"radio",
"rail",
"rain",
"raise",
"rally",
"ramp",
"ranch",
"random",
"range",
"rapid",
"rare",
"rate",
"rather",
"raven",
"raw",
"razor",
"ready",
"real",
"reason",
"rebel",
"rebuild",
"recall",
"receive",
"recipe",
"record",
"recycle",
"reduce",
"reflect",
"reform",
"refuse",
"region",
"regret",
"regular",
"reject",
"relax",
"release",
"relief",
"rely",
"remain",
"remember",
"remind",
"remove",
"render",
"renew",
"rent",
"reopen",
"repair",
"repeat",
"replace",
"report",
"require",
"rescue",
"resemble",
"resist",
"resource",
"response",
"result",
"retire",
"retreat",
"return",
"reunion",
"reveal",
"review",
"reward",
"rhythm",
"rib",
"ribbon",
"rice",
"rich",
"ride",
"ridge",
"rifle",
"right",
"rigid",
"ring",
"riot",
"ripple",
"risk",
"ritual",
"rival",
"river",
"road",
"roast",
"robot",
"robust",
"rocket",
"romance",
"roof",
"rookie",
"room",
"rose",
"rotate",
"rough",
"round",
"route",
"royal",
"rubber",
"rude",
"rug",
"rule",
"run",
"runway",
"rural",
"sad",
"saddle",
"sadness",
"safe",
"sail",
"salad",
"salmon",
"salon",
"salt",
"salute",
"same",
"sample",
"sand",
"satisfy",
"satoshi",
"sauce",
"sausage",
"save",
"say",
"scale",
"scan",
"scare",
"scatter",
"scene",
"scheme",
"school",
"science",
"scissors",
"scorpion",
"scout",
"scrap",
"screen",
"script",
"scrub",
"sea",
"search",
"season",
"seat",
"second",
"secret",
"section",
"security",
"seed",
"seek",
"segment",
"select",
"sell",
"seminar",
"senior",
"sense",
"sentence",
"series",
"service",
"session",
"settle",
"setup",
"seven",
"shadow",
"shaft",
"shallow",
"share",
"shed",
"shell",
"sheriff",
"shield",
"shift",
"shine",
"ship",
"shiver",
"shock",
"shoe",
"shoot",
"shop",
"short",
"shoulder",
"shove",
"shrimp",
"shrug",
"shuffle",
"shy",
"sibling",
"sick",
"side",
"siege",
"sight",
"sign",
"silent",
"silk",
"silly",
"silver",
"similar",
"simple",
"since",
"sing",
"siren",
"sister",
"situate",
"six",
"size",
"skate",
"sketch",
"ski",
"skill",
"skin",
"skirt",
"skull",
"slab",
"slam",
"sleep",
"slender",
"slice",
"slide",
"slight",
"slim",
"slogan",
"slot",
"slow",
"slush",
"small",
"smart",
"smile",
"smoke",
"smooth",
"snack",
"snake",
"snap",
"sniff",
"snow",
"soap",
"soccer",
"social",
"sock",
"soda",
"soft",
"solar",
"soldier",
"solid",
"solution",
"solve",
"someone",
"song",
"soon",
"sorry",
"sort",
"soul",
"sound",
"soup",
"source",
"south",
"space",
"spare",
"spatial",
"spawn",
"speak",
"special",
"speed",
"spell",
"spend",
"sphere",
"spice",
"spider",
"spike",
"spin",
"spirit",
"split",
"spoil",
"sponsor",
"spoon",
"sport",
"spot",
"spray",
"spread",
"spring",
"spy",
"square",
"squeeze",
"squirrel",
"stable",
"stadium",
"staff",
"stage",
"stairs",
"stamp",
"stand",
"start",
"state",
"stay",
"steak",
"steel",
"stem",
"step",
"stereo",
"stick",
"still",
"sting",
"stock",
"stomach",
"stone",
"stool",
"story",
"stove",
"strategy",
"street",
"strike",
"strong",
"struggle",
"student",
"stuff",
"stumble",
"style",
"subject",
"submit",
"subway",
"success",
"such",
"sudden",
"suffer",
"sugar",
"suggest",
"suit",
"summer",
"sun",
"sunny",
"sunset",
"super",
"supply",
"supreme",
"sure",
"surface",
"surge",
"surprise",
"surround",
"survey",
"suspect",
"sustain",
"swallow",
"swamp",
"swap",
"swarm",
"swear",
"sweet",
"swift",
"swim",
"swing",
"switch",
"sword",
"symbol",
"symptom",
"syrup",
"system",
"table",
"tackle",
"tag",
"tail",
"talent",
"talk",
"tank",
"tape",
"target",
"task",
"taste",
"tattoo",
"taxi",
"teach",
"team",
"tell",
"ten",
"tenant",
"tennis",
"tent",
"term",
"test",
"text",
"thank",
"that",
"theme",
"then",
"theory",
"there",
"they",
"thing",
"this",
"thought",
"three",
"thrive",
"throw",
"thumb",
"thunder",
"ticket",
"tide",
"tiger",
"tilt",
"timber",
"time",
"tiny",
"tip",
"tired",
"tissue",
"title",
"toast",
"tobacco",
"today",
"toddler",
"toe",
"together",
"toilet",
"token",
"tomato",
"tomorrow",
"tone",
"tongue",
"tonight",
"tool",
"tooth",
"top",
"topic",
"topple",
"torch",
"tornado",
"tortoise",
"toss",
"total",
"tourist",
"toward",
"tower",
"town",
"toy",
"track",
"trade",
"traffic",
"tragic",
"train",
"transfer",
"trap",
"trash",
"travel",
"tray",
"treat",
"tree",
"trend",
"trial",
"tribe",
"trick",
"trigger",
"trim",
"trip",
"trophy",
"trouble",
"truck",
"true",
"truly",
"trumpet",
"trust",
"truth",
"try",
"tube",
"tuition",
"tumble",
"tuna",
"tunnel",
"turkey",
"turn",
"turtle",
"twelve",
"twenty",
"twice",
"twin",
"twist",
"two",
"type",
"typical",
"ugly",
"umbrella",
"unable",
"unaware",
"uncle",
"uncover",
"under",
"undo",
"unfair",
"unfold",
"unhappy",
"uniform",
"unique",
"unit",
"universe",
"unknown",
"unlock",
"until",
"unusual",
"unveil",
"update",
"upgrade",
"uphold",
"upon",
"upper",
"upset",
"urban",
"urge",
"usage",
"use",
"used",
"useful",
"useless",
"usual",
"utility",
"vacant",
"vacuum",
"vague",
"valid",
"valley",
"valve",
"van",
"vanish",
"vapor",
"various",
"vast",
"vault",
"vehicle",
"velvet",
"vendor",
"venture",
"venue",
"verb",
"verify",
"version",
"very",
"vessel",
"veteran",
"viable",
"vibrant",
"vicious",
"victory",
"video",
"view",
"village",
"vintage",
"violin",
"virtual",
"virus",
"visa",
"visit",
"visual",
"vital",
"vivid",
"vocal",
"voice",
"void",
"volcano",
"volume",
"vote",
"voyage",
"wage",
"wagon",
"wait",
"walk",
"wall",
"walnut",
"want",
"warfare",
"warm",
"warrior",
"wash",
"wasp",
"waste",
"water",
"wave",
"way",
"wealth",
"weapon",
"wear",
"weasel",
"weather",
"web",
"wedding",
"weekend",
"weird",
"welcome",
"west",
"wet",
"whale",
"what",
"wheat",
"wheel",
"when",
"where",
"whip",
"whisper",
"wide",
"width",
"wife",
"wild",
"will",
"win",
"window",
"wine",
"wing",
"wink",
"winner",
"winter",
"wire",
"wisdom",
"wise",
"wish",
"witness",
"wolf",
"woman",
"wonder",
"wood",
"wool",
"word",
"work",
"world",
"worry",
"worth",
"wrap",
"wreck",
"wrestle",
"wrist",
"write",
"wrong",
"yard",
"year",
"yellow",
"you",
"young",
"youth",
"zebra",
"zero",
"zone",
"zoo",
] | /rift_framework-1.0.0rc1-py3-none-any.whl/rift/keys/mnemonic/bip39/english.py | 0.45302 | 0.597784 | english.py | pypi |
import base64
import json
import os
from hashlib import sha256
from os import path
from typing import Union
from cryptography.fernet import Fernet
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from rift.fift.types.builder import Builder
from rift.fift.types.bytes import Bytes
from rift.fift.types.cell import Cell
from rift.keys.key_pair import KeyPair
from rift.runtime.config import Config
from rift.types.payload import Payload
class KeyStore:
    """Holds the signing key pair, persisted as an encrypted file on disk.

    The active keystore is a process-wide singleton created lazily by
    ``initialize()`` and reached through the classmethod API
    (``sign``, ``sign_pack``, ``public_key``, ``override``).
    """

    # Process-wide keystore instance, lazily created by initialize().
    _global_ks: "KeyStore" = None
    # Fernet instance used to encrypt/decrypt the stored private key.
    _key: Fernet
    # Whether the store is protected by a user-supplied password.
    secure: bool
    # The key pair used for signing.
    pair: KeyPair

    def __init__(self) -> None:
        pass

    @classmethod
    def initialize(cls):
        """Load the keystore from disk, or interactively create a new one.

        In ``Config.TEST`` mode no prompts are shown: a fresh key pair is
        generated and nothing is written to disk.
        """
        ks = cls._read_keystore()
        if ks is None:
            print("No keystore found!")
            print("Please select one of following options:")
            print("\t[1] Import your mnemonics")
            print("\t[2] Generate new mnemonics")
            print("\t[3] Import your 32-byte private key as a hex string")
            print("\t[4] Import your 32-byte private key as a base64 string")
            if Config.TEST:
                # Non-interactive test runs always generate a fresh key.
                choice = 2
            else:
                choice = int(input(":"))
            if choice == 1:
                mnemonics = input(
                    "Please provide your mnemonics as a space separated string:",
                )
                kp = KeyPair(mnemonic=mnemonics)
            elif choice == 2:
                kp = KeyPair()
                print("Please save the following mnemonic for later imports!")
                print("!--- CAUTION: YOU WON'T BE ABLE TO GET IT LATER ---!")
                print("24 Secret Words:")
                print(" ".join(kp.mnem))
            elif choice == 3:
                pk = input(
                    "Please provide your 32-byte private key as a hex string:",
                )
                kp = KeyPair(priv_key=pk)
            elif choice == 4:
                pk = input(
                    "Please provide your 32-byte private key as a base64 string:",
                )
                kp = KeyPair(priv_key=pk, encoding="base64")
            else:
                raise RuntimeError("Invalid choice! Valid range [1-4]")
            if Config.TEST:
                secure = False
            else:
                # Keep the prompt parsing inside this branch so the TEST
                # path never calls .lower() on a bool.
                answer = input(
                    "Would you like to secure keystore with custom password (we'll ask on every run)? [Y/n]: ",
                )
                secure = answer.lower() == "y"
            if secure:
                pass_ = input("Please input a memorable password: ")
            else:
                pass_ = None
            ks = KeyStore()
            ks._key = cls._fernet_key(pass_)
            ks.pair = kp
            ks.secure = secure
            if not Config.TEST:
                cls._write_keystore(ks)
            print("Configuration done successfully!")
        cls._global_ks = ks

    @classmethod
    def _read_keystore(cls):
        """Read and decrypt the on-disk keystore; return None if absent."""
        ks_f = path.join(Config.dirs.user_data_dir, "keys/.keystore")
        if not path.exists(ks_f):
            return None
        with open(ks_f, "r") as f:
            data = json.loads(f.read())
        if data["sc"]:
            pass_ = input("Please provide your keystore password:")
        else:
            pass_ = None
        ks = KeyStore()
        ks._key = cls._fernet_key(pass_)
        # FIX: restore the secure flag so a later _write_keystore keeps it.
        ks.secure = bool(data["sc"])
        pk = bytes.fromhex(data["pk"])
        pk = ks._key.decrypt(pk)
        ks.pair = KeyPair(priv_key=pk.hex())
        return ks

    @classmethod
    def _write_keystore(cls, ks: "KeyStore"):
        """Encrypt the private key and persist the keystore as JSON."""
        d = {
            "sc": ks.secure,
            "pk": ks._key.encrypt(bytes(ks.pair.priv_key)).hex(),
        }
        ks_f = path.join(Config.dirs.user_data_dir, "keys/.keystore")
        os.makedirs(path.dirname(ks_f), exist_ok=True)
        with open(ks_f, "w") as f:
            f.write(json.dumps(d, indent=4))

    @classmethod
    def _fernet_key(cls, password: str = None):
        """Derive a Fernet key from ``password`` (or a default) via PBKDF2.

        NOTE(review): the salt is derived from the password itself, i.e.
        deterministic rather than random — acceptable only for a local,
        single-user store; confirm this is intended.
        """
        if password is None:
            password = "rift-key-store!"
        password = password.encode("utf-8")
        salt = sha256(password).digest()[:16]
        kdf = PBKDF2HMAC(
            algorithm=hashes.SHA256(),
            length=32,
            salt=salt,
            iterations=390000,
        )
        key = base64.urlsafe_b64encode(kdf.derive(password))
        return Fernet(key)

    @classmethod
    def sign(
        cls,
        data: Union["Bytes", "Cell", "Payload"],
        hash_bytes=False,
    ) -> "Bytes":
        """Sign ``data`` with the global key pair, initializing if needed."""
        if not cls._global_ks:
            cls.initialize()
        return cls._global_ks.pair.sign(data, hash_bytes)

    @classmethod
    def sign_pack(
        cls,
        data: Union["Bytes", "Cell", "Payload"],
        hash_bytes=False,
    ) -> "Cell":
        """Sign ``data`` and pack the signature followed by the data into a cell."""
        sig = cls.sign(data, hash_bytes=hash_bytes)
        b = Builder()
        b.call_("B,", sig)
        if isinstance(data, Bytes):
            b.call_("B,", data)
        elif isinstance(data, Payload):
            b = b.builder(data.as_builder())
        elif isinstance(data, Cell):
            b = b.slice(data.parse())
        return b.end()

    @classmethod
    def public_key(cls, bytes_=False) -> int:
        """Return the public key as an int (default) or raw bytes."""
        if not cls._global_ks:
            cls.initialize()
        if bytes_:
            return cls._global_ks.pair.pub_key
        return cls._global_ks.pair.pub_key.call("B>u@", 256)[0]

    @classmethod
    def override(cls, private_hex: str):
        """
        This function overrides a private key in the running session
        Useful For Tests!
        """
        kp = KeyPair(priv_key=private_hex, encoding="hex")
        ks = KeyStore()
        ks.pair = kp
        cls._global_ks = ks
import codecs
import json
from hashlib import sha256
from math import ceil
from bitarray import bitarray
from crcset import crc32c
# Magic prefixes identifying the bag-of-cells (BOC) serialization variants.
reach_boc_magic_prefix = b"\xB5\xEE\x9C\x72"
lean_boc_magic_prefix = b"\x68\xff\x65\xf3"
lean_boc_magic_prefix_crc = b"\xac\xc3\xa7\x28"
class CellData:
    """Bit-level payload of a TON cell.

    Holds at most ``max_length`` bits (1023 for ordinary cells);
    ``max_length=None`` disables the overflow check.
    """

    def __init__(self, max_length=1023):
        self.data = bitarray()
        self.max_length = max_length

    def put_bool(self, element):
        """Append a single bit; raise on overflow when max_length is set."""
        if not (self.max_length is None):
            if len(self.data) >= self.max_length:
                raise Exception("Cell overflow")
        self.data.append(element)

    def put_arbitrary_uint(self, uint, bitsize):
        """Append ``uint`` as an unsigned big-endian integer of ``bitsize`` bits."""
        if bitsize <= 0 or (2**bitsize - 1 < uint):
            raise Exception(
                "Not enough bits (%d) to encode integer (%d)" % (bitsize, uint)
            )
        # Emit the most-significant bit first.
        for i in range(bitsize, 0, -1):
            k = 2 ** (i - 1)
            if uint // k == 1:
                self.put_bool(1)
                uint -= k
            else:
                self.put_bool(0)

    def put_uint8(self, uint):
        self.put_arbitrary_uint(uint, 8)

    def put_bytes(self, _bytes):
        for byte in _bytes:
            self.put_uint8(byte)

    def put_arbitrary_int(self, _int, bitsize):
        """Append ``_int`` as a two's-complement signed integer of ``bitsize`` bits."""
        if bitsize == 1:
            # A single bit can only encode 0 or -1.
            if _int in [0, -1]:
                self.put_bool(_int == -1)
                return
            else:
                raise Exception(
                    "Not enough bits (%d) to encode integer (%d)" % (bitsize, _int)
                )
        if _int < 0:
            self.put_bool(1)
            s = 2 ** (bitsize - 1)
            # BUG FIX: the two's-complement low bits of a negative value are
            # _int + 2^(bitsize-1); the previous ``s - _int`` exceeded the
            # (bitsize-1)-bit range and raised for every negative input.
            self.put_arbitrary_uint(s + _int, bitsize - 1)
        else:
            self.put_bool(0)
            self.put_arbitrary_uint(_int, bitsize - 1)

    def concatenate(self, another_cell_data):
        """Append another CellData's bits; enforce the 1023-bit cell limit."""
        if self.length() + another_cell_data.length() > 1023:
            raise Exception(
                "Not enough bits to concantenate cells: %d + %d"
                % (self.length(), another_cell_data.length())
            )
        self.data.extend(another_cell_data.data)

    def top_up(self):
        """Append the completion-tag marker bit when not byte-aligned.

        bitarray.tobytes() zero-fills to the byte boundary afterwards, so a
        single 1 bit is enough to mark where the real data ends.
        """
        d_len = len(self.data)
        additional_bits = ceil(d_len / 8) - d_len // 8
        # NOTE(review): the 128-byte special case skips the marker bit when a
        # full cell would overflow max_length — confirm against upstream.
        if ceil(d_len / 8) == 128:
            additional_bits -= 1
        for i in range(additional_bits):
            if i == 0:
                self.put_bool(1)
            else:
                self.put_bool(0)

    def copy(self):
        cd = CellData()
        cd.data = bitarray(self.data)
        return cd

    def length(self):
        return len(self.data)

    def top_upped_bytes(self):
        """Return the bits as bytes with completion-tag padding applied."""
        t = self.copy()
        t.top_up()
        return t.data.tobytes()

    def from_bytes(self, data, top_upped=False):
        """Load bits from ``data``; if ``top_upped``, strip the completion tag."""
        self.data = bitarray()
        self.data.frombytes(data)
        if top_upped:
            # Drop trailing zeros and the single 1 marker bit.
            x = self.data.pop()
            while not x:
                x = self.data.pop()

    def __eq__(self, another_cell_data):
        return (self.data.tobytes() == another_cell_data.data.tobytes()) and (
            self.length() == another_cell_data.length()
        )

    def __len__(self):
        return self.length()

    def __repr__(self):
        # A trailing underscore marks a non-byte-aligned (top-upped) payload.
        if self.length() % 8:
            x = self.copy()
            x.top_up()
            return "%s_" % (x.data.tobytes())
        else:
            return "%s" % (self.data.tobytes())
class Cell:
    """A TON cell: up to 1023 bits of data plus up to 4 references."""

    def __init__(self):
        self.data = CellData()
        self.refs = []
        self.special = False

    def level(self):
        """Level of an ordinary cell: the maximum level of its references."""
        if self.is_special():
            raise NotImplementedError(
                "Calculating level not implemented for special cells"
            )
        max_level = 0
        for k in self.refs:
            if k.level() > max_level:
                max_level = k.level()
        return max_level

    def is_special(self):
        return self.special

    def is_explicitly_stored_hashes(self):
        # Explicitly stored hashes are not supported; always 0.
        return 0

    def depth(self):
        """Depth of the reference tree rooted at this cell (leaf = 0)."""
        max_depth = 0
        if len(self.refs) > 0:
            for k in self.refs:
                if k.depth() > max_depth:
                    max_depth = k.depth()
            max_depth = max_depth + 1
        return max_depth

    def encoded_depth(self):
        # Depth encoded as two big-endian bytes.
        return (self.depth() // 256).to_bytes(1, "big") + (self.depth() % 256).to_bytes(
            1, "big"
        )

    def concatenate(self, another_cell):
        """Append another cell's bits and references to this cell."""
        self.data.concatenate(another_cell.data)
        self.refs = self.refs + another_cell.refs

    def refs_descriptor(self):
        # d1 descriptor byte: refs count + 8*special flag + 32*level.
        return (len(self.refs) + self.is_special() * 8 + self.level() * 32).to_bytes(
            1, "big"
        )

    def bits_descriptor(self):
        # d2 descriptor byte: floor(bits/8) + ceil(bits/8).
        return ((len(self.data) // 8) + ceil(len(self.data) / 8)).to_bytes(1, "big")

    def data_with_descriptors(self):
        return (
            self.refs_descriptor()
            + self.bits_descriptor()
            + self.data.top_upped_bytes()
        )

    def repr(self):
        """Standard cell representation (the input to the cell hash)."""
        ret = self.data_with_descriptors()
        for k in self.refs:
            ret += k.encoded_depth()
        for k in self.refs:
            ret += k.hash()
        return ret

    def hash(self):
        """SHA-256 of the standard cell representation."""
        hasher = sha256()
        hasher.update(self.repr())
        return hasher.digest()

    def serialize_for_boc(self, cells_index, ref_size):
        # This is not serialization of the cell as boc with this cell as root_cell
        # it is serialization of the cell to be used in boc serialization
        # NOTE(review): reference indexes are emitted with their minimal byte
        # length rather than padded to ref_size — verify for BOCs with more
        # than 255 cells.
        ret = self.data_with_descriptors()
        if self.is_explicitly_stored_hashes():
            raise NotImplementedError("Do not support explicitly stored hashes yet")
        for k in self.refs:
            ref_hash = k.hash()
            ref_index_int = cells_index[ref_hash]
            ref_index_hex = f"{ref_index_int:x}"
            if len(ref_index_hex) % 2:
                ref_index_hex = "0" + ref_index_hex
            reference = bytes.fromhex(ref_index_hex)
            ret += reference
        return ret

    def serialize_for_boc_size(self, cells_index, ref_size):
        return len(self.serialize_for_boc(cells_index, ref_size))

    def build_indexes(self):
        """Topologically order the cell DAG rooted at this cell.

        Returns (topological_order_array, index_hashmap): a list of
        (hash, cell) pairs and a map from hash to index in that list.
        """
        def move_to_end(index_hashmap, topological_order_array, target):
            # Re-rank `target` (and recursively its subtree) to the end so
            # that every parent precedes its children.
            # NOTE(review): the entry is appended again but the old copy is
            # not removed from its original position — confirm against the
            # upstream implementation this was derived from.
            target_index = index_hashmap[target]
            for hash in index_hashmap:
                if index_hashmap[hash] > target_index:
                    index_hashmap[hash] -= 1
            index_hashmap[target] = len(topological_order_array) - 1
            data = topological_order_array[target_index]
            topological_order_array.append(data)
            for subcell in data[1].refs:
                index_hashmap, topological_order_array = move_to_end(
                    index_hashmap, topological_order_array, subcell.hash()
                )
            return index_hashmap, topological_order_array

        def tree_walk(cell, topological_order_array, index_hashmap, parent_hash=None):
            # Depth-first walk collecting cells in parent-before-child order.
            cell_hash = cell.hash()
            if cell_hash in index_hashmap:
                if parent_hash:
                    if index_hashmap[parent_hash] > index_hashmap[cell_hash]:
                        index_hashmap, topological_order_array = move_to_end(
                            index_hashmap, topological_order_array, cell_hash
                        )
                return topological_order_array, index_hashmap
            index_hashmap[cell_hash] = len(topological_order_array)
            topological_order_array.append((cell_hash, cell))
            for subcell in cell.refs:
                topological_order_array, index_hashmap = tree_walk(
                    subcell, topological_order_array, index_hashmap, cell_hash
                )
            return topological_order_array, index_hashmap

        return tree_walk(self, [], {})

    def serialize_boc(
        self, has_idx=True, hash_crc32=True, has_cache_bits=False, flags=0
    ):
        # This is serialization of the cell to boc as root_cell
        topological_order, index_hashmap = self.build_indexes()
        cells_num = len(topological_order)
        s = (
            cells_num.bit_length()
        )  # Minimal number of bits to represent reference (unused?)
        s_bytes = max(ceil(s / 8), 1)
        full_size = 0
        cell_sizes = {}
        for (_hash, subcell) in topological_order:
            cell_sizes[_hash] = subcell.serialize_for_boc_size(index_hashmap, s_bytes)
            full_size += cell_sizes[_hash]
        offset_bits = full_size.bit_length()  # Minimal number of bits to encode offset
        offset_bytes = max(ceil(offset_bits / 8), 1)
        # has_idx 1bit, hash_crc32 1bit, has_cache_bits 1bit, flags 2bit,
        # s_bytes 3 bit
        serialization = CellData(max_length=None)
        serialization.put_bytes(reach_boc_magic_prefix)
        serialization.put_arbitrary_uint(bool(has_idx), 1)
        serialization.put_arbitrary_uint(bool(hash_crc32), 1)
        serialization.put_arbitrary_uint(bool(has_cache_bits), 1)
        serialization.put_arbitrary_uint(flags, 2)
        serialization.put_arbitrary_uint(s_bytes, 3)
        serialization.put_uint8(offset_bytes)
        serialization.put_arbitrary_uint(cells_num, s_bytes * 8)
        serialization.put_arbitrary_uint(1, s_bytes * 8)  # One root for now
        serialization.put_arbitrary_uint(0, s_bytes * 8)  # Complete BOCs only
        serialization.put_arbitrary_uint(full_size, offset_bytes * 8)
        serialization.put_arbitrary_uint(0, s_bytes * 8)  # Root should have index 0
        if has_idx:
            # Optional index section: per-cell serialized sizes.
            for (_hash, subcell) in topological_order:
                serialization.put_arbitrary_uint(cell_sizes[_hash], offset_bytes * 8)
        for (_hash, subcell) in topological_order:
            refcell_ser = subcell.serialize_for_boc(index_hashmap, offset_bytes)
            for byte in refcell_ser:
                serialization.put_uint8(byte)
        ser_arr = serialization.top_upped_bytes()
        if hash_crc32:
            # Trailing CRC32-C checksum, little-endian.
            ser_arr += crc32c(ser_arr).to_bytes(4, "little")
        return ser_arr

    def copy(self):
        """Shallow copy: bits are copied, refs list is copied (cells shared)."""
        ret = Cell()
        ret.data = self.data.copy()
        ret.refs = self.refs.copy()
        ret.special = self.special
        return ret

    def __repr__(self):
        return "<Cell refs_num: %d, data: %s>" % (len(self.refs), repr(self.data))

    def serialize_to_object(self):
        """Serialize this cell (recursively) into plain dicts/lists."""
        ret = {"data": {"b64": b"", "len": 0}, "refs": [], "special": False}
        for r in self.refs:
            ret["refs"].append(r.serialize_to_object())
        ret["data"]["b64"] = codecs.decode(
            codecs.encode(self.data.data.tobytes(), "base64"), "utf8"
        ).replace("\n", "")
        ret["data"]["len"] = len(self.data)
        ret["special"] = self.is_special()
        return ret

    def serialize_to_json(self):
        return json.dumps(self.serialize_to_object())

    def __eq__(self, another_cell):
        # Cells are equal when their data and all refs are (recursively) equal.
        if not len(self.refs) == len(another_cell.refs):
            return False
        for i in range(len(self.refs)):
            if not self.refs[i] == another_cell.refs[i]:
                return False
        return self.data == another_cell.data
def test_boc_serialization():
    """Check boc serialization of a few tiny hand-built cells against known bytes."""
    empty = Cell()
    expected_empty = bytes.fromhex("B5EE9C724101010100020000004CACB9CD")
    assert empty.serialize_boc(has_idx=False) == expected_empty, \
        "Wrong empty cell boc-serialization"

    single = Cell()
    single.data.put_uint8(0)
    expected_single = bytes.fromhex("B5EE9C7241010101000300000200D367DC41")
    assert single.serialize_boc(has_idx=False) == expected_single, \
        "Wrong <b 0 8 u, b> cell boc-serialization"

    parent = Cell()
    child = Cell()
    parent.data.put_uint8(0)
    child.data.put_uint8(73)
    parent.refs.append(child)
    expected_ref = bytes.fromhex(
        "B5EE9C72410102010007000102000100024995C5FE15"
    )
    assert parent.serialize_boc(has_idx=False) == expected_ref, \
        "Wrong '<b 0 8 u, <b 73 8 u, b> ref, b>' cell boc-serialization"
def parse_flags(serialization):
    """Split off the boc header byte and decode its flag fields.

    Returns ((has_idx, hash_crc32, has_cache_bits, flags, size_bytes), rest)
    where the three booleans are masked ints (truthy when set).
    """
    first, rest = serialization[0], serialization[1:]
    has_idx = first & 128
    hash_crc32 = first & 64
    has_cache_bits = first & 32
    low = first % 32
    flags = low >> 3
    size_bytes = low % 8
    return (has_idx, hash_crc32, has_cache_bits, flags, size_bytes), rest
def deserialize_cell_data(ser, index_size):
    """Parse one serialized cell from ``ser``.

    Returns (cell, remaining_bytes). The cell's refs are left as integer
    indexes, to be resolved later by substitute_indexes_with_cells().
    """
    # d1: level | explicit-hashes flag | special flag | refs count.
    # d2: encoded data length (odd means the last byte is partial).
    d1, d2, ser = ser[0], ser[1], ser[2:]
    _, d1 = (d1 // 32), d1 % 32
    h, d1 = (d1 // 16), d1 % 16
    if h > 0:
        raise NotImplementedError(
            "Cell with explicit hash references are not supported yet",
        )
    s, r = (d1 // 8), d1 % 8
    if r > 4:
        raise NotImplementedError(
            "Cell with explicit hash references are not supported yet (r>4)"
        )
    if d2 % 2:
        # Odd d2: last byte carries a completion tag (top-upped).
        data_size = (d2 + 1) // 2
        not_full = True
    else:
        data_size = d2 // 2
        not_full = False
    cell_data, ser = ser[:data_size], ser[data_size:]
    c = Cell()
    c.special = s > 0
    c.data.from_bytes(cell_data, top_upped=not_full)
    # Each reference is a big-endian index of `index_size` bytes.
    for i in range(r):
        ref_index, ser = int.from_bytes(ser[:index_size], "big"), ser[index_size:]
        c.refs.append(ref_index)
    return c, ser
def substitute_indexes_with_cells(cells):
    """Resolve integer reference indexes into actual cell objects, in place.

    Walks the list from the end (children have higher indexes than parents)
    and returns the same list for convenience.
    """
    for holder in reversed(cells):
        for pos, idx in enumerate(holder.refs):
            holder.refs[pos] = cells[idx]
    return cells
def deserialize_boc(boc):
    """Deserialize a bag-of-cells byte string and return its root cell."""
    bocs_prefixes = [
        reach_boc_magic_prefix,
        lean_boc_magic_prefix,
        lean_boc_magic_prefix_crc,
    ]
    prefix, boc = boc[:4], boc[4:]
    assert prefix in bocs_prefixes, "Unknown boc prefix"
    if prefix == reach_boc_magic_prefix:
        (has_idx, hash_crc32, has_cache_bits, flags, size_bytes), boc = parse_flags(boc)
        root_list = True
    elif prefix == lean_boc_magic_prefix:
        # Lean format: flags are fixed; first payload byte is size_bytes.
        (has_idx, hash_crc32, has_cache_bits, flags, size_bytes), boc = (
            1,
            0,
            0,
            0,
            boc[0],
        ), boc[1:]
        root_list = False
    elif prefix == lean_boc_magic_prefix_crc:
        (has_idx, hash_crc32, has_cache_bits, flags, size_bytes), boc = (
            1,
            1,
            0,
            0,
            boc[0],
        ), boc[1:]
        root_list = False
    off_bytes, boc = boc[0], boc[1:]
    cells_num, boc = int.from_bytes(boc[0:size_bytes], "big"), boc[size_bytes:]
    roots_num, boc = int.from_bytes(boc[0:size_bytes], "big"), boc[size_bytes:]
    absent_num, boc = int.from_bytes(boc[0:size_bytes], "big"), boc[size_bytes:]
    assert absent_num == 0
    tot_cells_size, boc = int.from_bytes(boc[0:off_bytes], "big"), boc[off_bytes:]
    if root_list:
        if roots_num > 1:
            raise NotImplementedError("Only 1 root supported for now (%d)" % roots_num)
        roots_indexes = []
        for i in range(roots_num):
            ri, boc = int.from_bytes(boc[0:size_bytes], "big"), boc[size_bytes:]
            roots_indexes.append(ri)
    else:
        roots_indexes = [0]
    if has_idx:
        # Index table is consumed but not used for parsing.
        offsets = []
        for i in range(cells_num):
            o, boc = int.from_bytes(boc[0:off_bytes], "big"), boc[off_bytes:]
            offsets.append(o)
    cells = []
    for i in range(cells_num):
        unfinished_cell, boc = deserialize_cell_data(boc, size_bytes)
        cells.append(unfinished_cell)
    cells = substitute_indexes_with_cells(cells)
    # TODO hash_crc32?
    # NOTE(review): returns cells[0] rather than cells[roots_indexes[0]] —
    # fine while the root is always first; confirm for other producers.
    return cells[0]
def deserialize_cell_from_object(data):
    """Rebuild a cell (recursively) from the dict form made by serialize_to_object."""
    result = Cell()
    raw = codecs.decode(codecs.encode(data["data"]["b64"], "utf8"), "base64")
    result.data.from_bytes(raw)
    # Trim the zero bits added by the byte round-trip.
    result.data.data = result.data.data[: data["data"]["len"]]
    result.refs = [deserialize_cell_from_object(child) for child in data["refs"]]
    result.special = data["special"]
    return result
def deserialize_cell_from_json(json_data):
    """Parse the JSON form produced by Cell.serialize_to_json back into a cell."""
    parsed = json.loads(json_data)
    return deserialize_cell_from_object(parsed)
def test_boc_deserialization():
    """Round-trip a small cell tree through boc serialization."""
    root = Cell()
    left = Cell()
    leaf = Cell()
    right = Cell()
    root.data.put_arbitrary_uint(2**25, 26)
    left.data.put_arbitrary_uint(2**37, 38)
    leaf.data.put_arbitrary_uint(2**41, 42)
    right.data.put_arbitrary_uint(2**44 - 2, 44)
    left.refs.append(leaf)
    root.refs.append(left)
    root.refs.append(right)
    decoded = deserialize_boc(root.serialize_boc(has_idx=False))
    assert decoded.data == root.data
    assert decoded.refs[0].data == left.data
    assert decoded.refs[1].data == right.data
    assert decoded.refs[0].refs[0].data == leaf.data
class Slice(Cell):
    """Read-only view over a cell's data and references.

    BUG FIX: this was declared with ``def`` instead of ``class``, so
    ``Slice(cell)`` returned None instead of a Slice instance (breaking
    callers such as serialize_tvm_element).
    """

    def __init__(self, cell):
        # Copy so that reading the slice never mutates the source cell.
        self.data = cell.data.copy()
        self.refs = cell.refs.copy()

    def __repr__(self):
        return "<Slice refs_num: %d, data: %s>" % (len(self.refs), repr(self.data))
import codecs
from .cell import Slice, deserialize_boc, deserialize_cell_from_json
def render_tvm_element(element_type, element):
    """Wrap a python value into the tonlib tvm stack-entry dict format.

    Supported element types: "num"/"number"/"int" (decimal number entry),
    "cell"/"slice" (JSON-serialized cell, re-encoded as boc base64), and
    "tvm.Slice"/"tvm.Cell" (already base64-encoded boc bytes).
    """
    if element_type in ["num", "number", "int"]:
        decimal = str(int(str(element), 0))
        return {
            "@type": "tvm.stackEntryNumber",
            "number": {"@type": "tvm.numberDecimal", "number": decimal},
        }
    if element_type in ("cell", "slice"):
        cell = deserialize_cell_from_json(element)
        boc_b64 = codecs.encode(cell.serialize_boc(has_idx=False), "base64")
        encoded = codecs.decode(boc_b64, "utf-8").replace("\n", "")
        if element_type == "cell":
            return {
                "@type": "tvm.stackEntryCell",
                "cell": {"@type": "tvm.Cell", "bytes": encoded},
            }
        return {
            "@type": "tvm.stackEntrySlice",
            "slice": {"@type": "tvm.Slice", "bytes": encoded},
        }
    if element_type == "tvm.Slice":
        return {
            "@type": "tvm.stackEntrySlice",
            "slice": {"@type": "tvm.Slice", "bytes": element},
        }
    if element_type == "tvm.Cell":
        return {
            "@type": "tvm.stackEntryCell",
            "cell": {"@type": "tvm.Cell", "bytes": element},
        }
    raise NotImplementedError()
def render_tvm_stack(stack_data):
    """Render a whole tvm stack into tonlib's stack-entry dicts.

    Elements like that are expected:
    [["num", 300], ["cell", "0x"], ["dict", {...}]]
    Currently only "num", "cell" and "slice" are supported.
    To be implemented:
    T: "list", "tuple", "num", "cell", "slice", "dict", "list"
    """
    return [render_tvm_element(*entry) for entry in stack_data]
def serialize_tvm_element(t):
    """Convert a tonlib tvm stack-entry dict into the [type, value] list form.

    Raises Exception when ``t`` is not a tagged entry or the tag is unknown.
    """
    if "@type" not in t:
        raise Exception("Not TVM stack element")
    entry_type = t["@type"]
    if entry_type == "tvm.stackEntryNumber":
        return ["num", hex(int(t["number"]["number"]))]
    elif entry_type == "tvm.stackEntrySlice":
        # BUG FIX: slice entries carry their payload under the "slice" key
        # (see render_tvm_element / the tonlib TL scheme), not "cell" —
        # the old code raised KeyError for every slice entry.
        data = codecs.encode(t["slice"]["bytes"], "utf8")
        data = codecs.decode(data, "base64")
        s = Slice(deserialize_boc(data))
        return [
            "cell",
            {"bytes": t["slice"]["bytes"], "object": s.serialize_to_object()},
        ]
    elif entry_type == "tvm.stackEntryCell":
        data = codecs.encode(t["cell"]["bytes"], "utf8")
        data = codecs.decode(data, "base64")
        cell = deserialize_boc(data)
        return [
            "cell",
            {"bytes": t["cell"]["bytes"], "object": cell.serialize_to_object()},
        ]
    elif entry_type == "tvm.stackEntryTuple":
        return ["tuple", t["tuple"]]
    elif entry_type == "tvm.stackEntryList":
        return ["list", t["list"]]
    else:
        raise Exception("Unknown type")
def serialize_tvm_stack(tvm_stack):
    """Serialize every tonlib tvm stack entry into [type, value] lists."""
    stack = []
    for t in tvm_stack:
        stack.append(serialize_tvm_element(t))
    return stack
import asyncio
import base64
import codecs
import functools
import logging
import struct
from functools import wraps
from crcset import crc16xmodem
logger = logging.getLogger(__name__)
def b64str_to_bytes(b64str):
    """Decode a base64 string into raw bytes."""
    return codecs.decode(b64str.encode("utf8"), "base64")
def b64str_to_hex(b64str):
    """Decode a base64 string and return its payload as a lowercase hex string."""
    raw = codecs.decode(codecs.encode(b64str, "utf8"), "base64")
    hex_bytes = codecs.encode(raw, "hex")
    return codecs.decode(hex_bytes, "utf8")
def hex_to_b64str(x):
    """Convert a hex string into its base64 representation (no trailing newline)."""
    raw = codecs.decode(x, "hex")
    b64 = codecs.encode(raw, "base64").decode()
    return b64.replace("\n", "")
def hash_to_hex(b64_or_hex_hash):
    """
    Detect encoding of transactions hash and if necessary convert it to hex.
    """
    length = len(b64_or_hex_hash)
    if length == 44:
        # Hash is base64
        return b64str_to_hex(b64_or_hex_hash)
    if length == 64:
        # Hash is hex
        return b64_or_hex_hash
    raise ValueError("Invalid hash")
def pubkey_b64_to_hex(b64_key):
    """
    Convert tonlib's pubkey in format f'I{"H"*16}' i.e. prefix:key to the
    upper-case hex filename used in the keystore.

    :param b64_key: base64 encoded 36 bytes of public key
    :return: upper-case hex string
    """
    raw = base64.b64decode(b64_key)
    words = 18
    # NOTE(review): relies on native little-endian 'H' packing — confirm
    # this utility is only used on little-endian hosts.
    shorts = struct.unpack(f'{"H"*words}', raw)
    packed = b"".join(v.to_bytes(2, byteorder="little") for v in shorts)
    # Swap the high/low nibbles of every byte.
    swapped = b"".join(
        (((b & 0x0F) << 4) | ((b & 0xF0) >> 4)).to_bytes(1, byteorder="little")
        for b in packed
    )
    return swapped.hex().upper()
def parallelize(f):
    """Decorate method ``f`` so it is dispatched on ``self._executor``.

    ``self._style`` selects the mechanism: "futures" submits to the executor
    (returning a Future); "asyncio" schedules it on the event loop's executor.
    Any other style raises RuntimeError.
    """
    @functools.wraps(f)
    def wrapper(self, *args, **kwds):
        style = self._style
        if style == "futures":
            return self._executor.submit(f, self, *args, **kwds)
        if style == "asyncio":
            loop = asyncio.get_event_loop()
            bound = functools.partial(f, self, *args, **kwds)
            return loop.run_in_executor(self._executor, bound)
        raise RuntimeError(style)
    return wrapper
def coro_result(coro):
    """Run ``coro`` to completion and return its result.

    FIX: ``asyncio.get_event_loop()`` no longer implicitly creates a loop on
    modern Python when none is set for the current thread; fall back to
    creating (and installing) a fresh loop in that case.
    """
    try:
        loop = asyncio.get_event_loop()
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    return loop.run_until_complete(coro)
def raw_to_userfriendly(address, tag=0x11):
    """Convert a raw "wc:hex" address into the user-friendly base64url form.

    :param address: raw address, e.g. "0:ABCD...".
    :param tag: flags byte prepended to the payload (0x11 = bounceable).
    """
    wc_str, key_hex = address.split(":")
    workchain_id = int(wc_str)
    key = bytearray.fromhex(key_hex)
    # Pair up the 32 key bytes into 16 shorts (second byte is the high one;
    # native little-endian 'H' packing restores the original byte order).
    short_ints = [hi * 256 + lo for lo, hi in zip(*[iter(key)] * 2)]
    payload = struct.pack(f'Bb{"H"*16}', tag, workchain_id, *short_ints)
    checksum = crc16xmodem(payload)
    full = payload + struct.pack(">H", checksum)
    return base64.urlsafe_b64encode(full).decode("utf-8")
def userfriendly_to_raw(address):
    """Convert a user-friendly base64url address back to the raw "wc:HEX" form."""
    # Skip the tag byte; keep workchain (1 byte) + key (32 bytes).
    decoded = base64.urlsafe_b64decode(address)[1:34]
    (workchain_id,) = struct.unpack("b", decoded[:1])
    key_hex = decoded[1:].hex().upper()
    return f"{workchain_id}:{key_hex}"
def str_b64encode(s):
    """Base64-encode a non-empty str; return None for empty/None/non-str input."""
    if not (s and isinstance(s, str)):
        return None
    return base64.b64encode(s.encode("utf-8")).decode("utf-8")
# repeat
def retry_async(repeats=3, last_archval=False, raise_error=True):
    """Decorator factory retrying an async function up to ``repeats`` times.

    :param repeats: maximum number of attempts.
    :param last_archval: if True, the final attempt passes ``archival=True``
        to the wrapped coroutine.
    :param raise_error: if True, re-raise the last exception after all
        attempts fail; otherwise return None.

    BUG FIX: the loop now breaks after a successful attempt — previously a
    succeeding coroutine was re-invoked ``repeats`` times, repeating its
    side effects.
    """
    def decorator(func):
        @wraps(func)
        async def wrapper(*args, **kwargs):
            result = None
            exception = None
            for i in range(repeats):
                try:
                    kwargs_loc = kwargs.copy()
                    if i == repeats - 1 and last_archval:
                        logger.info("Retry with archival node")
                        kwargs_loc["archival"] = True
                    result = await func(*args, **kwargs_loc)
                    exception = None
                    break
                except Exception as ee:
                    logger.warning(f"Retry. Attempt {i+1}")
                    exception = ee
            if exception is not None and raise_error:
                raise exception
            return result
        # end def
        return wrapper
    return decorator
import codecs
import json
import math
from hashlib import sha256
from bitarray import bitarray
from bitarray.util import ba2hex, ba2int
from rift_tonlib.types.cell import Cell, deserialize_boc
from rift_tonlib.types.dict_utils import parse_hashmap
class Slice:
    """Sequential reader over a cell's bits and references."""

    def __init__(self, cell: Cell):
        # Bit payload and read cursor.
        self._data = cell.data.data
        self._data_offset = 0
        # Child cells and read cursor.
        self._refs = cell.refs
        self._refs_offset = 0

    def prefetch_next(self, bits_count: int):
        """Return the next ``bits_count`` bits without advancing the cursor."""
        return self._data[self._data_offset : self._data_offset + bits_count]

    def read_next(self, bits_count: int):
        """Return the next ``bits_count`` bits and advance the cursor."""
        result = self._data[self._data_offset : self._data_offset + bits_count]
        self._data_offset += bits_count
        return result

    def read_next_ref(self):
        """Consume the next reference and return it wrapped in a new Slice."""
        cell = self._refs[self._refs_offset]
        self._refs_offset += 1
        return Slice(cell)

    def read_uint(self, bits_count: int):
        """Read an unsigned big-endian integer of ``bits_count`` bits."""
        return ba2int(self.read_next(bits_count), signed=False)

    def read_var_uint(self, max_len: int):
        """
        var_uint$_ {n:#} len:(#< n) value:(uint (len * 8))
        = VarUInteger n;
        """
        # Length header is ceil(log2(max_len)) bits, then len*8 value bits.
        header_bits = math.ceil(math.log2(max_len))
        uint_len = ba2int(self.read_next(header_bits), signed=False)
        if uint_len == 0:
            return 0
        return ba2int(self.read_next(uint_len * 8), signed=False)

    def bits_left(self):
        return len(self._data) - self._data_offset

    def refs_left(self):
        return len(self._refs) - self._refs_offset

    def raise_if_not_empty(self):
        """Assert that every bit and every reference has been consumed."""
        assert (
            self.bits_left() == 0
        ), f"Parsing error - slice has {self.bits_left()} unread bits left."
        assert (
            self.refs_left() == 0
        ), f"Parsing error - slice has {self.refs_left()} unread refs left."
class CurrencyCollection:
    """
    nanograms$_ amount:(VarUInteger 16) = Grams;
    extra_currencies$_ dict:(HashmapE 32 (VarUInteger 32))
    = ExtraCurrencyCollection;
    currencies$_ grams:Grams other:ExtraCurrencyCollection
    = CurrencyCollection;
    """

    def __init__(self, slice: Slice):
        # Toncoin amount in nanograms.
        self.grams = slice.read_var_uint(16)
        # Maybe-bit for the extra-currency dictionary.
        has_extra = slice.read_next(1)
        if has_extra == bitarray("1"):
            # TODO: parse hashmap instead of just consuming the ref.
            extra_currency_collection = slice.read_next_ref()
class TrStoragePhase:
    """
    tr_phase_storage$_ storage_fees_collected:Grams
    storage_fees_due:(Maybe Grams)
    status_change:AccStatusChange
    = TrStoragePhase;
    """

    def __init__(self, cell_slice: Slice):
        self.storage_fees_collected = cell_slice.read_var_uint(16)
        # Maybe Grams: presence bit first, then the amount.
        if cell_slice.read_next(1).any():
            self.storage_fees_due = cell_slice.read_var_uint(16)
        else:
            self.storage_fees_due = None
        # AccStatusChange: acst_unchanged$0, acst_frozen$10, acst_deleted$11.
        if cell_slice.read_next(1) == bitarray("0"):
            self.status_change = "acst_unchanged"
        elif cell_slice.read_next(1) == bitarray("0"):
            self.status_change = "acst_frozen"
        else:
            self.status_change = "acst_deleted"
class TrCreditPhase:
    """
    tr_phase_credit$_ due_fees_collected:(Maybe Grams)
    credit:CurrencyCollection = TrCreditPhase;
    """

    def __init__(self, cell_slice: Slice):
        # Maybe Grams: presence bit first, then the amount.
        if cell_slice.read_next(1).any():
            self.due_fees_collected = cell_slice.read_var_uint(16)
        else:
            self.due_fees_collected = None
        self.credit = CurrencyCollection(cell_slice)
class TrComputePhase:
    """
    tr_phase_compute_skipped$0 reason:ComputeSkipReason
    = TrComputePhase;
    tr_phase_compute_vm$1 success:Bool msg_state_used:Bool
    account_activated:Bool gas_fees:Grams
    ^[ gas_used:(VarUInteger 7)
    gas_limit:(VarUInteger 7) gas_credit:(Maybe (VarUInteger 3))
    mode:int8 exit_code:int32 exit_arg:(Maybe int32)
    vm_steps:uint32
    vm_init_state_hash:bits256 vm_final_state_hash:bits256 ]
    = TrComputePhase;
    cskip_no_state$00 = ComputeSkipReason;
    cskip_bad_state$01 = ComputeSkipReason;
    cskip_no_gas$10 = ComputeSkipReason;
    """

    def __init__(self, cell_slice: Slice):
        # Leading bit discriminates the vm variant from the skipped variant.
        if cell_slice.read_next(1).any():
            self.type = "tr_phase_compute_vm"
            self.success = cell_slice.read_next(1).any()
            self.msg_state_used = cell_slice.read_next(1).any()
            self.account_activated = cell_slice.read_next(1).any()
            self.gas_fees = cell_slice.read_var_uint(16)
            # The ^[ ... ] group lives in a referenced cell.
            subcell_slice = cell_slice.read_next_ref()
            self.gas_used = subcell_slice.read_var_uint(7)
            self.gas_limit = subcell_slice.read_var_uint(7)
            self.gas_credit = (
                subcell_slice.read_var_uint(3)
                if subcell_slice.read_next(1).any()
                else None
            )
            self.mode = ba2int(subcell_slice.read_next(8), signed=True)
            self.exit_code = ba2int(subcell_slice.read_next(32), signed=True)
            self.exit_arg = (
                ba2int(subcell_slice.read_next(32), signed=True)
                if subcell_slice.read_next(1).any()
                else None
            )
            self.vm_steps = ba2int(subcell_slice.read_next(32), signed=False)
            self.vm_init_state_hash = ba2hex(subcell_slice.read_next(256))
            self.vm_final_state_hash = ba2hex(subcell_slice.read_next(256))
            # The referenced group must be fully consumed.
            assert subcell_slice.bits_left() == 0
        else:
            self.type = "tr_phase_compute_skipped"
            reason = cell_slice.read_next(2)
            if reason == bitarray("00"):
                self.reason = "cskip_no_state"
            elif reason == bitarray("01"):
                self.reason = "cskip_bad_state"
            elif reason == bitarray("10"):
                self.reason = "cskip_no_gas"
class StorageUsedShort:
    """Parsed TL-B ``StorageUsedShort``: cell/bit usage counters.

    TL-B schema::

        storage_used_short$_ cells:(VarUInteger 7)
          bits:(VarUInteger 7) = StorageUsedShort;
    """

    def __init__(self, cell_slice: Slice):
        # Two VarUInteger 7 values, read left-to-right in schema order.
        self.cells, self.bits = (
            cell_slice.read_var_uint(7),
            cell_slice.read_var_uint(7),
        )
class TrActionPhase:
    """
    tr_phase_action$_ success:Bool valid:Bool no_funds:Bool
      status_change:AccStatusChange
      total_fwd_fees:(Maybe Grams) total_action_fees:(Maybe Grams)
      result_code:int32 result_arg:(Maybe int32) tot_actions:uint16
      spec_actions:uint16 skipped_actions:uint16 msgs_created:uint16
      action_list_hash:bits256 tot_msg_size:StorageUsedShort
      = TrActionPhase;
    """

    def __init__(self, cell_slice: Slice):
        # Three leading boolean flags, one bit each.
        self.success = cell_slice.read_next(1).any()
        self.valid = cell_slice.read_next(1).any()
        self.no_funds = cell_slice.read_next(1).any()
        # AccStatusChange: "0" -> unchanged, "10" -> frozen, "11" -> deleted.
        account_status_change = cell_slice.read_next(1)
        if account_status_change == bitarray("0"):
            self.status_change = "acst_unchanged"
        else:
            account_status_change += cell_slice.read_next(1)
            if account_status_change == bitarray("10"):
                self.status_change = "acst_frozen"
            else:
                self.status_change = "acst_deleted"
        # Maybe Grams fields: the presence bit is read before the value
        # (conditional expressions evaluate the condition first).
        self.total_fwd_fees = (
            cell_slice.read_var_uint(16) if cell_slice.read_next(1).any() else None
        )
        self.total_action_fees = (
            cell_slice.read_var_uint(16) if cell_slice.read_next(1).any() else None
        )
        self.result_code = ba2int(cell_slice.read_next(32), signed=True)
        # Maybe int32.
        self.result_arg = (
            ba2int(cell_slice.read_next(32), signed=True)
            if cell_slice.read_next(1).any()
            else None
        )
        # Four uint16 action counters.
        self.tot_actions = ba2int(cell_slice.read_next(16), signed=False)
        self.spec_actions = ba2int(cell_slice.read_next(16), signed=False)
        self.skipped_actions = ba2int(cell_slice.read_next(16), signed=False)
        self.msgs_created = ba2int(cell_slice.read_next(16), signed=False)
        self.action_list_hash = ba2hex(cell_slice.read_next(256))
        self.tot_msg_size = StorageUsedShort(cell_slice)
class TrBouncePhase:
    """Parsed TL-B ``TrBouncePhase``: the outcome of bouncing a message.

    TL-B schema::

        tr_phase_bounce_negfunds$00 = TrBouncePhase;
        tr_phase_bounce_nofunds$01 msg_size:StorageUsedShort
          req_fwd_fees:Grams = TrBouncePhase;
        tr_phase_bounce_ok$1 msg_size:StorageUsedShort
          msg_fees:Grams fwd_fees:Grams = TrBouncePhase;
    """

    def __init__(self, cell_slice: Slice):
        first_bit = cell_slice.read_next(1)
        if first_bit == bitarray("1"):
            # $1: the bounce message was created successfully.
            self.type = "tr_phase_bounce_ok"
            self.msg_size = StorageUsedShort(cell_slice)
            self.msg_fees = cell_slice.read_var_uint(16)
            self.fwd_fees = cell_slice.read_var_uint(16)
        elif first_bit + cell_slice.read_next(1) == bitarray("00"):
            # $00: balance was negative; nothing else to parse.
            self.type = "tr_phase_bounce_negfunds"
        else:
            # $01: not enough funds to forward the bounced message.
            self.type = "tr_phase_bounce_nofunds"
            self.msg_size = StorageUsedShort(cell_slice)
            self.req_fwd_fees = cell_slice.read_var_uint(16)
class SplitMergeInfo:
    """Parsed TL-B ``SplitMergeInfo``: shard split/merge bookkeeping.

    TL-B schema::

        split_merge_info$_ cur_shard_pfx_len:(## 6)
          acc_split_depth:(## 6) this_addr:bits256 sibling_addr:bits256
          = SplitMergeInfo;
    """

    def __init__(self, cell_slice: Slice):
        read = cell_slice.read_next
        self.cur_shard_pfx_len = ba2int(read(6), signed=False)
        self.acc_split_depth = ba2int(read(6), signed=False)
        self.this_addr = ba2hex(read(256))
        self.sibling_addr = ba2hex(read(256))
class TransactionDescr:
    """
    trans_ord$0000 credit_first:Bool
      storage_ph:(Maybe TrStoragePhase)
      credit_ph:(Maybe TrCreditPhase)
      compute_ph:TrComputePhase action:(Maybe ^TrActionPhase)
      aborted:Bool bounce:(Maybe TrBouncePhase)
      destroyed:Bool
      = TransactionDescr;
    trans_storage$0001 storage_ph:TrStoragePhase
      = TransactionDescr;
    trans_tick_tock$001 is_tock:Bool storage_ph:TrStoragePhase
      compute_ph:TrComputePhase action:(Maybe ^TrActionPhase)
      aborted:Bool destroyed:Bool = TransactionDescr;
    trans_split_prepare$0100 split_info:SplitMergeInfo
      storage_ph:(Maybe TrStoragePhase)
      compute_ph:TrComputePhase action:(Maybe ^TrActionPhase)
      aborted:Bool destroyed:Bool
      = TransactionDescr;
    trans_split_install$0101 split_info:SplitMergeInfo
      prepare_transaction:^Transaction
      installed:Bool = TransactionDescr;
    trans_merge_prepare$0110 split_info:SplitMergeInfo
      storage_ph:TrStoragePhase aborted:Bool
      = TransactionDescr;
    trans_merge_install$0111 split_info:SplitMergeInfo
      prepare_transaction:^Transaction
      storage_ph:(Maybe TrStoragePhase)
      credit_ph:(Maybe TrCreditPhase)
      compute_ph:TrComputePhase action:(Maybe ^TrActionPhase)
      aborted:Bool destroyed:Bool
      = TransactionDescr;
    """

    def __init__(self, cell_slice: Slice):
        # All constructor tags are 4 bits except trans_tick_tock ($001),
        # whose 4th bit is the is_tock flag, so peel 3 bits first.
        prefix = cell_slice.read_next(3)
        if prefix == bitarray("001"):
            self._init_tick_tock(cell_slice)
        else:
            prefix += cell_slice.read_next(1)
            if prefix == bitarray("0000"):
                self._init_ord(cell_slice)
            elif prefix == bitarray("0001"):
                self._init_storage(cell_slice)
            elif prefix == bitarray("0100"):
                self._init_split_prepare(cell_slice)
            elif prefix == bitarray("0101"):
                self._init_split_install(cell_slice)
            elif prefix == bitarray("0110"):
                self._init_merge_prepare(cell_slice)
            elif prefix == bitarray("0111"):
                self._init_merge_install(cell_slice)
            else:
                # Previously unknown prefixes produced an empty object;
                # fail loudly instead.
                raise ValueError(
                    f"Unexpected TransactionDescr prefix: {prefix}"
                )

    def _init_ord(self, cell_slice: Slice):
        # trans_ord$0000: an ordinary transaction.
        self.type = "trans_ord"
        self.credit_first = cell_slice.read_next(1).any()
        self.storage_ph = (
            TrStoragePhase(cell_slice) if cell_slice.read_next(1).any() else None
        )
        self.credit_ph = (
            TrCreditPhase(cell_slice) if cell_slice.read_next(1).any() else None
        )
        self.compute_ph = TrComputePhase(cell_slice)
        # Maybe ^TrActionPhase: the phase lives in a referenced cell.
        self.action = (
            TrActionPhase(cell_slice.read_next_ref())
            if cell_slice.read_next(1).any()
            else None
        )
        self.aborted = cell_slice.read_next(1).any()
        self.bounce = (
            TrBouncePhase(cell_slice) if cell_slice.read_next(1).any() else None
        )
        self.destroyed = cell_slice.read_next(1).any()

    def _init_storage(self, cell_slice: Slice):
        # trans_storage$0001: storage-fee-only transaction.
        self.type = "trans_storage"
        self.storage_ph = TrStoragePhase(cell_slice)

    def _init_tick_tock(self, cell_slice: Slice):
        # trans_tick_tock$001: system tick/tock transaction; the bit after
        # the 3-bit tag distinguishes tock (1) from tick (0).
        self.type = "trans_tick_tock"
        self.is_tock = cell_slice.read_next(1).any()
        self.storage_ph = TrStoragePhase(cell_slice)
        self.compute_ph = TrComputePhase(cell_slice)
        self.action = (
            TrActionPhase(cell_slice.read_next_ref())
            if cell_slice.read_next(1).any()
            else None
        )
        self.aborted = cell_slice.read_next(1).any()
        self.destroyed = cell_slice.read_next(1).any()

    def _init_split_prepare(self, cell_slice: Slice):
        # trans_split_prepare$0100.
        self.type = "trans_split_prepare"
        self.split_info = SplitMergeInfo(cell_slice)
        self.storage_ph = (
            TrStoragePhase(cell_slice) if cell_slice.read_next(1).any() else None
        )
        self.compute_ph = TrComputePhase(cell_slice)
        self.action = (
            TrActionPhase(cell_slice.read_next_ref())
            if cell_slice.read_next(1).any()
            else None
        )
        self.aborted = cell_slice.read_next(1).any()
        self.destroyed = cell_slice.read_next(1).any()

    def _init_split_install(self, cell_slice: Slice):
        # trans_split_install$0101: previously missing even though it is
        # declared in the schema above, which left parsed objects empty.
        self.type = "trans_split_install"
        self.split_info = SplitMergeInfo(cell_slice)
        self.prepare_transaction = Transaction(cell_slice.read_next_ref())
        self.installed = cell_slice.read_next(1).any()

    def _init_merge_prepare(self, cell_slice: Slice):
        # trans_merge_prepare$0110.
        self.type = "trans_merge_prepare"
        self.split_info = SplitMergeInfo(cell_slice)
        self.storage_ph = TrStoragePhase(cell_slice)
        self.aborted = cell_slice.read_next(1).any()

    def _init_merge_install(self, cell_slice: Slice):
        # trans_merge_install$0111.
        self.type = "trans_merge_install"
        self.split_info = SplitMergeInfo(cell_slice)
        self.prepare_transaction = Transaction(cell_slice.read_next_ref())
        self.storage_ph = (
            TrStoragePhase(cell_slice) if cell_slice.read_next(1).any() else None
        )
        self.credit_ph = (
            TrCreditPhase(cell_slice) if cell_slice.read_next(1).any() else None
        )
        self.compute_ph = TrComputePhase(cell_slice)
        self.action = (
            TrActionPhase(cell_slice.read_next_ref())
            if cell_slice.read_next(1).any()
            else None
        )
        self.aborted = cell_slice.read_next(1).any()
        self.destroyed = cell_slice.read_next(1).any()
class AccountStatus:
    """Parsed TL-B ``AccountStatus``.

    TL-B schema::

        acc_state_uninit$00 = AccountStatus;
        acc_state_frozen$01 = AccountStatus;
        acc_state_active$10 = AccountStatus;
        acc_state_nonexist$11 = AccountStatus;
    """

    def __init__(self, cell_slice):
        # Two tag bits fully determine the account state.
        tag = cell_slice.read_next(2)
        for bits, name in (
            ("00", "acc_state_uninit"),
            ("01", "acc_state_frozen"),
            ("10", "acc_state_active"),
            ("11", "acc_state_nonexist"),
        ):
            if tag == bitarray(bits):
                self.type = name
                break
class Transaction:
    """
    transaction$0111 account_addr:bits256 lt:uint64
      prev_trans_hash:bits256 prev_trans_lt:uint64 now:uint32
      outmsg_cnt:uint15
      orig_status:AccountStatus end_status:AccountStatus
      ^[ in_msg:(Maybe ^(Message Any)) out_msgs:(HashmapE 15 ^(Message Any)) ]
      total_fees:CurrencyCollection state_update:^(HASH_UPDATE Account)
      description:^TransactionDescr = Transaction;
    """

    def __init__(self, cell_slice):
        # Constructor tag must be $0111.
        prefix = cell_slice.read_next(4)
        if prefix != bitarray("0111"):
            raise ValueError(f"Transaction must have prefix 0111 (but has {prefix})")
        self.account_addr = ba2hex(cell_slice.read_next(256))
        # Logical time of this transaction.
        self.lt = ba2int(cell_slice.read_next(64), signed=False)
        self.prev_trans_hash = ba2hex(cell_slice.read_next(256))
        self.prev_trans_lt = ba2int(cell_slice.read_next(64), signed=False)
        # Unix timestamp.
        self.now = ba2int(cell_slice.read_next(32), signed=False)
        # outmsg_cnt is a 15-bit field (uint15) per the schema above.
        self.outmsg_cnt = ba2int(cell_slice.read_next(15), signed=False)
        self.orig_status = AccountStatus(cell_slice)
        self.end_status = AccountStatus(cell_slice)
        # The in/out message group lives in a child cell; not parsed yet,
        # but the ref must be consumed to keep the slice aligned.
        messages = cell_slice.read_next_ref()  # TODO: parse messages
        self.total_fees = CurrencyCollection(cell_slice)
        # HASH_UPDATE child cell; consumed but not parsed.
        state_update = cell_slice.read_next_ref()  # TODO: parse state update
        description_cell_slice = cell_slice.read_next_ref()
        self.description = TransactionDescr(description_cell_slice)
        # The description cell must be fully consumed by its parser.
        description_cell_slice.raise_if_not_empty()
def parse_transaction(b64_tx_data: str) -> dict:
    """Decode a base64-encoded transaction BOC into a plain dict."""
    raw_boc = codecs.decode(codecs.encode(b64_tx_data, "utf-8"), "base64")
    root_slice = Slice(deserialize_boc(raw_boc))
    tx = Transaction(root_slice)
    # Every bit and ref of the root cell must have been consumed.
    root_slice.raise_if_not_empty()
    # Round-trip through JSON to turn the object graph into plain dicts.
    return json.loads(json.dumps(tx, default=lambda o: o.__dict__))
class MsgAddress:
    """Dispatcher that selects the concrete ``MsgAddress*`` parser."""

    def parse(cell_slice):
        # Peek (without consuming) at the two tag bits: 00/01 are the
        # external-address constructors, 10/11 the internal ones.
        tag = cell_slice.prefetch_next(2)
        if tag in (bitarray("00"), bitarray("01")):
            return MsgAddressExt(cell_slice)
        return MsgAddressInt(cell_slice)
class MsgAddressExt:
    """Parsed TL-B external message address.

    TL-B schema::

        addr_none$00 = MsgAddressExt;
        addr_extern$01 len:(## 9) external_address:(bits len)
          = MsgAddressExt;
    """

    def __init__(self, cell_slice):
        tag = cell_slice.read_next(2)
        if tag == bitarray("00"):
            self.type = "addr_none"
        elif tag == bitarray("01"):
            self.type = "addr_extern"
            # Consume the remainder of the slice wholesale.
            cell_slice.read_next(
                cell_slice.bits_left()
            )  # TODO: parse len and external_address
class MsgAddressInt:
    """
    anycast_info$_ depth:(#<= 30) { depth >= 1 }
      rewrite_pfx:(bits depth) = Anycast;
    addr_std$10 anycast:(Maybe Anycast)
      workchain_id:int8 address:bits256 = MsgAddressInt;
    addr_var$11 anycast:(Maybe Anycast) addr_len:(## 9)
      workchain_id:int32 address:(bits addr_len) = MsgAddressInt;
    """

    def __init__(self, cell_slice):
        # Constructor tag: $10 -> addr_std, $11 -> addr_var.
        prefix = cell_slice.read_next(2)
        if prefix == bitarray("10"):
            self.type = "addr_std"
        elif prefix == bitarray("11"):
            self.type = "addr_var"
        else:
            raise ValueError(
                f"MsgAddressInt must have prefix 10 or 11 (but has {prefix})"
            )
        # Maybe Anycast: only the absent case is handled.
        if cell_slice.read_next(1).any():
            raise NotImplementedError("Anycast not supported yet")
        if self.type == "addr_std":
            self.workchain_id = ba2int(cell_slice.read_next(8), signed=True)
            self.address = ba2hex(cell_slice.read_next(256))
        else:
            # addr_len is a 9-bit field per the schema (``addr_len:(## 9)``);
            # the previous code read only 6 bits, mis-parsing addr_var.
            addr_len = ba2int(cell_slice.read_next(9), signed=False)
            self.workchain_id = ba2int(cell_slice.read_next(32), signed=True)
            self.address = ba2hex(cell_slice.read_next(addr_len))
class TokenData:
    """Token (jetton/NFT) content parsed per the TEP-64 content layout."""

    # Known metadata attribute names; on-chain dictionaries key entries by
    # the sha256 hash of the attribute name.
    attributes = [
        "uri",
        "name",
        "description",
        "image",
        "image_data",
        "symbol",
        "decimals",
    ]
    # Reverse lookup built at class-definition time:
    # sha256 hex digest -> attribute name.
    attributes_hashes = {}
    for attr in attributes:
        attributes_hashes[sha256(attr.encode("utf-8")).hexdigest()] = attr

    def __init__(self, cell_slice):
        # The first byte tags the layout: 0x00 on-chain, 0x01 off-chain.
        prefix = cell_slice.read_next(8)
        if prefix == bitarray("00000000"):
            self.type = "onchain"
            # HashmapE presence bit: a non-empty dict follows in a ref.
            if cell_slice.read_next(1).any():
                child_slice = cell_slice.read_next_ref()
                # Re-wrap the slice contents as a Cell so parse_hashmap
                # can walk it.
                hashmap_cell = Cell()
                hashmap_cell.data.data = child_slice._data
                hashmap_cell.refs = child_slice._refs
                hashmap = {}
                parse_hashmap(hashmap_cell, 256, hashmap, bitarray())
                self.data = self._parse_attributes(hashmap)
            else:
                self.data = {}
        elif prefix == bitarray("00000001"):
            self.type = "offchain"
            # Off-chain content is an ASCII URI stored in "snake" format:
            # data chained through successive single refs.
            data = cell_slice.read_next(cell_slice.bits_left())
            while cell_slice.refs_left() > 0:
                cell_slice = cell_slice.read_next_ref()
                data += cell_slice.read_next(cell_slice.bits_left())
            self.data = data.tobytes().decode("ascii")
        else:
            raise ValueError("Unexpected content prefix")

    def _parse_attributes(self, hashmap: dict):
        # Map hashed keys back to attribute names where known; unknown
        # hashes keep their hex digest as the key.
        res = {}
        for attr_hash_bitstr, value_cell in hashmap.items():
            attr_hash_hex = ba2hex(bitarray(attr_hash_bitstr))
            attr_name = TokenData.attributes_hashes.get(attr_hash_hex)
            if attr_name is None:
                attr_name = attr_hash_hex
            res[attr_name] = self._parse_content_data(value_cell)
        return res

    def _parse_content_data(self, cell: Cell, encoding="utf-8"):
        # A content value is stored in a single referenced cell whose own
        # data portion must be empty.
        assert cell.data.data == bitarray()
        cell_slice = Slice(cell.refs[0])
        prefix = cell_slice.read_next(8)
        if prefix == bitarray("00000000"):
            # snake format: data chained through successive refs
            data = cell_slice.read_next(cell_slice.bits_left())
            while cell_slice.refs_left() > 0:
                cell_slice = cell_slice.read_next_ref()
                data += cell_slice.read_next(cell_slice.bits_left())
            return data.tobytes().decode(encoding)
        elif prefix == bitarray("00000001"):
            # chunked format: a hashmap of uint32-indexed data chunks
            data = bitarray()
            if cell_slice.read_next(1).any():
                child_slice = cell_slice.read_next_ref()
                hashmap_cell = Cell()
                hashmap_cell.data.data = child_slice._data
                hashmap_cell.refs = child_slice._refs
                hashmap = {}
                parse_hashmap(hashmap_cell, 32, hashmap, bitarray())
                # Reassemble chunks in ascending index order.
                for ind in range(len(hashmap)):
                    ind_bitstr = f"{ind:032b}"
                    chunk_cell = hashmap[ind_bitstr]
                    assert chunk_cell.data.data == bitarray()
                    assert len(chunk_cell.refs) == 1
                    data += chunk_cell.refs[0].data.data
        else:
            raise ValueError(f"Unexpected content data prefix: {prefix}")
        return data.tobytes().decode(encoding)
def parse_tlb_object(b64_boc: str, tlb_type):
    """Deserialize a base64 BOC and parse it as the given TL-B type."""
    raw_boc = codecs.decode(codecs.encode(b64_boc, "utf-8"), "base64")
    root_slice = Slice(deserialize_boc(raw_boc))
    # Types exposing a callable ``parse`` attribute (e.g. MsgAddress)
    # dispatch to the right concrete parser; everything else is
    # constructed directly.
    parse_cons = getattr(tlb_type, "parse", None)
    if callable(parse_cons):
        parsed = parse_cons(root_slice)
    else:
        parsed = tlb_type(root_slice)
    root_slice.raise_if_not_empty()
    # Round-trip through JSON to turn the object graph into plain dicts.
    return json.loads(json.dumps(parsed, default=lambda o: o.__dict__))
from rift_tonlib.utils.address import detect_address
from rift_tonlib.utils.tlb import MsgAddress, MsgAddressInt, TokenData, parse_tlb_object
def read_stack_num(entry: list):
    """Decode a TVM stack entry of kind ``num`` (hex string) to an int."""
    kind = entry[0]
    assert kind == "num"
    return int(entry[1], 16)
def read_stack_cell(entry: list):
    """Return the base64 ``bytes`` payload of a ``cell`` stack entry."""
    kind = entry[0]
    assert kind == "cell"
    cell_dict = entry[1]
    return cell_dict["bytes"]
def parse_jetton_master_data(stack: list):
    """Decode the result stack of ``get_jetton_data`` on a jetton master."""
    admin_raw = parse_tlb_object(read_stack_cell(stack[2]), MsgAddressInt)
    admin_address = detect_address(
        f"{admin_raw['workchain_id']}:{admin_raw['address']}"
    )["bounceable"]["b64url"]
    return {
        "total_supply": read_stack_num(stack[0]),
        "mintable": bool(read_stack_num(stack[1])),
        "admin_address": admin_address,
        "jetton_content": parse_tlb_object(read_stack_cell(stack[3]), TokenData),
        "jetton_wallet_code": read_stack_cell(stack[4]),
    }
def parse_jetton_wallet_data(stack: list):
    """Decode the result stack of ``get_wallet_data`` on a jetton wallet."""
    def to_friendly(index):
        # Parse a stack cell as MsgAddressInt and render it in the
        # bounceable base64url form.
        addr = parse_tlb_object(read_stack_cell(stack[index]), MsgAddressInt)
        return detect_address(
            f"{addr['workchain_id']}:{addr['address']}"
        )["bounceable"]["b64url"]

    return {
        "balance": read_stack_num(stack[0]),
        "owner": to_friendly(1),
        "jetton": to_friendly(2),
        "jetton_wallet_code": read_stack_cell(stack[3]),
    }
def parse_nft_collection_data(stack: list):
    """Decode the result stack of ``get_collection_data`` on a collection."""
    owner = parse_tlb_object(read_stack_cell(stack[2]), MsgAddress)
    if owner["type"] == "addr_std":
        owner_friendly = detect_address(
            f"{owner['workchain_id']}:{owner['address']}"
        )["bounceable"]["b64url"]
    elif owner["type"] == "addr_none":
        # A collection may have no owner.
        owner_friendly = None
    else:
        raise NotImplementedError("Owner address not supported")
    return {
        "next_item_index": read_stack_num(stack[0]),
        "collection_content": parse_tlb_object(read_stack_cell(stack[1]), TokenData),
        "owner_address": owner_friendly,
    }
def _msg_address_to_friendly(msg_address: dict, what: str):
    """Render a parsed MsgAddress dict in bounceable base64url form.

    Returns None for ``addr_none``; raises NotImplementedError for address
    kinds this code does not handle. ``what`` names the field in the error
    message.
    """
    if msg_address["type"] == "addr_std":
        return detect_address(
            f"{msg_address['workchain_id']}:{msg_address['address']}"
        )["bounceable"]["b64url"]
    if msg_address["type"] == "addr_none":
        return None
    raise NotImplementedError(f"{what} not supported")


def parse_nft_item_data(stack: list):
    """Decode the result stack of ``get_nft_data`` on an NFT item."""
    init = bool(read_stack_num(stack[0]))
    index = read_stack_num(stack[1])
    collection_address = parse_tlb_object(read_stack_cell(stack[2]), MsgAddress)
    collection_address_friendly = _msg_address_to_friendly(
        collection_address, "Collection address")
    owner_address = parse_tlb_object(read_stack_cell(stack[3]), MsgAddress)
    owner_address_friendly = _msg_address_to_friendly(
        owner_address, "Owner address")
    # Stand-alone items (no collection) carry full TokenData content;
    # items in a collection return raw individual content for the
    # collection contract to combine.
    if collection_address["type"] == "addr_none":
        individual_content = parse_tlb_object(read_stack_cell(stack[4]), TokenData)
    else:
        individual_content = read_stack_cell(stack[4])
    return {
        "init": init,
        "index": index,
        "owner_address": owner_address_friendly,
        "collection_address": collection_address_friendly,
        "individual_content": individual_content,
    }
def parse_nft_content(stack: list):
    """Decode the result stack of ``get_nft_content`` into TokenData."""
    content_cell = read_stack_cell(stack[0])
    return parse_tlb_object(content_cell, TokenData)
from six import iteritems
class Style(object):
    """Defines the style of a polygon to be drawn.

    The Style object defines a set of Cairo drawing parameters (see
    :py:meth:`.FIELDS`) for the drawing of certain elements in Rig P&R Diagram
    diagrams. For example, :py:class:`.Style`s are used to define how chips,
    links cores and nets are drawn. Exceptions can be added to the style to
    allow individual instances to have their style options altered.

    Style options can be set using the constructor or via the :py:meth:`.set`
    method like so::

        >>> # Define a style with a red 50% transparent fill and black stroke
        >>> # 0.1 units wide.
        >>> s = Style(fill=(1.0, 0.0, 0.0, 0.5))
        >>> s.set("stroke", (0.0, 0.0, 0.0, 1.0))
        >>> s.set("line_width", 0.1)

    The value of these style options can be read back using the
    :py:meth:`.get` method::

        >>> s.get("fill")
        (1.0, 0.0, 0.0, 0.5)

    Exceptions can also be defined. For example when defining the drawing style
    of a chip, exceptions are made on a chip by chip basis. Here we can cause
    chip (2, 4) to be drawn with a thicker outline::

        >>> s.set((2, 4), "line_width", 0.3)

    When fetching styles, possible exceptions are provided to the
    :py:meth:`.get` method and if a matching exception exists its value is
    returned, otherwise the default value is produced. For example:

        >>> s.get((2, 4), "line_width")
        0.3
        >>> s.get((2, 4), "stroke")
        (0.0, 0.0, 0.0, 1.0)
        >>> s.get((0, 0), "line_width")
        0.1

    The :py:class:`.Style` object also acts as a context manager which on entry
    will push the current Cairo state onto the stack and on exit stroke and
    fill any paths according to the Style's definition. For example::

        # Draws a triangle with whatever style and colour of fill and stroke
        # the Style defines.
        >>> with s(ctx):
        ...     ctx.move_to(1.0, 1.0)
        ...     ctx.line_to(2.0, 2.0)
        ...     ctx.line_to(2.0, 1.0)
        ...     ctx.close_path()

    See the :py:class:`.__call__` special method for more details.
    """

    """The set of style options which can be controlled.

    * ``fill``: None or (r, g, b, a). If not None, defines the colour fill
      which should be applied.
    * ``stroke``: None or (r, g, b, a). If not None, defines the colour of the
      stroke to draw around the polygon. Should be used in combination with
      ``line_width``. The stroke will be applied after the fill.
    * ``line_width`` (None or float). The width of the stroke to use (or no
      stroke if 0).
    * ``dash`` (None or list). If not None, specifies the dash pattern to use.
    * ``line_cap`` (None or cairo.LINE_CAP_*). If not None, the style of line
      cap to use when stroking lines.
    * ``line_join`` (None or cairo.LINE_JOIN_*). If not None, the style of
      line join to use when stroking lines.
    """
    FIELDS = ["fill", "stroke", "line_width", "dash", "line_cap", "line_join"]

    def __init__(self, *args, **kwargs):
        """Define a new style.

        Initial default values can be set by positional arguments in the same
        order as :py:meth:`.FIELDS` or via named keyword arguments. Unless
        given, all fields will default to None.
        """
        # A lookup from field to default value
        self._defaults = {f: None for f in self.FIELDS}

        # A lookup from exception to value
        self._exceptions = {}

        if len(args) > len(self.FIELDS):
            raise ValueError("More options specified than exist.")

        # Set positional style values
        for arg_num, value in enumerate(args):
            field = self.FIELDS[arg_num]
            self._defaults[field] = value

        # Set named style values (rejecting duplicates of positional ones)
        for field, value in iteritems(kwargs):
            if field not in self._defaults:
                raise ValueError("Unknown style field {}".format(repr(field)))
            elif self._defaults[field] is not None:
                raise ValueError(
                    "Field {} already set by positional argument.".format(
                        repr(field)))
            else:
                self._defaults[field] = value

    def copy(self):
        """Create a (deep enough) copy of this style.

        Defaults and per-exception dictionaries are copied so that mutating
        the copy does not affect the original.
        """
        s = type(self)()
        s._defaults = self._defaults.copy()
        s._exceptions = {e: v.copy() for e, v in iteritems(self._exceptions)}
        return s

    def set(self, *args):
        """Set the value of a particular style parameter.

        Usage examples::

            >>> # Set the default line_width to 0.1
            >>> s.set("line_width", 0.1)

            >>> # Set an exception for the line_width of (3, 2)
            >>> s.set((3, 2), "line_width", 0.3)
        """
        # Dispatch on argument count: (field, value) sets a default,
        # (exception, field, value) sets an exception entry.
        if len(args) == 2:
            field, value = args
            self._defaults[field] = value
        elif len(args) == 3:
            exception, field, value = args
            self._exceptions.setdefault(exception, {})[field] = value
        else:
            raise ValueError("set expects 3 or 4 arguments")

    def get(self, *args):
        """Get the value of a particular style parameter.

        Usage::

            >>> # Get the default line_width
            >>> line_width = s.get("line_width")

            >>> # Get the line width for (3, 2), returning the default value
            >>> # if no exception to the style exists.
            >>> line_width = s.get((3, 2), "line_width")
        """
        # Dispatch on argument count: (field,) reads a default,
        # (exception, field) reads an exception entry with default fallback.
        if len(args) == 1:
            return self._defaults[args[0]]
        elif len(args) == 2:
            exception, field = args
            return self._exceptions.get(exception, {}).get(
                field, self._defaults[field])
        else:
            raise ValueError("get expects 2 or 3 arguments")

    def __contains__(self, exception):
        """Test whether the style has any exceptions for a given object."""
        return exception in self._exceptions

    def __call__(self, ctx, *exception, **kwargs):
        """Create a context manager object which applies this Style to any
        Cairo paths drawn within the context.

        A basic example which draws a triangle using the style specified by
        ``s``::

            >>> with s(ctx):
            ...     ctx.move_to(1.0, 1.0)
            ...     ctx.line_to(2.0, 2.0)
            ...     ctx.line_to(2.0, 1.0)
            ...     ctx.close_path()

        In this example, the triangle is drawn with the style exception
        (3, 2). Additionally, a new object ``s_`` is defined which has a get
        function which behaves like a :py:class:`.Style`'s getter except it
        gets the style for the supplied style exception.

            >>> with s(ctx, (3, 2)) as s_:
            ...     ctx.move_to(1.0, 1.0)
            ...     ctx.line_to(2.0 + s_.get("line_width"),
            ...                 2.0 + s_.get("line_width"))
            ...     ctx.line_to(2.0 + s_.get("line_width"), 1.0)
            ...     ctx.close_path()

        Note: if the code within the block raises an exception, the Cairo
        state will be restored but the path will not be filled/stroked.

        Parameters
        ----------
        ctx : Cairo context
            A cairo context into which all paths will be drawn. At the start
            of the context the Cairo state is saved. At the end of the context
            it is restored.
        exception : object
            An optional object which specifies what styling exception should
            be used. If not specified, the default is used.
        no_fill_stroke : bool
            By default when the context is exited, the current Cairo path is
            filled and stroked as specified. If this named argument is given
            as True, the Cairo path is not stroked. This is useful when the
            fill/stroke operations required are non-trivial (e.g. when
            gradients are in use) but where having a getter with a particular
            exception predefined is convenient.
        """
        if len(exception) > 1:
            raise ValueError("expected 2 or 3 arguments")
        return self.ContextMgr(self, ctx, *exception,
                               no_fill_stroke=
                               kwargs.get("no_fill_stroke", False))

    class ContextMgr(object):
        """The context manager returned by calling a PolygonStyle instance."""

        def __init__(self, style, ctx, *exception, **kwargs):
            self.style = style
            self.ctx = ctx
            # Stored as a list so it can be prepended to get() arguments.
            self.exception = list(exception)
            self.no_fill_stroke = kwargs.get("no_fill_stroke", False)

        def __enter__(self):
            # Save the Cairo state; restored unconditionally in __exit__.
            self.ctx.save()
            return self

        def __exit__(self, exc_type, value, traceback):
            try:
                if value is None:
                    # Nothing went wrong in the with block! Proceed with
                    # drawing the polygon.
                    line_width = self.style.get(*self.exception + ["line_width"])
                    if line_width is not None:
                        self.ctx.set_line_width(line_width)

                    dash = self.style.get(*self.exception + ["dash"])
                    if dash is not None:
                        self.ctx.set_dash(dash)

                    line_cap = self.style.get(*self.exception + ["line_cap"])
                    if line_cap is not None:
                        self.ctx.set_line_cap(line_cap)

                    line_join = self.style.get(*self.exception + ["line_join"])
                    if line_join is not None:
                        self.ctx.set_line_join(line_join)

                    fill = self.style.get(*self.exception + ["fill"])
                    stroke = self.style.get(*self.exception + ["stroke"])

                    # Fill first (preserving the path), then stroke on top.
                    if not self.no_fill_stroke:
                        if fill and stroke:
                            self.ctx.set_source_rgba(*fill)
                            self.ctx.fill_preserve()
                            self.ctx.set_source_rgba(*stroke)
                            self.ctx.stroke()
                        elif fill:
                            self.ctx.set_source_rgba(*fill)
                            self.ctx.fill()
                        elif stroke:
                            self.ctx.set_source_rgba(*stroke)
                            self.ctx.stroke()
            finally:
                # Always restore the Cairo state, even if drawing failed.
                self.ctx.restore()

        def get(self, *args):
            # Getter with this context's exception pre-applied.
            return self.style.get(*self.exception + list(args))
import argparse
import pickle
import sys
import time
from importlib import import_module
import logging
import cairocffi as cairo
from rig.machine import Machine, Links, Cores
from rig.place_and_route.constraints import ReserveResourceConstraint
from rig_par_diagram import \
Diagram, \
default_chip_style, \
default_link_style, \
default_core_style, \
default_net_style
logger = logging.getLogger(__name__)
def read_netlist(filename):
    """Load a pickled netlist dictionary from *filename*.

    Returns the netlist dict on success; writes an error message to stderr
    and exits with status 1 on failure.
    """
    # Read the netlist. A context manager ensures the file handle is closed
    # even when unpickling fails (the previous code leaked the handle).
    try:
        with open(filename, "rb") as f:
            netlist = pickle.load(f)
    except IOError:
        sys.stderr.write("Netlist file not found\n")
        sys.exit(1)
    except (pickle.PickleError, AttributeError, EOFError, IndexError):
        sys.stderr.write("Netlist could not be unpickled\n")
        sys.exit(1)

    # Check the netlist contains the bare minimum of information
    if not isinstance(netlist, dict):
        sys.stderr.write(
            "Netlist must be defined in a dictionary\n")
        sys.exit(1)

    logger.info("Loaded netlist with fields: {}".format(
        ", ".join(netlist)))

    return netlist
def get_machine(spec=None, core_resource=Cores):
    """Get a rig Machine object based on the supplied specification.

    ``spec`` may be None (defaults to a SpiNN-5 board), "spinn3", "spinn5"
    or a string of the form "NxM". ``core_resource`` optionally substitutes
    the key used for the Cores resource in the machine's chip resources.
    Writes an error and exits with status 1 on a malformed specification.
    """
    if spec is None:
        # Default to a SpiNN-5 board
        return get_machine("spinn5", core_resource)
    elif spec == "spinn3":
        # SpiNN-3: a 2x2 board with no wrap-around links.
        machine = Machine(2, 2)
        if core_resource is not Cores:
            # Rename the Cores resource under the caller-supplied key.
            machine.chip_resources[core_resource] = machine.chip_resources[Cores]
            del machine.chip_resources[Cores]
        # Kill the links which would wrap around the 2x2 board.
        machine.dead_links.add((0, 0, Links.south_west))
        machine.dead_links.add((1, 1, Links.north_east))
        machine.dead_links.add((0, 1, Links.south_west))
        machine.dead_links.add((1, 0, Links.north_east))
        machine.dead_links.add((0, 0, Links.west))
        machine.dead_links.add((1, 0, Links.east))
        machine.dead_links.add((0, 1, Links.west))
        machine.dead_links.add((1, 1, Links.east))
        return machine
    elif spec == "spinn5":
        machine = Machine(8, 8)
        if core_resource is not Cores:
            # Rename the Cores resource under the caller-supplied key.
            machine.chip_resources[core_resource] = machine.chip_resources[Cores]
            del machine.chip_resources[Cores]
        # Kill all chips outside the board
        nominal_live_chips = set([  # noqa
            (4, 7), (5, 7), (6, 7), (7, 7),
            (3, 6), (4, 6), (5, 6), (6, 6), (7, 6),
            (2, 5), (3, 5), (4, 5), (5, 5), (6, 5), (7, 5),
            (1, 4), (2, 4), (3, 4), (4, 4), (5, 4), (6, 4), (7, 4),
            (0, 3), (1, 3), (2, 3), (3, 3), (4, 3), (5, 3), (6, 3), (7, 3),
            (0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (5, 2), (6, 2),
            (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1),
            (0, 0), (1, 0), (2, 0), (3, 0), (4, 0),
        ])
        machine.dead_chips = set((x, y)
                                 for x in range(8)
                                 for y in range(8)) - nominal_live_chips
        # Kill all wrap-around links which remain.
        for x in range(machine.width):
            machine.dead_links.add((x, 0, Links.south))
            machine.dead_links.add((x, 0, Links.south_west))
            machine.dead_links.add((x, machine.height - 1, Links.north))
            machine.dead_links.add((x, machine.height - 1, Links.north_east))
        for y in range(machine.height):
            machine.dead_links.add((0, y, Links.west))
            machine.dead_links.add((0, y, Links.south_west))
            machine.dead_links.add((machine.width - 1, y, Links.east))
            machine.dead_links.add((machine.width - 1, y, Links.north_east))
        return machine
    else:
        # Specification of the form "XxY"
        if "x" not in spec:
            sys.stderr.write(
                "Machine must be of the form NxM or spinn3 or spinn5.")
            sys.exit(1)
        x, _, y = spec.partition("x")
        x = int(x)
        y = int(y)
        machine = Machine(x, y)
        if core_resource is not Cores:
            # Rename the Cores resource under the caller-supplied key.
            machine.chip_resources[core_resource] = machine.chip_resources[Cores]
            del machine.chip_resources[Cores]
        return machine
def _load_par_algorithm(module_family, function_name, label, algorithm):
    """Resolve a rig place-and-route callable by name.

    ``module_family`` is the rig submodule family ("place", "allocate" or
    "route"), ``function_name`` the attribute to fetch, ``label`` the
    human-readable name used in error messages and ``algorithm`` the
    user-supplied algorithm name ("default" selects rig's default
    implementation). Writes an error and exits with status 1 when the
    algorithm cannot be imported.
    """
    if algorithm == "default":
        module = "rig.place_and_route"
    else:
        module = "rig.place_and_route.{}.{}".format(module_family, algorithm)
    try:
        return getattr(import_module(module), function_name)
    except (ImportError, AttributeError):
        sys.stderr.write(
            "{} algorithm {} does not exist\n".format(label, algorithm))
        sys.exit(1)


def place(vertices_resources, nets, machine, constraints, algorithm="default"):
    """Place the specified netlist, logging the time taken."""
    placer = _load_par_algorithm("place", "place", "Placement", algorithm)

    logger.info("Placing netlist using '{}'...".format(algorithm))
    before = time.time()
    placements = placer(vertices_resources, nets, machine, constraints)
    after = time.time()
    logger.info("Placed netlist in {:.2f}s".format(after - before))

    return placements


def allocate(vertices_resources, nets, machine, constraints,
             placements, algorithm="default"):
    """Allocate resources for the specified netlist, logging the time taken."""
    allocator = _load_par_algorithm(
        "allocate", "allocate", "Allocation", algorithm)

    logger.info("Allocating netlist using '{}'...".format(algorithm))
    before = time.time()
    allocations = allocator(vertices_resources, nets, machine, constraints,
                            placements)
    after = time.time()
    logger.info("Allocated netlist in {:.2f}s".format(after - before))

    return allocations


def route(vertices_resources, nets, machine, constraints,
          placements, allocations, algorithm, core_resource):
    """Route all nets in the specified netlist, logging the time taken."""
    router = _load_par_algorithm("route", "route", "Routing", algorithm)

    logger.info("Routing netlist using '{}'...".format(algorithm))
    before = time.time()
    routes = router(vertices_resources, nets, machine, constraints,
                    placements, allocations, core_resource)
    after = time.time()
    logger.info("Routed netlist in {:.2f}s".format(after - before))

    return routes
def main(argv=sys.argv):
    """Command-line entry point.

    Parse arguments, load (or synthesise) a netlist, run any requested
    place/allocate/route steps and render the diagram to a PNG file.

    :returns: 0 on success (argument/algorithm errors exit with status 1).
    """
    parser = argparse.ArgumentParser(
        description="Generate a placement and routing diagram for a "
                    "Rig netlist.")
    parser.add_argument("netlist", metavar="NETLIST",
                        help="The filename of the netlist to present (a "
                             "pickled dictionary), or - to just generate "
                             "an empty machine diagram.")
    parser.add_argument("output", metavar="OUTPUT",
                        help="The output PNG filename.")
    parser.add_argument("width", metavar="WIDTH", nargs="?", default=1000,
                        type=int,
                        help="The width of the output image in pixels.")
    parser.add_argument("height", metavar="HEIGHT", nargs="?",
                        type=int,
                        help="The height of the output image in pixels.")
    parser.add_argument("--machine", "-m", metavar="MACHINE",
                        help="A SpiNNaker machine to place/route the "
                             "netlist into e.g. 48x24 or spinn5.")
    parser.add_argument("--no-reserve-monitor", "-M", action="store_true",
                        help="If no constraints are supplied in the netlist, "
                             "do not automatically reserve core 0 (the "
                             "default).")
    parser.add_argument("--place", "-p", metavar="ALGORITHM", nargs="?",
                        const="default",
                        help="Place the netlist using a Rig placement "
                             "algorithm.")
    parser.add_argument("--allocate", "-a", metavar="ALGORITHM", nargs="?",
                        const="default",
                        help="Allocate the netlist using a Rig placement "
                             "algorithm.")
    parser.add_argument("--route", "-r", metavar="ALGORITHM", nargs="?",
                        const="default",
                        help="Route the netlist using a Rig routing "
                             "algorithm.")
    parser.add_argument("--ratsnest", "-R", action="store_true",
                        help="Shows nets as a ratsnest (not the actual routes "
                             "used).")
    parser.add_argument("--no-colour-constraints", "-C", action="store_true",
                        help="Do not automatically colour cores reserved by "
                             "ReserveResourceConstraints.")
    parser.add_argument("--transparent", "-t", action="store_true",
                        help="Generate a transparent PNG.")
    parser.add_argument("--verbose", "-v", action="count", default=0,
                        help="Show verbose information.")

    args = parser.parse_args(argv[1:])

    # Configure the module-level logger used by place/allocate/route.
    global logger
    logger = logging.getLogger(argv[0])
    if args.verbose >= 2:
        logging.basicConfig(level=logging.DEBUG)
    elif args.verbose >= 1:
        logging.basicConfig(level=logging.INFO)

    # Parse the netlist (if provided)
    if args.netlist is not None and args.netlist != "-":
        netlist = read_netlist(args.netlist)
    else:
        netlist = {}

    # Work out what resource type cores are represented by
    core_resource = netlist.get("core_resource", Cores)
    vertices_resources = netlist.get("vertices_resources", {})
    nets = netlist.get("nets", [])

    # Get the machine from the netlist if possible, otherwise get it from the
    # command-line (defaulting to a SpiNN-5 board).
    machine = netlist.get("machine", None)
    machine_overridden = False
    if machine is None or args.machine:
        machine = get_machine(args.machine, core_resource)
        machine_overridden = True

    # If no constraints are supplied, reserve the monitor processor
    if "constraints" in netlist:
        constraints = netlist["constraints"]
    elif not args.no_reserve_monitor:
        constraints = [
            ReserveResourceConstraint(core_resource, slice(0, 1)),
        ]
    else:
        constraints = []

    # Get the placement solution if provided, otherwise place using the
    # algorithm specified on the command line.  A new machine invalidates
    # any stored placements.
    placements = netlist.get("placements", None)
    placements_overridden = False
    if placements is None or args.place or machine_overridden:
        placements = place(vertices_resources, nets, machine, constraints,
                           args.place or "default")
        placements_overridden = True

    # Get the allocation solution if provided, otherwise allocate using the
    # algorithm specified on the command line.
    allocations = netlist.get("allocations", None)
    allocations_overridden = False
    if allocations is None or args.allocate or placements_overridden:
        allocations = allocate(vertices_resources, nets, machine, constraints,
                               placements, args.allocate or "default")
        allocations_overridden = True

    # Get the routing solution if provided. If a routing algorithm is
    # specified, use that.
    routes = netlist.get("routes", None)
    if (routes is None or args.route or allocations_overridden) \
            and not args.ratsnest:
        routes = route(vertices_resources, nets, machine, constraints,
                       placements, allocations, args.route or "default",
                       core_resource)

    # Delete the routes if a ratsnest is required
    if args.ratsnest:
        routes = {}

    # Load colour schemes
    chip_style = netlist.get("chip_style", default_chip_style.copy())
    link_style = netlist.get("link_style", default_link_style.copy())
    core_style = netlist.get("core_style", default_core_style.copy())
    net_style = netlist.get("net_style", default_net_style.copy())

    # Automatically make resource constraint cores grey and translucent.
    if not args.no_colour_constraints:
        for constraint in constraints:
            if constraint not in core_style:
                core_style.set(constraint, "fill",
                               (0.0, 0.0, 0.0, 0.3))
                core_style.set(constraint, "stroke", None)

    # Set up the diagram
    d = Diagram(machine=machine, vertices_resources=vertices_resources,
                nets=nets, constraints=constraints, placements=placements,
                allocations=allocations, routes=routes,
                core_resource=core_resource,
                chip_style=chip_style, link_style=link_style,
                core_style=core_style, net_style=net_style)

    # Work out the aspect ratio to allow automatic calculation of image
    # dimensions
    if args.height is None:
        x1, y1, x2, y2 = d.bbox
        w = x2 - x1
        h = y2 - y1
        ratio = h / w
        if ratio < 1.0:
            args.height = int(args.width * ratio)
        else:
            # Taller than wide: cap the height at the requested width and
            # shrink the width to preserve the aspect ratio.
            args.height, args.width = args.width, int(args.width / ratio)

    # Generate the image itself.  Use the configured module logger rather
    # than the root logger so -v verbosity behaves consistently.
    logger.info("Generating {}x{} diagram...".format(
        args.width, args.height))
    before = time.time()

    if args.transparent:
        mode = cairo.FORMAT_ARGB32
    else:
        mode = cairo.FORMAT_RGB24
    surface = cairo.ImageSurface(mode, args.width, args.height)
    ctx = cairo.Context(surface)

    # Draw opaque diagrams with a white background.
    if not args.transparent:
        with ctx:
            ctx.rectangle(0, 0, args.width, args.height)
            ctx.set_source_rgba(1.0, 1.0, 1.0, 1.0)
            ctx.fill()

    d.draw(ctx, args.width, args.height)
    surface.write_to_png(args.output)

    after = time.time()
    logger.info("Generated diagram in {:.2f}s".format(after - before))

    return 0
# Script entry point.
if __name__ == "__main__":  # pragma: no cover
    sys.exit(main(sys.argv))
from Queue import Queue, Empty, Full
import logging
from rig_remote.constants import QUEUE_MAX_SIZE
# logging configuration
logger = logging.getLogger(__name__)
class QueueComms(object):
    """Thread communication helper built on a pair of bounded queues.

    ``parent_queue`` carries items destined for the parent thread and
    ``child_queue`` carries items destined for the child thread.  The
    queues are bounded (QUEUE_MAX_SIZE) because an ever-growing backlog
    would indicate that one side has stopped consuming.
    """

    def __init__(self):
        """Create the two bounded queues used for parent/child messaging."""
        self.parent_queue = Queue(maxsize=QUEUE_MAX_SIZE)
        self.child_queue = Queue(maxsize=QUEUE_MAX_SIZE)

    def queued_for_child(self):
        """wrapper on self._queued_for()

        :returns: True if an item is waiting for the child
        """
        return self._queued_for(self.child_queue)

    def queued_for_parent(self):
        """wrapper on self._queued_for

        :returns: True if an item is waiting for the parent
        """
        return self._queued_for(self.parent_queue)

    def _queued_for(self, queue_name):
        """Check if an item is waiting on a queue.

        :param queue_name: queue to check
        :type queue_name: Queue() object
        :returns: True if an item is waiting
        """
        return not queue_name.empty()

    def _get_from_queue(self, queue):
        """Retrieve an item from a queue without blocking.

        Wrapped by get_from_child and get_from_parent.

        :param queue: queue to get from
        :type queue: Queue() object
        :returns: item, or None if the queue is empty
        """
        try:
            return queue.get(False)
        except Empty:
            # Use the module logger; the original called logging.info on the
            # root logger, bypassing this module's configuration.
            logger.info("Queue empty while getting from {}".format(queue))

    def get_from_parent(self):
        """wrapper on _get_from_queue"""
        return self._get_from_queue(self.parent_queue)

    def get_from_child(self):
        """wrapper on _get_from_queue"""
        return self._get_from_queue(self.child_queue)

    def _send_to_queue(self, queue, item):
        """Place an item on a queue without blocking.

        Wrapped by send_to_child and send_to_parent.

        :param queue: queue to put the item on
        :type queue: Queue() object
        :raises Full: if the queue is already at capacity
        :returns: None
        """
        try:
            queue.put(item, False)
        except Full:
            logger.warning("Queue {} is full.".format(queue))
            raise

    def send_to_parent(self, item):
        """Wrapper for _send_to_queue"""
        self._send_to_queue(self.parent_queue, item)

    def send_to_child(self, item):
        """Wrapper for _send_to_queue"""
        self._send_to_queue(self.child_queue, item)

    def _signal(self, queue, signal_number):
        """Place a signal number on a queue.

        :param signal_number: value of the signal
        :type signal_number: int
        :param queue: queue to insert the signal in
        :type queue: Queue() object
        :raises ValueError: if signal_number is not an int or queue is not
            a Queue
        :returns: None
        """
        if (not isinstance(signal_number, int) or
                not isinstance(queue, Queue)):
            logger.error("Value error while inserting a signal into a queue.")
            logger.error("Value to be inserted isn't int.")
            logger.error("Value type: {}".format(type(signal_number)))
            raise ValueError()
        queue.put(signal_number, False)

    def signal_parent(self, signal_number):
        """wrapped by _signal()"""
        self._signal(self.parent_queue, signal_number)

    def signal_child(self, signal_number):
        """wrapped by _signal()

        Bug fix: this previously posted the signal on parent_queue, so the
        child thread never received any signal.
        """
        self._signal(self.child_queue, signal_number)
import csv
import logging
import os.path
from rig_remote.exceptions import InvalidPathError
from rig_remote.constants import BM
from rig_remote.constants import LOG_FILE_NAME
import datetime
import time
# logging configuration
logger = logging.getLogger(__name__)
class IO(object):
    """IO wrapper class: loads and saves rows of values as CSV files."""

    def __init__(self):
        # Rows read from (or queued for writing to) a CSV file; each row is
        # a list of string fields.
        self.row_list = []

    def _path_check(self, csv_file):
        """Helper function that checks if the path is valid.

        :param csv_file: path
        :type csv_file: string
        :raises InvalidPathError: if the path is invalid
        :returns: None
        """
        if not os.path.exists(csv_file):
            logger.warning("Invalid path provided:{}".format(csv_file))
            raise InvalidPathError

    def csv_load(self, csv_file, delimiter):
        """Read the frequency bookmarks file and populate self.row_list.

        :param csv_file: path of the file to be read
        :type csv_file: string
        :param delimiter: delimiter char
        :type delimiter: string
        :raises InvalidPathError: if csv_file does not exist
        :returns: None
        """
        self._path_check(csv_file)
        try:
            # newline='' lets the csv module handle line endings itself,
            # as required by the csv module documentation.
            with open(csv_file, 'r', newline='') as data_file:
                reader = csv.reader(data_file, delimiter=delimiter)
                for line in reader:
                    self.row_list.append(line)
        except csv.Error:
            logger.exception("The file provided({}) is not a file with "
                             "values separated by {}.".format(csv_file,
                                                              delimiter))
        except (IOError, OSError):
            logger.exception("Error while trying to read the file: "
                             "{}".format(csv_file))

    def csv_save(self, csv_file, delimiter):
        """Save current frequencies to disk.

        :param csv_file: path of the file to be written
        :type csv_file: string
        :param delimiter: delimiter char used in the csv
        :type delimiter: string
        """
        try:
            # newline='' prevents the csv module from emitting doubled
            # line endings (\r\r\n) on Windows.
            with open(csv_file, 'w', newline='') as data_file:
                writer = csv.writer(data_file, delimiter=delimiter)
                for row in self.row_list:
                    writer.writerow(row)
        except (IOError, OSError):
            logger.error("Error while trying to write the file: "
                         "{}".format(csv_file))
class LogFile(object):
    """Handles the tasks of logging scan records to a file."""

    def __init__(self):
        """Set the default log file name and initialise the file handle."""
        self.log_filename = LOG_FILE_NAME
        self.log_file = None

    def open(self, name=None):
        """Open a log file for appending.

        :param name: log file name, defaults to None (use self.log_filename)
        :type name: string
        """
        if name is not None:
            self.log_filename = name
        try:
            self.log_file = open(self.log_filename, 'a')
        except (IOError, OSError):
            logger.error("Error while trying to open log file: "
                         "{}".format(self.log_filename))

    def write(self, record_type, record, signal):
        """Append a bookmark ('B') or frequency ('F') record to the log.

        :param record_type: type of the record to write, 'B' or 'F'
        :type record_type: string
        :param record: data to write
        :type record: tuple
        :param signal: signal level
        :type signal: list
        :raises TypeError: if record_type is unsupported or no file is open
        """
        if record_type not in ["B", "F"]:
            logger.error("Record type not supported, must be 'B' or 'F' "
                         "got {}".format(record_type))
            raise TypeError
        # Both record layouts share the same human-readable timestamp.
        timestamp = datetime.datetime.today().strftime("%a %Y-%b-%d %H:%M:%S")
        if record_type == 'B':
            lstr = ('B ' + timestamp + ' ' + record[BM.freq] + ' ' +
                    record[BM.mode] + ' ' + str(signal) + "\n")
        else:
            lstr = ('F ' + timestamp + ' ' + record[2] + ' ' +
                    record[1] + ' ' + str(signal) + "\n")
        try:
            self.log_file.write(lstr)
        except AttributeError:
            logger.exception("No log file provided, but log feature selected.")
            raise
        except (IOError, OSError):
            logger.exception("Error while trying to write log file: "
                             "{}".format(self.log_filename))
        except (TypeError, IndexError):
            logger.exception("At least one of the parameter isn't of the "
                             "expected type:"
                             "record_type {},"
                             "record {},"
                             "signal {}".format(type(record_type),
                                                type(record),
                                                type(signal)))
            raise

    def close(self):
        """Close the log file, if one is open.

        :raises IOError OSError: if there are issues while closing the file
        """
        if self.log_file is not None:
            try:
                self.log_file.close()
            except (IOError, OSError):
                logger.error("Error while trying to close log file: "
                             "{}".format(self.log_filename))
# SPDX-License-Identifier: MIT
# Copyright © 2021 André Santos
PREDICATE_GRAMMAR = r"""
predicate: "{" condition "}"
top_level_condition: condition
condition: [condition IF_OPERATOR] disjunction
disjunction: [disjunction OR_OPERATOR] conjunction
conjunction: [conjunction AND_OPERATOR] _logic_expr
_logic_expr: negation
| quantification
| atomic_condition
negation: NOT_OPERATOR _logic_expr
quantification: QUANT_OPERATOR CNAME "in" _atomic_value ":" _logic_expr
atomic_condition: expr [RELATIONAL_OPERATOR expr]
expr: [expr ADD_OPERATOR] term
term: [term MULT_OPERATOR] factor
factor: [factor POWER_OPERATOR] _exponent
_exponent: _atomic_value
| negative_number
| "(" condition ")"
negative_number: MINUS_OPERATOR _exponent
_atomic_value: boolean
| string
| number_constant
| number
| function_call
| enum_literal
| range_literal
| _reference
number_constant: CONSTANT
enum_literal: "{" _enum_member "}"
_enum_member: [_enum_member ","] expr
range_literal: _start_range expr "to" expr _end_range
_start_range: L_RANGE_EXC | L_RANGE_INC
_end_range: R_RANGE_EXC | R_RANGE_INC
variable: VAR_REF
function_call: CNAME "(" expr ")"
_base_ref: variable
| own_field
own_field: CNAME
_reference: _base_ref
| field_access
| array_access
field_access: _reference "." CNAME
array_access: _reference "[" _index "]"
_index: expr
ros_name: ROS_NAME
int_literal: INT
string: ESCAPED_STRING
number: NUMBER
signed_number: SIGNED_NUMBER
boolean: TRUE | FALSE
TRUE: "True"
FALSE: "False"
RELATIONAL_OPERATOR: EQ_OPERATOR | COMP_OPERATOR | IN_OPERATOR
EQ_OPERATOR: "=" | "!="
COMP_OPERATOR: "<" "="?
| ">" "="?
IN_OPERATOR.2: "in"
NOT_OPERATOR.3: "not"
IF_OPERATOR.3: "implies" | "iff"
OR_OPERATOR.3: "or"
AND_OPERATOR.3: "and"
QUANT_OPERATOR.4: ALL_OPERATOR | SOME_OPERATOR
ALL_OPERATOR: "forall"
SOME_OPERATOR: "exists"
CONSTANT.5: "PI" | "INF" | "NAN" | "E"
ADD_OPERATOR: "+" | "-"
MULT_OPERATOR: "*" | "/"
POWER_OPERATOR: "**"
MINUS_OPERATOR: "-"
L_RANGE_EXC: "!["
L_RANGE_INC: "["
R_RANGE_EXC: "]!"
R_RANGE_INC: "]"
ROS_NAME: /[\/~]?[a-zA-Z][0-9a-zA-Z_]*(\/[a-zA-Z][0-9a-zA-Z_]*)*/
ROS_MSG_NAME: /[a-zA-Z][0-9a-zA-Z_]*\/[a-zA-Z][0-9a-zA-Z_]*/
VAR_REF: "@" CNAME
TIME_UNIT: "s" | "ms"
FREQ_UNIT: "hz"
%import common.CNAME
%import common.INT
%import common.NUMBER
%import common.SIGNED_NUMBER
%import common.ESCAPED_STRING
%import common.WS
%ignore WS
"""
# Full HPL grammar: the property/scope/pattern/event rules, followed by the
# predicate sub-grammar.  The predicate section was previously duplicated
# verbatim; it is now appended from PREDICATE_GRAMMAR (defined above) so the
# two copies cannot drift apart.
HPL_GRAMMAR = r"""
hpl_file: _list_of_properties
_list_of_properties: [_list_of_properties] hpl_property
hpl_property: metadata? _scope ":" _pattern
metadata: _metadata_items
_metadata_items: [_metadata_items] "#" _metadata_item
_metadata_item: metadata_id
              | metadata_title
              | metadata_desc
metadata_id: "id" ":" CNAME
metadata_title: "title" ":" ESCAPED_STRING
metadata_desc: "description" ":" ESCAPED_STRING
_scope: global_scope
      | after_until
      | until
global_scope: "globally"
after_until: "after" activator ["until" terminator]
until: "until" terminator
activator: _any_event
terminator: _any_event
_pattern: existence
        | absence
        | response
        | prevention
        | requirement
existence: "some" _any_event _time_bound?
absence: "no" _any_event _time_bound?
response: _any_event "causes" _any_event _time_bound?
prevention: _any_event "forbids" _any_event _time_bound?
requirement: _any_event "requires" _any_event _time_bound?
_time_bound: "within" time_amount
_any_event: event
          | event_disjunction
event: message predicate?
event_disjunction: "(" (event "or")+ event ")"
message: ros_topic _alias?
ros_topic: ros_name _msg_type?
_msg_type: "[" ros_msg_name "]"
ros_msg_name: ROS_MSG_NAME
time_amount: NUMBER TIME_UNIT
frequency: NUMBER FREQ_UNIT
_alias: "as" CNAME""" + PREDICATE_GRAMMAR
from .ast import (
HplArrayAccess,
HplAstObject,
HplBinaryOperator,
HplContradiction,
HplEvent,
HplEventDisjunction,
HplExpression,
HplFieldAccess,
HplFunctionCall,
HplLiteral,
HplPattern,
HplPredicate,
HplProperty,
HplQuantifier,
HplRange,
HplScope,
HplSet,
HplSimpleEvent,
HplSpecification,
HplThisMessage,
HplUnaryOperator,
HplVacuousTruth,
HplValue,
HplVarReference
)
from typing import Protocol
class HplAstVisitor(Protocol):
    """
    This class specifies the interface all visitor instances must adhere to.

    Each ``visit_*`` method receives the AST node of the matching type.
    """

    def visit_hpl_array_access(self, node: HplArrayAccess) -> None:
        """
        Use this function to visit nodes of type HplArrayAccess.
        """
        ...

    def visit_hpl_ast_object(self, node: HplAstObject) -> None:
        """
        Use this function to visit nodes of type HplAstObject.
        """
        ...

    def visit_hpl_binary_operator(self, node: HplBinaryOperator) -> None:
        """
        Use this function to visit nodes of type HplBinaryOperator.
        """
        ...

    def visit_hpl_contradiction(self, node: HplContradiction) -> None:
        """
        Use this function to visit nodes of type HplContradiction.
        """
        ...

    def visit_hpl_event(self, node: HplEvent) -> None:
        """
        Use this function to visit nodes of type HplEvent.
        """
        ...

    def visit_hpl_event_disjunction(self, node: HplEventDisjunction) -> None:
        """
        Use this function to visit nodes of type HplEventDisjunction.
        """
        ...

    def visit_hpl_expression(self, node: HplExpression) -> None:
        """
        Use this function to visit nodes of type HplExpression.
        """
        ...

    def visit_hpl_field_access(self, node: HplFieldAccess) -> None:
        """
        Use this function to visit nodes of type HplFieldAccess.
        """
        ...

    # NOTE: method name kept as-is ("functional") for backward compatibility.
    def visit_hpl_functional_call(self, node: HplFunctionCall) -> None:
        """
        Use this function to visit nodes of type HplFunctionCall.
        """
        ...

    def visit_hpl_literal(self, node: HplLiteral) -> None:
        """
        Use this function to visit nodes of type HplLiteral.
        """
        ...

    def visit_hpl_pattern(self, node: HplPattern) -> None:
        """
        Use this function to visit nodes of type HplPattern.
        """
        ...

    def visit_hpl_predicate(self, node: HplPredicate) -> None:
        """
        Use this function to visit nodes of type HplPredicate.
        """
        ...

    def visit_hpl_property(self, node: HplProperty) -> None:
        """
        Use this function to visit nodes of type HplProperty.
        """
        ...

    def visit_hpl_quantifier(self, node: HplQuantifier) -> None:
        """
        Use this function to visit nodes of type HplQuantifier.
        """
        ...

    def visit_hpl_range(self, node: HplRange) -> None:
        """
        Use this function to visit nodes of type HplRange.
        """
        ...

    def visit_hpl_scope(self, node: HplScope) -> None:
        """
        Use this function to visit nodes of type HplScope.
        """
        ...

    def visit_hpl_set(self, node: HplSet) -> None:
        """
        Use this function to visit nodes of type HplSet.
        """
        ...

    def visit_hpl_simple_event(self, node: HplSimpleEvent) -> None:
        """
        Use this function to visit nodes of type HplSimpleEvent.
        """
        ...

    def visit_hpl_specification(self, node: HplSpecification) -> None:
        """
        Use this function to visit nodes of type HplSpecification.
        """
        ...

    def visit_hpl_this_message(self, node: HplThisMessage) -> None:
        """
        Use this function to visit nodes of type HplThisMessage.
        """
        ...

    def visit_hpl_unary_operator(self, node: HplUnaryOperator) -> None:
        """
        Use this function to visit nodes of type HplUnaryOperator.
        """
        ...

    def visit_hpl_vacuous_truth(self, node: HplVacuousTruth) -> None:
        """
        Use this function to visit nodes of type HplVacuousTruth.
        """
        ...

    def visit_hpl_value(self, node: HplValue) -> None:
        """
        Use this function to visit nodes of type HplValue.
        """
        ...

    def visit_hpl_var_reference(self, node: HplVarReference) -> None:
        """
        Use this function to visit nodes of type HplVarReference.
        """
        ...
import docker
import uuid
from rigelcore.clients import (
DockerClient,
ROSBridgeClient
)
from rigelcore.loggers import MessageLogger
from rigelcore.simulations.requirements import SimulationRequirementsManager
from pydantic import BaseModel, PrivateAttr
from typing import Any, Dict, List, Optional
class ROSPackageContainer(BaseModel):
    """
    A placeholder for information regarding a containerized ROS package.

    :type name: string
    :param name: The Docker container name.
    :type image: string
    :param image: The Docker image.
    :type command: Optional[str]
    :param command: The command to be executed inside the container.
    :type environment: Optional[List[str]]
    :param environment: The list of environment variables to set inside the container.
    :type introspection: bool
    :param introspection: Whether a ROS bridge connection should be opened
        to this container for introspection (defaults to False).
    :type network: Optional[str]
    :param network: The name of the network to connect the container to.
    :type ports: Optional[Dict[str, Optional[int]]]
    :param ports: The container ports to expose.
    :type privileged: bool
    :param privileged: Whether to run the container in privileged mode
        (defaults to False).
    :type volumes: Optional[List[str]]
    :param volumes: The list of volumes to be mounted inside the container.
    """
    # Required fields.
    name: str
    image: str

    # Optional fields.
    command: Optional[str] = None
    environment: Optional[List[str]] = []
    introspection: bool = False
    network: Optional[str] = None
    ports: Optional[Dict[str, Optional[int]]] = None
    privileged: bool = False
    volumes: Optional[List[str]] = None
class Plugin(BaseModel):
    """
    A plugin for Rigel to locally run a containerized ROS application.

    :type distro: string
    :param distro: The ROS distribution.
    :type packages: List[rigel_local_simulation_plugin.ROSPackageContainer]
    :param packages: The list of containerized packages.
    """
    # List of required fields.
    distro: str
    packages: List[ROSPackageContainer]

    # List of private fields (not validated by pydantic).
    _docker_client: DockerClient = PrivateAttr()
    _message_logger: MessageLogger = PrivateAttr()
    _network_name: str = PrivateAttr()
    _requirements_manager: SimulationRequirementsManager = PrivateAttr()
    _simulation_uuid: str = PrivateAttr()

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # NOTE: the first positional argument is consumed as the
        # SimulationRequirementsManager and is NOT forwarded to pydantic.
        super().__init__(*args[1:], **kwargs)
        self._requirements_manager = args[0]

        self._docker_client = DockerClient()
        self._message_logger = MessageLogger()

        # A unique id per run keeps concurrent simulations on distinct
        # Docker networks.
        self._simulation_uuid = str(uuid.uuid1())
        self._network_name = f'rigel-simulation-{self._simulation_uuid}'

    def create_simulation_network(self) -> None:
        """
        Create dedicated Docker network created for a simulation.
        """
        self._docker_client.create_network(self._network_name, 'bridge')

    def remove_simulation_network(self) -> None:
        """
        Remove dedicated Docker network created for a simulation.
        """
        self._docker_client.remove_network(self._network_name)

    def run_ros_package_container(self, package: ROSPackageContainer) -> docker.models.containers.Container:
        """
        Launch a single containerized ROS node.

        :type package: rigel_local_simulation_plugin.ROSPackageContainer
        :param package: Information about the ROS package container.
        :rtype: docker.models.containers.Container
        :return: The launched Docker container, once it reaches the
            'running' state.
        """
        self._docker_client.run_container(
            package.name,
            package.image,
            command=package.command,
            detach=True,
            environment=package.environment,
            hostname=package.name,
            network=self._network_name,
            privileged=package.privileged,
            volumes=package.volumes
        )

        self._docker_client.wait_for_container_status(package.name, 'running')
        return self._docker_client.get_container(package.name)  # this call to get_container ensures updated container data

    def bringup_ros_nodes(self) -> None:
        """
        Launch all containerized ROS nodes required for a given simulation.
        """
        # Start containerize ROS application
        for package in self.packages:

            ros_common_env_variables = ['ROS_MASTER_URI=http://master:11311', f'ROS_HOSTNAME={package.name}']

            # Ensure that all ROS nodes connect to the same ROS master node
            assert package.environment is not None  # NOTE: required by mypy to ensure that the addition is possible
            package.environment = package.environment + ros_common_env_variables

            node_container = self.run_ros_package_container(package)
            node_container_addr = node_container.attrs['NetworkSettings']['Networks'][self._network_name]['IPAddress']
            self._message_logger.info(f"Created container '{package.name}' ({node_container_addr})")

            if package.introspection:

                # Connect to ROS bridge inside container
                rosbridge_client = ROSBridgeClient(node_container_addr, 9090)
                self._message_logger.info(f"Connected to ROS bridge server at '{node_container_addr}:9090'")

                self._requirements_manager.connect_to_rosbridge(rosbridge_client)

    def run(self) -> None:
        """
        Plugin entrypoint.
        Create simulation network and all containers required for a given simulation.
        """
        # Create Docker network for entire simulation
        self.create_simulation_network()
        self._message_logger.info(f"Created Docker network {self._network_name}")

        # Add container with ROS master node
        self.packages.insert(0, ROSPackageContainer(
            name='master',
            image=f'ros:{self.distro}',
            command='roscore'
        ))

        # Bringup all remaining ROS nodes
        self.bringup_ros_nodes()

    def stop(self) -> None:
        """
        Plugin graceful closing mechanism.
        """
        # Remove containers
        for package in self.packages:
            self._docker_client.remove_container(package.name)
            self._message_logger.info(f"Removed Docker container '{package.name}'")

        # Remove simulation network
        self.remove_simulation_network()
        self._message_logger.info(f"Removed Docker network '{self._network_name}'.")
import os
from pydantic import BaseModel, PrivateAttr
from rigelcore.clients import DockerClient
from rigelcore.exceptions import UndeclaredEnvironmentVariableError
from rigelcore.loggers import MessageLogger
from typing import Any
class GenericCredentials(BaseModel):
    """
    Pair of login credentials to be used with a Docker image registry.

    Note: each field holds the NAME of an environment variable; the actual
    values are resolved from the environment at authentication time.

    :type username: string
    :ivar username: The user to be authenticated.
    :type password: string
    :ivar password: The secret user password.
    """
    # Required fields.
    username: str
    password: str
class GenericDockerRegistryPlugin(BaseModel):
    """
    A plugin for Rigel to deploy Docker images to a Docker image registry.

    :type credentials: GenericCredentials
    :ivar credentials: The credentials to authenticate with the Docker image registry.
    :type image: string
    :ivar image: The name for the deployed image.
    :type local_image: string
    :ivar local_image: The name of the image to deploy.
    :type registry: string
    :ivar registry: The Docker image registry. Defaults to DockerHub.
    """
    # List of required fields.
    credentials: GenericCredentials
    image: str

    # List of optional fields.
    local_image: str = 'rigel:temp'
    registry: str = ''  # defaults to DockerHub

    # List of private fields.
    _complete_image_name: str = PrivateAttr()
    _docker_client: DockerClient = PrivateAttr()
    _logger: MessageLogger = PrivateAttr()

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Injected collaborators are removed from kwargs before pydantic
        # validation sees them.
        self._docker_client = kwargs.pop('docker_client')
        self._logger = kwargs.pop('logger')
        super().__init__(*args, **kwargs)

        # Images pushed to a private registry must be prefixed with its host.
        if self.registry:
            self._complete_image_name = f"{self.registry}/{self.image}"
        else:
            self._complete_image_name = self.image

    def tag(self) -> None:
        """
        Tag the existing local Docker image with the desired deployment name.
        """
        self._docker_client.tag_image(
            self.local_image,
            self._complete_image_name
        )

    def authenticate(self) -> None:
        """
        Authenticate with the specified Docker image registry.
        """
        def __get_env_var_value(env: str) -> str:
            """
            Retrieve a value stored in an environment variable.

            :type env: string
            :param env: Name of environment variable.
            :rtype: string
            :return: The value of the environment variable.
            """
            value = os.environ.get(env)
            if value is None:
                raise UndeclaredEnvironmentVariableError(env=env)
            return value

        self._docker_client.login(
            self.registry,
            __get_env_var_value(self.credentials.username),
            __get_env_var_value(self.credentials.password)
        )

    def deploy(self) -> None:
        """
        Deploy Docker image to the specified Docker image registry.
        """
        self._docker_client.push_image(
            self._complete_image_name
        )

    def run(self) -> None:
        """
        Plugin entrypoint: tag, authenticate, then push the image.
        """
        self.tag()
        self._logger.info(f"Created Docker image {self.image} from {self.local_image}.")
        self.authenticate()
        self._logger.info(f"Authenticated with Docker image registry {self.registry or 'DockerHub'}.")
        self.deploy()
        self._logger.info("Docker image {} was pushed with success to {}.".format(
            self._complete_image_name,
            self.registry or 'DockerHub'
        ))

    def stop(self) -> None:
        """
        Plugin graceful closing mechanism (nothing to clean up).
        """
        pass
from typing import Any, Dict, List, Optional, Type
import gym
import torch as th
from torch import nn
from stable_baselines3.common.policies import BasePolicy
from stable_baselines3.common.torch_layers import (
BaseFeaturesExtractor,
CombinedExtractor,
FlattenExtractor,
NatureCNN,
create_mlp,
)
from stable_baselines3.common.type_aliases import Schedule
class QNetwork(BasePolicy):
    """
    Action-Value (Q-Value) network for DQN.

    Maps extracted observation features to one Q-value per discrete action.

    :param observation_space: Observation space
    :param action_space: Action space
    :param net_arch: The specification of the policy and value networks.
    :param activation_fn: Activation function
    :param normalize_images: Whether to normalize images or not,
        dividing by 255.0 (True by default)
    """
    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        features_extractor: nn.Module,
        features_dim: int,
        net_arch: Optional[List[int]] = None,
        activation_fn: Type[nn.Module] = nn.ReLU,
        normalize_images: bool = True,
    ):
        super().__init__(
            observation_space,
            action_space,
            features_extractor=features_extractor,
            normalize_images=normalize_images,
        )
        # Default architecture: two hidden layers of 64 units each.
        self.net_arch = [64, 64] if net_arch is None else net_arch
        self.activation_fn = activation_fn
        self.features_extractor = features_extractor
        self.features_dim = features_dim
        self.normalize_images = normalize_images
        # MLP head mapping features to one Q-value per discrete action.
        layers = create_mlp(self.features_dim, self.action_space.n, self.net_arch, self.activation_fn)
        self.q_net = nn.Sequential(*layers)
    def forward(self, obs: th.Tensor) -> th.Tensor:
        """
        Predict the q-values.

        :param obs: Observation
        :return: The estimated Q-Value for each action.
        """
        features = self.extract_features(obs)
        return self.q_net(features)
    def _predict(self, observation: th.Tensor, deterministic: bool = True) -> th.Tensor:
        # Greedy action: argmax over the Q-values of each batch element.
        # (`deterministic` is accepted for interface compatibility but unused.)
        return self(observation).argmax(dim=1).reshape(-1)
    def _get_constructor_parameters(self) -> Dict[str, Any]:
        """Return kwargs needed to re-create this network when loading."""
        data = super()._get_constructor_parameters()
        data.update(
            {
                "net_arch": self.net_arch,
                "features_dim": self.features_dim,
                "activation_fn": self.activation_fn,
                "features_extractor": self.features_extractor,
            }
        )
        return data
class DQNPolicy(BasePolicy):
    """
    Policy class with Q-Value Net and target net for DQN

    :param observation_space: Observation space
    :param action_space: Action space
    :param lr_schedule: Learning rate schedule (could be constant)
    :param net_arch: The specification of the policy and value networks.
    :param activation_fn: Activation function
    :param features_extractor_class: Features extractor to use.
    :param features_extractor_kwargs: Keyword arguments
        to pass to the features extractor.
    :param normalize_images: Whether to normalize images or not,
        dividing by 255.0 (True by default)
    :param optimizer_class: The optimizer to use,
        ``th.optim.Adam`` by default
    :param optimizer_kwargs: Additional keyword arguments,
        excluding the learning rate, to pass to the optimizer
    """
    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        lr_schedule: Schedule,
        net_arch: Optional[List[int]] = None,
        activation_fn: Type[nn.Module] = nn.ReLU,
        features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
        features_extractor_kwargs: Optional[Dict[str, Any]] = None,
        normalize_images: bool = True,
        optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
        optimizer_kwargs: Optional[Dict[str, Any]] = None,
    ):
        super().__init__(
            observation_space,
            action_space,
            features_extractor_class,
            features_extractor_kwargs,
            optimizer_class=optimizer_class,
            optimizer_kwargs=optimizer_kwargs,
        )
        if net_arch is None:
            # With a CNN extractor no extra hidden layers are added by default;
            # otherwise fall back to two hidden layers of 64 units.
            if features_extractor_class == NatureCNN:
                net_arch = []
            else:
                net_arch = [64, 64]
        self.net_arch = net_arch
        self.activation_fn = activation_fn
        self.normalize_images = normalize_images
        # Arguments forwarded to every QNetwork created by make_q_net().
        self.net_args = {
            "observation_space": self.observation_space,
            "action_space": self.action_space,
            "net_arch": self.net_arch,
            "activation_fn": self.activation_fn,
            "normalize_images": normalize_images,
        }
        # Placeholders; both networks are created in _build() below.
        self.q_net, self.q_net_target = None, None
        self._build(lr_schedule)
    def _build(self, lr_schedule: Schedule) -> None:
        """
        Create the network and the optimizer.
        Put the target network into evaluation mode.
        :param lr_schedule: Learning rate schedule
            lr_schedule(1) is the initial learning rate
        """
        self.q_net = self.make_q_net()
        self.q_net_target = self.make_q_net()
        # Target starts as an exact copy of the online network.
        self.q_net_target.load_state_dict(self.q_net.state_dict())
        self.q_net_target.set_training_mode(False)
        # Setup optimizer with initial learning rate
        self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)
    def make_q_net(self) -> QNetwork:
        """Build a fresh QNetwork (with its own features extractor) on the policy device."""
        # Make sure we always have separate networks for features extractors etc
        net_args = self._update_features_extractor(self.net_args, features_extractor=None)
        return QNetwork(**net_args).to(self.device)
    def forward(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
        """Return the action for ``obs``; alias for :meth:`_predict`."""
        return self._predict(obs, deterministic=deterministic)
    def _predict(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
        # Delegate greedy action selection to the online Q-network.
        return self.q_net._predict(obs, deterministic=deterministic)
    def _get_constructor_parameters(self) -> Dict[str, Any]:
        """Return kwargs needed to re-create this policy when loading."""
        data = super()._get_constructor_parameters()
        data.update(
            dict(
                net_arch=self.net_args["net_arch"],
                activation_fn=self.net_args["activation_fn"],
                lr_schedule=self._dummy_schedule,  # dummy lr schedule, not needed for loading policy alone
                optimizer_class=self.optimizer_class,
                optimizer_kwargs=self.optimizer_kwargs,
                features_extractor_class=self.features_extractor_class,
                features_extractor_kwargs=self.features_extractor_kwargs,
            )
        )
        return data
    def set_training_mode(self, mode: bool) -> None:
        """
        Put the policy in either training or evaluation mode.
        This affects certain modules, such as batch normalisation and dropout.
        :param mode: if true, set to training mode, else set to evaluation mode
        """
        # Only the online network toggles; the target always stays in eval mode.
        self.q_net.set_training_mode(mode)
        self.training = mode
# Alias kept for the standard "MlpPolicy" name accepted by the algorithm.
MlpPolicy = DQNPolicy
class CnnPolicy(DQNPolicy):
    """
    Policy class for DQN when using images as input.

    Identical to :class:`DQNPolicy` except that the default features
    extractor is ``NatureCNN``.

    :param observation_space: Observation space
    :param action_space: Action space
    :param lr_schedule: Learning rate schedule (could be constant)
    :param net_arch: The specification of the policy and value networks.
    :param activation_fn: Activation function
    :param features_extractor_class: Features extractor to use.
    :param normalize_images: Whether to normalize images or not,
        dividing by 255.0 (True by default)
    :param optimizer_class: The optimizer to use,
        ``th.optim.Adam`` by default
    :param optimizer_kwargs: Additional keyword arguments,
        excluding the learning rate, to pass to the optimizer
    """
    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        lr_schedule: Schedule,
        net_arch: Optional[List[int]] = None,
        activation_fn: Type[nn.Module] = nn.ReLU,
        features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
        features_extractor_kwargs: Optional[Dict[str, Any]] = None,
        normalize_images: bool = True,
        optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
        optimizer_kwargs: Optional[Dict[str, Any]] = None,
    ):
        # Pure pass-through: only the default features extractor differs.
        super().__init__(
            observation_space=observation_space,
            action_space=action_space,
            lr_schedule=lr_schedule,
            net_arch=net_arch,
            activation_fn=activation_fn,
            features_extractor_class=features_extractor_class,
            features_extractor_kwargs=features_extractor_kwargs,
            normalize_images=normalize_images,
            optimizer_class=optimizer_class,
            optimizer_kwargs=optimizer_kwargs,
        )
class MultiInputPolicy(DQNPolicy):
    """
    Policy class for DQN when using dict observations as input.

    Identical to :class:`DQNPolicy` except that the default features
    extractor is ``CombinedExtractor`` and the observation space is a
    ``gym.spaces.Dict``.

    :param observation_space: Observation space
    :param action_space: Action space
    :param lr_schedule: Learning rate schedule (could be constant)
    :param net_arch: The specification of the policy and value networks.
    :param activation_fn: Activation function
    :param features_extractor_class: Features extractor to use.
    :param normalize_images: Whether to normalize images or not,
        dividing by 255.0 (True by default)
    :param optimizer_class: The optimizer to use,
        ``th.optim.Adam`` by default
    :param optimizer_kwargs: Additional keyword arguments,
        excluding the learning rate, to pass to the optimizer
    """
    def __init__(
        self,
        observation_space: gym.spaces.Dict,
        action_space: gym.spaces.Space,
        lr_schedule: Schedule,
        net_arch: Optional[List[int]] = None,
        activation_fn: Type[nn.Module] = nn.ReLU,
        features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor,
        features_extractor_kwargs: Optional[Dict[str, Any]] = None,
        normalize_images: bool = True,
        optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
        optimizer_kwargs: Optional[Dict[str, Any]] = None,
    ):
        # Pure pass-through to DQNPolicy; only the defaults differ.
        super().__init__(
            observation_space,
            action_space,
            lr_schedule,
            net_arch,
            activation_fn,
            features_extractor_class,
            features_extractor_kwargs,
            normalize_images,
            optimizer_class,
            optimizer_kwargs,
        ) | /rigged_sb3-0.0.1-py3-none-any.whl/stable_baselines3/dqn/policies.py | 0.966068 | 0.517083 | policies.py | pypi
import warnings
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import gym
import numpy as np
import torch as th
from torch.nn import functional as F
from stable_baselines3.common.buffers import ReplayBuffer
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
from stable_baselines3.common.policies import BasePolicy
from stable_baselines3.common.preprocessing import maybe_transpose
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import get_linear_fn, is_vectorized_observation, polyak_update
from stable_baselines3.dqn.policies import CnnPolicy, DQNPolicy, MlpPolicy, MultiInputPolicy
class DQN(OffPolicyAlgorithm):
    """
    Deep Q-Network (DQN)

    Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236
    Default hyperparameters are taken from the nature paper,
    except for the optimizer and learning rate that were taken from Stable Baselines defaults.

    :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
    :param env: The environment to learn from (if registered in Gym, can be str)
    :param learning_rate: The learning rate, it can be a function
        of the current progress remaining (from 1 to 0)
    :param buffer_size: size of the replay buffer
    :param learning_starts: how many steps of the model to collect transitions for before learning starts
    :param batch_size: Minibatch size for each gradient update
    :param tau: the soft update coefficient ("Polyak update", between 0 and 1) default 1 for hard update
    :param gamma: the discount factor
    :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit
        like ``(5, "step")`` or ``(2, "episode")``.
    :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)
        Set to ``-1`` means to do as many gradient steps as steps done in the environment
        during the rollout.
    :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``).
        If ``None``, it will be automatically selected.
    :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation.
    :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
        at a cost of more complexity.
        See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
    :param target_update_interval: update the target network every ``target_update_interval``
        environment steps.
    :param exploration_fraction: fraction of entire training period over which the exploration rate is reduced
    :param exploration_initial_eps: initial value of random action probability
    :param exploration_final_eps: final value of random action probability
    :param max_grad_norm: The maximum value for the gradient clipping
    :param tensorboard_log: the log location for tensorboard (if None, no logging)
    :param create_eval_env: Whether to create a second environment that will be
        used for evaluating the agent periodically. (Only available when passing string for the environment)
    :param policy_kwargs: additional arguments to be passed to the policy on creation
    :param verbose: the verbosity level: 0 no output, 1 info, 2 debug
    :param seed: Seed for the pseudo random generators
    :param device: Device (cpu, cuda, ...) on which the code should be run.
        Setting it to auto, the code will be run on the GPU if possible.
    :param _init_setup_model: Whether or not to build the network at the creation of the instance
    """
    # Maps the policy name strings accepted in the constructor to classes.
    policy_aliases: Dict[str, Type[BasePolicy]] = {
        "MlpPolicy": MlpPolicy,
        "CnnPolicy": CnnPolicy,
        "MultiInputPolicy": MultiInputPolicy,
    }
    def __init__(
        self,
        policy: Union[str, Type[DQNPolicy]],
        env: Union[GymEnv, str],
        learning_rate: Union[float, Schedule] = 1e-4,
        buffer_size: int = 1_000_000,  # 1e6
        learning_starts: int = 50000,
        batch_size: int = 32,
        tau: float = 1.0,
        gamma: float = 0.99,
        train_freq: Union[int, Tuple[int, str]] = 4,
        gradient_steps: int = 1,
        replay_buffer_class: Optional[ReplayBuffer] = None,
        replay_buffer_kwargs: Optional[Dict[str, Any]] = None,
        optimize_memory_usage: bool = False,
        target_update_interval: int = 10000,
        exploration_fraction: float = 0.1,
        exploration_initial_eps: float = 1.0,
        exploration_final_eps: float = 0.05,
        max_grad_norm: float = 10,
        tensorboard_log: Optional[str] = None,
        create_eval_env: bool = False,
        policy_kwargs: Optional[Dict[str, Any]] = None,
        verbose: int = 0,
        seed: Optional[int] = None,
        device: Union[th.device, str] = "auto",
        _init_setup_model: bool = True,
    ):
        super().__init__(
            policy,
            env,
            learning_rate,
            buffer_size,
            learning_starts,
            batch_size,
            tau,
            gamma,
            train_freq,
            gradient_steps,
            action_noise=None,  # No action noise
            replay_buffer_class=replay_buffer_class,
            replay_buffer_kwargs=replay_buffer_kwargs,
            policy_kwargs=policy_kwargs,
            tensorboard_log=tensorboard_log,
            verbose=verbose,
            device=device,
            create_eval_env=create_eval_env,
            seed=seed,
            sde_support=False,
            optimize_memory_usage=optimize_memory_usage,
            supported_action_spaces=(gym.spaces.Discrete,),
            support_multi_env=True,
        )
        self.exploration_initial_eps = exploration_initial_eps
        self.exploration_final_eps = exploration_final_eps
        self.exploration_fraction = exploration_fraction
        self.target_update_interval = target_update_interval
        # For updating the target network with multiple envs:
        self._n_calls = 0
        self.max_grad_norm = max_grad_norm
        # "epsilon" for the epsilon-greedy exploration
        self.exploration_rate = 0.0
        # Linear schedule will be defined in `_setup_model()`
        self.exploration_schedule = None
        self.q_net, self.q_net_target = None, None
        if _init_setup_model:
            self._setup_model()
    def _setup_model(self) -> None:
        """Create networks/buffer (via the parent), aliases and the epsilon schedule."""
        super()._setup_model()
        self._create_aliases()
        self.exploration_schedule = get_linear_fn(
            self.exploration_initial_eps,
            self.exploration_final_eps,
            self.exploration_fraction,
        )
        # Account for multiple environments
        # each call to step() corresponds to n_envs transitions
        if self.n_envs > 1:
            if self.n_envs > self.target_update_interval:
                warnings.warn(
                    "The number of environments used is greater than the target network "
                    f"update interval ({self.n_envs} > {self.target_update_interval}), "
                    "therefore the target network will be updated after each call to env.step() "
                    f"which corresponds to {self.n_envs} steps."
                )
            self.target_update_interval = max(self.target_update_interval // self.n_envs, 1)
    def _create_aliases(self) -> None:
        """Expose the policy's Q-networks as direct attributes of the algorithm."""
        self.q_net = self.policy.q_net
        self.q_net_target = self.policy.q_net_target
    def _on_step(self) -> None:
        """
        Update the exploration rate and target network if needed.
        This method is called in ``collect_rollouts()`` after each step in the environment.
        """
        self._n_calls += 1
        if self._n_calls % self.target_update_interval == 0:
            polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)
        self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)
        self.logger.record("rollout/exploration_rate", self.exploration_rate)
    def train(self, gradient_steps: int, batch_size: int = 100) -> None:
        """Run ``gradient_steps`` Q-learning updates on replay-buffer mini-batches."""
        # Switch to train mode (this affects batch norm / dropout)
        self.policy.set_training_mode(True)
        # Update learning rate according to schedule
        self._update_learning_rate(self.policy.optimizer)
        losses = []
        for _ in range(gradient_steps):
            # Sample replay buffer
            replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)
            with th.no_grad():
                # Compute the next Q-values using the target network
                next_q_values = self.q_net_target(replay_data.next_observations)
                # Follow greedy policy: use the one with the highest value
                next_q_values, _ = next_q_values.max(dim=1)
                # Avoid potential broadcast issue
                next_q_values = next_q_values.reshape(-1, 1)
                # 1-step TD target
                target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values
            # Get current Q-values estimates
            current_q_values = self.q_net(replay_data.observations)
            # Retrieve the q-values for the actions from the replay buffer
            current_q_values = th.gather(current_q_values, dim=1, index=replay_data.actions.long())
            # Compute Huber loss (less sensitive to outliers)
            loss = F.smooth_l1_loss(current_q_values, target_q_values)
            losses.append(loss.item())
            # Optimize the policy
            self.policy.optimizer.zero_grad()
            loss.backward()
            # Clip gradient norm
            th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
            self.policy.optimizer.step()
        # Increase update counter
        self._n_updates += gradient_steps
        self.logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
        self.logger.record("train/loss", np.mean(losses))
    def predict(
        self,
        observation: np.ndarray,
        state: Optional[Tuple[np.ndarray, ...]] = None,
        episode_start: Optional[np.ndarray] = None,
        deterministic: bool = False,
    ) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:
        """
        Overrides the base_class predict function to include epsilon-greedy exploration.
        :param observation: the input observation
        :param state: The last states (can be None, used in recurrent policies)
        :param episode_start: The last masks (can be None, used in recurrent policies)
        :param deterministic: Whether or not to return deterministic actions.
        :return: the model's action and the next state
            (used in recurrent policies)
        """
        if not deterministic and np.random.rand() < self.exploration_rate:
            # Random action branch: sample one action per environment.
            if is_vectorized_observation(maybe_transpose(observation, self.observation_space), self.observation_space):
                if isinstance(self.observation_space, gym.spaces.Dict):
                    # Infer batch size from the first entry of the dict observation.
                    n_batch = observation[list(observation.keys())[0]].shape[0]
                else:
                    n_batch = observation.shape[0]
                action = np.array([self.action_space.sample() for _ in range(n_batch)])
            else:
                action = np.array(self.action_space.sample())
        else:
            action, state = self.policy.predict(observation, state, episode_start, deterministic)
        return action, state
    def learn(
        self,
        total_timesteps: int,
        callback: MaybeCallback = None,
        log_interval: int = 4,
        eval_env: Optional[GymEnv] = None,
        eval_freq: int = -1,
        n_eval_episodes: int = 5,
        tb_log_name: str = "DQN",
        eval_log_path: Optional[str] = None,
        reset_num_timesteps: bool = True,
    ) -> OffPolicyAlgorithm:
        """Train the DQN agent; delegates to ``OffPolicyAlgorithm.learn``."""
        return super().learn(
            total_timesteps=total_timesteps,
            callback=callback,
            log_interval=log_interval,
            eval_env=eval_env,
            eval_freq=eval_freq,
            n_eval_episodes=n_eval_episodes,
            tb_log_name=tb_log_name,
            eval_log_path=eval_log_path,
            reset_num_timesteps=reset_num_timesteps,
        )
    def _excluded_save_params(self) -> List[str]:
        """Exclude the q-net aliases from pickling; they are recreated in `_create_aliases()`."""
        return super()._excluded_save_params() + ["q_net", "q_net_target"]
    def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
        """Return the names of the torch state-dicts to save (policy and its optimizer)."""
        state_dicts = ["policy", "policy.optimizer"]
        return state_dicts, [] | /rigged_sb3-0.0.1-py3-none-any.whl/stable_baselines3/dqn/dqn.py | 0.930899 | 0.590071 | dqn.py | pypi
from typing import Any, Dict, Optional, Tuple, Type, Union
import torch as th
from stable_baselines3.common.buffers import ReplayBuffer
from stable_baselines3.common.noise import ActionNoise
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.td3.policies import TD3Policy
from stable_baselines3.td3.td3 import TD3
class DDPG(TD3):
    """
    Deep Deterministic Policy Gradient (DDPG).

    Deterministic Policy Gradient: http://proceedings.mlr.press/v32/silver14.pdf
    DDPG Paper: https://arxiv.org/abs/1509.02971
    Introduction to DDPG: https://spinningup.openai.com/en/latest/algorithms/ddpg.html

    Note: we treat DDPG as a special case of its successor TD3.

    :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
    :param env: The environment to learn from (if registered in Gym, can be str)
    :param learning_rate: learning rate for adam optimizer,
        the same learning rate will be used for all networks (Q-Values, Actor and Value function)
        it can be a function of the current progress remaining (from 1 to 0)
    :param buffer_size: size of the replay buffer
    :param learning_starts: how many steps of the model to collect transitions for before learning starts
    :param batch_size: Minibatch size for each gradient update
    :param tau: the soft update coefficient ("Polyak update", between 0 and 1)
    :param gamma: the discount factor
    :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit
        like ``(5, "step")`` or ``(2, "episode")``.
    :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)
        Set to ``-1`` means to do as many gradient steps as steps done in the environment
        during the rollout.
    :param action_noise: the action noise type (None by default), this can help
        for hard exploration problem. Cf common.noise for the different action noise type.
    :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``).
        If ``None``, it will be automatically selected.
    :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation.
    :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
        at a cost of more complexity.
        See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
    :param create_eval_env: Whether to create a second environment that will be
        used for evaluating the agent periodically. (Only available when passing string for the environment)
    :param policy_kwargs: additional arguments to be passed to the policy on creation
    :param verbose: the verbosity level: 0 no output, 1 info, 2 debug
    :param seed: Seed for the pseudo random generators
    :param device: Device (cpu, cuda, ...) on which the code should be run.
        Setting it to auto, the code will be run on the GPU if possible.
    :param _init_setup_model: Whether or not to build the network at the creation of the instance
    """
    def __init__(
        self,
        policy: Union[str, Type[TD3Policy]],
        env: Union[GymEnv, str],
        learning_rate: Union[float, Schedule] = 1e-3,
        buffer_size: int = 1_000_000,  # 1e6
        learning_starts: int = 100,
        batch_size: int = 100,
        tau: float = 0.005,
        gamma: float = 0.99,
        train_freq: Union[int, Tuple[int, str]] = (1, "episode"),
        gradient_steps: int = -1,
        action_noise: Optional[ActionNoise] = None,
        replay_buffer_class: Optional[ReplayBuffer] = None,
        replay_buffer_kwargs: Optional[Dict[str, Any]] = None,
        optimize_memory_usage: bool = False,
        tensorboard_log: Optional[str] = None,
        create_eval_env: bool = False,
        policy_kwargs: Optional[Dict[str, Any]] = None,
        verbose: int = 0,
        seed: Optional[int] = None,
        device: Union[th.device, str] = "auto",
        _init_setup_model: bool = True,
    ):
        super().__init__(
            policy=policy,
            env=env,
            learning_rate=learning_rate,
            buffer_size=buffer_size,
            learning_starts=learning_starts,
            batch_size=batch_size,
            tau=tau,
            gamma=gamma,
            train_freq=train_freq,
            gradient_steps=gradient_steps,
            action_noise=action_noise,
            replay_buffer_class=replay_buffer_class,
            replay_buffer_kwargs=replay_buffer_kwargs,
            policy_kwargs=policy_kwargs,
            tensorboard_log=tensorboard_log,
            verbose=verbose,
            device=device,
            create_eval_env=create_eval_env,
            seed=seed,
            optimize_memory_usage=optimize_memory_usage,
            # Remove all tricks from TD3 to obtain DDPG:
            # we still need to specify target_policy_noise > 0 to avoid errors
            policy_delay=1,
            target_noise_clip=0.0,
            target_policy_noise=0.1,
            # Delay model creation so n_critics below can take effect first.
            _init_setup_model=False,
        )
        # Use only one critic
        if "n_critics" not in self.policy_kwargs:
            self.policy_kwargs["n_critics"] = 1
        if _init_setup_model:
            self._setup_model()
    def learn(
        self,
        total_timesteps: int,
        callback: MaybeCallback = None,
        log_interval: int = 4,
        eval_env: Optional[GymEnv] = None,
        eval_freq: int = -1,
        n_eval_episodes: int = 5,
        tb_log_name: str = "DDPG",
        eval_log_path: Optional[str] = None,
        reset_num_timesteps: bool = True,
    ) -> OffPolicyAlgorithm:
        """Train the DDPG agent; delegates to ``TD3.learn`` with a DDPG log name."""
        return super().learn(
            total_timesteps=total_timesteps,
            callback=callback,
            log_interval=log_interval,
            eval_env=eval_env,
            eval_freq=eval_freq,
            n_eval_episodes=n_eval_episodes,
            tb_log_name=tb_log_name,
            eval_log_path=eval_log_path,
            reset_num_timesteps=reset_num_timesteps,
        ) | /rigged_sb3-0.0.1-py3-none-any.whl/stable_baselines3/ddpg/ddpg.py | 0.92853 | 0.602909 | ddpg.py | pypi
import warnings
from typing import Any, Dict, Optional, Type, Union
import numpy as np
import torch as th
from gym import spaces
from torch.nn import functional as F
from stable_baselines3.common.on_policy_algorithm import OnPolicyAlgorithm
from stable_baselines3.common.policies import ActorCriticCnnPolicy, ActorCriticPolicy, BasePolicy, MultiInputActorCriticPolicy
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import explained_variance, get_schedule_fn
class PPO(OnPolicyAlgorithm):
"""
Proximal Policy Optimization algorithm (PPO) (clip version)
Paper: https://arxiv.org/abs/1707.06347
Code: This implementation borrows code from OpenAI Spinning Up (https://github.com/openai/spinningup/)
https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail and
Stable Baselines (PPO2 from https://github.com/hill-a/stable-baselines)
Introduction to PPO: https://spinningup.openai.com/en/latest/algorithms/ppo.html
:param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
:param env: The environment to learn from (if registered in Gym, can be str)
:param learning_rate: The learning rate, it can be a function
of the current progress remaining (from 1 to 0)
:param n_steps: The number of steps to run for each environment per update
(i.e. rollout buffer size is n_steps * n_envs where n_envs is number of environment copies running in parallel)
NOTE: n_steps * n_envs must be greater than 1 (because of the advantage normalization)
See https://github.com/pytorch/pytorch/issues/29372
:param batch_size: Minibatch size
:param n_epochs: Number of epoch when optimizing the surrogate loss
:param gamma: Discount factor
:param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator
:param clip_range: Clipping parameter, it can be a function of the current progress
remaining (from 1 to 0).
:param clip_range_vf: Clipping parameter for the value function,
it can be a function of the current progress remaining (from 1 to 0).
This is a parameter specific to the OpenAI implementation. If None is passed (default),
no clipping will be done on the value function.
IMPORTANT: this clipping depends on the reward scaling.
:param normalize_advantage: Whether to normalize or not the advantage
:param ent_coef: Entropy coefficient for the loss calculation
:param vf_coef: Value function coefficient for the loss calculation
:param max_grad_norm: The maximum value for the gradient clipping
:param use_sde: Whether to use generalized State Dependent Exploration (gSDE)
instead of action noise exploration (default: False)
:param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
Default: -1 (only sample at the beginning of the rollout)
:param target_kl: Limit the KL divergence between updates,
because the clipping is not enough to prevent large update
see issue #213 (cf https://github.com/hill-a/stable-baselines/issues/213)
By default, there is no limit on the kl div.
:param tensorboard_log: the log location for tensorboard (if None, no logging)
:param create_eval_env: Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param policy_kwargs: additional arguments to be passed to the policy on creation
:param verbose: the verbosity level: 0 no output, 1 info, 2 debug
:param seed: Seed for the pseudo random generators
:param device: Device (cpu, cuda, ...) on which the code should be run.
Setting it to auto, the code will be run on the GPU if possible.
:param _init_setup_model: Whether or not to build the network at the creation of the instance
"""
policy_aliases: Dict[str, Type[BasePolicy]] = {
"MlpPolicy": ActorCriticPolicy,
"CnnPolicy": ActorCriticCnnPolicy,
"MultiInputPolicy": MultiInputActorCriticPolicy,
}
def __init__(
self,
policy: Union[str, Type[ActorCriticPolicy]],
env: Union[GymEnv, str],
learning_rate: Union[float, Schedule] = 3e-4,
n_steps: int = 2048,
batch_size: int = 64,
n_epochs: int = 10,
gamma: float = 0.99,
gae_lambda: float = 0.95,
clip_range: Union[float, Schedule] = 0.2,
clip_range_vf: Union[None, float, Schedule] = None,
normalize_advantage: bool = True,
ent_coef: float = 0.0,
vf_coef: float = 0.5,
max_grad_norm: float = 0.5,
use_sde: bool = False,
sde_sample_freq: int = -1,
target_kl: Optional[float] = None,
tensorboard_log: Optional[str] = None,
create_eval_env: bool = False,
policy_kwargs: Optional[Dict[str, Any]] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
_init_setup_model: bool = True,
):
super().__init__(
policy,
env,
learning_rate=learning_rate,
n_steps=n_steps,
gamma=gamma,
gae_lambda=gae_lambda,
ent_coef=ent_coef,
vf_coef=vf_coef,
max_grad_norm=max_grad_norm,
use_sde=use_sde,
sde_sample_freq=sde_sample_freq,
tensorboard_log=tensorboard_log,
policy_kwargs=policy_kwargs,
verbose=verbose,
device=device,
create_eval_env=create_eval_env,
seed=seed,
_init_setup_model=False,
supported_action_spaces=(
spaces.Box,
spaces.Discrete,
spaces.MultiDiscrete,
spaces.MultiBinary,
),
)
# Sanity check, otherwise it will lead to noisy gradient and NaN
# because of the advantage normalization
if normalize_advantage:
assert (
batch_size > 1
), "`batch_size` must be greater than 1. See https://github.com/DLR-RM/stable-baselines3/issues/440"
if self.env is not None:
# Check that `n_steps * n_envs > 1` to avoid NaN
# when doing advantage normalization
buffer_size = self.env.num_envs * self.n_steps
assert (
buffer_size > 1
), f"`n_steps * n_envs` must be greater than 1. Currently n_steps={self.n_steps} and n_envs={self.env.num_envs}"
# Check that the rollout buffer size is a multiple of the mini-batch size
untruncated_batches = buffer_size // batch_size
if buffer_size % batch_size > 0:
warnings.warn(
f"You have specified a mini-batch size of {batch_size},"
f" but because the `RolloutBuffer` is of size `n_steps * n_envs = {buffer_size}`,"
f" after every {untruncated_batches} untruncated mini-batches,"
f" there will be a truncated mini-batch of size {buffer_size % batch_size}\n"
f"We recommend using a `batch_size` that is a factor of `n_steps * n_envs`.\n"
f"Info: (n_steps={self.n_steps} and n_envs={self.env.num_envs})"
)
self.batch_size = batch_size
self.n_epochs = n_epochs
self.clip_range = clip_range
self.clip_range_vf = clip_range_vf
self.normalize_advantage = normalize_advantage
self.target_kl = target_kl
if _init_setup_model:
self._setup_model()
def _setup_model(self) -> None:
super()._setup_model()
# Initialize schedules for policy/value clipping
self.clip_range = get_schedule_fn(self.clip_range)
if self.clip_range_vf is not None:
if isinstance(self.clip_range_vf, (float, int)):
assert self.clip_range_vf > 0, "`clip_range_vf` must be positive, " "pass `None` to deactivate vf clipping"
self.clip_range_vf = get_schedule_fn(self.clip_range_vf)
def train(self) -> None:
    """
    Update policy using the currently gathered rollout buffer.

    Runs ``n_epochs`` passes over the rollout buffer in mini-batches of
    ``batch_size``, optimizing the clipped PPO surrogate objective plus
    value and entropy losses. May stop early when the approximate KL
    divergence exceeds ``1.5 * target_kl``.
    """
    # Switch to train mode (this affects batch norm / dropout)
    self.policy.set_training_mode(True)
    # Update optimizer learning rate
    self._update_learning_rate(self.policy.optimizer)
    # Compute current clip range
    clip_range = self.clip_range(self._current_progress_remaining)
    # Optional: clip range for the value function
    if self.clip_range_vf is not None:
        clip_range_vf = self.clip_range_vf(self._current_progress_remaining)

    entropy_losses = []
    pg_losses, value_losses = [], []
    clip_fractions = []

    continue_training = True

    # train for n_epochs epochs
    for epoch in range(self.n_epochs):
        approx_kl_divs = []
        # Do a complete pass on the rollout buffer
        for rollout_data in self.rollout_buffer.get(self.batch_size):
            actions = rollout_data.actions
            if isinstance(self.action_space, spaces.Discrete):
                # Convert discrete action from float to long
                actions = rollout_data.actions.long().flatten()

            # Re-sample the noise matrix because the log_std has changed
            if self.use_sde:
                self.policy.reset_noise(self.batch_size)

            values, log_prob, entropy = self.policy.evaluate_actions(rollout_data.observations, actions)
            values = values.flatten()
            # Normalize advantage (per mini-batch)
            advantages = rollout_data.advantages
            if self.normalize_advantage:
                advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)

            # ratio between old and new policy, should be one at the first iteration
            ratio = th.exp(log_prob - rollout_data.old_log_prob)

            # clipped surrogate loss
            policy_loss_1 = advantages * ratio
            policy_loss_2 = advantages * th.clamp(ratio, 1 - clip_range, 1 + clip_range)
            policy_loss = -th.min(policy_loss_1, policy_loss_2).mean()

            # Logging
            pg_losses.append(policy_loss.item())
            clip_fraction = th.mean((th.abs(ratio - 1) > clip_range).float()).item()
            clip_fractions.append(clip_fraction)

            if self.clip_range_vf is None:
                # No clipping
                values_pred = values
            else:
                # Clip the different between old and new value
                # NOTE: this depends on the reward scaling
                values_pred = rollout_data.old_values + th.clamp(
                    values - rollout_data.old_values, -clip_range_vf, clip_range_vf
                )
            # Value loss using the TD(gae_lambda) target
            value_loss = F.mse_loss(rollout_data.returns, values_pred)
            value_losses.append(value_loss.item())

            # Entropy loss favor exploration
            if entropy is None:
                # Approximate entropy when no analytical form
                entropy_loss = -th.mean(-log_prob)
            else:
                entropy_loss = -th.mean(entropy)

            entropy_losses.append(entropy_loss.item())

            loss = policy_loss + self.ent_coef * entropy_loss + self.vf_coef * value_loss

            # Calculate approximate form of reverse KL Divergence for early stopping
            # see issue #417: https://github.com/DLR-RM/stable-baselines3/issues/417
            # and discussion in PR #419: https://github.com/DLR-RM/stable-baselines3/pull/419
            # and Schulman blog: http://joschu.net/blog/kl-approx.html
            with th.no_grad():
                log_ratio = log_prob - rollout_data.old_log_prob
                approx_kl_div = th.mean((th.exp(log_ratio) - 1) - log_ratio).cpu().numpy()
                approx_kl_divs.append(approx_kl_div)

            # Early stopping: skip the optimizer step for this mini-batch
            # and abort the remaining epochs below
            if self.target_kl is not None and approx_kl_div > 1.5 * self.target_kl:
                continue_training = False
                if self.verbose >= 1:
                    print(f"Early stopping at step {epoch} due to reaching max kl: {approx_kl_div:.2f}")
                break

            # Optimization step
            self.policy.optimizer.zero_grad()
            loss.backward()
            # Clip grad norm
            th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
            self.policy.optimizer.step()

        if not continue_training:
            break

    # NOTE(review): counts n_epochs updates even when stopping early —
    # presumably acceptable for logging purposes; confirm against upstream.
    self._n_updates += self.n_epochs
    explained_var = explained_variance(self.rollout_buffer.values.flatten(), self.rollout_buffer.returns.flatten())

    # Logs
    self.logger.record("train/entropy_loss", np.mean(entropy_losses))
    self.logger.record("train/policy_gradient_loss", np.mean(pg_losses))
    self.logger.record("train/value_loss", np.mean(value_losses))
    self.logger.record("train/approx_kl", np.mean(approx_kl_divs))
    self.logger.record("train/clip_fraction", np.mean(clip_fractions))
    self.logger.record("train/loss", loss.item())
    self.logger.record("train/explained_variance", explained_var)
    if hasattr(self.policy, "log_std"):
        # gSDE/continuous policies expose a log standard deviation
        self.logger.record("train/std", th.exp(self.policy.log_std).mean().item())

    self.logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
    self.logger.record("train/clip_range", clip_range)
    if self.clip_range_vf is not None:
        self.logger.record("train/clip_range_vf", clip_range_vf)
def learn(
    self,
    total_timesteps: int,
    callback: MaybeCallback = None,
    log_interval: int = 1,
    eval_env: Optional[GymEnv] = None,
    eval_freq: int = -1,
    n_eval_episodes: int = 5,
    tb_log_name: str = "PPO",
    eval_log_path: Optional[str] = None,
    reset_num_timesteps: bool = True,
) -> "PPO":
    """Run the training loop; simply forwards every argument to the shared
    on-policy ``learn`` implementation of the parent class."""
    forwarded = dict(
        total_timesteps=total_timesteps,
        callback=callback,
        log_interval=log_interval,
        eval_env=eval_env,
        eval_freq=eval_freq,
        n_eval_episodes=n_eval_episodes,
        tb_log_name=tb_log_name,
        eval_log_path=eval_log_path,
        reset_num_timesteps=reset_num_timesteps,
    )
    return super().learn(**forwarded)
from typing import Any, Dict, Optional, Type, Union
import torch as th
from gym import spaces
from torch.nn import functional as F
from stable_baselines3.common.on_policy_algorithm import OnPolicyAlgorithm
from stable_baselines3.common.policies import ActorCriticCnnPolicy, ActorCriticPolicy, BasePolicy, MultiInputActorCriticPolicy
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import explained_variance
class A2C(OnPolicyAlgorithm):
    """
    Advantage Actor Critic (A2C)

    Paper: https://arxiv.org/abs/1602.01783
    Code: This implementation borrows code from https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail and
    and Stable Baselines (https://github.com/hill-a/stable-baselines)

    Introduction to A2C: https://hackernoon.com/intuitive-rl-intro-to-advantage-actor-critic-a2c-4ff545978752

    :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
    :param env: The environment to learn from (if registered in Gym, can be str)
    :param learning_rate: The learning rate, it can be a function
        of the current progress remaining (from 1 to 0)
    :param n_steps: The number of steps to run for each environment per update
        (i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel)
    :param gamma: Discount factor
    :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator
        Equivalent to classic advantage when set to 1.
    :param ent_coef: Entropy coefficient for the loss calculation
    :param vf_coef: Value function coefficient for the loss calculation
    :param max_grad_norm: The maximum value for the gradient clipping
    :param rms_prop_eps: RMSProp epsilon. It stabilizes square root computation in denominator
        of RMSProp update
    :param use_rms_prop: Whether to use RMSprop (default) or Adam as optimizer
    :param use_sde: Whether to use generalized State Dependent Exploration (gSDE)
        instead of action noise exploration (default: False)
    :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
        Default: -1 (only sample at the beginning of the rollout)
    :param normalize_advantage: Whether to normalize or not the advantage
    :param tensorboard_log: the log location for tensorboard (if None, no logging)
    :param create_eval_env: Whether to create a second environment that will be
        used for evaluating the agent periodically. (Only available when passing string for the environment)
    :param policy_kwargs: additional arguments to be passed to the policy on creation
    :param verbose: the verbosity level: 0 no output, 1 info, 2 debug
    :param seed: Seed for the pseudo random generators
    :param device: Device (cpu, cuda, ...) on which the code should be run.
        Setting it to auto, the code will be run on the GPU if possible.
    :param _init_setup_model: Whether or not to build the network at the creation of the instance
    """

    # Maps user-facing policy names to concrete policy classes
    policy_aliases: Dict[str, Type[BasePolicy]] = {
        "MlpPolicy": ActorCriticPolicy,
        "CnnPolicy": ActorCriticCnnPolicy,
        "MultiInputPolicy": MultiInputActorCriticPolicy,
    }

    def __init__(
        self,
        policy: Union[str, Type[ActorCriticPolicy]],
        env: Union[GymEnv, str],
        learning_rate: Union[float, Schedule] = 7e-4,
        n_steps: int = 5,
        gamma: float = 0.99,
        gae_lambda: float = 1.0,
        ent_coef: float = 0.0,
        vf_coef: float = 0.5,
        max_grad_norm: float = 0.5,
        rms_prop_eps: float = 1e-5,
        use_rms_prop: bool = True,
        use_sde: bool = False,
        sde_sample_freq: int = -1,
        normalize_advantage: bool = False,
        tensorboard_log: Optional[str] = None,
        create_eval_env: bool = False,
        policy_kwargs: Optional[Dict[str, Any]] = None,
        verbose: int = 0,
        seed: Optional[int] = None,
        device: Union[th.device, str] = "auto",
        _init_setup_model: bool = True,
    ):
        # Model setup is deferred (_init_setup_model=False) so A2C-specific
        # attributes/policy kwargs below can be set first.
        super().__init__(
            policy,
            env,
            learning_rate=learning_rate,
            n_steps=n_steps,
            gamma=gamma,
            gae_lambda=gae_lambda,
            ent_coef=ent_coef,
            vf_coef=vf_coef,
            max_grad_norm=max_grad_norm,
            use_sde=use_sde,
            sde_sample_freq=sde_sample_freq,
            tensorboard_log=tensorboard_log,
            policy_kwargs=policy_kwargs,
            verbose=verbose,
            device=device,
            create_eval_env=create_eval_env,
            seed=seed,
            _init_setup_model=False,
            supported_action_spaces=(
                spaces.Box,
                spaces.Discrete,
                spaces.MultiDiscrete,
                spaces.MultiBinary,
            ),
        )

        self.normalize_advantage = normalize_advantage

        # Update optimizer inside the policy if we want to use RMSProp
        # (original implementation) rather than Adam
        if use_rms_prop and "optimizer_class" not in self.policy_kwargs:
            self.policy_kwargs["optimizer_class"] = th.optim.RMSprop
            self.policy_kwargs["optimizer_kwargs"] = dict(alpha=0.99, eps=rms_prop_eps, weight_decay=0)

        if _init_setup_model:
            self._setup_model()

    def train(self) -> None:
        """
        Update policy using the currently gathered
        rollout buffer (one gradient step over whole data).
        """
        # Switch to train mode (this affects batch norm / dropout)
        self.policy.set_training_mode(True)

        # Update optimizer learning rate
        self._update_learning_rate(self.policy.optimizer)

        # This will only loop once (get all data in one go)
        for rollout_data in self.rollout_buffer.get(batch_size=None):
            actions = rollout_data.actions
            if isinstance(self.action_space, spaces.Discrete):
                # Convert discrete action from float to long
                actions = actions.long().flatten()

            values, log_prob, entropy = self.policy.evaluate_actions(rollout_data.observations, actions)
            values = values.flatten()

            # Normalize advantage (not present in the original implementation)
            advantages = rollout_data.advantages
            if self.normalize_advantage:
                advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)

            # Policy gradient loss
            policy_loss = -(advantages * log_prob).mean()

            # Value loss using the TD(gae_lambda) target
            value_loss = F.mse_loss(rollout_data.returns, values)

            # Entropy loss favor exploration
            if entropy is None:
                # Approximate entropy when no analytical form
                entropy_loss = -th.mean(-log_prob)
            else:
                entropy_loss = -th.mean(entropy)

            loss = policy_loss + self.ent_coef * entropy_loss + self.vf_coef * value_loss

            # Optimization step
            self.policy.optimizer.zero_grad()
            loss.backward()

            # Clip grad norm
            th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
            self.policy.optimizer.step()

        explained_var = explained_variance(self.rollout_buffer.values.flatten(), self.rollout_buffer.returns.flatten())

        self._n_updates += 1
        self.logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
        self.logger.record("train/explained_variance", explained_var)
        self.logger.record("train/entropy_loss", entropy_loss.item())
        self.logger.record("train/policy_loss", policy_loss.item())
        self.logger.record("train/value_loss", value_loss.item())
        if hasattr(self.policy, "log_std"):
            # Continuous/gSDE policies expose a log standard deviation
            self.logger.record("train/std", th.exp(self.policy.log_std).mean().item())

    def learn(
        self,
        total_timesteps: int,
        callback: MaybeCallback = None,
        log_interval: int = 100,
        eval_env: Optional[GymEnv] = None,
        eval_freq: int = -1,
        n_eval_episodes: int = 5,
        tb_log_name: str = "A2C",
        eval_log_path: Optional[str] = None,
        reset_num_timesteps: bool = True,
    ) -> "A2C":
        # Thin wrapper: delegates to the shared on-policy training loop
        return super().learn(
            total_timesteps=total_timesteps,
            callback=callback,
            log_interval=log_interval,
            eval_env=eval_env,
            eval_freq=eval_freq,
            n_eval_episodes=n_eval_episodes,
            tb_log_name=tb_log_name,
            eval_log_path=eval_log_path,
            reset_num_timesteps=reset_num_timesteps,
        )
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import gym
import numpy as np
import torch as th
from torch.nn import functional as F
from stable_baselines3.common.buffers import ReplayBuffer
from stable_baselines3.common.noise import ActionNoise
from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
from stable_baselines3.common.policies import BasePolicy
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import polyak_update
from stable_baselines3.td3.policies import CnnPolicy, MlpPolicy, MultiInputPolicy, TD3Policy
class TD3(OffPolicyAlgorithm):
    """
    Twin Delayed DDPG (TD3)
    Addressing Function Approximation Error in Actor-Critic Methods.

    Original implementation: https://github.com/sfujim/TD3
    Paper: https://arxiv.org/abs/1802.09477
    Introduction to TD3: https://spinningup.openai.com/en/latest/algorithms/td3.html

    :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
    :param env: The environment to learn from (if registered in Gym, can be str)
    :param learning_rate: learning rate for adam optimizer,
        the same learning rate will be used for all networks (Q-Values, Actor and Value function)
        it can be a function of the current progress remaining (from 1 to 0)
    :param buffer_size: size of the replay buffer
    :param learning_starts: how many steps of the model to collect transitions for before learning starts
    :param batch_size: Minibatch size for each gradient update
    :param tau: the soft update coefficient ("Polyak update", between 0 and 1)
    :param gamma: the discount factor
    :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit
        like ``(5, "step")`` or ``(2, "episode")``.
    :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)
        Set to ``-1`` means to do as many gradient steps as steps done in the environment
        during the rollout.
    :param action_noise: the action noise type (None by default), this can help
        for hard exploration problem. Cf common.noise for the different action noise type.
    :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``).
        If ``None``, it will be automatically selected.
    :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation.
    :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
        at a cost of more complexity.
        See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
    :param policy_delay: Policy and target networks will only be updated once every policy_delay steps
        per training steps. The Q values will be updated policy_delay more often (update every training step).
    :param target_policy_noise: Standard deviation of Gaussian noise added to target policy
        (smoothing noise)
    :param target_noise_clip: Limit for absolute value of target policy smoothing noise.
    :param create_eval_env: Whether to create a second environment that will be
        used for evaluating the agent periodically. (Only available when passing string for the environment)
    :param policy_kwargs: additional arguments to be passed to the policy on creation
    :param verbose: the verbosity level: 0 no output, 1 info, 2 debug
    :param seed: Seed for the pseudo random generators
    :param device: Device (cpu, cuda, ...) on which the code should be run.
        Setting it to auto, the code will be run on the GPU if possible.
    :param _init_setup_model: Whether or not to build the network at the creation of the instance
    """

    # Maps user-facing policy names to concrete policy classes
    policy_aliases: Dict[str, Type[BasePolicy]] = {
        "MlpPolicy": MlpPolicy,
        "CnnPolicy": CnnPolicy,
        "MultiInputPolicy": MultiInputPolicy,
    }

    def __init__(
        self,
        policy: Union[str, Type[TD3Policy]],
        env: Union[GymEnv, str],
        learning_rate: Union[float, Schedule] = 1e-3,
        buffer_size: int = 1_000_000,  # 1e6
        learning_starts: int = 100,
        batch_size: int = 100,
        tau: float = 0.005,
        gamma: float = 0.99,
        train_freq: Union[int, Tuple[int, str]] = (1, "episode"),
        gradient_steps: int = -1,
        action_noise: Optional[ActionNoise] = None,
        replay_buffer_class: Optional[ReplayBuffer] = None,
        replay_buffer_kwargs: Optional[Dict[str, Any]] = None,
        optimize_memory_usage: bool = False,
        policy_delay: int = 2,
        target_policy_noise: float = 0.2,
        target_noise_clip: float = 0.5,
        tensorboard_log: Optional[str] = None,
        create_eval_env: bool = False,
        policy_kwargs: Optional[Dict[str, Any]] = None,
        verbose: int = 0,
        seed: Optional[int] = None,
        device: Union[th.device, str] = "auto",
        _init_setup_model: bool = True,
    ):
        super().__init__(
            policy,
            env,
            learning_rate,
            buffer_size,
            learning_starts,
            batch_size,
            tau,
            gamma,
            train_freq,
            gradient_steps,
            action_noise=action_noise,
            replay_buffer_class=replay_buffer_class,
            replay_buffer_kwargs=replay_buffer_kwargs,
            policy_kwargs=policy_kwargs,
            tensorboard_log=tensorboard_log,
            verbose=verbose,
            device=device,
            create_eval_env=create_eval_env,
            seed=seed,
            sde_support=False,
            optimize_memory_usage=optimize_memory_usage,
            # Bug fix: `(gym.spaces.Box)` is just the class, not a tuple
            # (missing trailing comma). A one-element tuple keeps this
            # consistent with the tuples used by the on-policy algorithms.
            supported_action_spaces=(gym.spaces.Box,),
            support_multi_env=True,
        )

        self.policy_delay = policy_delay
        self.target_noise_clip = target_noise_clip
        self.target_policy_noise = target_policy_noise

        if _init_setup_model:
            self._setup_model()

    def _setup_model(self) -> None:
        """Build networks and buffer, then expose convenience aliases."""
        super()._setup_model()
        self._create_aliases()

    def _create_aliases(self) -> None:
        # Shorthand attributes for the actor/critic networks and their targets
        self.actor = self.policy.actor
        self.actor_target = self.policy.actor_target
        self.critic = self.policy.critic
        self.critic_target = self.policy.critic_target

    def train(self, gradient_steps: int, batch_size: int = 100) -> None:
        """
        Perform ``gradient_steps`` TD3 updates sampled from the replay buffer.

        :param gradient_steps: Number of critic updates (actor/targets are
            updated every ``policy_delay`` steps).
        :param batch_size: Minibatch size sampled from the replay buffer.
        """
        # Switch to train mode (this affects batch norm / dropout)
        self.policy.set_training_mode(True)

        # Update learning rate according to lr schedule
        self._update_learning_rate([self.actor.optimizer, self.critic.optimizer])

        actor_losses, critic_losses = [], []

        for _ in range(gradient_steps):
            self._n_updates += 1
            # Sample replay buffer
            replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)

            with th.no_grad():
                # Select action according to policy and add clipped noise
                # (target policy smoothing)
                noise = replay_data.actions.clone().data.normal_(0, self.target_policy_noise)
                noise = noise.clamp(-self.target_noise_clip, self.target_noise_clip)
                next_actions = (self.actor_target(replay_data.next_observations) + noise).clamp(-1, 1)

                # Compute the next Q-values: min over all critics targets
                next_q_values = th.cat(self.critic_target(replay_data.next_observations, next_actions), dim=1)
                next_q_values, _ = th.min(next_q_values, dim=1, keepdim=True)
                target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values

            # Get current Q-values estimates for each critic network
            current_q_values = self.critic(replay_data.observations, replay_data.actions)

            # Compute critic loss
            critic_loss = sum(F.mse_loss(current_q, target_q_values) for current_q in current_q_values)
            critic_losses.append(critic_loss.item())

            # Optimize the critics
            self.critic.optimizer.zero_grad()
            critic_loss.backward()
            self.critic.optimizer.step()

            # Delayed policy updates
            if self._n_updates % self.policy_delay == 0:
                # Compute actor loss (maximize Q1 estimate of the actor action)
                actor_loss = -self.critic.q1_forward(replay_data.observations, self.actor(replay_data.observations)).mean()
                actor_losses.append(actor_loss.item())

                # Optimize the actor
                self.actor.optimizer.zero_grad()
                actor_loss.backward()
                self.actor.optimizer.step()

                # Soft (Polyak) update of the target networks
                polyak_update(self.critic.parameters(), self.critic_target.parameters(), self.tau)
                polyak_update(self.actor.parameters(), self.actor_target.parameters(), self.tau)

        self.logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
        if len(actor_losses) > 0:
            self.logger.record("train/actor_loss", np.mean(actor_losses))
        self.logger.record("train/critic_loss", np.mean(critic_losses))

    def learn(
        self,
        total_timesteps: int,
        callback: MaybeCallback = None,
        log_interval: int = 4,
        eval_env: Optional[GymEnv] = None,
        eval_freq: int = -1,
        n_eval_episodes: int = 5,
        tb_log_name: str = "TD3",
        eval_log_path: Optional[str] = None,
        reset_num_timesteps: bool = True,
    ) -> OffPolicyAlgorithm:
        # Thin wrapper: delegates to the shared off-policy training loop
        return super().learn(
            total_timesteps=total_timesteps,
            callback=callback,
            log_interval=log_interval,
            eval_env=eval_env,
            eval_freq=eval_freq,
            n_eval_episodes=n_eval_episodes,
            tb_log_name=tb_log_name,
            eval_log_path=eval_log_path,
            reset_num_timesteps=reset_num_timesteps,
        )

    def _excluded_save_params(self) -> List[str]:
        # The aliases created in `_create_aliases` are restored on load,
        # so they must not be pickled with the model.
        return super()._excluded_save_params() + ["actor", "critic", "actor_target", "critic_target"]

    def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
        # State dicts to save; no tensor attributes need special handling
        state_dicts = ["policy", "actor.optimizer", "critic.optimizer"]
        return state_dicts, []
import warnings
from typing import Union
import gym
import numpy as np
from gym import spaces
from stable_baselines3.common.preprocessing import is_image_space_channels_first
from stable_baselines3.common.vec_env import DummyVecEnv, VecCheckNan
def _is_numpy_array_space(space: spaces.Space) -> bool:
    """
    Returns False if provided space is not representable as a single numpy array
    (e.g. Dict and Tuple spaces return False)
    """
    composite_spaces = (spaces.Dict, spaces.Tuple)
    return not isinstance(space, composite_spaces)
def _check_image_input(observation_space: spaces.Box, key: str = "") -> None:
    """
    Check that the input will be compatible with Stable-Baselines
    when the observation is apparently an image.

    :param observation_space: 3D Box space suspected to hold image pixels
    :param key: Name of the sub-space when part of a Dict space ("" otherwise)
    """
    # Image observations are expected to be raw pixels stored as uint8
    if observation_space.dtype != np.uint8:
        warnings.warn(
            f"It seems that your observation {key} is an image but the `dtype` "
            "of your observation_space is not `np.uint8`. "
            "If your observation is not an image, we recommend you to flatten the observation "
            "to have only a 1D vector"
        )

    # Pixel values should span the full [0, 255] range (De Morgan of the
    # original any(!=) checks)
    if not (np.all(observation_space.low == 0) and np.all(observation_space.high == 255)):
        warnings.warn(
            f"It seems that your observation space {key} is an image but the "
            "upper and lower bounds are not in [0, 255]. "
            "Because the CNN policy normalize automatically the observation "
            "you may encounter issue if the values are not in that range."
        )

    # Pick a spatial (non-channel) axis depending on the channel layout,
    # then verify the image is big enough for the default CNN
    spatial_idx = -1 if is_image_space_channels_first(observation_space) else 0
    if observation_space.shape[spatial_idx] < 36 or observation_space.shape[1] < 36:
        warnings.warn(
            "The minimal resolution for an image is 36x36 for the default `CnnPolicy`. "
            "You might need to use a custom feature extractor "
            "cf. https://stable-baselines3.readthedocs.io/en/master/guide/custom_policy.html"
        )
def _check_unsupported_spaces(env: gym.Env, observation_space: spaces.Space, action_space: spaces.Space) -> None:
    """Emit warnings when the observation space or action space used is not supported by Stable-Baselines."""
    if isinstance(observation_space, spaces.Dict):
        # Only one level of Dict nesting is supported
        has_nested_dict = any(isinstance(subspace, spaces.Dict) for subspace in observation_space.spaces.values())
        if has_nested_dict:
            warnings.warn(
                "Nested observation spaces are not supported by Stable Baselines3 "
                "(Dict spaces inside Dict space). "
                "You should flatten it to have only one level of keys."
                "For example, `dict(space1=dict(space2=Box(), space3=Box()), spaces4=Discrete())` "
                "is not supported but `dict(space2=Box(), spaces3=Box(), spaces4=Discrete())` is."
            )

    if isinstance(observation_space, spaces.Tuple):
        warnings.warn(
            "The observation space is a Tuple,"
            "this is currently not supported by Stable Baselines3. "
            "However, you can convert it to a Dict observation space "
            "(cf. https://github.com/openai/gym/blob/master/gym/spaces/dict.py). "
            "which is supported by SB3."
        )

    if not _is_numpy_array_space(action_space):
        warnings.warn(
            "The action space is not based off a numpy array. Typically this means it's either a Dict or Tuple space. "
            "This type of action space is currently not supported by Stable Baselines 3. You should try to flatten the "
            "action using a wrapper."
        )
def _check_nan(env: gym.Env) -> None:
    """Check for Inf and NaN using the VecWrapper."""
    # Wrap the env so VecCheckNan raises on any NaN/Inf produced by stepping
    wrapped_env = VecCheckNan(DummyVecEnv([lambda: env]))
    for _ in range(10):
        sampled_action = np.array([env.action_space.sample()])
        wrapped_env.step(sampled_action)
def _check_obs(obs: Union[tuple, dict, np.ndarray, int], observation_space: spaces.Space, method_name: str) -> None:
    """
    Check that the observation returned by the environment
    correspond to the declared one.

    :param obs: Observation returned by `reset()` or `step()`
    :param observation_space: Space the observation must belong to
    :param method_name: "reset" or "step", used in the error messages
    """
    is_tuple_space = isinstance(observation_space, spaces.Tuple)
    if not is_tuple_space:
        assert not isinstance(
            obs, tuple
        ), f"The observation returned by the `{method_name}()` method should be a single value, not a tuple"

    # The check for a GoalEnv is done by the base class
    if isinstance(observation_space, spaces.Discrete):
        # Discrete observations must be plain python ints
        assert isinstance(obs, int), f"The observation returned by `{method_name}()` method must be an int"
    elif _is_numpy_array_space(observation_space):
        assert isinstance(obs, np.ndarray), f"The observation returned by `{method_name}()` method must be a numpy array"

    # Finally, the observation must actually belong to the declared space
    assert observation_space.contains(
        obs
    ), f"The observation returned by the `{method_name}()` method does not match the given observation space"
def _check_box_obs(observation_space: spaces.Box, key: str = "") -> None:
    """
    Check that the observation space is correctly formatted
    when dealing with a ``Box()`` space. In particular, it checks:
    - that the dimensions are big enough when it is an image, and that the type matches
    - that the observation has an expected shape (warn the user if not)

    :param observation_space: Box space to inspect
    :param key: Name of the corresponding key in a Dict observation space
        ("" for a plain space), used to make the warning messages informative.
    """
    # If image, check the low and high values, the type and the number of channels
    # and the shape (minimal value)
    if len(observation_space.shape) == 3:
        # Bug fix: forward `key` so the image-related warnings mention
        # which sub-space of a Dict observation space triggered them
        # (it was previously dropped, always producing "" in the messages).
        _check_image_input(observation_space, key)

    if len(observation_space.shape) not in [1, 3]:
        warnings.warn(
            f"Your observation {key} has an unconventional shape (neither an image, nor a 1D vector). "
            "We recommend you to flatten the observation "
            "to have only a 1D vector or use a custom policy to properly process the data."
        )
def _check_returned_values(env: gym.Env, observation_space: spaces.Space, action_space: spaces.Space) -> None:
    """
    Check the returned values by the env when calling `.reset()` or `.step()` methods.

    :param env: Environment under test
    :param observation_space: Declared observation space used to validate observations
    :param action_space: Declared action space used to sample one valid action
    """
    # because env inherits from gym.Env, we assume that `reset()` and `step()` methods exists
    obs = env.reset()

    if isinstance(observation_space, spaces.Dict):
        assert isinstance(obs, dict), "The observation returned by `reset()` must be a dictionary"
        for key in observation_space.spaces.keys():
            try:
                _check_obs(obs[key], observation_space.spaces[key], "reset")
            except AssertionError as e:
                # Re-raise with the offending key so the user can locate the problem
                raise AssertionError(f"Error while checking key={key}: " + str(e)) from e
    else:
        _check_obs(obs, observation_space, "reset")

    # Sample a random action
    action = action_space.sample()
    data = env.step(action)

    assert len(data) == 4, "The `step()` method must return four values: obs, reward, done, info"

    # Unpack
    obs, reward, done, info = data

    if isinstance(observation_space, spaces.Dict):
        assert isinstance(obs, dict), "The observation returned by `step()` must be a dictionary"
        for key in observation_space.spaces.keys():
            try:
                _check_obs(obs[key], observation_space.spaces[key], "step")
            except AssertionError as e:
                raise AssertionError(f"Error while checking key={key}: " + str(e)) from e
    else:
        _check_obs(obs, observation_space, "step")

    # We also allow int because the reward will be cast to float
    assert isinstance(reward, (float, int)), "The reward returned by `step()` must be a float"
    assert isinstance(done, bool), "The `done` signal must be a boolean"
    assert isinstance(info, dict), "The `info` returned by `step()` must be a python dictionary"

    if isinstance(env, gym.GoalEnv):
        # For a GoalEnv, the keys are checked at reset
        assert reward == env.compute_reward(obs["achieved_goal"], obs["desired_goal"], info)
def _check_spaces(env: gym.Env) -> None:
    """
    Check that the observation and action spaces are defined
    and inherit from gym.spaces.Space.
    """
    # Helper to link to the code, because gym has no proper documentation
    gym_spaces = " cf https://github.com/openai/gym/blob/master/gym/spaces/"

    # First ensure both attributes exist, then validate their types
    # (same check order as before: both hasattr asserts run first)
    for label in ("observation", "action"):
        assert hasattr(env, f"{label}_space"), f"You must specify an {label} space (cf gym.spaces)" + gym_spaces
    for label in ("observation", "action"):
        assert isinstance(
            getattr(env, f"{label}_space"), spaces.Space
        ), f"The {label} space must inherit from gym.spaces" + gym_spaces
# Check render cannot be covered by CI
def _check_render(env: gym.Env, warn: bool = True, headless: bool = False) -> None:  # pragma: no cover
    """
    Check the declared render modes and the `render()`/`close()`
    method of the environment.

    :param env: The environment to check
    :param warn: Whether to output additional warnings
    :param headless: Whether to disable render modes
        that require a graphical interface. False by default.
    """
    declared_modes = env.metadata.get("render.modes")

    if declared_modes is None:
        if warn:
            warnings.warn(
                "No render modes was declared in the environment "
                " (env.metadata['render.modes'] is None or not defined), "
                "you may have trouble when calling `.render()`"
            )
        return

    # Don't check render mode that require a
    # graphical interface (useful for CI)
    if headless and "human" in declared_modes:
        declared_modes.remove("human")

    # Check all declared render modes
    for mode in declared_modes:
        env.render(mode=mode)
    env.close()
def check_env(env: gym.Env, warn: bool = True, skip_render_check: bool = True) -> None:
    """
    Check that an environment follows Gym API.
    This is particularly useful when using a custom environment.
    Please take a look at https://github.com/openai/gym/blob/master/gym/core.py
    for more information about the API.

    It also optionally check that the environment is compatible with Stable-Baselines.

    :param env: The Gym environment that will be checked
    :param warn: Whether to output additional warnings
        mainly related to the interaction with Stable Baselines
    :param skip_render_check: Whether to skip the checks for the render method.
        True by default (useful for the CI)
    """
    assert isinstance(
        env, gym.Env
    ), "Your environment must inherit from the gym.Env class cf https://github.com/openai/gym/blob/master/gym/core.py"

    # ============= Check the spaces (observation and action) ================
    _check_spaces(env)

    # Define aliases for convenience
    observation_space = env.observation_space
    action_space = env.action_space

    # Warn the user if needed.
    # A warning means that the environment may run but not work properly with Stable Baselines algorithms
    if warn:
        _check_unsupported_spaces(env, observation_space, action_space)

        # Check every Box observation sub-space (or the top-level space itself)
        obs_spaces = observation_space.spaces if isinstance(observation_space, spaces.Dict) else {"": observation_space}
        for key, space in obs_spaces.items():
            if isinstance(space, spaces.Box):
                _check_box_obs(space, key)

        # Check for the action space, it may lead to hard-to-debug issues
        if isinstance(action_space, spaces.Box) and (
            np.any(np.abs(action_space.low) != np.abs(action_space.high))
            or np.any(action_space.low != -1)
            or np.any(action_space.high != 1)
        ):
            warnings.warn(
                "We recommend you to use a symmetric and normalized Box action space (range=[-1, 1]) "
                "cf https://stable-baselines3.readthedocs.io/en/master/guide/rl_tips.html"
            )

        if isinstance(action_space, spaces.Box):
            assert np.all(
                np.isfinite(np.array([action_space.low, action_space.high]))
            ), "Continuous action space must have a finite lower and upper bound"

        if isinstance(action_space, spaces.Box) and action_space.dtype != np.dtype(np.float32):
            warnings.warn(
                f"Your action space has dtype {action_space.dtype}, we recommend using np.float32 to avoid cast errors."
            )

    # ============ Check the returned values ===============
    _check_returned_values(env, observation_space, action_space)

    # ==== Check the render method and the declared render modes ====
    if not skip_render_check:
        _check_render(env, warn=warn)  # pragma: no cover

    # The check only works with numpy arrays
    if _is_numpy_array_space(observation_space) and _is_numpy_array_space(action_space):
        _check_nan(env)
import io
import pathlib
import time
import warnings
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import gym
import numpy as np
import torch as th
from stable_baselines3.common.base_class import BaseAlgorithm
from stable_baselines3.common.buffers import DictReplayBuffer, ReplayBuffer
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.noise import ActionNoise, VectorizedActionNoise
from stable_baselines3.common.policies import BasePolicy
from stable_baselines3.common.save_util import load_from_pkl, save_to_pkl
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, RolloutReturn, Schedule, TrainFreq, TrainFrequencyUnit
from stable_baselines3.common.utils import safe_mean, should_collect_more_steps
from stable_baselines3.common.vec_env import VecEnv
from stable_baselines3.her.her_replay_buffer import HerReplayBuffer
class OffPolicyAlgorithm(BaseAlgorithm):
    """
    The base for Off-Policy algorithms (ex: SAC/TD3)

    :param policy: Policy object
    :param env: The environment to learn from
        (if registered in Gym, can be str. Can be None for loading trained models)
    :param learning_rate: learning rate for the optimizer,
        it can be a function of the current progress remaining (from 1 to 0)
    :param buffer_size: size of the replay buffer
    :param learning_starts: how many steps of the model to collect transitions for before learning starts
    :param batch_size: Minibatch size for each gradient update
    :param tau: the soft update coefficient ("Polyak update", between 0 and 1)
    :param gamma: the discount factor
    :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit
        like ``(5, "step")`` or ``(2, "episode")``.
    :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)
        Set to ``-1`` means to do as many gradient steps as steps done in the environment
        during the rollout.
    :param action_noise: the action noise type (None by default), this can help
        for hard exploration problem. Cf common.noise for the different action noise type.
    :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``).
        If ``None``, it will be automatically selected.
    :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation.
    :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
        at a cost of more complexity.
        See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
    :param policy_kwargs: Additional arguments to be passed to the policy on creation
    :param tensorboard_log: the log location for tensorboard (if None, no logging)
    :param verbose: The verbosity level: 0 none, 1 training information, 2 debug
    :param device: Device on which the code should run.
        By default, it will try to use a Cuda compatible device and fallback to cpu
        if it is not possible.
    :param support_multi_env: Whether the algorithm supports training
        with multiple environments (as in A2C)
    :param create_eval_env: Whether to create a second environment that will be
        used for evaluating the agent periodically. (Only available when passing string for the environment)
    :param monitor_wrapper: When creating an environment, whether to wrap it
        or not in a Monitor wrapper.
    :param seed: Seed for the pseudo random generators
    :param use_sde: Whether to use State Dependent Exploration (SDE)
        instead of action noise exploration (default: False)
    :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
        Default: -1 (only sample at the beginning of the rollout)
    :param use_sde_at_warmup: Whether to use gSDE instead of uniform sampling
        during the warm up phase (before learning starts)
    :param sde_support: Whether the model support gSDE or not
    :param supported_action_spaces: The action spaces supported by the algorithm.
    """

    def __init__(
        self,
        policy: Type[BasePolicy],
        env: Union[GymEnv, str],
        learning_rate: Union[float, Schedule],
        buffer_size: int = 1_000_000,  # 1e6
        learning_starts: int = 100,
        batch_size: int = 256,
        tau: float = 0.005,
        gamma: float = 0.99,
        train_freq: Union[int, Tuple[int, str]] = (1, "step"),
        gradient_steps: int = 1,
        action_noise: Optional[ActionNoise] = None,
        replay_buffer_class: Optional[ReplayBuffer] = None,
        replay_buffer_kwargs: Optional[Dict[str, Any]] = None,
        optimize_memory_usage: bool = False,
        policy_kwargs: Optional[Dict[str, Any]] = None,
        tensorboard_log: Optional[str] = None,
        verbose: int = 0,
        device: Union[th.device, str] = "auto",
        support_multi_env: bool = False,
        create_eval_env: bool = False,
        monitor_wrapper: bool = True,
        seed: Optional[int] = None,
        use_sde: bool = False,
        sde_sample_freq: int = -1,
        use_sde_at_warmup: bool = False,
        sde_support: bool = True,
        supported_action_spaces: Optional[Tuple[gym.spaces.Space, ...]] = None,
    ):
        super().__init__(
            policy=policy,
            env=env,
            learning_rate=learning_rate,
            policy_kwargs=policy_kwargs,
            tensorboard_log=tensorboard_log,
            verbose=verbose,
            device=device,
            support_multi_env=support_multi_env,
            create_eval_env=create_eval_env,
            monitor_wrapper=monitor_wrapper,
            seed=seed,
            use_sde=use_sde,
            sde_sample_freq=sde_sample_freq,
            supported_action_spaces=supported_action_spaces,
        )
        self.buffer_size = buffer_size
        self.batch_size = batch_size
        self.learning_starts = learning_starts
        self.tau = tau
        self.gamma = gamma
        self.gradient_steps = gradient_steps
        self.action_noise = action_noise
        self.optimize_memory_usage = optimize_memory_usage
        self.replay_buffer_class = replay_buffer_class
        if replay_buffer_kwargs is None:
            replay_buffer_kwargs = {}
        self.replay_buffer_kwargs = replay_buffer_kwargs
        self._episode_storage = None

        # Save train freq parameter, will be converted later to TrainFreq object
        self.train_freq = train_freq

        self.actor = None  # type: Optional[th.nn.Module]
        self.replay_buffer = None  # type: Optional[ReplayBuffer]
        # Update policy keyword arguments
        if sde_support:
            self.policy_kwargs["use_sde"] = self.use_sde
        # For gSDE only
        self.use_sde_at_warmup = use_sde_at_warmup

    def _convert_train_freq(self) -> None:
        """
        Convert `train_freq` parameter (int or tuple)
        to a TrainFreq object.
        """
        if not isinstance(self.train_freq, TrainFreq):
            train_freq = self.train_freq

            # The value of the train frequency will be checked later
            if not isinstance(train_freq, tuple):
                train_freq = (train_freq, "step")

            try:
                train_freq = (train_freq[0], TrainFrequencyUnit(train_freq[1]))
            except ValueError as e:
                raise ValueError(
                    f"The unit of the `train_freq` must be either 'step' or 'episode' not '{train_freq[1]}'!"
                ) from e

            if not isinstance(train_freq[0], int):
                raise ValueError(f"The frequency of `train_freq` must be an integer and not {train_freq[0]}")

            self.train_freq = TrainFreq(*train_freq)

    def _setup_model(self) -> None:
        self._setup_lr_schedule()
        self.set_random_seed(self.seed)

        # Use DictReplayBuffer if needed
        if self.replay_buffer_class is None:
            if isinstance(self.observation_space, gym.spaces.Dict):
                self.replay_buffer_class = DictReplayBuffer
            else:
                self.replay_buffer_class = ReplayBuffer

        elif self.replay_buffer_class == HerReplayBuffer:
            assert self.env is not None, "You must pass an environment when using `HerReplayBuffer`"

            # If using offline sampling, we need a classic replay buffer too
            if self.replay_buffer_kwargs.get("online_sampling", True):
                replay_buffer = None
            else:
                replay_buffer = DictReplayBuffer(
                    self.buffer_size,
                    self.observation_space,
                    self.action_space,
                    device=self.device,
                    optimize_memory_usage=self.optimize_memory_usage,
                )

            self.replay_buffer = HerReplayBuffer(
                self.env,
                self.buffer_size,
                device=self.device,
                replay_buffer=replay_buffer,
                **self.replay_buffer_kwargs,
            )

        if self.replay_buffer is None:
            self.replay_buffer = self.replay_buffer_class(
                self.buffer_size,
                self.observation_space,
                self.action_space,
                device=self.device,
                n_envs=self.n_envs,
                optimize_memory_usage=self.optimize_memory_usage,
                **self.replay_buffer_kwargs,
            )

        self.policy = self.policy_class(  # pytype:disable=not-instantiable
            self.observation_space,
            self.action_space,
            self.lr_schedule,
            **self.policy_kwargs,  # pytype:disable=not-instantiable
        )
        self.policy = self.policy.to(self.device)

        # Convert train freq parameter to TrainFreq object
        self._convert_train_freq()

    def save_replay_buffer(self, path: Union[str, pathlib.Path, io.BufferedIOBase]) -> None:
        """
        Save the replay buffer as a pickle file.

        :param path: Path to the file where the replay buffer should be saved.
            if path is a str or pathlib.Path, the path is automatically created if necessary.
        """
        assert self.replay_buffer is not None, "The replay buffer is not defined"
        save_to_pkl(path, self.replay_buffer, self.verbose)

    def load_replay_buffer(
        self,
        path: Union[str, pathlib.Path, io.BufferedIOBase],
        truncate_last_traj: bool = True,
    ) -> None:
        """
        Load a replay buffer from a pickle file.

        :param path: Path to the pickled replay buffer.
        :param truncate_last_traj: When using ``HerReplayBuffer`` with online sampling:
            If set to ``True``, we assume that the last trajectory in the replay buffer was finished
            (and truncate it).
            If set to ``False``, we assume that we continue the same trajectory (same episode).
        """
        self.replay_buffer = load_from_pkl(path, self.verbose)
        assert isinstance(self.replay_buffer, ReplayBuffer), "The replay buffer must inherit from ReplayBuffer class"

        # Backward compatibility with SB3 < 2.1.0 replay buffer
        # Keep old behavior: do not handle timeout termination separately
        if not hasattr(self.replay_buffer, "handle_timeout_termination"):  # pragma: no cover
            self.replay_buffer.handle_timeout_termination = False
            self.replay_buffer.timeouts = np.zeros_like(self.replay_buffer.dones)

        if isinstance(self.replay_buffer, HerReplayBuffer):
            assert self.env is not None, "You must pass an environment at load time when using `HerReplayBuffer`"
            self.replay_buffer.set_env(self.get_env())
            if truncate_last_traj:
                self.replay_buffer.truncate_last_trajectory()

    def _setup_learn(
        self,
        total_timesteps: int,
        eval_env: Optional[GymEnv],
        callback: MaybeCallback = None,
        eval_freq: int = 10000,
        n_eval_episodes: int = 5,
        log_path: Optional[str] = None,
        reset_num_timesteps: bool = True,
        tb_log_name: str = "run",
    ) -> Tuple[int, BaseCallback]:
        """
        cf `BaseAlgorithm`.
        """
        # Prevent continuity issue by truncating trajectory
        # when using memory efficient replay buffer
        # see https://github.com/DLR-RM/stable-baselines3/issues/46

        # Special case when using HerReplayBuffer,
        # the classic replay buffer is inside it when using offline sampling
        if isinstance(self.replay_buffer, HerReplayBuffer):
            replay_buffer = self.replay_buffer.replay_buffer
        else:
            replay_buffer = self.replay_buffer

        truncate_last_traj = (
            self.optimize_memory_usage
            and reset_num_timesteps
            and replay_buffer is not None
            and (replay_buffer.full or replay_buffer.pos > 0)
        )

        if truncate_last_traj:
            warnings.warn(
                "The last trajectory in the replay buffer will be truncated, "
                "see https://github.com/DLR-RM/stable-baselines3/issues/46."
                "You should use `reset_num_timesteps=False` or `optimize_memory_usage=False`"
                "to avoid that issue."
            )
            # Go to the previous index
            pos = (replay_buffer.pos - 1) % replay_buffer.buffer_size
            replay_buffer.dones[pos] = True

        return super()._setup_learn(
            total_timesteps,
            eval_env,
            callback,
            eval_freq,
            n_eval_episodes,
            log_path,
            reset_num_timesteps,
            tb_log_name,
        )

    def learn(
        self,
        total_timesteps: int,
        callback: MaybeCallback = None,
        log_interval: int = 4,
        eval_env: Optional[GymEnv] = None,
        eval_freq: int = -1,
        n_eval_episodes: int = 5,
        tb_log_name: str = "run",
        eval_log_path: Optional[str] = None,
        reset_num_timesteps: bool = True,
    ) -> "OffPolicyAlgorithm":
        """
        Main training loop: alternate between collecting rollouts into the
        replay buffer and performing gradient updates (see ``train``).
        cf `BaseAlgorithm` for the meaning of the parameters.
        """
        total_timesteps, callback = self._setup_learn(
            total_timesteps,
            eval_env,
            callback,
            eval_freq,
            n_eval_episodes,
            eval_log_path,
            reset_num_timesteps,
            tb_log_name,
        )

        callback.on_training_start(locals(), globals())

        while self.num_timesteps < total_timesteps:
            rollout = self.collect_rollouts(
                self.env,
                train_freq=self.train_freq,
                action_noise=self.action_noise,
                callback=callback,
                learning_starts=self.learning_starts,
                replay_buffer=self.replay_buffer,
                log_interval=log_interval,
            )

            if rollout.continue_training is False:
                break

            if self.num_timesteps > 0 and self.num_timesteps > self.learning_starts:
                # If no `gradient_steps` is specified,
                # do as many gradients steps as steps performed during the rollout
                gradient_steps = self.gradient_steps if self.gradient_steps >= 0 else rollout.episode_timesteps
                # Special case when the user passes `gradient_steps=0`
                if gradient_steps > 0:
                    self.train(batch_size=self.batch_size, gradient_steps=gradient_steps)

        callback.on_training_end()

        return self

    def train(self, gradient_steps: int, batch_size: int) -> None:
        """
        Sample the replay buffer and do the updates
        (gradient descent and update target networks)
        """
        raise NotImplementedError()

    def _sample_action(
        self,
        learning_starts: int,
        action_noise: Optional[ActionNoise] = None,
        n_envs: int = 1,
    ) -> Tuple[np.ndarray, np.ndarray]:
        """
        Sample an action according to the exploration policy.
        This is either done by sampling the probability distribution of the policy,
        or sampling a random action (from a uniform distribution over the action space)
        or by adding noise to the deterministic output.

        :param action_noise: Action noise that will be used for exploration
            Required for deterministic policy (e.g. TD3). This can also be used
            in addition to the stochastic policy for SAC.
        :param learning_starts: Number of steps before learning for the warm-up phase.
        :param n_envs:
        :return: action to take in the environment
            and scaled action that will be stored in the replay buffer.
            The two differs when the action space is not normalized (bounds are not [-1, 1]).
        """
        # Select action randomly or according to policy
        if self.num_timesteps < learning_starts and not (self.use_sde and self.use_sde_at_warmup):
            # Warmup phase
            unscaled_action = np.array([self.action_space.sample() for _ in range(n_envs)])
        else:
            # Note: when using continuous actions,
            # we assume that the policy uses tanh to scale the action
            # We use non-deterministic action in the case of SAC, for TD3, it does not matter
            unscaled_action, _ = self.predict(self._last_obs, deterministic=False)

        # Rescale the action from [low, high] to [-1, 1]
        if isinstance(self.action_space, gym.spaces.Box):
            scaled_action = self.policy.scale_action(unscaled_action)

            # Add noise to the action (improve exploration)
            if action_noise is not None:
                scaled_action = np.clip(scaled_action + action_noise(), -1, 1)

            # We store the scaled action in the buffer
            buffer_action = scaled_action
            action = self.policy.unscale_action(scaled_action)
        else:
            # Discrete case, no need to normalize or clip
            buffer_action = unscaled_action
            action = buffer_action
        return action, buffer_action

    def _dump_logs(self) -> None:
        """
        Write log.
        """
        time_elapsed = time.time() - self.start_time
        fps = int((self.num_timesteps - self._num_timesteps_at_start) / (time_elapsed + 1e-8))
        self.logger.record("time/episodes", self._episode_num, exclude="tensorboard")
        if len(self.ep_info_buffer) > 0 and len(self.ep_info_buffer[0]) > 0:
            self.logger.record("rollout/ep_rew_mean", safe_mean([ep_info["r"] for ep_info in self.ep_info_buffer]))
            self.logger.record("rollout/ep_len_mean", safe_mean([ep_info["l"] for ep_info in self.ep_info_buffer]))
        self.logger.record("time/fps", fps)
        self.logger.record("time/time_elapsed", int(time_elapsed), exclude="tensorboard")
        self.logger.record("time/total_timesteps", self.num_timesteps, exclude="tensorboard")
        if self.use_sde:
            self.logger.record("train/std", (self.actor.get_std()).mean().item())

        if len(self.ep_success_buffer) > 0:
            self.logger.record("rollout/success_rate", safe_mean(self.ep_success_buffer))
        # Pass the number of timesteps for tensorboard
        self.logger.dump(step=self.num_timesteps)

    def _on_step(self) -> None:
        """
        Method called after each step in the environment.
        It is meant to trigger DQN target network update
        but can be used for other purposes
        """
        pass

    def _store_transition(
        self,
        replay_buffer: ReplayBuffer,
        buffer_action: np.ndarray,
        new_obs: Union[np.ndarray, Dict[str, np.ndarray]],
        reward: np.ndarray,
        dones: np.ndarray,
        infos: List[Dict[str, Any]],
    ) -> None:
        """
        Store transition in the replay buffer.
        We store the normalized action and the unnormalized observation.
        It also handles terminal observations (because VecEnv resets automatically).

        :param replay_buffer: Replay buffer object where to store the transition.
        :param buffer_action: normalized action
        :param new_obs: next observation in the current episode
            or first observation of the episode (when dones is True)
        :param reward: reward for the current transition
        :param dones: Termination signal
        :param infos: List of additional information about the transition.
            It may contain the terminal observations and information about timeout.
        """
        # Store only the unnormalized version
        if self._vec_normalize_env is not None:
            new_obs_ = self._vec_normalize_env.get_original_obs()
            reward_ = self._vec_normalize_env.get_original_reward()
        else:
            # Avoid changing the original ones
            self._last_original_obs, new_obs_, reward_ = self._last_obs, new_obs, reward

        # Avoid modification by reference
        next_obs = deepcopy(new_obs_)
        # As the VecEnv resets automatically, new_obs is already the
        # first observation of the next episode
        for i, done in enumerate(dones):
            if done and infos[i].get("terminal_observation") is not None:
                if isinstance(next_obs, dict):
                    next_obs_ = infos[i]["terminal_observation"]
                    # VecNormalize normalizes the terminal observation
                    if self._vec_normalize_env is not None:
                        next_obs_ = self._vec_normalize_env.unnormalize_obs(next_obs_)
                    # Replace next obs for the correct envs
                    for key in next_obs.keys():
                        next_obs[key][i] = next_obs_[key]
                else:
                    next_obs[i] = infos[i]["terminal_observation"]
                    # VecNormalize normalizes the terminal observation
                    if self._vec_normalize_env is not None:
                        next_obs[i] = self._vec_normalize_env.unnormalize_obs(next_obs[i, :])

        replay_buffer.add(
            self._last_original_obs,
            next_obs,
            buffer_action,
            reward_,
            dones,
            infos,
        )

        self._last_obs = new_obs
        # Save the unnormalized observation
        if self._vec_normalize_env is not None:
            self._last_original_obs = new_obs_

    def collect_rollouts(
        self,
        env: VecEnv,
        callback: BaseCallback,
        train_freq: TrainFreq,
        replay_buffer: ReplayBuffer,
        action_noise: Optional[ActionNoise] = None,
        learning_starts: int = 0,
        log_interval: Optional[int] = None,
    ) -> RolloutReturn:
        """
        Collect experiences and store them into a ``ReplayBuffer``.

        :param env: The training environment
        :param callback: Callback that will be called at each step
            (and at the beginning and end of the rollout)
        :param train_freq: How much experience to collect
            by doing rollouts of current policy.
            Either ``TrainFreq(<n>, TrainFrequencyUnit.STEP)``
            or ``TrainFreq(<n>, TrainFrequencyUnit.EPISODE)``
            with ``<n>`` being an integer greater than 0.
        :param action_noise: Action noise that will be used for exploration
            Required for deterministic policy (e.g. TD3). This can also be used
            in addition to the stochastic policy for SAC.
        :param learning_starts: Number of steps before learning for the warm-up phase.
        :param replay_buffer:
        :param log_interval: Log data every ``log_interval`` episodes
        :return:
        """
        # Switch to eval mode (this affects batch norm / dropout)
        self.policy.set_training_mode(False)

        num_collected_steps, num_collected_episodes = 0, 0

        assert isinstance(env, VecEnv), "You must pass a VecEnv"
        assert train_freq.frequency > 0, "Should at least collect one step or episode."

        if env.num_envs > 1:
            assert train_freq.unit == TrainFrequencyUnit.STEP, "You must use only one env when doing episodic training."

        # Vectorize action noise if needed
        if action_noise is not None and env.num_envs > 1 and not isinstance(action_noise, VectorizedActionNoise):
            action_noise = VectorizedActionNoise(action_noise, env.num_envs)

        if self.use_sde:
            self.actor.reset_noise(env.num_envs)

        callback.on_rollout_start()
        continue_training = True

        while should_collect_more_steps(train_freq, num_collected_steps, num_collected_episodes):
            if self.use_sde and self.sde_sample_freq > 0 and num_collected_steps % self.sde_sample_freq == 0:
                # Sample a new noise matrix
                self.actor.reset_noise(env.num_envs)

            # Select action randomly or according to policy
            actions, buffer_actions = self._sample_action(learning_starts, action_noise, env.num_envs)

            # Rescale and perform action
            new_obs, rewards, dones, infos = env.step(actions)

            self.num_timesteps += env.num_envs
            num_collected_steps += 1

            # Give access to local variables
            callback.update_locals(locals())
            # Only stop training if return value is False, not when it is None.
            if callback.on_step() is False:
                return RolloutReturn(num_collected_steps * env.num_envs, num_collected_episodes, continue_training=False)

            # Retrieve reward and episode length if using Monitor wrapper
            self._update_info_buffer(infos, dones)

            # Store data in replay buffer (normalized action and unnormalized observation)
            self._store_transition(replay_buffer, buffer_actions, new_obs, rewards, dones, infos)

            self._update_current_progress_remaining(self.num_timesteps, self._total_timesteps)

            # For DQN, check if the target network should be updated
            # and update the exploration schedule
            # For SAC/TD3, the update is done at the same time as the gradient update
            # see https://github.com/hill-a/stable-baselines/issues/900
            self._on_step()

            for idx, done in enumerate(dones):
                if done:
                    # Update stats
                    num_collected_episodes += 1
                    self._episode_num += 1

                    if action_noise is not None:
                        kwargs = dict(indices=[idx]) if env.num_envs > 1 else {}
                        action_noise.reset(**kwargs)

                    # Log training infos
                    if log_interval is not None and self._episode_num % log_interval == 0:
                        self._dump_logs()

        callback.on_rollout_end()

        return RolloutReturn(num_collected_steps * env.num_envs, num_collected_episodes, continue_training)
import copy
from abc import ABC, abstractmethod
from typing import Iterable, List, Optional
import numpy as np
class ActionNoise(ABC):
    """Abstract base class for exploration action-noise processes."""

    def __init__(self) -> None:
        super().__init__()

    @abstractmethod
    def __call__(self) -> np.ndarray:
        """Sample a noise vector."""
        raise NotImplementedError()

    def reset(self) -> None:
        """Reset the noise process at the end of an episode (no-op by default)."""
        return None
class NormalActionNoise(ActionNoise):
    """
    Gaussian (white) action noise.

    :param mean: the mean value of the noise
    :param sigma: the scale of the noise (std here)
    """

    def __init__(self, mean: np.ndarray, sigma: np.ndarray):
        self._mu = mean
        self._sigma = sigma
        super().__init__()

    def __call__(self) -> np.ndarray:
        # One independent Gaussian draw per call
        return np.random.normal(self._mu, self._sigma)

    def __repr__(self) -> str:
        return f"NormalActionNoise(mu={self._mu}, sigma={self._sigma})"
class OrnsteinUhlenbeckActionNoise(ActionNoise):
    """
    An Ornstein Uhlenbeck action noise, this is designed to approximate Brownian motion with friction.

    Based on http://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab

    :param mean: the mean of the noise
    :param sigma: the scale of the noise
    :param theta: the rate of mean reversion
    :param dt: the timestep for the noise
    :param initial_noise: the initial value for the noise output, (if None: 0)
    """

    def __init__(
        self,
        mean: np.ndarray,
        sigma: np.ndarray,
        theta: float = 0.15,
        dt: float = 1e-2,
        initial_noise: Optional[np.ndarray] = None,
    ):
        self._theta = theta
        self._mu = mean
        self._sigma = sigma
        self._dt = dt
        self.initial_noise = initial_noise
        self.noise_prev = np.zeros_like(self._mu)
        self.reset()
        super().__init__()

    def __call__(self) -> np.ndarray:
        # Discretized OU update: mean-reverting drift plus scaled Gaussian diffusion
        drift = self._theta * (self._mu - self.noise_prev) * self._dt
        diffusion = self._sigma * np.sqrt(self._dt) * np.random.normal(size=self._mu.shape)
        sample = self.noise_prev + drift + diffusion
        self.noise_prev = sample
        return sample

    def reset(self) -> None:
        """
        reset the Ornstein Uhlenbeck noise, to the initial position
        """
        if self.initial_noise is not None:
            self.noise_prev = self.initial_noise
        else:
            self.noise_prev = np.zeros_like(self._mu)

    def __repr__(self) -> str:
        return f"OrnsteinUhlenbeckActionNoise(mu={self._mu}, sigma={self._sigma})"
class VectorizedActionNoise(ActionNoise):
    """
    A Vectorized action noise for parallel environments.

    Keeps one independent copy of ``base_noise`` per environment so each
    parallel environment gets its own noise process.

    :param base_noise: ActionNoise The noise generator to use
    :param n_envs: The number of parallel environments
    """

    def __init__(self, base_noise: ActionNoise, n_envs: int):
        try:
            self.n_envs = int(n_envs)
            assert self.n_envs > 0
        except (TypeError, AssertionError) as e:
            raise ValueError(f"Expected n_envs={n_envs} to be positive integer greater than 0") from e

        self.base_noise = base_noise
        # Deep copies so the per-env processes evolve independently
        self.noises = [copy.deepcopy(self.base_noise) for _ in range(n_envs)]

    def reset(self, indices: Optional[Iterable[int]] = None) -> None:
        """
        Reset all the noise processes, or those listed in indices

        :param indices: Optional[Iterable[int]] The indices to reset. Default: None.
            If the parameter is None, then all processes are reset to their initial position.
        """
        if indices is None:
            indices = range(len(self.noises))

        for index in indices:
            self.noises[index].reset()

    def __repr__(self) -> str:
        return f"VecNoise(BaseNoise={repr(self.base_noise)}), n_envs={len(self.noises)})"

    def __call__(self) -> np.ndarray:
        """
        Generate and stack the action noise from each noise object
        """
        noise = np.stack([noise() for noise in self.noises])
        return noise

    @property
    def base_noise(self) -> ActionNoise:
        return self._base_noise

    @base_noise.setter
    def base_noise(self, base_noise: ActionNoise) -> None:
        if base_noise is None:
            raise ValueError("Expected base_noise to be an instance of ActionNoise, not None", ActionNoise)
        if not isinstance(base_noise, ActionNoise):
            raise TypeError("Expected base_noise to be an instance of type ActionNoise", ActionNoise)
        self._base_noise = base_noise

    @property
    def noises(self) -> List[ActionNoise]:
        return self._noises

    @noises.setter
    def noises(self, noises: List[ActionNoise]) -> None:
        noises = list(noises)  # raises TypeError if not iterable
        assert len(noises) == self.n_envs, f"Expected a list of {self.n_envs} ActionNoises, found {len(noises)}."

        different_types = [i for i, noise in enumerate(noises) if not isinstance(noise, type(self.base_noise))]

        if len(different_types):
            raise ValueError(
                f"Noise instances at indices {different_types} don't match the type of base_noise", type(self.base_noise)
            )

        self._noises = noises
        # Start every process from its initial position
        for noise in noises:
            noise.reset()
import warnings
from typing import Dict, Tuple, Union
import numpy as np
import torch as th
from gym import spaces
from torch.nn import functional as F
def is_image_space_channels_first(observation_space: spaces.Box) -> bool:
    """
    Check if an image observation space (see ``is_image_space``)
    is channels-first (CxHxW, True) or channels-last (HxWxC, False).

    Use a heuristic that channel dimension is the smallest of the three.
    If second dimension is smallest, raise an exception (no support).

    :param observation_space:
    :return: True if observation space is channels-first image, False if channels-last.
    """
    min_axis = int(np.argmin(observation_space.shape))
    # Ambiguous layout: neither CxHxW nor HxWxC — warn and fall back to channels-last
    if min_axis == 1:
        warnings.warn("Treating image space as channels-last, while second dimension was smallest of the three.")
    return min_axis == 0
def is_image_space(
    observation_space: spaces.Space,
    check_channels: bool = False,
) -> bool:
    """
    Check if a observation space has the shape, limits and dtype
    of a valid image.
    The check is conservative, so that it returns False if there is a doubt.

    Valid images: RGB, RGBD, GrayScale with values in [0, 255]

    :param observation_space:
    :param check_channels: Whether to do or not the check for the number of channels.
        e.g., with frame-stacking, the observation space may have more channels than expected.
    :return:
    """
    # Only 3D Box spaces can be images
    if not isinstance(observation_space, spaces.Box) or len(observation_space.shape) != 3:
        return False
    # Images are stored as unsigned bytes
    if observation_space.dtype != np.uint8:
        return False
    # Pixel values must span exactly [0, 255]
    if np.any(observation_space.low != 0) or np.any(observation_space.high != 255):
        return False
    if not check_channels:
        return True
    # Channel count must correspond to GrayScale, RGB or RGBD
    if is_image_space_channels_first(observation_space):
        n_channels = observation_space.shape[0]
    else:
        n_channels = observation_space.shape[-1]
    return n_channels in [1, 3, 4]
def maybe_transpose(observation: np.ndarray, observation_space: spaces.Space) -> np.ndarray:
    """
    Handle the different cases for images as PyTorch use channel first format.

    :param observation:
    :param observation_space:
    :return: channel first observation if observation is an image
    """
    # Avoid circular import
    from stable_baselines3.common.vec_env import VecTransposeImage

    if not is_image_space(observation_space):
        return observation

    expected_shape = observation_space.shape
    # Shapes already agree (with or without a leading batch dimension): nothing to do
    if observation.shape == expected_shape or observation.shape[1:] == expected_shape:
        return observation

    # Try to re-order the channels
    transposed = VecTransposeImage.transpose_image(observation)
    if transposed.shape == expected_shape or transposed.shape[1:] == expected_shape:
        return transposed
    return observation
def preprocess_obs(
    obs: th.Tensor,
    observation_space: spaces.Space,
    normalize_images: bool = True,
) -> Union[th.Tensor, Dict[str, th.Tensor]]:
    """
    Preprocess observation to be to a neural network.
    For images, it normalizes the values by dividing them by 255 (to have values in [0, 1])
    For discrete observations, it create a one hot vector.

    :param obs: Observation
    :param observation_space:
    :param normalize_images: Whether to normalize images or not
        (True by default)
    :return:
    """
    if isinstance(observation_space, spaces.Box):
        float_obs = obs.float()
        if normalize_images and is_image_space(observation_space):
            # Scale pixel values from [0, 255] to [0, 1]
            float_obs = float_obs / 255.0
        return float_obs

    if isinstance(observation_space, spaces.Discrete):
        # One hot encoding and convert to float to avoid errors
        return F.one_hot(obs.long(), num_classes=observation_space.n).float()

    if isinstance(observation_space, spaces.MultiDiscrete):
        # One-hot encode each Categorical sub-space column, then concatenate
        encoded_columns = []
        for idx, column in enumerate(th.split(obs.long(), 1, dim=1)):
            encoded_columns.append(F.one_hot(column.long(), num_classes=int(observation_space.nvec[idx])).float())
        return th.cat(encoded_columns, dim=-1).view(obs.shape[0], sum(observation_space.nvec))

    if isinstance(observation_space, spaces.MultiBinary):
        return obs.float()

    if isinstance(observation_space, spaces.Dict):
        # Build a fresh dict: do not modify by reference the original observation
        return {
            key: preprocess_obs(sub_obs, observation_space[key], normalize_images=normalize_images)
            for key, sub_obs in obs.items()
        }

    raise NotImplementedError(f"Preprocessing not implemented for {observation_space}")
def get_obs_shape(
    observation_space: spaces.Space,
) -> Union[Tuple[int, ...], Dict[str, Tuple[int, ...]]]:
    """
    Get the shape of the observation (useful for the buffers).

    :param observation_space:
    :return:
    """
    if isinstance(observation_space, spaces.Box):
        return observation_space.shape
    if isinstance(observation_space, spaces.Discrete):
        # A discrete observation is a single integer
        return (1,)
    if isinstance(observation_space, spaces.MultiDiscrete):
        # One entry per discrete feature
        return (int(len(observation_space.nvec)),)
    if isinstance(observation_space, spaces.MultiBinary):
        # One entry per binary feature
        return (int(observation_space.n),)
    if isinstance(observation_space, spaces.Dict):
        # Recurse into each sub-space
        return {key: get_obs_shape(subspace) for key, subspace in observation_space.spaces.items()}
    raise NotImplementedError(f"{observation_space} observation space is not supported")
def get_flattened_obs_dim(observation_space: spaces.Space) -> int:
    """
    Get the dimension of the observation space when flattened.
    It does not apply to image observation space.

    Used by the ``FlattenExtractor`` to compute the input shape.

    :param observation_space:
    :return:
    """
    # See issue https://github.com/openai/gym/issues/1915
    # it may be a problem for Dict/Tuple spaces too...
    if isinstance(observation_space, spaces.MultiDiscrete):
        return sum(observation_space.nvec)
    # For every other space, rely on Gym's own flatten helper
    return spaces.utils.flatdim(observation_space)
def get_action_dim(action_space: spaces.Space) -> int:
    """
    Get the dimension of the action space.

    :param action_space: the action space to inspect
    :return: the number of action dimensions
    """
    if isinstance(action_space, spaces.Box):
        # Product of the shape, e.g. (2, 3) -> 6
        return int(np.prod(action_space.shape))
    if isinstance(action_space, spaces.Discrete):
        # A single integer action
        return 1
    if isinstance(action_space, spaces.MultiDiscrete):
        # One dimension per discrete sub-action
        return int(len(action_space.nvec))
    if isinstance(action_space, spaces.MultiBinary):
        # One dimension per binary flag
        return int(action_space.n)
    raise NotImplementedError(f"{action_space} action space is not supported")
def check_for_nested_spaces(obs_space: spaces.Space):
    """
    Make sure the observation space does not have nested spaces (Dicts/Tuples inside Dicts/Tuples).
    If so, raise an Exception informing that there is no support for this.

    :param obs_space: an observation space
    :return:
    """
    if isinstance(obs_space, (spaces.Dict, spaces.Tuple)):
        # ``Dict`` exposes its children as a mapping, ``Tuple`` as a sequence
        sub_spaces = obs_space.spaces.values() if isinstance(obs_space, spaces.Dict) else obs_space.spaces
        if any(isinstance(sub_space, (spaces.Dict, spaces.Tuple)) for sub_space in sub_spaces):
            raise NotImplementedError(
                "Nested observation spaces are not supported (Tuple/Dict space inside Tuple/Dict space)."
            )
import time
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import gym
import numpy as np
import torch as th
from stable_baselines3.common.base_class import BaseAlgorithm
from stable_baselines3.common.buffers import DictRolloutBuffer, RolloutBuffer
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.policies import ActorCriticPolicy
from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
from stable_baselines3.common.utils import obs_as_tensor, safe_mean
from stable_baselines3.common.vec_env import VecEnv
class OnPolicyAlgorithm(BaseAlgorithm):
    """
    The base for On-Policy algorithms (ex: A2C/PPO).
    :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
    :param env: The environment to learn from (if registered in Gym, can be str)
    :param learning_rate: The learning rate, it can be a function
        of the current progress remaining (from 1 to 0)
    :param n_steps: The number of steps to run for each environment per update
        (i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel)
    :param gamma: Discount factor
    :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator.
        Equivalent to classic advantage when set to 1.
    :param ent_coef: Entropy coefficient for the loss calculation
    :param vf_coef: Value function coefficient for the loss calculation
    :param max_grad_norm: The maximum value for the gradient clipping
    :param use_sde: Whether to use generalized State Dependent Exploration (gSDE)
        instead of action noise exploration (default: False)
    :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
        Default: -1 (only sample at the beginning of the rollout)
    :param tensorboard_log: the log location for tensorboard (if None, no logging)
    :param create_eval_env: Whether to create a second environment that will be
        used for evaluating the agent periodically. (Only available when passing string for the environment)
    :param monitor_wrapper: When creating an environment, whether to wrap it
        or not in a Monitor wrapper.
    :param policy_kwargs: additional arguments to be passed to the policy on creation
    :param verbose: the verbosity level: 0 no output, 1 info, 2 debug
    :param seed: Seed for the pseudo random generators
    :param device: Device (cpu, cuda, ...) on which the code should be run.
        Setting it to auto, the code will be run on the GPU if possible.
    :param _init_setup_model: Whether or not to build the network at the creation of the instance
    :param supported_action_spaces: The action spaces supported by the algorithm.
    """
    def __init__(
        self,
        policy: Union[str, Type[ActorCriticPolicy]],
        env: Union[GymEnv, str],
        learning_rate: Union[float, Schedule],
        n_steps: int,
        gamma: float,
        gae_lambda: float,
        ent_coef: float,
        vf_coef: float,
        max_grad_norm: float,
        use_sde: bool,
        sde_sample_freq: int,
        tensorboard_log: Optional[str] = None,
        create_eval_env: bool = False,
        monitor_wrapper: bool = True,
        policy_kwargs: Optional[Dict[str, Any]] = None,
        verbose: int = 0,
        seed: Optional[int] = None,
        device: Union[th.device, str] = "auto",
        _init_setup_model: bool = True,
        supported_action_spaces: Optional[Tuple[gym.spaces.Space, ...]] = None,
    ):
        super().__init__(
            policy=policy,
            env=env,
            learning_rate=learning_rate,
            policy_kwargs=policy_kwargs,
            verbose=verbose,
            device=device,
            use_sde=use_sde,
            sde_sample_freq=sde_sample_freq,
            create_eval_env=create_eval_env,
            support_multi_env=True,
            seed=seed,
            tensorboard_log=tensorboard_log,
            supported_action_spaces=supported_action_spaces,
        )
        self.n_steps = n_steps
        self.gamma = gamma
        self.gae_lambda = gae_lambda
        self.ent_coef = ent_coef
        self.vf_coef = vf_coef
        self.max_grad_norm = max_grad_norm
        # Created lazily in _setup_model()
        self.rollout_buffer = None
        # Build the policy network and the rollout buffer now,
        # unless the caller explicitly deferred the setup
        if _init_setup_model:
            self._setup_model()
    def _setup_model(self) -> None:
        """Create the learning-rate schedule, the rollout buffer and the policy network."""
        self._setup_lr_schedule()
        self.set_random_seed(self.seed)
        # Dict observation spaces need the dict-aware rollout buffer
        buffer_cls = DictRolloutBuffer if isinstance(self.observation_space, gym.spaces.Dict) else RolloutBuffer
        self.rollout_buffer = buffer_cls(
            self.n_steps,
            self.observation_space,
            self.action_space,
            device=self.device,
            gamma=self.gamma,
            gae_lambda=self.gae_lambda,
            n_envs=self.n_envs,
        )
        self.policy = self.policy_class(  # pytype:disable=not-instantiable
            self.observation_space,
            self.action_space,
            self.lr_schedule,
            use_sde=self.use_sde,
            **self.policy_kwargs  # pytype:disable=not-instantiable
        )
        self.policy = self.policy.to(self.device)
    def collect_rollouts(
        self,
        env: VecEnv,
        callback: BaseCallback,
        rollout_buffer: RolloutBuffer,
        n_rollout_steps: int,
    ) -> bool:
        """
        Collect experiences using the current policy and fill a ``RolloutBuffer``.
        The term rollout here refers to the model-free notion and should not
        be used with the concept of rollout used in model-based RL or planning.
        :param env: The training environment
        :param callback: Callback that will be called at each step
            (and at the beginning and end of the rollout)
        :param rollout_buffer: Buffer to fill with rollouts
        :param n_rollout_steps: Number of experiences to collect per environment
        :return: True if function returned with at least `n_rollout_steps`
            collected, False if callback terminated rollout prematurely.
        """
        assert self._last_obs is not None, "No previous observation was provided"
        # Switch to eval mode (this affects batch norm / dropout)
        self.policy.set_training_mode(False)
        n_steps = 0
        rollout_buffer.reset()
        # Sample new weights for the state dependent exploration
        if self.use_sde:
            self.policy.reset_noise(env.num_envs)
        callback.on_rollout_start()
        while n_steps < n_rollout_steps:
            if self.use_sde and self.sde_sample_freq > 0 and n_steps % self.sde_sample_freq == 0:
                # Sample a new noise matrix
                self.policy.reset_noise(env.num_envs)
            with th.no_grad():
                # Convert to pytorch tensor or to TensorDict
                obs_tensor = obs_as_tensor(self._last_obs, self.device)
                actions, values, log_probs = self.policy(obs_tensor)
            actions = actions.cpu().numpy()
            # Rescale and perform action
            clipped_actions = actions
            # Clip the actions to avoid out of bound error
            if isinstance(self.action_space, gym.spaces.Box):
                clipped_actions = np.clip(actions, self.action_space.low, self.action_space.high)
            new_obs, rewards, dones, infos = env.step(clipped_actions)
            self.num_timesteps += env.num_envs
            # Give access to local variables
            callback.update_locals(locals())
            if callback.on_step() is False:
                # Early abort requested by the callback: tell the caller
                return False
            self._update_info_buffer(infos)
            n_steps += 1
            if isinstance(self.action_space, gym.spaces.Discrete):
                # Reshape in case of discrete action
                actions = actions.reshape(-1, 1)
            # Handle timeout by bootstraping with value function
            # see GitHub issue #633
            for idx, done in enumerate(dones):
                if (
                    done
                    and infos[idx].get("terminal_observation") is not None
                    and infos[idx].get("TimeLimit.truncated", False)
                ):
                    terminal_obs = self.policy.obs_to_tensor(infos[idx]["terminal_observation"])[0]
                    with th.no_grad():
                        terminal_value = self.policy.predict_values(terminal_obs)[0]
                    rewards[idx] += self.gamma * terminal_value
            rollout_buffer.add(self._last_obs, actions, rewards, self._last_episode_starts, values, log_probs)
            self._last_obs = new_obs
            self._last_episode_starts = dones
        with th.no_grad():
            # Compute value for the last timestep
            values = self.policy.predict_values(obs_as_tensor(new_obs, self.device))
        rollout_buffer.compute_returns_and_advantage(last_values=values, dones=dones)
        callback.on_rollout_end()
        return True
    def train(self) -> None:
        """
        Consume current rollout data and update policy parameters.
        Implemented by individual algorithms.
        """
        raise NotImplementedError
    def learn(
        self,
        total_timesteps: int,
        callback: MaybeCallback = None,
        log_interval: int = 1,
        eval_env: Optional[GymEnv] = None,
        eval_freq: int = -1,
        n_eval_episodes: int = 5,
        tb_log_name: str = "OnPolicyAlgorithm",
        eval_log_path: Optional[str] = None,
        reset_num_timesteps: bool = True,
    ) -> "OnPolicyAlgorithm":
        iteration = 0
        total_timesteps, callback = self._setup_learn(
            total_timesteps, eval_env, callback, eval_freq, n_eval_episodes, eval_log_path, reset_num_timesteps, tb_log_name
        )
        callback.on_training_start(locals(), globals())
        # Alternate between collecting a rollout and a gradient update
        # until the timestep budget is exhausted
        while self.num_timesteps < total_timesteps:
            continue_training = self.collect_rollouts(self.env, callback, self.rollout_buffer, n_rollout_steps=self.n_steps)
            if continue_training is False:
                break
            iteration += 1
            self._update_current_progress_remaining(self.num_timesteps, total_timesteps)
            # Display training infos
            if log_interval is not None and iteration % log_interval == 0:
                fps = int((self.num_timesteps - self._num_timesteps_at_start) / (time.time() - self.start_time))
                self.logger.record("time/iterations", iteration, exclude="tensorboard")
                if len(self.ep_info_buffer) > 0 and len(self.ep_info_buffer[0]) > 0:
                    self.logger.record("rollout/ep_rew_mean", safe_mean([ep_info["r"] for ep_info in self.ep_info_buffer]))
                    self.logger.record("rollout/ep_len_mean", safe_mean([ep_info["l"] for ep_info in self.ep_info_buffer]))
                self.logger.record("time/fps", fps)
                self.logger.record("time/time_elapsed", int(time.time() - self.start_time), exclude="tensorboard")
                self.logger.record("time/total_timesteps", self.num_timesteps, exclude="tensorboard")
                self.logger.dump(step=self.num_timesteps)
            # Gradient update on the freshly collected rollout
            self.train()
        callback.on_training_end()
        return self
    def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
        """Names of the torch objects whose state-dicts are saved (no extra tensors)."""
        state_dicts = ["policy", "policy.optimizer"]
        return state_dicts, []
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Tuple, Union
import gym
import torch as th
from gym import spaces
from torch import nn
from torch.distributions import Bernoulli, Categorical, Normal
from stable_baselines3.common.preprocessing import get_action_dim
class Distribution(ABC):
    """Abstract base class for action probability distributions."""

    def __init__(self):
        super().__init__()
        # Concrete subclasses store the underlying distribution object here
        self.distribution = None

    @abstractmethod
    def proba_distribution_net(self, *args, **kwargs) -> Union[nn.Module, Tuple[nn.Module, nn.Parameter]]:
        """Create the layers and parameters that represent the distribution.

        Subclasses must define this, but the arguments and return type vary between
        concrete classes."""

    @abstractmethod
    def proba_distribution(self, *args, **kwargs) -> "Distribution":
        """Set parameters of the distribution.

        :return: self
        """

    @abstractmethod
    def log_prob(self, x: th.Tensor) -> th.Tensor:
        """
        Returns the log likelihood

        :param x: the taken action
        :return: The log likelihood of the distribution
        """

    @abstractmethod
    def entropy(self) -> Optional[th.Tensor]:
        """
        Returns Shannon's entropy of the probability

        :return: the entropy, or None if no analytical form is known
        """

    @abstractmethod
    def sample(self) -> th.Tensor:
        """
        Returns a sample from the probability distribution

        :return: the stochastic action
        """

    @abstractmethod
    def mode(self) -> th.Tensor:
        """
        Returns the most likely action (deterministic output)
        from the probability distribution

        :return: the stochastic action
        """

    def get_actions(self, deterministic: bool = False) -> th.Tensor:
        """
        Return actions according to the probability distribution.

        :param deterministic: if True, return the mode instead of sampling
        :return: the selected actions
        """
        return self.mode() if deterministic else self.sample()

    @abstractmethod
    def actions_from_params(self, *args, **kwargs) -> th.Tensor:
        """
        Returns samples from the probability distribution
        given its parameters.

        :return: actions
        """

    @abstractmethod
    def log_prob_from_params(self, *args, **kwargs) -> Tuple[th.Tensor, th.Tensor]:
        """
        Returns samples and the associated log probabilities
        from the probability distribution given its parameters.

        :return: actions and log prob
        """
def sum_independent_dims(tensor: th.Tensor) -> th.Tensor:
    """
    Continuous actions are usually considered to be independent,
    so we can sum components of the ``log_prob`` or the entropy.

    :param tensor: shape: (n_batch, n_actions) or (n_batch,)
    :return: shape: (n_batch,)
    """
    # Batched input: reduce over the action dimension only;
    # otherwise collapse everything into a scalar tensor.
    return tensor.sum(dim=1) if len(tensor.shape) > 1 else tensor.sum()
class DiagGaussianDistribution(Distribution):
    """
    Gaussian distribution with diagonal covariance matrix, for continuous actions.

    :param action_dim: Dimension of the action space.
    """

    def __init__(self, action_dim: int):
        super().__init__()
        self.action_dim = action_dim
        # Filled in later by proba_distribution_net() / proba_distribution()
        self.mean_actions = None
        self.log_std = None

    def proba_distribution_net(self, latent_dim: int, log_std_init: float = 0.0) -> Tuple[nn.Module, nn.Parameter]:
        """
        Create the layers and parameter that represent the distribution:
        one output will be the mean of the Gaussian, the other parameter will be the
        standard deviation (log std in fact to allow negative values)

        :param latent_dim: Dimension of the last layer of the policy (before the action layer)
        :param log_std_init: Initial value for the log standard deviation
        :return: the mean network and the trainable log-std parameter
        """
        # TODO: allow action dependent std
        log_std = nn.Parameter(th.ones(self.action_dim) * log_std_init, requires_grad=True)
        mean_actions = nn.Linear(latent_dim, self.action_dim)
        return mean_actions, log_std

    def proba_distribution(self, mean_actions: th.Tensor, log_std: th.Tensor) -> "DiagGaussianDistribution":
        """
        Create the distribution given its parameters (mean, std)

        :param mean_actions: mean of the Gaussian
        :param log_std: log standard deviation (shared across the batch)
        :return: self
        """
        # Broadcast the shared std to the shape of the means
        action_std = log_std.exp() * th.ones_like(mean_actions)
        self.distribution = Normal(mean_actions, action_std)
        return self

    def log_prob(self, actions: th.Tensor) -> th.Tensor:
        """
        Get the log probabilities of actions according to the distribution.
        Note that you must first call the ``proba_distribution()`` method.

        :param actions: the actions whose likelihood is evaluated
        :return: log likelihood, summed over the action dimensions
        """
        return sum_independent_dims(self.distribution.log_prob(actions))

    def entropy(self) -> th.Tensor:
        # Independent dimensions: entropies simply add up
        return sum_independent_dims(self.distribution.entropy())

    def sample(self) -> th.Tensor:
        # rsample() keeps sampling differentiable (reparametrization trick)
        return self.distribution.rsample()

    def mode(self) -> th.Tensor:
        # The mode of a Gaussian is its mean
        return self.distribution.mean

    def actions_from_params(self, mean_actions: th.Tensor, log_std: th.Tensor, deterministic: bool = False) -> th.Tensor:
        # Refresh the distribution, then draw from it
        self.proba_distribution(mean_actions, log_std)
        return self.get_actions(deterministic=deterministic)

    def log_prob_from_params(self, mean_actions: th.Tensor, log_std: th.Tensor) -> Tuple[th.Tensor, th.Tensor]:
        """
        Compute the log probability of taking an action
        given the distribution parameters.

        :param mean_actions: mean of the Gaussian
        :param log_std: log standard deviation
        :return: sampled actions and their log probability
        """
        actions = self.actions_from_params(mean_actions, log_std)
        return actions, self.log_prob(actions)
class SquashedDiagGaussianDistribution(DiagGaussianDistribution):
    """
    Gaussian distribution with diagonal covariance matrix, followed by a squashing function (tanh) to ensure bounds.

    :param action_dim: Dimension of the action space.
    :param epsilon: small value to avoid NaN due to numerical imprecision.
    """

    def __init__(self, action_dim: int, epsilon: float = 1e-6):
        super().__init__(action_dim)
        # Numerical guard (prevents division by zero or log of zero)
        self.epsilon = epsilon
        # Cache of the last pre-squash (Gaussian) sample
        self.gaussian_actions = None

    def proba_distribution(self, mean_actions: th.Tensor, log_std: th.Tensor) -> "SquashedDiagGaussianDistribution":
        super().proba_distribution(mean_actions, log_std)
        return self

    def log_prob(self, actions: th.Tensor, gaussian_actions: Optional[th.Tensor] = None) -> th.Tensor:
        if gaussian_actions is None:
            # Recover the pre-squash actions with a clipped inverse tanh
            # (the naive 0.5 * log((1 + x) / (1 - x)) is numerically unstable)
            gaussian_actions = TanhBijector.inverse(actions)
        # Log likelihood under the underlying Gaussian distribution
        log_prob = super().log_prob(gaussian_actions)
        # Change-of-variable correction for the tanh squashing
        # (from the original SAC implementation): tanh is bijective and differentiable
        log_prob -= th.sum(th.log(1 - actions**2 + self.epsilon), dim=1)
        return log_prob

    def entropy(self) -> Optional[th.Tensor]:
        # No closed form once squashed; estimate with -log_prob.mean() instead
        return None

    def sample(self) -> th.Tensor:
        # Keep the Gaussian sample around so log_prob() can reuse it
        self.gaussian_actions = super().sample()
        return th.tanh(self.gaussian_actions)

    def mode(self) -> th.Tensor:
        # Squash the deterministic output as well
        self.gaussian_actions = super().mode()
        return th.tanh(self.gaussian_actions)

    def log_prob_from_params(self, mean_actions: th.Tensor, log_std: th.Tensor) -> Tuple[th.Tensor, th.Tensor]:
        action = self.actions_from_params(mean_actions, log_std)
        log_prob = self.log_prob(action, self.gaussian_actions)
        return action, log_prob
class CategoricalDistribution(Distribution):
    """
    Categorical distribution for discrete actions.

    :param action_dim: Number of discrete actions
    """

    def __init__(self, action_dim: int):
        super().__init__()
        self.action_dim = action_dim

    def proba_distribution_net(self, latent_dim: int) -> nn.Module:
        """
        Create the layer that represents the distribution:
        it will be the logits of the Categorical distribution.
        You can then get probabilities using a softmax.

        :param latent_dim: Dimension of the last layer
            of the policy network (before the action layer)
        :return: the linear layer producing the logits
        """
        return nn.Linear(latent_dim, self.action_dim)

    def proba_distribution(self, action_logits: th.Tensor) -> "CategoricalDistribution":
        self.distribution = Categorical(logits=action_logits)
        return self

    def log_prob(self, actions: th.Tensor) -> th.Tensor:
        return self.distribution.log_prob(actions)

    def entropy(self) -> th.Tensor:
        return self.distribution.entropy()

    def sample(self) -> th.Tensor:
        return self.distribution.sample()

    def mode(self) -> th.Tensor:
        # Most probable action = argmax over the class probabilities
        return th.argmax(self.distribution.probs, dim=1)

    def actions_from_params(self, action_logits: th.Tensor, deterministic: bool = False) -> th.Tensor:
        # Refresh the distribution, then draw from it
        self.proba_distribution(action_logits)
        return self.get_actions(deterministic=deterministic)

    def log_prob_from_params(self, action_logits: th.Tensor) -> Tuple[th.Tensor, th.Tensor]:
        actions = self.actions_from_params(action_logits)
        return actions, self.log_prob(actions)
class MultiCategoricalDistribution(Distribution):
    """
    MultiCategorical distribution for multi discrete actions.

    :param action_dims: List of sizes of discrete action spaces
    """

    def __init__(self, action_dims: List[int]):
        super().__init__()
        self.action_dims = action_dims

    def proba_distribution_net(self, latent_dim: int) -> nn.Module:
        """
        Create the layer that represents the distribution:
        it will be the logits (flattened) of the MultiCategorical distribution.
        You can then get probabilities using a softmax on each sub-space.

        :param latent_dim: Dimension of the last layer
            of the policy network (before the action layer)
        :return: the linear layer producing the concatenated logits
        """
        return nn.Linear(latent_dim, sum(self.action_dims))

    def proba_distribution(self, action_logits: th.Tensor) -> "MultiCategoricalDistribution":
        # One Categorical per sub action space, sliced out of the flat logits
        logits_per_dim = th.split(action_logits, tuple(self.action_dims), dim=1)
        self.distribution = [Categorical(logits=logits) for logits in logits_per_dim]
        return self

    def log_prob(self, actions: th.Tensor) -> th.Tensor:
        # Log prob of each sub-action under its own Categorical, summed up
        log_probs = [
            dist.log_prob(action) for dist, action in zip(self.distribution, th.unbind(actions, dim=1))
        ]
        return th.stack(log_probs, dim=1).sum(dim=1)

    def entropy(self) -> th.Tensor:
        return th.stack([dist.entropy() for dist in self.distribution], dim=1).sum(dim=1)

    def sample(self) -> th.Tensor:
        return th.stack([dist.sample() for dist in self.distribution], dim=1)

    def mode(self) -> th.Tensor:
        return th.stack([th.argmax(dist.probs, dim=1) for dist in self.distribution], dim=1)

    def actions_from_params(self, action_logits: th.Tensor, deterministic: bool = False) -> th.Tensor:
        # Refresh the distribution, then draw from it
        self.proba_distribution(action_logits)
        return self.get_actions(deterministic=deterministic)

    def log_prob_from_params(self, action_logits: th.Tensor) -> Tuple[th.Tensor, th.Tensor]:
        actions = self.actions_from_params(action_logits)
        return actions, self.log_prob(actions)
class BernoulliDistribution(Distribution):
    """
    Bernoulli distribution for MultiBinary action spaces.

    :param action_dims: Number of binary actions
    """

    def __init__(self, action_dims: int):
        super().__init__()
        self.action_dims = action_dims

    def proba_distribution_net(self, latent_dim: int) -> nn.Module:
        """
        Create the layer that represents the distribution:
        it will be the logits of the Bernoulli distribution.

        :param latent_dim: Dimension of the last layer
            of the policy network (before the action layer)
        :return: the linear layer producing the logits
        """
        return nn.Linear(latent_dim, self.action_dims)

    def proba_distribution(self, action_logits: th.Tensor) -> "BernoulliDistribution":
        self.distribution = Bernoulli(logits=action_logits)
        return self

    def log_prob(self, actions: th.Tensor) -> th.Tensor:
        # Binary flags are independent: sum their log probabilities
        return self.distribution.log_prob(actions).sum(dim=1)

    def entropy(self) -> th.Tensor:
        return self.distribution.entropy().sum(dim=1)

    def sample(self) -> th.Tensor:
        return self.distribution.sample()

    def mode(self) -> th.Tensor:
        # Rounding the probabilities gives the most likely 0/1 vector
        return th.round(self.distribution.probs)

    def actions_from_params(self, action_logits: th.Tensor, deterministic: bool = False) -> th.Tensor:
        # Refresh the distribution, then draw from it
        self.proba_distribution(action_logits)
        return self.get_actions(deterministic=deterministic)

    def log_prob_from_params(self, action_logits: th.Tensor) -> Tuple[th.Tensor, th.Tensor]:
        actions = self.actions_from_params(action_logits)
        return actions, self.log_prob(actions)
class StateDependentNoiseDistribution(Distribution):
    """
    Distribution class for using generalized State Dependent Exploration (gSDE).
    Paper: https://arxiv.org/abs/2005.05719
    It is used to create the noise exploration matrix and
    compute the log probability of an action with that noise.
    :param action_dim: Dimension of the action space.
    :param full_std: Whether to use (n_features x n_actions) parameters
        for the std instead of only (n_features,)
    :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure
        a positive standard deviation (cf paper). It allows to keep variance
        above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
    :param squash_output: Whether to squash the output using a tanh function,
        this ensures bounds are satisfied.
    :param learn_features: Whether to learn features for gSDE or not.
        This will enable gradients to be backpropagated through the features
        ``latent_sde`` in the code.
    :param epsilon: small value to avoid NaN due to numerical imprecision.
    """
    def __init__(
        self,
        action_dim: int,
        full_std: bool = True,
        use_expln: bool = False,
        squash_output: bool = False,
        learn_features: bool = False,
        epsilon: float = 1e-6,
    ):
        super().__init__()
        self.action_dim = action_dim
        # Feature dimension used by the noise network (set in proba_distribution_net)
        self.latent_sde_dim = None
        self.mean_actions = None
        self.log_std = None
        # Distribution the exploration-matrix weights are drawn from (set in sample_weights)
        self.weights_dist = None
        # Single exploration matrix, and a batch of matrices for parallel envs
        self.exploration_mat = None
        self.exploration_matrices = None
        # Features from the last proba_distribution() call (detached unless learn_features)
        self._latent_sde = None
        self.use_expln = use_expln
        self.full_std = full_std
        self.epsilon = epsilon
        self.learn_features = learn_features
        if squash_output:
            # Squash actions into (-1, 1) with tanh and correct log-probs accordingly
            self.bijector = TanhBijector(epsilon)
        else:
            self.bijector = None
    def get_std(self, log_std: th.Tensor) -> th.Tensor:
        """
        Get the standard deviation from the learned parameter
        (log of it by default). This ensures that the std is positive.
        :param log_std: learned log standard deviation parameter
        :return: positive standard deviation, expanded to
            (latent_sde_dim, action_dim) when ``full_std`` is False
        """
        if self.use_expln:
            # From gSDE paper, it allows to keep variance
            # above zero and prevent it from growing too fast
            below_threshold = th.exp(log_std) * (log_std <= 0)
            # Avoid NaN: zeros values that are below zero
            safe_log_std = log_std * (log_std > 0) + self.epsilon
            above_threshold = (th.log1p(safe_log_std) + 1.0) * (log_std > 0)
            std = below_threshold + above_threshold
        else:
            # Use normal exponential
            std = th.exp(log_std)
        if self.full_std:
            return std
        # Reduce the number of parameters:
        return th.ones(self.latent_sde_dim, self.action_dim).to(log_std.device) * std
    def sample_weights(self, log_std: th.Tensor, batch_size: int = 1) -> None:
        """
        Sample weights for the noise exploration matrix,
        using a centered Gaussian distribution.
        :param log_std: learned log standard deviation parameter
        :param batch_size: number of per-env matrices to pre-sample
        """
        std = self.get_std(log_std)
        self.weights_dist = Normal(th.zeros_like(std), std)
        # Reparametrization trick to pass gradients
        self.exploration_mat = self.weights_dist.rsample()
        # Pre-compute matrices in case of parallel exploration
        self.exploration_matrices = self.weights_dist.rsample((batch_size,))
    def proba_distribution_net(
        self, latent_dim: int, log_std_init: float = -2.0, latent_sde_dim: Optional[int] = None
    ) -> Tuple[nn.Module, nn.Parameter]:
        """
        Create the layers and parameter that represent the distribution:
        one output will be the deterministic action, the other parameter will be the
        standard deviation of the distribution that control the weights of the noise matrix.
        :param latent_dim: Dimension of the last layer of the policy (before the action layer)
        :param log_std_init: Initial value for the log standard deviation
        :param latent_sde_dim: Dimension of the last layer of the features extractor
            for gSDE. By default, it is shared with the policy network.
        :return: the mean network and the trainable log-std parameter
        """
        # Network for the deterministic action, it represents the mean of the distribution
        mean_actions_net = nn.Linear(latent_dim, self.action_dim)
        # When we learn features for the noise, the feature dimension
        # can be different between the policy and the noise network
        self.latent_sde_dim = latent_dim if latent_sde_dim is None else latent_sde_dim
        # Reduce the number of parameters if needed
        log_std = th.ones(self.latent_sde_dim, self.action_dim) if self.full_std else th.ones(self.latent_sde_dim, 1)
        # Transform it to a parameter so it can be optimized
        log_std = nn.Parameter(log_std * log_std_init, requires_grad=True)
        # Sample an exploration matrix
        self.sample_weights(log_std)
        return mean_actions_net, log_std
    def proba_distribution(
        self, mean_actions: th.Tensor, log_std: th.Tensor, latent_sde: th.Tensor
    ) -> "StateDependentNoiseDistribution":
        """
        Create the distribution given its parameters (mean, std)
        :param mean_actions: deterministic part of the action
        :param log_std: learned log standard deviation parameter
        :param latent_sde: features used to condition the noise
        :return: self
        """
        # Stop gradient if we don't want to influence the features
        self._latent_sde = latent_sde if self.learn_features else latent_sde.detach()
        variance = th.mm(self._latent_sde**2, self.get_std(log_std) ** 2)
        self.distribution = Normal(mean_actions, th.sqrt(variance + self.epsilon))
        return self
    def log_prob(self, actions: th.Tensor) -> th.Tensor:
        if self.bijector is not None:
            # Recover the pre-squash actions before evaluating the Gaussian
            gaussian_actions = self.bijector.inverse(actions)
        else:
            gaussian_actions = actions
        # log likelihood for a gaussian
        log_prob = self.distribution.log_prob(gaussian_actions)
        # Sum along action dim
        log_prob = sum_independent_dims(log_prob)
        if self.bijector is not None:
            # Squash correction (from original SAC implementation)
            log_prob -= th.sum(self.bijector.log_prob_correction(gaussian_actions), dim=1)
        return log_prob
    def entropy(self) -> Optional[th.Tensor]:
        if self.bijector is not None:
            # No analytical form,
            # entropy needs to be estimated using -log_prob.mean()
            return None
        return sum_independent_dims(self.distribution.entropy())
    def sample(self) -> th.Tensor:
        # State-dependent noise added to the deterministic mean
        noise = self.get_noise(self._latent_sde)
        actions = self.distribution.mean + noise
        if self.bijector is not None:
            return self.bijector.forward(actions)
        return actions
    def mode(self) -> th.Tensor:
        actions = self.distribution.mean
        if self.bijector is not None:
            return self.bijector.forward(actions)
        return actions
    def get_noise(self, latent_sde: th.Tensor) -> th.Tensor:
        latent_sde = latent_sde if self.learn_features else latent_sde.detach()
        # Default case: only one exploration matrix
        if len(latent_sde) == 1 or len(latent_sde) != len(self.exploration_matrices):
            return th.mm(latent_sde, self.exploration_mat)
        # Use batch matrix multiplication for efficient computation
        # (batch_size, n_features) -> (batch_size, 1, n_features)
        latent_sde = latent_sde.unsqueeze(1)
        # (batch_size, 1, n_actions)
        noise = th.bmm(latent_sde, self.exploration_matrices)
        return noise.squeeze(1)
    def actions_from_params(
        self, mean_actions: th.Tensor, log_std: th.Tensor, latent_sde: th.Tensor, deterministic: bool = False
    ) -> th.Tensor:
        # Update the proba distribution
        self.proba_distribution(mean_actions, log_std, latent_sde)
        return self.get_actions(deterministic=deterministic)
    def log_prob_from_params(
        self, mean_actions: th.Tensor, log_std: th.Tensor, latent_sde: th.Tensor
    ) -> Tuple[th.Tensor, th.Tensor]:
        actions = self.actions_from_params(mean_actions, log_std, latent_sde)
        log_prob = self.log_prob(actions)
        return actions, log_prob
class TanhBijector:
    """
    Bijective transformation of a probability distribution
    using a squashing function (tanh)
    TODO: use Pyro instead (https://pyro.ai/)

    :param epsilon: small value to avoid NaN due to numerical imprecision.
    """

    def __init__(self, epsilon: float = 1e-6):
        super().__init__()
        self.epsilon = epsilon

    @staticmethod
    def forward(x: th.Tensor) -> th.Tensor:
        # Squash into (-1, 1)
        return th.tanh(x)

    @staticmethod
    def atanh(x: th.Tensor) -> th.Tensor:
        """
        Inverse of Tanh
        Taken from Pyro: https://github.com/pyro-ppl/pyro
        0.5 * torch.log((1 + x ) / (1 - x))
        """
        return 0.5 * (x.log1p() - (-x).log1p())

    @staticmethod
    def inverse(y: th.Tensor) -> th.Tensor:
        """
        Inverse tanh.

        :param y: squashed value in [-1, 1]
        :return: the pre-squash value
        """
        # Keep y strictly inside (-1, 1) so atanh stays finite
        eps = th.finfo(y.dtype).eps
        clipped = y.clamp(min=-1.0 + eps, max=1.0 - eps)
        return TanhBijector.atanh(clipped)

    def log_prob_correction(self, x: th.Tensor) -> th.Tensor:
        # Squash correction (from original SAC implementation)
        squashed = th.tanh(x)
        return th.log(1.0 - squashed**2 + self.epsilon)
def make_proba_distribution(
    action_space: gym.spaces.Space, use_sde: bool = False, dist_kwargs: Optional[Dict[str, Any]] = None
) -> Distribution:
    """
    Return an instance of Distribution for the correct type of action space

    :param action_space: the input action space
    :param use_sde: Force the use of StateDependentNoiseDistribution
        instead of DiagGaussianDistribution
    :param dist_kwargs: Keyword arguments to pass to the probability distribution
    :return: the appropriate Distribution object
    """
    if dist_kwargs is None:
        dist_kwargs = {}
    if isinstance(action_space, spaces.Box):
        assert len(action_space.shape) == 1, "Error: the action space must be a vector"
        # gSDE replaces the plain diagonal Gaussian when requested
        dist_cls = StateDependentNoiseDistribution if use_sde else DiagGaussianDistribution
        return dist_cls(get_action_dim(action_space), **dist_kwargs)
    if isinstance(action_space, spaces.Discrete):
        return CategoricalDistribution(action_space.n, **dist_kwargs)
    if isinstance(action_space, spaces.MultiDiscrete):
        return MultiCategoricalDistribution(action_space.nvec, **dist_kwargs)
    if isinstance(action_space, spaces.MultiBinary):
        return BernoulliDistribution(action_space.n, **dist_kwargs)
    raise NotImplementedError(
        "Error: probability distribution, not implemented for action space"
        f"of type {type(action_space)}."
        " Must be of type Gym Spaces: Box, Discrete, MultiDiscrete or MultiBinary."
    )
def kl_divergence(dist_true: Distribution, dist_pred: Distribution) -> th.Tensor:
    """
    Wrapper for the PyTorch implementation of the full form KL Divergence

    :param dist_true: the p distribution
    :param dist_pred: the q distribution
    :return: KL(dist_true||dist_pred)
    """
    # KL Divergence for different distribution types is out of scope
    assert dist_true.__class__ == dist_pred.__class__, "Error: input distributions should be the same type"
    # MultiCategoricalDistribution is not a PyTorch Distribution subclass,
    # so compute the KL per sub-distribution and sum across them.
    if isinstance(dist_pred, MultiCategoricalDistribution):
        assert dist_pred.action_dims == dist_true.action_dims, "Error: distributions must have the same input space"
        per_dim_kl = [
            th.distributions.kl_divergence(p, q)
            for p, q in zip(dist_true.distribution, dist_pred.distribution)
        ]
        return th.stack(per_dim_kl, dim=1).sum(dim=1)
    # All other distributions wrap a single PyTorch distribution object
    return th.distributions.kl_divergence(dist_true.distribution, dist_pred.distribution)
import os
from typing import Any, Callable, Dict, Optional, Type, Union
import gym
from stable_baselines3.common.atari_wrappers import AtariWrapper
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv, VecEnv
def unwrap_wrapper(env: gym.Env, wrapper_class: Type[gym.Wrapper]) -> Optional[gym.Wrapper]:
    """
    Retrieve a wrapper of type ``wrapper_class`` by walking down the wrapper chain.

    :param env: Environment to unwrap
    :param wrapper_class: Wrapper to look for
    :return: Environment unwrapped till ``wrapper_class`` if it has been wrapped with it
    """
    current = env
    while isinstance(current, gym.Wrapper):
        if isinstance(current, wrapper_class):
            return current
        current = current.env
    return None
def is_wrapped(env: Type[gym.Env], wrapper_class: Type[gym.Wrapper]) -> bool:
    """
    Check if a given environment has been wrapped with a given wrapper.

    :param env: Environment to check
    :param wrapper_class: Wrapper class to look for
    :return: True if environment has been wrapped with ``wrapper_class``.
    """
    found = unwrap_wrapper(env, wrapper_class)
    return found is not None
def make_vec_env(
    env_id: Union[str, Type[gym.Env]],
    n_envs: int = 1,
    seed: Optional[int] = None,
    start_index: int = 0,
    monitor_dir: Optional[str] = None,
    wrapper_class: Optional[Callable[[gym.Env], gym.Env]] = None,
    env_kwargs: Optional[Dict[str, Any]] = None,
    vec_env_cls: Optional[Type[Union[DummyVecEnv, SubprocVecEnv]]] = None,
    vec_env_kwargs: Optional[Dict[str, Any]] = None,
    monitor_kwargs: Optional[Dict[str, Any]] = None,
    wrapper_kwargs: Optional[Dict[str, Any]] = None,
) -> VecEnv:
    """
    Create a wrapped, monitored ``VecEnv``.
    By default it uses a ``DummyVecEnv`` which is usually faster
    than a ``SubprocVecEnv``.

    :param env_id: the environment ID or the environment class
    :param n_envs: the number of environments you wish to have in parallel
    :param seed: the initial seed for the random number generator
    :param start_index: start rank index
    :param monitor_dir: Path to a folder where the monitor files will be saved.
        If None, no file will be written, however, the env will still be wrapped
        in a Monitor wrapper to provide additional information about training.
    :param wrapper_class: Additional wrapper to use on the environment.
        This can also be a function with single argument that wraps the environment in many things.
    :param env_kwargs: Optional keyword argument to pass to the env constructor
    :param vec_env_cls: A custom ``VecEnv`` class constructor. Default: None.
    :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.
    :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.
    :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.
    :return: The wrapped environment
    """
    # Normalize all optional kwargs dicts (avoids mutable default arguments)
    env_kwargs = {} if env_kwargs is None else env_kwargs
    vec_env_kwargs = {} if vec_env_kwargs is None else vec_env_kwargs
    monitor_kwargs = {} if monitor_kwargs is None else monitor_kwargs
    wrapper_kwargs = {} if wrapper_kwargs is None else wrapper_kwargs

    def make_env(rank):
        # Return a thunk so each worker (possibly a subprocess) builds its
        # own environment instance lazily; ``rank`` makes seeds/log paths unique.
        def _init():
            if isinstance(env_id, str):
                env = gym.make(env_id, **env_kwargs)
            else:
                env = env_id(**env_kwargs)
            if seed is not None:
                # Offset the seed per rank so parallel envs differ
                env.seed(seed + rank)
                env.action_space.seed(seed + rank)
            # Wrap the env in a Monitor wrapper
            # to have additional training information
            monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None
            # Create the monitor folder if needed
            if monitor_path is not None:
                os.makedirs(monitor_dir, exist_ok=True)
            env = Monitor(env, filename=monitor_path, **monitor_kwargs)
            # Optionally, wrap the environment with the provided wrapper
            if wrapper_class is not None:
                env = wrapper_class(env, **wrapper_kwargs)
            return env

        return _init

    # No custom VecEnv is passed
    if vec_env_cls is None:
        # Default: use a DummyVecEnv
        vec_env_cls = DummyVecEnv
    return vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)
def make_atari_env(
    env_id: Union[str, Type[gym.Env]],
    n_envs: int = 1,
    seed: Optional[int] = None,
    start_index: int = 0,
    monitor_dir: Optional[str] = None,
    wrapper_kwargs: Optional[Dict[str, Any]] = None,
    env_kwargs: Optional[Dict[str, Any]] = None,
    vec_env_cls: Optional[Union[DummyVecEnv, SubprocVecEnv]] = None,
    vec_env_kwargs: Optional[Dict[str, Any]] = None,
    monitor_kwargs: Optional[Dict[str, Any]] = None,
) -> VecEnv:
    """
    Create a wrapped, monitored VecEnv for Atari.
    It is a thin layer over ``make_vec_env`` that adds the common Atari
    preprocessing (``AtariWrapper``) to every environment.

    :param env_id: the environment ID or the environment class
    :param n_envs: the number of environments you wish to have in parallel
    :param seed: the initial seed for the random number generator
    :param start_index: start rank index
    :param monitor_dir: Path to a folder where the monitor files will be saved.
        If None, no file will be written, however, the env will still be wrapped
        in a Monitor wrapper to provide additional information about training.
    :param wrapper_kwargs: Optional keyword argument to pass to the ``AtariWrapper``
    :param env_kwargs: Optional keyword argument to pass to the env constructor
    :param vec_env_cls: A custom ``VecEnv`` class constructor. Default: None.
    :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.
    :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.
    :return: The wrapped environment
    """
    if wrapper_kwargs is None:
        wrapper_kwargs = {}

    def atari_wrapper(env: gym.Env) -> gym.Env:
        # Apply the standard Atari preprocessing stack
        return AtariWrapper(env, **wrapper_kwargs)

    return make_vec_env(
        env_id,
        n_envs=n_envs,
        seed=seed,
        start_index=start_index,
        monitor_dir=monitor_dir,
        wrapper_class=atari_wrapper,
        env_kwargs=env_kwargs,
        vec_env_cls=vec_env_cls,
        vec_env_kwargs=vec_env_kwargs,
        monitor_kwargs=monitor_kwargs,
    )
import warnings
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import gym
import numpy as np
from stable_baselines3.common import base_class
from stable_baselines3.common.vec_env import DummyVecEnv, VecEnv, VecMonitor, is_vecenv_wrapped
def evaluate_policy(
    model: "base_class.BaseAlgorithm",
    env: Union[gym.Env, VecEnv],
    n_eval_episodes: int = 10,
    deterministic: bool = True,
    render: bool = False,
    callback: Optional[Callable[[Dict[str, Any], Dict[str, Any]], None]] = None,
    reward_threshold: Optional[float] = None,
    return_episode_rewards: bool = False,
    warn: bool = True,
) -> Union[Tuple[float, float], Tuple[List[float], List[int]]]:
    """
    Runs policy for ``n_eval_episodes`` episodes and returns average reward.
    If a vector env is passed in, this divides the episodes to evaluate onto the
    different elements of the vector env. This static division of work is done to
    remove bias. See https://github.com/DLR-RM/stable-baselines3/issues/402 for more
    details and discussion.

    .. note::
        If environment has not been wrapped with ``Monitor`` wrapper, reward and
        episode lengths are counted as it appears with ``env.step`` calls. If
        the environment contains wrappers that modify rewards or episode lengths
        (e.g. reward scaling, early episode reset), these will affect the evaluation
        results as well. You can avoid this by wrapping environment with ``Monitor``
        wrapper before anything else.

    :param model: The RL agent you want to evaluate.
    :param env: The gym environment or ``VecEnv`` environment.
    :param n_eval_episodes: Number of episode to evaluate the agent
    :param deterministic: Whether to use deterministic or stochastic actions
    :param render: Whether to render the environment or not
    :param callback: callback function to do additional checks,
        called after each step. Gets locals() and globals() passed as parameters.
    :param reward_threshold: Minimum expected reward per episode,
        this will raise an error if the performance is not met
    :param return_episode_rewards: If True, a list of rewards and episode lengths
        per episode will be returned instead of the mean.
    :param warn: If True (default), warns user about lack of a Monitor wrapper in the
        evaluation environment.
    :return: Mean reward per episode, std of reward per episode.
        Returns ([float], [int]) when ``return_episode_rewards`` is True, first
        list containing per-episode rewards and second containing per-episode lengths
        (in number of steps).
    """
    is_monitor_wrapped = False
    # Avoid circular import
    from stable_baselines3.common.monitor import Monitor

    if not isinstance(env, VecEnv):
        env = DummyVecEnv([lambda: env])
    is_monitor_wrapped = is_vecenv_wrapped(env, VecMonitor) or env.env_is_wrapped(Monitor)[0]
    if not is_monitor_wrapped and warn:
        warnings.warn(
            "Evaluation environment is not wrapped with a ``Monitor`` wrapper. "
            "This may result in reporting modified episode lengths and rewards, if other wrappers happen to modify these. "
            "Consider wrapping environment first with ``Monitor`` wrapper.",
            UserWarning,
        )
    n_envs = env.num_envs
    episode_rewards = []
    episode_lengths = []
    episode_counts = np.zeros(n_envs, dtype="int")
    # Divides episodes among different sub environments in the vector as evenly as possible
    episode_count_targets = np.array([(n_eval_episodes + i) // n_envs for i in range(n_envs)], dtype="int")
    current_rewards = np.zeros(n_envs)
    current_lengths = np.zeros(n_envs, dtype="int")
    observations = env.reset()
    states = None
    episode_starts = np.ones((env.num_envs,), dtype=bool)
    # Keep stepping until every sub-env has completed its share of episodes
    while (episode_counts < episode_count_targets).any():
        actions, states = model.predict(observations, state=states, episode_start=episode_starts, deterministic=deterministic)
        observations, rewards, dones, infos = env.step(actions)
        current_rewards += rewards
        current_lengths += 1
        for i in range(n_envs):
            if episode_counts[i] < episode_count_targets[i]:
                # unpack values so that the callback can access the local variables
                reward = rewards[i]
                done = dones[i]
                info = infos[i]
                episode_starts[i] = done
                if callback is not None:
                    callback(locals(), globals())
                if dones[i]:
                    if is_monitor_wrapped:
                        # Atari wrapper can send a "done" signal when
                        # the agent loses a life, but it does not correspond
                        # to the true end of episode
                        if "episode" in info.keys():
                            # Do not trust "done" with episode endings.
                            # Monitor wrapper includes "episode" key in info if environment
                            # has been wrapped with it. Use those rewards instead.
                            episode_rewards.append(info["episode"]["r"])
                            episode_lengths.append(info["episode"]["l"])
                            # Only increment at the real end of an episode
                            episode_counts[i] += 1
                    else:
                        episode_rewards.append(current_rewards[i])
                        episode_lengths.append(current_lengths[i])
                        episode_counts[i] += 1
                    # Reset running totals for the next episode of this sub-env
                    current_rewards[i] = 0
                    current_lengths[i] = 0
        if render:
            env.render()
    mean_reward = np.mean(episode_rewards)
    std_reward = np.std(episode_rewards)
    if reward_threshold is not None:
        assert mean_reward > reward_threshold, "Mean reward below threshold: " f"{mean_reward:.2f} < {reward_threshold:.2f}"
    if return_episode_rewards:
        return episode_rewards, episode_lengths
    return mean_reward, std_reward
# Public API of this module (used by ``from ... import *``).
__all__ = ["Monitor", "ResultsWriter", "get_monitor_files", "load_results"]
import csv
import json
import os
import time
from glob import glob
from typing import Dict, List, Optional, Tuple, Union
import gym
import numpy as np
import pandas
from stable_baselines3.common.type_aliases import GymObs, GymStepReturn
class Monitor(gym.Wrapper):
    """
    A monitor wrapper for Gym environments, it is used to know the episode reward, length, time and other data.

    :param env: The environment
    :param filename: the location to save a log file, can be None for no log
    :param allow_early_resets: allows the reset of the environment before it is done
    :param reset_keywords: extra keywords for the reset call,
        if extra parameters are needed at reset
    :param info_keywords: extra information to log, from the information return of env.step()
    """

    # File extension shared by every monitor log file
    EXT = "monitor.csv"

    def __init__(
        self,
        env: gym.Env,
        filename: Optional[str] = None,
        allow_early_resets: bool = True,
        reset_keywords: Tuple[str, ...] = (),
        info_keywords: Tuple[str, ...] = (),
    ):
        super().__init__(env=env)
        self.t_start = time.time()
        if filename is not None:
            self.results_writer = ResultsWriter(
                filename,
                # ``env.spec`` may be None for custom envs, hence the short-circuit
                header={"t_start": self.t_start, "env_id": env.spec and env.spec.id},
                extra_keys=reset_keywords + info_keywords,
            )
        else:
            self.results_writer = None
        self.reset_keywords = reset_keywords
        self.info_keywords = info_keywords
        self.allow_early_resets = allow_early_resets
        self.rewards = None  # per-step rewards of the current episode (None until first reset)
        self.needs_reset = True
        self.episode_returns = []
        self.episode_lengths = []
        self.episode_times = []
        self.total_steps = 0
        self.current_reset_info = {}  # extra info about the current episode, that was passed in during reset()

    def reset(self, **kwargs) -> GymObs:
        """
        Calls the Gym environment reset. Can only be called if the environment is over, or if allow_early_resets is True

        :param kwargs: Extra keywords saved for the next episode. only if defined by reset_keywords
        :return: the first observation of the environment
        """
        if not self.allow_early_resets and not self.needs_reset:
            raise RuntimeError(
                "Tried to reset an environment before done. If you want to allow early resets, "
                "wrap your env with Monitor(env, path, allow_early_resets=True)"
            )
        self.rewards = []
        self.needs_reset = False
        # Record the values of all declared reset keywords; they are logged
        # with every episode row until the next reset.
        for key in self.reset_keywords:
            value = kwargs.get(key)
            if value is None:
                raise ValueError(f"Expected you to pass keyword argument {key} into reset")
            self.current_reset_info[key] = value
        return self.env.reset(**kwargs)

    def step(self, action: Union[np.ndarray, int]) -> GymStepReturn:
        """
        Step the environment with the given action

        :param action: the action
        :return: observation, reward, done, information
        """
        if self.needs_reset:
            raise RuntimeError("Tried to step environment that needs reset")
        observation, reward, done, info = self.env.step(action)
        self.rewards.append(reward)
        if done:
            self.needs_reset = True
            ep_rew = sum(self.rewards)
            ep_len = len(self.rewards)
            # Episode summary: return ("r"), length ("l") and wall-clock time ("t")
            ep_info = {"r": round(ep_rew, 6), "l": ep_len, "t": round(time.time() - self.t_start, 6)}
            for key in self.info_keywords:
                ep_info[key] = info[key]
            self.episode_returns.append(ep_rew)
            self.episode_lengths.append(ep_len)
            self.episode_times.append(time.time() - self.t_start)
            ep_info.update(self.current_reset_info)
            if self.results_writer:
                self.results_writer.write_row(ep_info)
            # Expose the summary to callers (e.g. evaluate_policy) via the info dict
            info["episode"] = ep_info
        self.total_steps += 1
        return observation, reward, done, info

    def close(self) -> None:
        """
        Closes the environment
        """
        super().close()
        if self.results_writer is not None:
            self.results_writer.close()

    def get_total_steps(self) -> int:
        """
        Returns the total number of timesteps

        :return:
        """
        return self.total_steps

    def get_episode_rewards(self) -> List[float]:
        """
        Returns the rewards of all the episodes

        :return:
        """
        return self.episode_returns

    def get_episode_lengths(self) -> List[int]:
        """
        Returns the number of timesteps of all the episodes

        :return:
        """
        return self.episode_lengths

    def get_episode_times(self) -> List[float]:
        """
        Returns the runtime in seconds of all the episodes

        :return:
        """
        return self.episode_times
class LoadMonitorResultsError(Exception):
    """Error raised when monitor log files cannot be found or parsed."""
class ResultsWriter:
    """
    A result writer that saves the data from the `Monitor` class

    :param filename: the location to save a log file, can be None for no log
    :param header: the header dictionary object of the saved csv
    :param extra_keys: the extra information to log, typically is composed of
        ``reset_keywords`` and ``info_keywords``
    """

    def __init__(
        self,
        filename: str = "",
        header: Optional[Dict[str, Union[float, str]]] = None,
        extra_keys: Tuple[str, ...] = (),
    ):
        if header is None:
            header = {}
        # Normalize the path so it always ends with the monitor extension
        if not filename.endswith(Monitor.EXT):
            if os.path.isdir(filename):
                filename = os.path.join(filename, Monitor.EXT)
            else:
                filename = filename + "." + Monitor.EXT
        # Prevent newline issue on Windows, see GH issue #692
        self.file_handler = open(filename, "wt", newline="\n")
        # First line is a '#'-prefixed JSON header (parsed by ``load_results``)
        self.file_handler.write("#%s\n" % json.dumps(header))
        self.logger = csv.DictWriter(self.file_handler, fieldnames=("r", "l", "t") + extra_keys)
        self.logger.writeheader()
        self.file_handler.flush()

    def write_row(self, epinfo: Dict[str, Union[float, int]]) -> None:
        """
        Write one episode entry to the CSV log and flush it to disk.

        :param epinfo: the information on episodic return, length, and time
        """
        if self.logger:
            self.logger.writerow(epinfo)
            self.file_handler.flush()

    def close(self) -> None:
        """
        Close the file handler
        """
        self.file_handler.close()
def get_monitor_files(path: str) -> List[str]:
    """
    Collect every monitor log file under the given folder.

    :param path: the logging folder
    :return: the log files
    """
    pattern = os.path.join(path, "*" + Monitor.EXT)
    return glob(pattern)
def load_results(path: str) -> pandas.DataFrame:
    """
    Load all Monitor logs from a given directory path matching ``*monitor.csv``

    :param path: the directory path containing the log file(s)
    :return: the logged data
    """
    log_files = get_monitor_files(path)
    if len(log_files) == 0:
        raise LoadMonitorResultsError(f"No monitor files of the form *{Monitor.EXT} found in {path}")
    frames, start_times = [], []
    for file_name in log_files:
        with open(file_name) as handle:
            # The first line is a '#'-prefixed JSON header written by ResultsWriter
            first_line = handle.readline()
            assert first_line[0] == "#"
            header = json.loads(first_line[1:])
            frame = pandas.read_csv(handle, index_col=None)
            start_times.append(header["t_start"])
            # Convert per-episode times to absolute timestamps
            frame["t"] += header["t_start"]
        frames.append(frame)
    combined = pandas.concat(frames)
    combined.sort_values("t", inplace=True)
    combined.reset_index(inplace=True)
    # Re-base times on the earliest run start
    combined["t"] -= min(start_times)
    return combined
from itertools import zip_longest
from typing import Dict, List, Tuple, Type, Union
import gym
import torch as th
from torch import nn
from stable_baselines3.common.preprocessing import get_flattened_obs_dim, is_image_space
from stable_baselines3.common.type_aliases import TensorDict
from stable_baselines3.common.utils import get_device
class BaseFeaturesExtractor(nn.Module):
    """
    Common interface for all features extractors.

    :param observation_space: the observation space of the environment
    :param features_dim: Number of features extracted.
    """

    def __init__(self, observation_space: gym.Space, features_dim: int = 0):
        super().__init__()
        assert features_dim > 0
        self._features_dim = features_dim
        self._observation_space = observation_space

    @property
    def features_dim(self) -> int:
        # Read-only view of the output dimensionality
        return self._features_dim

    def forward(self, observations: th.Tensor) -> th.Tensor:
        raise NotImplementedError()
class FlattenExtractor(BaseFeaturesExtractor):
    """
    Features extractor that simply flattens the observation.
    Acts as a placeholder when no real feature extraction is needed.

    :param observation_space: the observation space to flatten
    """

    def __init__(self, observation_space: gym.Space):
        flat_dim = get_flattened_obs_dim(observation_space)
        super().__init__(observation_space, flat_dim)
        self.flatten = nn.Flatten()

    def forward(self, observations: th.Tensor) -> th.Tensor:
        return self.flatten(observations)
class NatureCNN(BaseFeaturesExtractor):
    """
    CNN from DQN nature paper:
        Mnih, Volodymyr, et al.
        "Human-level control through deep reinforcement learning."
        Nature 518.7540 (2015): 529-533.

    :param observation_space:
    :param features_dim: Number of features extracted.
        This corresponds to the number of unit for the last layer.
    """

    def __init__(self, observation_space: gym.spaces.Box, features_dim: int = 512):
        super().__init__(observation_space, features_dim)
        # We assume CxHxW images (channels first)
        # Re-ordering will be done by pre-preprocessing or wrapper
        assert is_image_space(observation_space, check_channels=False), (
            "You should use NatureCNN "
            f"only with images not with {observation_space}\n"
            "(you are probably using `CnnPolicy` instead of `MlpPolicy` or `MultiInputPolicy`)\n"
            "If you are using a custom environment,\n"
            "please check it using our env checker:\n"
            "https://stable-baselines3.readthedocs.io/en/master/common/env_checker.html"
        )
        # Channels-first layout: shape[0] is the number of input channels
        n_input_channels = observation_space.shape[0]
        # Three conv layers (8x8/4, 4x4/2, 3x3/1) as in the Nature DQN paper
        self.cnn = nn.Sequential(
            nn.Conv2d(n_input_channels, 32, kernel_size=8, stride=4, padding=0),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=0),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0),
            nn.ReLU(),
            nn.Flatten(),
        )
        # Compute shape by doing one forward pass
        with th.no_grad():
            n_flatten = self.cnn(th.as_tensor(observation_space.sample()[None]).float()).shape[1]
        self.linear = nn.Sequential(nn.Linear(n_flatten, features_dim), nn.ReLU())

    def forward(self, observations: th.Tensor) -> th.Tensor:
        return self.linear(self.cnn(observations))
def create_mlp(
    input_dim: int,
    output_dim: int,
    net_arch: List[int],
    activation_fn: Type[nn.Module] = nn.ReLU,
    squash_output: bool = False,
) -> List[nn.Module]:
    """
    Create a multi layer perceptron (MLP): a stack of fully-connected layers,
    each followed by an activation function.

    :param input_dim: Dimension of the input vector
    :param output_dim: Dimension of the output; if 0, no final linear layer is added
    :param net_arch: Architecture of the neural net:
        number of units per hidden layer, one entry per layer
    :param activation_fn: The activation function
        to use after each layer.
    :param squash_output: Whether to squash the output using a Tanh
        activation function
    :return: the list of layers (not yet wrapped in ``nn.Sequential``)
    """
    layers: List[nn.Module] = []
    if len(net_arch) > 0:
        layers.append(nn.Linear(input_dim, net_arch[0]))
        layers.append(activation_fn())
    # Hidden-to-hidden connections
    for in_size, out_size in zip(net_arch[:-1], net_arch[1:]):
        layers.append(nn.Linear(in_size, out_size))
        layers.append(activation_fn())
    if output_dim > 0:
        last_hidden_dim = net_arch[-1] if len(net_arch) > 0 else input_dim
        layers.append(nn.Linear(last_hidden_dim, output_dim))
    if squash_output:
        layers.append(nn.Tanh())
    return layers
class MlpExtractor(nn.Module):
    """
    Constructs an MLP that receives the output from a previous feature extractor (i.e. a CNN) or directly
    the observations (if no feature extractor is applied) as an input and outputs a latent representation
    for the policy and a value network.
    The ``net_arch`` parameter allows to specify the amount and size of the hidden layers and how many
    of them are shared between the policy network and the value network. It is assumed to be a list with the following
    structure:

    1. An arbitrary length (zero allowed) number of integers each specifying the number of units in a shared layer.
       If the number of ints is zero, there will be no shared layers.
    2. An optional dict, to specify the following non-shared layers for the value network and the policy network.
       It is formatted like ``dict(vf=[<value layer sizes>], pi=[<policy layer sizes>])``.
       If it is missing any of the keys (pi or vf), no non-shared layers (empty list) is assumed.

    For example to construct a network with one shared layer of size 55 followed by two non-shared layers for the value
    network of size 255 and a single non-shared layer of size 128 for the policy network, the following layers_spec
    would be used: ``[55, dict(vf=[255, 255], pi=[128])]``. A simple shared network topology with two layers of size 128
    would be specified as [128, 128].

    Adapted from Stable Baselines.

    :param feature_dim: Dimension of the feature vector (can be the output of a CNN)
    :param net_arch: The specification of the policy and value networks.
        See above for details on its formatting.
    :param activation_fn: The activation function to use for the networks.
    :param device:
    """

    def __init__(
        self,
        feature_dim: int,
        net_arch: List[Union[int, Dict[str, List[int]]]],
        activation_fn: Type[nn.Module],
        device: Union[th.device, str] = "auto",
    ):
        super().__init__()
        device = get_device(device)
        shared_net, policy_net, value_net = [], [], []
        policy_only_layers = []  # Layer sizes of the network that only belongs to the policy network
        value_only_layers = []  # Layer sizes of the network that only belongs to the value network
        last_layer_dim_shared = feature_dim
        # Iterate through the shared layers and build the shared parts of the network
        for layer in net_arch:
            if isinstance(layer, int):  # Check that this is a shared layer
                # TODO: give layer a meaningful name
                shared_net.append(nn.Linear(last_layer_dim_shared, layer))  # add linear of size layer
                shared_net.append(activation_fn())
                last_layer_dim_shared = layer
            else:
                # The (optional) dict must be the last entry; it splits the network
                assert isinstance(layer, dict), "Error: the net_arch list can only contain ints and dicts"
                if "pi" in layer:
                    assert isinstance(layer["pi"], list), "Error: net_arch[-1]['pi'] must contain a list of integers."
                    policy_only_layers = layer["pi"]
                if "vf" in layer:
                    assert isinstance(layer["vf"], list), "Error: net_arch[-1]['vf'] must contain a list of integers."
                    value_only_layers = layer["vf"]
                break  # From here on the network splits up in policy and value network
        last_layer_dim_pi = last_layer_dim_shared
        last_layer_dim_vf = last_layer_dim_shared
        # Build the non-shared part of the network.
        # zip_longest pads the shorter list with None so the branches may differ in depth.
        for pi_layer_size, vf_layer_size in zip_longest(policy_only_layers, value_only_layers):
            if pi_layer_size is not None:
                assert isinstance(pi_layer_size, int), "Error: net_arch[-1]['pi'] must only contain integers."
                policy_net.append(nn.Linear(last_layer_dim_pi, pi_layer_size))
                policy_net.append(activation_fn())
                last_layer_dim_pi = pi_layer_size
            if vf_layer_size is not None:
                assert isinstance(vf_layer_size, int), "Error: net_arch[-1]['vf'] must only contain integers."
                value_net.append(nn.Linear(last_layer_dim_vf, vf_layer_size))
                value_net.append(activation_fn())
                last_layer_dim_vf = vf_layer_size
        # Save dim, used to create the distributions
        self.latent_dim_pi = last_layer_dim_pi
        self.latent_dim_vf = last_layer_dim_vf
        # Create networks
        # If the list of layers is empty, the network will just act as an Identity module
        self.shared_net = nn.Sequential(*shared_net).to(device)
        self.policy_net = nn.Sequential(*policy_net).to(device)
        self.value_net = nn.Sequential(*value_net).to(device)

    def forward(self, features: th.Tensor) -> Tuple[th.Tensor, th.Tensor]:
        """
        :return: latent_policy, latent_value of the specified network.
            If all layers are shared, then ``latent_policy == latent_value``
        """
        shared_latent = self.shared_net(features)
        return self.policy_net(shared_latent), self.value_net(shared_latent)

    def forward_actor(self, features: th.Tensor) -> th.Tensor:
        # Policy branch only (skips the value head)
        return self.policy_net(self.shared_net(features))

    def forward_critic(self, features: th.Tensor) -> th.Tensor:
        # Value branch only (skips the policy head)
        return self.value_net(self.shared_net(features))
class CombinedExtractor(BaseFeaturesExtractor):
    """
    Combined features extractor for Dict observation spaces.
    Each key of the space gets its own submodule (a ``NatureCNN`` for image
    subspaces, a ``Flatten`` otherwise); the per-key features are concatenated.

    :param observation_space: a Dict observation space
    :param cnn_output_dim: Number of features to output from each CNN submodule(s). Defaults to
        256 to avoid exploding network sizes.
    """

    def __init__(self, observation_space: gym.spaces.Dict, cnn_output_dim: int = 256):
        # The real features_dim is only known after inspecting every subspace,
        # so register a placeholder first and overwrite it below.
        super().__init__(observation_space, features_dim=1)
        extractors = {}
        total_concat_size = 0
        for key, subspace in observation_space.spaces.items():
            if is_image_space(subspace):
                extractors[key] = NatureCNN(subspace, features_dim=cnn_output_dim)
                total_concat_size += cnn_output_dim
            else:
                # Non-image subspace: just flatten it
                extractors[key] = nn.Flatten()
                total_concat_size += get_flattened_obs_dim(subspace)
        self.extractors = nn.ModuleDict(extractors)
        # Update the features dim manually
        self._features_dim = total_concat_size

    def forward(self, observations: TensorDict) -> th.Tensor:
        encoded = [extractor(observations[key]) for key, extractor in self.extractors.items()]
        return th.cat(encoded, dim=1)
def get_actor_critic_arch(net_arch: Union[List[int], Dict[str, List[int]]]) -> Tuple[List[int], List[int]]:
    """
    Get the actor and critic network architectures for off-policy actor-critic algorithms (SAC, TD3, DDPG).

    The ``net_arch`` parameter allows to specify the amount and size of the hidden layers,
    which can be different for the actor and the critic.
    It is assumed to be a list of ints or a dict.

    1. If it is a list, actor and critic networks will have the same architecture.
       The architecture is represented by a list of integers (of arbitrary length (zero allowed))
       each specifying the number of units per layer.
       If the number of ints is zero, the network will be linear.
    2. If it is a dict, it should have the following structure:
       ``dict(qf=[<critic network architecture>], pi=[<actor network architecture>])``.
       where the network architecture is a list as described in 1.

    For example, to have actor and critic that share the same network architecture,
    you only need to specify ``net_arch=[256, 256]`` (here, two hidden layers of 256 units each).
    If you want a different architecture for the actor and the critic,
    then you can specify ``net_arch=dict(qf=[400, 300], pi=[64, 64])``.

    .. note::
        Compared to their on-policy counterparts, no shared layers (other than the features extractor)
        between the actor and the critic are allowed (to prevent issues with target networks).

    :param net_arch: The specification of the actor and critic networks.
        See above for details on its formatting.
    :return: The network architectures for the actor and the critic
    """
    if isinstance(net_arch, list):
        actor_arch, critic_arch = net_arch, net_arch
    else:
        # Fixed garbled message: previously read "can only contain be a list of ints or a dict"
        assert isinstance(net_arch, dict), "Error: the net_arch can only be a list of ints or a dict"
        assert "pi" in net_arch, "Error: no key 'pi' was provided in net_arch for the actor network"
        assert "qf" in net_arch, "Error: no key 'qf' was provided in net_arch for the critic network"
        actor_arch, critic_arch = net_arch["pi"], net_arch["qf"]
    return actor_arch, critic_arch
from typing import Tuple, Union
import numpy as np
class RunningMeanStd:
    def __init__(self, epsilon: float = 1e-4, shape: Tuple[int, ...] = ()):
        """
        Track the running mean and variance of a data stream using the
        parallel merge algorithm:
        https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm

        :param epsilon: helps with arithmetic issues
        :param shape: the shape of the data stream's output
        """
        self.mean = np.zeros(shape, np.float64)
        self.var = np.ones(shape, np.float64)
        self.count = epsilon

    def copy(self) -> "RunningMeanStd":
        """
        :return: Return a copy of the current object.
        """
        clone = RunningMeanStd(shape=self.mean.shape)
        clone.mean = self.mean.copy()
        clone.var = self.var.copy()
        clone.count = float(self.count)
        return clone

    def combine(self, other: "RunningMeanStd") -> None:
        """
        Fold the statistics of another ``RunningMeanStd`` into this one.

        :param other: The other object to combine with.
        """
        self.update_from_moments(other.mean, other.var, other.count)

    def update(self, arr: np.ndarray) -> None:
        """Update the statistics with a new batch of samples (axis 0 = batch)."""
        self.update_from_moments(np.mean(arr, axis=0), np.var(arr, axis=0), arr.shape[0])

    def update_from_moments(self, batch_mean: np.ndarray, batch_var: np.ndarray, batch_count: Union[int, float]) -> None:
        # Chan et al. parallel merge of two (mean, var, count) summaries
        delta = batch_mean - self.mean
        tot_count = self.count + batch_count
        m_a = self.var * self.count
        m_b = batch_var * batch_count
        m_2 = m_a + m_b + np.square(delta) * self.count * batch_count / (self.count + batch_count)
        self.mean = self.mean + delta * batch_count / tot_count
        self.var = m_2 / (self.count + batch_count)
        self.count = batch_count + self.count
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.