docstring | function | __index_level_0__
|---|---|---|
Write the table corresponding to the specified name, equivalent to the
AMPL statement
.. code-block:: ampl
write table tableName;
Args:
tableName: Name of the table to be written.
|
def writeTable(self, tableName):
lock_and_call(
lambda: self._impl.writeTable(tableName),
self._lock
)
| 424,249
|
Writes the outcome of the AMPL statement to the current OutputHandler.
.. code-block:: ampl
display e1, e2, ..., en;
where e1, ..., en are the strings passed to the procedure.
Args:
amplExpressions: Expressions to be evaluated.
|
def display(self, *amplExpressions):
exprs = list(map(str, amplExpressions))
lock_and_call(
lambda: self._impl.displayLst(exprs, len(exprs)),
self._lock
)
| 424,250
|
Sets a new output handler.
Args:
outputhandler: The function handling the AMPL output derived from
interpreting user commands.
|
def setOutputHandler(self, outputhandler):
class OutputHandlerInternal(amplpython.OutputHandler):
def output(self, kind, msg):
outputhandler.output(kind, msg)
self._outputhandler = outputhandler
self._outputhandler_internal = OutputHandlerInternal()
lock_and_call(
lambda: self._impl.setOutputHandler(
self._outputhandler_internal
),
self._lock
)
| 424,251
|
Sets a new error handler.
Args:
errorhandler: The object handling AMPL errors and warnings.
|
def setErrorHandler(self, errorhandler):
class ErrorHandlerWrapper(ErrorHandler):
def __init__(self, errorhandler):
self.errorhandler = errorhandler
self.last_exception = None
def error(self, exception):
if isinstance(exception, amplpython.AMPLException):
exception = AMPLException(exception)
try:
self.errorhandler.error(exception)
except Exception as e:
self.last_exception = e
def warning(self, exception):
if isinstance(exception, amplpython.AMPLException):
exception = AMPLException(exception)
try:
self.errorhandler.warning(exception)
except Exception as e:
self.last_exception = e
def check(self):
if self.last_exception is not None:
e, self.last_exception = self.last_exception, None
raise e
errorhandler_wrapper = ErrorHandlerWrapper(errorhandler)
class InnerErrorHandler(amplpython.ErrorHandler):
def error(self, exception):
errorhandler_wrapper.error(exception)
def warning(self, exception):
errorhandler_wrapper.warning(exception)
self._errorhandler = errorhandler
self._errorhandler_inner = InnerErrorHandler()
self._errorhandler_wrapper = errorhandler_wrapper
lock_and_call(
lambda: self._impl.setErrorHandler(self._errorhandler_inner),
self._lock
)
| 424,252
|
Create a .dat file with the data that has been loaded.
Args:
datfile: Path to the file (Relative to the current working
directory or absolute).
|
def exportData(self, datfile):
def ampl_set(name, values):
def format_entry(e):
return repr(e).replace(' ', '')
return 'set {0} := {1};'.format(
name, ','.join(format_entry(e) for e in values)
)
def ampl_param(name, values):
def format_entry(k, v):
k = repr(k).strip('()').replace(' ', '')
if v == inf:
v = "Infinity"
elif v == -inf:
v = "-Infinity"
else:
v = repr(v).strip('()').replace(' ', '')
return '[{0}]{1}'.format(k, v)
return 'param {0} := {1};'.format(
name, ''.join(format_entry(k, v) for k, v in values.items())
)
with open(datfile, 'w') as f:
for name, entity in self.getSets():
values = entity.getValues().toList()
print(ampl_set(name, values), file=f)
for name, entity in self.getParameters():
if entity.isScalar():
print(
'param {} := {};'.format(name, entity.value()),
file=f
)
else:
values = entity.getValues().toDict()
print(ampl_param(name, values), file=f)
| 424,265
|
Export the model to Gurobi as a gurobipy.Model object.
Args:
gurobiDriver: The name or the path of the Gurobi solver driver.
verbose: Whether to generate verbose output.
Returns:
A :class:`gurobipy.Model` object with the model loaded.
|
def exportGurobiModel(self, gurobiDriver='gurobi', verbose=False):
from gurobipy import GRB, read
from tempfile import mkdtemp
from shutil import rmtree
from os import path
import sys
if (sys.version_info > (3, 0)):
from io import StringIO
else:
from io import BytesIO as StringIO
tmp_dir = mkdtemp()
model_file = path.join(tmp_dir, 'model.mps')
previous = {
'solver': self.getOption('solver') or '',
'gurobi_auxfiles': self.getOption('auxfiles') or '',
'gurobi_options': self.getOption('gurobi_options') or '',
}
temporary = {
'solver': gurobiDriver,
'gurobi_auxfiles': 'rc',
'gurobi_options': 'writeprob={}'.format(model_file)  # assumed driver option that dumps the model to model_file
}
for option in temporary:
self.setOption(option, temporary[option])
output = self.getOutput('solve;')
if not path.isfile(model_file):
raise RuntimeError(output)
for option in previous:
self.setOption(option, previous[option])
text_trap = StringIO()
stdout = sys.stdout
sys.stdout = text_trap
model = read(model_file)
sys.stdout = stdout
if verbose:
print(text_trap.getvalue())
if model_file.endswith('.mps'):
if not self.getCurrentObjective().minimization():
model.ModelSense = GRB.MAXIMIZE
model.setObjective(- model.getObjective())
model.update()
rmtree(tmp_dir)
return model
| 424,266
|
Import the solution from a gurobipy.Model object.
Args:
grbmodel: A :class:`gurobipy.Model` object with the model solved.
|
def importGurobiSolution(self, grbmodel):
self.eval(''.join(
'let {} := {};'.format(var.VarName, var.X)
for var in grbmodel.getVars()
if '$' not in var.VarName
))
| 424,267
|
Create a new DataFrame with the specified index and column headers.
Args:
index: Index column;
columns: Column headers.
|
def __init__(self, index, columns=tuple(), **kwargs):
if index is not None:
if isinstance(index, basestring):
index = (index,)
if isinstance(columns, basestring):
columns = (columns,)
index_names = [
col[0] if isinstance(col, tuple) else col
for col in index
]
column_names = [
col[0] if isinstance(col, tuple) else col
for col in columns
]
self._impl = amplpython.DataFrame.factory(
len(index_names),
list(index_names) + list(column_names),
len(index_names) + len(column_names)
)
for col in index:
if isinstance(col, tuple):
self.setColumn(col[0], col[1])
for col in columns:
if isinstance(col, tuple):
self.setColumn(col[0], col[1])
else:
self._impl = kwargs.get('_impl', None)
| 424,282
|
Add a row to the DataFrame. The size of the tuple must be equal to the
total number of columns in the dataframe.
Args:
value: A single argument with a tuple containing all the values
for the row to be added, or multiple arguments with the values for
each column.
|
def addRow(self, *value):
if len(value) == 1 and isinstance(value[0], (tuple, list)):
value = value[0]
assert len(value) == self.getNumCols()
self._impl.addRow(Tuple(value)._impl)
| 424,283
|
Add a new column with the corresponding header and values to the
dataframe.
Args:
header: The name of the new column.
values: A list of size :func:`~amplpy.DataFrame.getNumRows` with
all the values of the new column.
|
def addColumn(self, header, values=[]):
if len(values) == 0:
self._impl.addColumn(header)
else:
assert len(values) == self.getNumRows()
if any(isinstance(value, basestring) for value in values):
values = list(map(str, values))
self._impl.addColumnStr(header, values)
elif all(isinstance(value, Real) for value in values):
values = list(map(float, values))
self._impl.addColumnDbl(header, values)
else:
raise NotImplementedError
| 424,284
|
Set the values of a column.
Args:
header: The header of the column to be set.
values: The values to set.
|
def setColumn(self, header, values):
if any(isinstance(value, basestring) for value in values):
values = list(map(str, values))
self._impl.setColumnStr(header, values, len(values))
elif all(isinstance(value, Real) for value in values):
values = list(map(float, values))
self._impl.setColumnDbl(header, values, len(values))
else:
raise NotImplementedError
| 424,285
|
Get a row by value of the indexing columns. If the index is not
specified, gets the only row of a dataframe with no indexing columns.
Args:
key: Tuple representing the index of the desired row.
Returns:
The row.
|
def getRow(self, key):
return Row(self._impl.getRow(Tuple(key)._impl))
| 424,286
|
Get row by numeric index.
Args:
index: Zero-based index of the row to get.
Returns:
The corresponding row.
|
def getRowByIndex(self, index):
assert isinstance(index, int)
return Row(self._impl.getRowByIndex(index))
| 424,287
|
Set the values of a DataFrame from a dictionary.
Args:
values: Dictionary with the values to set.
|
def setValues(self, values):
ncols = self.getNumCols()
nindices = self.getNumIndices()
for key, value in values.items():
key = Utils.convToList(key)
assert len(key) == nindices
value = Utils.convToList(value)
assert len(value) == ncols-nindices
self.addRow(key + value)
| 424,289
|
Set the value of a single instance of this parameter.
Args:
args: value if the parameter is scalar, index and value
otherwise.
Raises:
RuntimeError: If the entity has been deleted in the underlying
AMPL.
TypeError: If the parameter is not scalar and the index is not
provided.
|
def set(self, *args):
assert len(args) in (1, 2)
if len(args) == 1:
value = args[0]
self._impl.set(value)
else:
index, value = args
if isinstance(value, Real):
self._impl.setTplDbl(Tuple(index)._impl, value)
elif isinstance(value, basestring):
self._impl.setTplStr(Tuple(index)._impl, value)
else:
raise TypeError
| 424,298
|
Assign the values (string or float) to the parameter instances with the
specified indices, equivalent to the AMPL code:
.. code-block:: ampl
let {i in indices} par[i] := values[i];
Args:
values: list, dictionary or :class:`~amplpy.DataFrame` with the
indices and the values to be set.
Raises:
TypeError: If called on a scalar parameter.
|
def setValues(self, values):
if isinstance(values, dict):
indices, values = list(zip(*values.items()))
indices = Utils.toTupleArray(indices)
if any(isinstance(value, basestring) for value in values):
values = list(map(str, values))
self._impl.setValuesTaStr(indices, values, len(values))
elif all(isinstance(value, Real) for value in values):
values = list(map(float, values))
self._impl.setValuesTaDbl(indices, values, len(values))
else:
raise TypeError
elif isinstance(values, (list, tuple)):
if any(isinstance(value, basestring) for value in values):
values = list(map(str, values))
self._impl.setValuesStr(values, len(values))
elif all(isinstance(value, Real) for value in values):
values = list(map(float, values))
self._impl.setValuesDbl(values, len(values))
else:
raise TypeError
else:
if np is not None and isinstance(values, np.ndarray):
self.setValues(DataFrame.fromNumpy(values).toList())
return
Entity.setValues(self, values)
| 424,299
|
Constructor with ability to select the location of the AMPL binary.
Note that if binaryDirectory is set, the automatic lookup for an AMPL
executable will not be performed.
Args:
binaryDirectory: The directory in which to look for the AMPL binary.
|
def __init__(self, binaryDirectory=None):
if binaryDirectory is None:
self._impl = amplpython.Environment()
else:
self._impl = amplpython.Environment(binaryDirectory)
| 424,311
|
Register jupyter notebook magics ``%%ampl`` and ``%%ampl_eval``.
Args:
store_name: Name of the store where ``%%ampl`` cells will be stored.
ampl_object: Object used to evaluate ``%%ampl_eval`` cells.
|
def register_magics(store_name='_ampl_cells', ampl_object=None):
from IPython.core.magic import (
Magics, magics_class, cell_magic, line_magic
)
@magics_class
class StoreAMPL(Magics):
def __init__(self, shell=None, **kwargs):
Magics.__init__(self, shell=shell, **kwargs)
self._store = []
shell.user_ns[store_name] = self._store
@cell_magic
def ampl(self, line, cell):
self._store.append(cell)
@cell_magic
def ampl_eval(self, line, cell):
ampl_object.eval(cell)
@line_magic
def get_ampl(self, line):
return self._store
get_ipython().register_magics(StoreAMPL)
| 424,314
|
Fix all instances of this variable to a value if provided or to
their current value otherwise.
Args:
value: value to be set.
|
def fix(self, value=None):
if value is None:
self._impl.fix()
else:
self._impl.fix(value)
| 424,325
|
Change the timezone of the specified component.
Args:
cal (Component): the component to change
new_timezone (tzinfo): the timezone to change to
default (tzinfo): a timezone to assume if the dtstart or dtend in cal
doesn't have an existing timezone
utc_only (bool): only convert dates that are in utc
utc_tz (tzinfo): the tzinfo to compare to for UTC when processing
utc_only=True
|
def change_tz(cal, new_timezone, default, utc_only=False, utc_tz=icalendar.utc):
for vevent in getattr(cal, 'vevent_list', []):
start = getattr(vevent, 'dtstart', None)
end = getattr(vevent, 'dtend', None)
for node in (start, end):
if node:
dt = node.value
if (isinstance(dt, datetime) and
(not utc_only or dt.tzinfo == utc_tz)):
if dt.tzinfo is None:
dt = dt.replace(tzinfo=default)
node.value = dt.astimezone(new_timezone)
| 424,370
|
Initialize the IEC.
Args:
data: Historical dataset. The last value must be the current time.
|
def __init__(self, data, prediction_window=16 * 60):
self.data = data
self.now = data.index[-1]
self.prediction_window = prediction_window
self.algorithms = {
"Simple Mean": self.simple_mean,
"Usage Zone Finder": self.usage_zone_finder,
"ARIMA": self.ARIMAforecast,
"Baseline Finder": partial(self.baseline_finder, training_window=1440 * 60, k=9, long_interp_range=250,
short_interp_range=25, half_window=70, similarity_interval=5, recent_baseline_length=250,
observation_length_addition=240, short_term_ease_method=easeOutSine,
long_term_ease_method=easeOutCirc),
"STLF": self.baseline_finder_dumb,
"b1": partial(self.baseline_finder, training_window=1440 * 60, k=9, long_interp_range=250,
short_interp_range=25, half_window=70, similarity_interval=5, recent_baseline_length=250,
observation_length_addition=240, short_term_ease_method=easeOutSine,
long_term_ease_method=easeOutCirc),
"b2": partial(self.baseline_finder, training_window=1440 * 60, k=3, long_interp_range=250,
short_interp_range=25, half_window=70, similarity_interval=5, recent_baseline_length=300,
observation_length_addition=240, short_term_ease_method=easeOutSine,
long_term_ease_method=easeOutCirc),
"b3": partial(self.baseline_finder, training_window=1440 * 60, k=6, long_interp_range=250,
short_interp_range=25, half_window=70, similarity_interval=5, recent_baseline_length=200,
observation_length_addition=240, short_term_ease_method=easeOutSine,
long_term_ease_method=easeOutCirc),
"b4": partial(self.baseline_finder, training_window=1440 * 60, k=12, long_interp_range=250,
short_interp_range=25, half_window=70, similarity_interval=5, recent_baseline_length=200,
observation_length_addition=240, short_term_ease_method=easeOutSine,
long_term_ease_method=easeOutCirc),
"b5": partial(self.baseline_finder, training_window=1440 * 60, k=9, long_interp_range=250,
short_interp_range=25, half_window=50, similarity_interval=5, recent_baseline_length=250,
observation_length_addition=240, short_term_ease_method=easeOutSine,
long_term_ease_method=easeOutCirc),
"b6": partial(self.baseline_finder, training_window=1440 * 60, k=9, long_interp_range=250,
short_interp_range=25, half_window=60, similarity_interval=5, recent_baseline_length=300,
observation_length_addition=240, short_term_ease_method=easeOutSine,
long_term_ease_method=easeOutCirc),
"b7": partial(self.baseline_finder, training_window=1440 * 60, k=9, long_interp_range=250,
short_interp_range=25, half_window=80, similarity_interval=5, recent_baseline_length=200,
observation_length_addition=240, short_term_ease_method=easeOutSine,
long_term_ease_method=easeOutCirc)
}
| 424,596
|
Runs the given pundat query and returns the results as a Python object.
Arguments:
[query]: the query string
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
|
def query(self, query, archiver="", timeout=DEFAULT_TIMEOUT):
if archiver == "":
archiver = self.archivers[0]
nonce = random.randint(0, 2**32)
ev = threading.Event()
response = {}
def _handleresult(msg):
# decode, throw away if not correct nonce
got_response = False
error = getError(nonce, msg)
if error is not None:
got_response = True
response["error"] = error
metadata = getMetadata(nonce, msg)
if metadata is not None:
got_response = True
response["metadata"] = metadata
timeseries = getTimeseries(nonce, msg)
if timeseries is not None:
got_response = True
response["timeseries"] = timeseries
if got_response:
ev.set()
vk = self.vk[:-1] # remove last part of VK because archiver doesn't expect it
# set up receiving
self.c.subscribe("{0}/s.giles/_/i.archiver/signal/{1},queries".format(archiver, vk), _handleresult)
# execute query
q_struct = msgpack.packb({"Query": query, "Nonce": nonce})
po = PayloadObject((2,0,8,1), None, q_struct)
self.c.publish("{0}/s.giles/_/i.archiver/slot/query".format(archiver), payload_objects=(po,))
ev.wait(timeout)
if len(response) == 0: # no results
raise TimeoutException("Query of {0} timed out".format(query))
return response
| 424,630
|
Using the given where-clause, finds all UUIDs that match
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
|
def uuids(self, where, archiver="", timeout=DEFAULT_TIMEOUT):
resp = self.query("select uuid where {0}".format(where), archiver, timeout)
uuids = []
for r in resp["metadata"]:
uuids.append(r["uuid"])
return uuids
| 424,631
|
Retrieves tags for all streams matching the given WHERE clause
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
|
def tags(self, where, archiver="", timeout=DEFAULT_TIMEOUT):
return self.query("select * where {0}".format(where), archiver, timeout).get('metadata',{})
| 424,632
|
Retrieves tags for all streams with the provided UUIDs
Arguments:
[uuids]: list of UUIDs
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
|
def tags_uuids(self, uuids, archiver="", timeout=DEFAULT_TIMEOUT):
if not isinstance(uuids, list):
uuids = [uuids]
where = " or ".join(['uuid = "{0}"'.format(uuid) for uuid in uuids])
return self.query("select * where {0}".format(where), archiver, timeout).get('metadata',{})
| 424,633
|
With the given WHERE clause, retrieves all RAW data between the 2 given timestamps
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[start, end]: start and end time references
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
|
def data(self, where, start, end, archiver="", timeout=DEFAULT_TIMEOUT):
return self.query("select data in ({0}, {1}) where {2}".format(start, end, where), archiver, timeout).get('timeseries',{})
| 424,634
|
With the given list of UUIDs, retrieves all RAW data between the 2 given timestamps
Arguments:
[uuids]: list of UUIDs
[start, end]: start and end time references
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
|
def data_uuids(self, uuids, start, end, archiver="", timeout=DEFAULT_TIMEOUT):
if not isinstance(uuids, list):
uuids = [uuids]
where = " or ".join(['uuid = "{0}"'.format(uuid) for uuid in uuids])
return self.query("select data in ({0}, {1}) where {2}".format(start, end, where), archiver, timeout).get('timeseries',{})
| 424,635
|
Return a color interpolated from the Palette.
In the case where continuous=False, serpentine=False, scale=1,
autoscale=False, and offset=0, this is exactly the same as plain old []
indexing, but with a wrap-around.
The constructor parameters affect this result as documented in the
constructor.
Arguments:
``position``:
May be any integer or floating point number
|
def get(self, position=0):
n = len(self)
if n == 1:
return self[0]
pos = position
if self.length and self.autoscale:
pos *= len(self)
pos /= self.length
pos *= self.scale
pos += self.offset
if not self.continuous:
if not self.serpentine:
return self[int(pos % n)]
# We want a color sequence of length 2n-2
# e.g. for n=5: a b c d | e d c b | a b c d ...
m = (2 * n) - 2
pos %= m
if pos < n:
return self[int(pos)]
else:
return self[int(m - pos)]
if self.serpentine:
pos %= (2 * n)
if pos > n:
pos = (2 * n) - pos
else:
pos %= n
# pos is a number in [0, n): scale it to be in [0, n-1)
pos *= n - 1
pos /= n
index = int(pos)
fade = pos - index
if not fade:
return self[index]
r1, g1, b1 = self[index]
r2, g2, b2 = self[(index + 1) % len(self)]
dr, dg, db = r2 - r1, g2 - g1, b2 - b1
return r1 + fade * dr, g1 + fade * dg, b1 + fade * db
| 424,834
|
Compose a sequence of events into one event.
Arguments:
events: a sequence of objects looking like threading.Event
condition: a function taking a sequence of bools and returning a bool.
|
def compose_events(events, condition=all):
events = list(events)
master_event = threading.Event()
def changed():
if condition(e.is_set() for e in events):
master_event.set()
else:
master_event.clear()
def add_changed(f):
@functools.wraps(f)
def wrapped():
f()
changed()
return wrapped
for e in events:
e.set = add_changed(e.set)
e.clear = add_changed(e.clear)
changed()
return master_event
| 424,953
|
Create an instance.
Args:
priv_key (bytes): a private key.
|
def __init__(self, priv_key):
self.setup_curve()
length = len(priv_key)
if length != 32 and length != 96 and length != 104:
raise ValueError("Invalid private key")
self.PrivateKey = bytearray(priv_key[-32:])
pubkey_encoded_not_compressed = None
if length == 32:
try:
pubkey_encoded_not_compressed = bitcoin.privkey_to_pubkey(priv_key)
except Exception as e:
raise Exception("Could not determine public key")
elif length == 96 or length == 104:
skip = length - 96
pubkey_encoded_not_compressed = bytearray(b'\x04') + bytearray(priv_key[skip:skip + 64])
if pubkey_encoded_not_compressed:
pubkey_points = bitcoin.decode_pubkey(pubkey_encoded_not_compressed, 'bin')
pubx = pubkey_points[0]
puby = pubkey_points[1]
ecdsa = ECDSA.secp256r1()
self.PublicKey = ecdsa.Curve.point(pubx, puby)
self.PublicKeyHash = Crypto.ToScriptHash(self.PublicKey.encode_point(True), unhex=True)
| 426,425
|
Get the private key from a WIF key
Args:
wif (str): The wif key
Returns:
bytes: The private key
|
def PrivateKeyFromWIF(wif):
if wif is None:
raise ValueError('Please provide a wif')
if len(wif) != 52:
raise ValueError('Please provide a wif with a length of 52 bytes (LEN: {0:d})'.format(len(wif)))
data = base58.b58decode(wif)
length = len(data)
if length != 38 or data[0] != 0x80 or data[33] != 0x01:
raise ValueError("Invalid format!")
checksum = Crypto.Hash256(data[0:34])[0:4]
if checksum != data[34:]:
raise ValueError("Invalid WIF Checksum!")
return data[1:33]
| 426,426
|
Gets the private key from a NEP-2 encrypted private key
Args:
nep2_key (str): The nep-2 encrypted private key
passphrase (str): The password to decrypt the private key with, as unicode string
Returns:
bytes: The private key
|
def PrivateKeyFromNEP2(nep2_key, passphrase):
if not nep2_key or len(nep2_key) != 58:
raise ValueError('Please provide a nep2_key with a length of 58 bytes (LEN: {0:d})'.format(len(nep2_key)))
ADDRESS_HASH_SIZE = 4
ADDRESS_HASH_OFFSET = len(NEP_FLAG) + len(NEP_HEADER)
try:
decoded_key = base58.b58decode_check(nep2_key)
except Exception as e:
raise ValueError("Invalid nep2_key")
address_hash = decoded_key[ADDRESS_HASH_OFFSET:ADDRESS_HASH_OFFSET + ADDRESS_HASH_SIZE]
encrypted = decoded_key[-32:]
pwd_normalized = bytes(unicodedata.normalize('NFC', passphrase), 'utf-8')
derived = scrypt.hash(pwd_normalized, address_hash,
N=SCRYPT_ITERATIONS,
r=SCRYPT_BLOCKSIZE,
p=SCRYPT_PARALLEL_FACTOR,
buflen=SCRYPT_KEY_LEN_BYTES)
derived1 = derived[:32]
derived2 = derived[32:]
cipher = AES.new(derived2, AES.MODE_ECB)
decrypted = cipher.decrypt(encrypted)
private_key = xor_bytes(decrypted, derived1)
# Now check that the address hashes match. If they don't, the password was wrong.
kp_new = KeyPair(priv_key=private_key)
kp_new_address = kp_new.GetAddress()
kp_new_address_hash_tmp = hashlib.sha256(kp_new_address.encode("utf-8")).digest()
kp_new_address_hash_tmp2 = hashlib.sha256(kp_new_address_hash_tmp).digest()
kp_new_address_hash = kp_new_address_hash_tmp2[:4]
if (kp_new_address_hash != address_hash):
raise ValueError("Wrong passphrase")
return private_key
| 426,427
|
Export the encrypted private key in NEP-2 format.
Args:
passphrase (str): The password to encrypt the private key with, as unicode string
Returns:
str: The NEP-2 encrypted private key
|
def ExportNEP2(self, passphrase):
if len(passphrase) < 2:
raise ValueError("Passphrase must have a minimum of 2 characters")
# Hash address twice, then only use the first 4 bytes
address_hash_tmp = hashlib.sha256(self.GetAddress().encode("utf-8")).digest()
address_hash_tmp2 = hashlib.sha256(address_hash_tmp).digest()
address_hash = address_hash_tmp2[:4]
# Normalize password and run scrypt over it with the address_hash
pwd_normalized = bytes(unicodedata.normalize('NFC', passphrase), 'utf-8')
derived = scrypt.hash(pwd_normalized, address_hash,
N=SCRYPT_ITERATIONS,
r=SCRYPT_BLOCKSIZE,
p=SCRYPT_PARALLEL_FACTOR,
buflen=SCRYPT_KEY_LEN_BYTES)
# Split the scrypt-result into two parts
derived1 = derived[:32]
derived2 = derived[32:]
# Run XOR and encrypt the derived parts with AES
xor_ed = xor_bytes(bytes(self.PrivateKey), derived1)
cipher = AES.new(derived2, AES.MODE_ECB)
encrypted = cipher.encrypt(xor_ed)
# Assemble the final result
assembled = bytearray()
assembled.extend(NEP_HEADER)
assembled.extend(NEP_FLAG)
assembled.extend(address_hash)
assembled.extend(encrypted)
# Finally, encode with Base58Check
encrypted_key_nep2 = base58.b58encode_check(bytes(assembled))
return encrypted_key_nep2.decode("utf-8")
| 426,430
|
Create an instance.
Args:
stream (BytesIO): a stream to operate on. i.e. a neo.IO.MemoryStream or raw BytesIO.
|
def __init__(self, stream):
super(BinaryReader, self).__init__()
self.stream = stream
| 426,431
|
Unpack the stream contents according to the specified format in `fmt`.
For more information about the `fmt` format see: https://docs.python.org/3/library/struct.html
Args:
fmt (str): format string.
length (int): amount of bytes to read.
Returns:
variable: the result according to the specified format.
|
def unpack(self, fmt, length=1):
return struct.unpack(fmt, self.stream.read(length))[0]
| 426,432
|
Read a single byte.
Args:
do_ord (bool): (default True) convert the byte to an ordinal first.
Returns:
bytes: a single byte if successful. 0 (int) if an exception occurred.
|
def ReadByte(self, do_ord=True):
try:
if do_ord:
return ord(self.stream.read(1))
return self.stream.read(1)
except Exception as e:
logger.error("ord expected character but got none")
return 0
| 426,433
|
Read a variable length integer from the stream.
The NEO network protocol supports encoded storage for space saving. See: http://docs.neo.org/en-us/node/network-protocol.html#convention
Args:
max (int): (Optional) maximum number of bytes to read.
Returns:
int:
|
def ReadVarInt(self, max=sys.maxsize):
fb = self.ReadByte()
if fb == 0:
return fb
value = 0
if fb == 0xfd:
value = self.ReadUInt16()
elif fb == 0xfe:
value = self.ReadUInt32()
elif fb == 0xff:
value = self.ReadUInt64()
value = self.ReadUInt64()
else:
value = fb
if value > max:
raise Exception("Invalid format")
return int(value)
| 426,435
|
Read a variable length of bytes from the stream.
The NEO network protocol supports encoded storage for space saving. See: http://docs.neo.org/en-us/node/network-protocol.html#convention
Args:
max (int): (Optional) maximum number of bytes to read.
Returns:
bytes:
|
def ReadVarBytes(self, max=sys.maxsize):
length = self.ReadVarInt(max)
return self.ReadBytes(length)
| 426,436
|
Similar to `ReadString` but expects a variable length indicator instead of the fixed 1 byte indicator.
Args:
max (int): (Optional) maximum number of bytes to read.
Returns:
bytes:
|
def ReadVarString(self, max=sys.maxsize):
length = self.ReadVarInt(max)
return self.unpack(str(length) + 's', length)
| 426,438
|
Deserialize a stream into the object specified by `class_name`.
Args:
class_name (str): a full path to the class to be deserialized into. e.g. 'neo.Core.Block.Block'
max (int): (Optional) maximum number of bytes to read.
Returns:
list: list of `class_name` objects deserialized from the stream.
|
def ReadSerializableArray(self, class_name, max=sys.maxsize):
module = '.'.join(class_name.split('.')[:-1])
klassname = class_name.split('.')[-1]
klass = getattr(importlib.import_module(module), klassname)
length = self.ReadVarInt(max=max)
items = []
# logger.info("READING ITEM %s %s " % (length, class_name))
try:
for i in range(0, length):
item = klass()
item.Deserialize(self)
# logger.info("deserialized item %s %s " % ( i, item))
items.append(item)
except Exception as e:
logger.error("Couldn't deserialize %s " % e)
return items
| 426,439
|
Get a script hash of the data.
Args:
data (bytes): data to hash.
unhex (bool): (Default) True. Set to unhexlify the stream. Use when the bytes are not raw bytes; i.e. b'aabb'
Returns:
UInt160: script hash.
|
def ToScriptHash(data, unhex=True):
if len(data) > 1 and unhex:
data = binascii.unhexlify(data)
return UInt160(data=binascii.unhexlify(bytes(Crypto.Hash160(data), encoding='utf-8')))
| 426,442
|
Sign the message with the given private key.
Args:
message (str): message to be signed
private_key (str): 32 byte key as a double digit hex string (e.g. having a length of 64)
Returns:
bytearray: the signature of the message.
|
def Sign(message, private_key):
hash = hashlib.sha256(binascii.unhexlify(message)).hexdigest()
v, r, s = bitcoin.ecdsa_raw_sign(hash, private_key)
rb = bytearray(r.to_bytes(32, 'big'))
sb = bytearray(s.to_bytes(32, 'big'))
sig = rb + sb
return sig
| 426,443
|
Verify the integrity of the message.
Args:
message (str): the message to verify.
signature (bytearray): the signature belonging to the message.
public_key (ECPoint): the public key to use for verifying the signature.
unhex (bool): whether the message should be unhexlified before verifying
Returns:
bool: True if verification passes. False otherwise.
|
def VerifySignature(self, message, signature, public_key, unhex=True):
return Crypto.VerifySignature(message, signature, public_key, unhex=unhex)
| 426,445
|
Create an instance.
Args:
hashes (list): each hash is of bytearray type.
|
def __init__(self, hashes):
self.Root = MerkleTree.__Build([MerkleTreeNode(hash) for hash in hashes])
depth = 1
i = self.Root
while i.LeftChild is not None:
depth = depth + 1
i = i.LeftChild
self.Depth = depth
| 426,497
|
Build the merkle tree.
Args:
leaves (list): items are of type MerkleTreeNode.
Returns:
MerkleTreeNode: the root node.
|
def __Build(leaves):
if len(leaves) < 1:
raise Exception('Leaves must have length')
if len(leaves) == 1:
return leaves[0]
num_parents = int((len(leaves) + 1) / 2)
parents = [MerkleTreeNode() for i in range(0, num_parents)]
for i in range(0, num_parents):
node = parents[i]
node.LeftChild = leaves[i * 2]
leaves[i * 2].Parent = node
if (i * 2 + 1 == len(leaves)):
node.RightChild = node.LeftChild
else:
node.RightChild = leaves[i * 2 + 1]
leaves[i * 2 + 1].Parent = node
hasharray = bytearray(node.LeftChild.Hash.ToArray() + node.RightChild.Hash.ToArray())
node.Hash = UInt256(data=Crypto.Hash256(hasharray))
return MerkleTree.__Build(parents)
| 426,498
|
Compute the root hash.
Args:
hashes (list): the list of hashes to build the root from.
Returns:
bytes: the root hash.
|
def ComputeRoot(hashes):
if not len(hashes):
raise Exception('Hashes must have length')
if len(hashes) == 1:
return hashes[0]
tree = MerkleTree(hashes)
return tree.Root.Hash
| 426,499
|
Internal helper method.
Args:
node (MerkleTreeNode):
hashes (list): each item is a bytearray.
|
def __DepthFirstSearch(node, hashes):
if node.LeftChild is None:
hashes.add(node.Hash)
else:
MerkleTree.__DepthFirstSearch(node.LeftChild, hashes)
MerkleTree.__DepthFirstSearch(node.RightChild, hashes)
| 426,500
|
Trim the nodes from the tree keeping only the root hash.
Args:
flags: "0000" for trimming, any other value for keeping the nodes.
|
def Trim(self, flags):
logger.info("Trimming!")
flags = bytearray(flags)
length = 1 << self.Depth - 1
while len(flags) < length:
flags.append(0)
MerkleTree._TrimNode(self.Root, 0, self.Depth, flags)
| 426,502
|
Internal helper method to trim a node.
Args:
node (MerkleTreeNode):
index (int): flag index.
depth (int): node tree depth to start trim from.
flags (bytearray): of left/right pairs. 1 byte for the left node, 1 byte for the right node.
00 to erase, 11 to keep. The node is kept if either the left or the right flag is non-zero.
|
def _TrimNode(node, index, depth, flags):
if depth == 1 or node.LeftChild is None:
return
if depth == 2:
if not flags[index * 2] and not flags[index * 2 + 1]:
node.LeftChild = None
node.RightChild = None
else:
MerkleTree._TrimNode(node.LeftChild, index * 2, depth - 1, flags)
MerkleTree._TrimNode(node.RightChild, index * 2, depth - 1, flags)
if node.LeftChild.LeftChild is None and node.RightChild.RightChild is None:
node.LeftChild = None
node.RightChild = None
| 426,503
|
Perform two SHA256 operations on the input.
Args:
ba (bytes): data to hash.
Returns:
str: hash as a double digit hex string.
|
def double_sha256(ba):
d1 = hashlib.sha256(ba)
d2 = hashlib.sha256()
d2.update(d1.digest())
return d2.hexdigest()
| 426,522
|
Convert a script hash to a public address.
Args:
scripthash (bytes):
Returns:
str: base58 encoded string representing the wallet address.
|
def scripthash_to_address(scripthash):
sb = bytearray([ADDRESS_VERSION]) + scripthash
c256 = bin_dbl_sha256(sb)[0:4]
outb = sb + bytearray(c256)
return base58.b58encode(bytes(outb)).decode("utf-8")
| 426,523
|
Get a hash of the provided message using the ripemd160 algorithm.
Args:
bts (str): message to hash.
Returns:
bytes: hash.
|
def bin_hash160Bytes(bts):
intermed = hashlib.sha256(bts).digest()
return hashlib.new('ripemd160', intermed).digest()
| 426,524
|
Get a hash of the provided message using the ripemd160 algorithm.
Args:
string (str): message to hash.
Returns:
str: hash as a double digit hex string.
|
def bin_hash160(string):
intermed = hashlib.sha256(string).digest()
return hashlib.new('ripemd160', intermed).hexdigest()
| 426,525
|
Encode the input with base256.
Args:
n (int): input value.
minwidth: minimum return value length.
Raises:
ValueError: if a negative number is provided.
Returns:
bytearray:
|
def base256_encode(n, minwidth=0): # int/long to byte array
if n > 0:
arr = []
while n:
n, rem = divmod(n, 256)
arr.append(rem)
b = bytearray(reversed(arr))
elif n == 0:
b = bytearray(b'\x00')
else:
raise ValueError("Negative numbers not supported")
if minwidth > 0 and len(b) < minwidth: # zero padding needed?
padding = (minwidth - len(b)) * b'\x00'
b = bytearray(padding) + b
b.reverse()
return b
| 426,526
|
XOR on two bytes objects
Args:
a (bytes): object 1
b (bytes): object 2
Returns:
bytes: The XOR result
|
def xor_bytes(a, b):
assert isinstance(a, bytes)
assert isinstance(b, bytes)
assert len(a) == len(b)
res = bytearray()
for i in range(len(a)):
res.append(a[i] ^ b[i])
return bytes(res)
| 426,527
|
Create an instance.
Args:
stream (BytesIO): a stream to operate on. i.e. a neo.IO.MemoryStream or raw BytesIO.
|
def __init__(self, stream):
super(BinaryWriter, self).__init__()
self.stream = stream
| 426,528
|
Write a `bytes` type to the stream.
Args:
value (bytes): array of bytes to write to the stream.
unhex (bool): (Default) True. Set to unhexlify the stream. Use when the bytes are not raw bytes; i.e. b'aabb'
Returns:
int: the number of bytes written.
|
def WriteBytes(self, value, unhex=True):
if unhex:
try:
value = binascii.unhexlify(value)
except binascii.Error:
pass
return self.stream.write(value)
| 426,529
|
Write bytes by packing them according to the provided format `fmt`.
For more information about the `fmt` format see: https://docs.python.org/3/library/struct.html
Args:
fmt (str): format string.
data (object): the data to write to the raw stream.
Returns:
int: the number of bytes written.
|
def pack(self, fmt, data):
return self.WriteBytes(struct.pack(fmt, data), unhex=False)
| 426,530
|
Write a UInt160 type to the stream.
Args:
value (UInt160):
Raises:
Exception: when `value` is not of neocore.UInt160 type.
|
def WriteUInt160(self, value):
if type(value) is UInt160:
value.Serialize(self)
else:
raise Exception("value must be UInt160 instance ")
| 426,531
|
Write a UInt256 type to the stream.
Args:
value (UInt256):
Raises:
Exception: when `value` is not of neocore.UInt256 type.
|
def WriteUInt256(self, value):
if type(value) is UInt256:
value.Serialize(self)
else:
raise Exception("Cannot write value that is not UInt256")
| 426,532
|
Write a variable length array of bytes to the stream, prefixed with its length encoded in a space saving way.
Read more about variable size encoding here: http://docs.neo.org/en-us/node/network-protocol.html#convention
Args:
value (bytes):
endian (str): specify the endianness. (Default) Little endian ('<'). Use '>' for big endian.
Returns:
int: the number of bytes written.
|
def WriteVarBytes(self, value, endian="<"):
length = len(value)
self.WriteVarInt(length, endian)
return self.WriteBytes(value, unhex=False)
| 426,533
|
Write a string value to the stream.
Read more about variable size encoding here: http://docs.neo.org/en-us/node/network-protocol.html#convention
Args:
value (string): value to write to the stream.
encoding (str): string encoding format.
|
def WriteVarString(self, value, encoding="utf-8"):
if type(value) is str:
value = value.encode(encoding)
length = len(value)
ba = bytearray(value)
byts = binascii.hexlify(ba)
string = byts.decode(encoding)
self.WriteVarInt(length)
self.WriteBytes(string)
| 426,534
|
Write a string value to the stream.
Args:
value (str): value to write to the stream.
length (int): length of the string to write.
|
def WriteFixedString(self, value, length):
towrite = value.encode('utf-8')
slen = len(towrite)
if slen > length:
raise Exception("string longer than fixed length: %s " % length)
self.WriteBytes(towrite)
diff = length - slen
while diff > 0:
self.WriteByte(0)
diff -= 1
| 426,535
|
Write an array of serializable objects to the stream.
Args:
array(list): a list of serializable objects. i.e. extending neo.IO.Mixins.SerializableMixin
|
def WriteSerializableArray(self, array):
if array is None:
self.WriteByte(0)
else:
self.WriteVarInt(len(array))
for item in array:
item.Serialize(self)
| 426,536
|
Write an array of 64 byte items to the stream.
Args:
arr (list): a list of 2000 items of 64 bytes in size.
|
def Write2000256List(self, arr):
for item in arr:
ba = bytearray(binascii.unhexlify(item))
ba.reverse()
self.WriteBytes(ba)
| 426,537
|
Write an array of hashes to the stream.
Args:
arr (list): a list of 32 byte hashes.
|
def WriteHashes(self, arr):
length = len(arr)
self.WriteVarInt(length)
for item in arr:
ba = bytearray(binascii.unhexlify(item))
ba.reverse()
# logger.info("WRITING HASH %s " % ba)
self.WriteBytes(ba)
| 426,538
|
Get the type of the soma.
Args:
points: Soma points
soma_class(str): one of 'contour' or 'cylinder' to specify the type
|
def _get_type(points, soma_class):
assert soma_class in (SOMA_CONTOUR, SOMA_CYLINDER)
npoints = len(points)
if soma_class == SOMA_CONTOUR:
return {0: None,
1: SomaSinglePoint,
2: None}.get(npoints, SomaSimpleContour)
if(npoints == 3 and
points[0][COLS.P] == -1 and
points[1][COLS.P] == 1 and
points[2][COLS.P] == 1):
L.warning('Using neuromorpho 3-Point soma')
# NeuroMorpho is the main provider of morphologies, but they
# use SWC as their default file format: they convert all
# uploads to SWC. In the process of conversion, they turn all
# somas into their custom 'Three-point soma representation':
# http://neuromorpho.org/SomaFormat.html
return SomaNeuromorphoThreePointCylinders
return {0: None,
1: SomaSinglePoint}.get(npoints, SomaCylinders)
| 427,009
|
Function used for viewing and plotting,
to initialize the matplotlib figure and axes.
Args:
new_fig(bool): Defines if a new figure will be created; if False, the current figure is used
subplot (tuple or matplotlib subplot specifier string): Create axes with these parameters
params (dict): extra options passed to add_subplot()
Returns:
Matplotlib Figure and Axes
|
def get_figure(new_fig=True, subplot='111', params=None):
_get_plt()
if new_fig:
fig = plt.figure()
else:
fig = plt.gcf()
params = dict_if_none(params)
if isinstance(subplot, (tuple, list)):
ax = fig.add_subplot(*subplot, **params)
else:
ax = fig.add_subplot(subplot, **params)
return fig, ax
| 427,021
|
Set title options of a matplotlib plot
Args:
ax: matplotlib axes
pretitle(str): String to include before the general title of the figure
posttitle (str): String to include after the general title of the figure
title (str): Set the title for the figure
title_fontsize (int): Defines the size of the title's font
title_arg (dict): Additional arguments for the matplotlib.title() call
|
def plot_title(ax, pretitle='', title='Figure', posttitle='', title_fontsize=14, title_arg=None):
current_title = ax.get_title()
if not current_title:
current_title = pretitle + title + posttitle
title_arg = dict_if_none(title_arg)
ax.set_title(current_title, fontsize=title_fontsize, **title_arg)
| 427,024
|
Sets the labels options of a matplotlib plot
Args:
ax: matplotlib axes
label_fontsize(int): Size of the labels' font
xlabel(str): The xlabel for the figure
xlabel_arg(dict): Passed into matplotlib as xlabel arguments
ylabel(str): The ylabel for the figure
ylabel_arg(dict): Passed into matplotlib as ylabel arguments
zlabel(str): The zlabel for the figure
zlabel_arg(dict): Passsed into matplotlib as zlabel arguments
|
def plot_labels(ax, label_fontsize=14,
xlabel=None, xlabel_arg=None,
ylabel=None, ylabel_arg=None,
zlabel=None, zlabel_arg=None):
xlabel = xlabel if xlabel is not None else ax.get_xlabel() or 'X'
ylabel = ylabel if ylabel is not None else ax.get_ylabel() or 'Y'
xlabel_arg = dict_if_none(xlabel_arg)
ylabel_arg = dict_if_none(ylabel_arg)
ax.set_xlabel(xlabel, fontsize=label_fontsize, **xlabel_arg)
ax.set_ylabel(ylabel, fontsize=label_fontsize, **ylabel_arg)
if hasattr(ax, 'zaxis'):
zlabel = zlabel if zlabel is not None else ax.get_zlabel() or 'Z'
zlabel_arg = dict_if_none(zlabel_arg)
ax.set_zlabel(zlabel, fontsize=label_fontsize, **zlabel_arg)
| 427,025
|
Sets the limit options of a matplotlib plot.
Args:
ax: matplotlib axes
white_space(float): whitespace added to surround the tight limit of the data
Note: This relies on ax.dataLim (in 2d) and ax.[xy, zz]_dataLim being set in 3d
|
def update_plot_limits(ax, white_space):
if hasattr(ax, 'zz_dataLim'):
bounds = ax.xy_dataLim.bounds
ax.set_xlim(bounds[0] - white_space, bounds[0] + bounds[2] + white_space)
ax.set_ylim(bounds[1] - white_space, bounds[1] + bounds[3] + white_space)
bounds = ax.zz_dataLim.bounds
ax.set_zlim(bounds[0] - white_space, bounds[0] + bounds[2] + white_space)
else:
bounds = ax.dataLim.bounds
assert not any(map(np.isinf, bounds)), 'Cannot set bounds if dataLim has infinite elements'
ax.set_xlim(bounds[0] - white_space, bounds[0] + bounds[2] + white_space)
ax.set_ylim(bounds[1] - white_space, bounds[1] + bounds[3] + white_space)
| 427,027
|
Function that defines the legend options
of a matplotlib plot.
Args:
ax: matplotlib axes
no_legend (bool): Defines the presence of a legend in the figure
legend_arg (dict): Additional arguments for the matplotlib.legend() call
|
def plot_legend(ax, no_legend=True, legend_arg=None):
legend_arg = dict_if_none(legend_arg)
if not no_legend:
ax.legend(**legend_arg)
| 427,028
|
Check if a neuron has an axon
Arguments:
neuron(Neuron): The neuron object to test
treefun: Optional function to calculate the tree type of
neuron's neurites
Returns:
CheckResult with result
|
def has_axon(neuron, treefun=_read_neurite_type):
return CheckResult(NeuriteType.axon in (treefun(n) for n in neuron.neurites))
| 427,042
|
Check if a neuron has apical dendrites
Arguments:
neuron(Neuron): The neuron object to test
min_number: minimum number of apical dendrites required
treefun: Optional function to calculate the tree type of neuron's
neurites
Returns:
CheckResult with result
|
def has_apical_dendrite(neuron, min_number=1, treefun=_read_neurite_type):
types = [treefun(n) for n in neuron.neurites]
return CheckResult(types.count(NeuriteType.apical_dendrite) >= min_number)
| 427,043
|
Check if a neuron has basal dendrites
Arguments:
neuron(Neuron): The neuron object to test
min_number: minimum number of basal dendrites required
treefun: Optional function to calculate the tree type of neuron's
neurites
Returns:
CheckResult with result
|
def has_basal_dendrite(neuron, min_number=1, treefun=_read_neurite_type):
types = [treefun(n) for n in neuron.neurites]
return CheckResult(types.count(NeuriteType.basal_dendrite) >= min_number)
| 427,044
|
Check that a neuron has no flat neurites
Arguments:
neuron(Neuron): The neuron object to test
tol(float): tolerance
method(string): way of determining flatness, 'tolerance', 'ratio' \
as described in :meth:`neurom.check.morphtree.get_flat_neurites`
Returns:
CheckResult with result
|
def has_no_flat_neurites(neuron, tol=0.1, method='ratio'):
return CheckResult(len(get_flat_neurites(neuron, tol, method)) == 0)
| 427,045
|
Check presence of neuron segments with length not above threshold
Arguments:
neuron(Neuron): The neuron object to test
threshold(float): value above which a segment length is considered to
be non-zero
Returns:
CheckResult with result including list of (section_id, segment_id)
of zero length segments
|
def has_all_nonzero_segment_lengths(neuron, threshold=0.0):
bad_ids = []
for sec in _nf.iter_sections(neuron):
p = sec.points
for i, s in enumerate(zip(p[:-1], p[1:])):
if segment_length(s) <= threshold:
bad_ids.append((sec.id, i))
return CheckResult(len(bad_ids) == 0, bad_ids)
| 427,046
|
Check presence of neuron sections with length not above threshold
Arguments:
neuron(Neuron): The neuron object to test
threshold(float): value above which a section length is considered
to be non-zero
Returns:
CheckResult with result including list of ids of bad sections
|
def has_all_nonzero_section_lengths(neuron, threshold=0.0):
bad_ids = [s.id for s in _nf.iter_sections(neuron.neurites)
if section_length(s.points) <= threshold]
return CheckResult(len(bad_ids) == 0, bad_ids)
| 427,047
|
Check presence of neurite points with radius not above threshold
Arguments:
neuron(Neuron): The neuron object to test
threshold: value above which a radius is considered to be non-zero
Returns:
CheckResult with result including list of (section ID, point ID) pairs
of zero-radius points
|
def has_all_nonzero_neurite_radii(neuron, threshold=0.0):
bad_ids = []
seen_ids = set()
for s in _nf.iter_sections(neuron):
for i, p in enumerate(s.points):
info = (s.id, i)
if p[COLS.R] <= threshold and info not in seen_ids:
seen_ids.add(info)
bad_ids.append(info)
return CheckResult(len(bad_ids) == 0, bad_ids)
| 427,048
|
Check if there are jumps (large movements in the `axis`)
Arguments:
neuron(Neuron): The neuron object to test
max_distance(float): value above which consecutive z-values are
considered a jump
axis(str): one of x/y/z, which axis to check for jumps
Returns:
CheckResult with result list of ids of bad sections
|
def has_no_jumps(neuron, max_distance=30.0, axis='z'):
bad_ids = []
axis = {'x': COLS.X, 'y': COLS.Y, 'z': COLS.Z, }[axis.lower()]
for neurite in iter_neurites(neuron):
section_segment = ((sec, seg) for sec in iter_sections(neurite)
for seg in iter_segments(sec))
for sec, (p0, p1) in islice(section_segment, 1, None): # Skip neurite root segment
if max_distance < abs(p0[axis] - p1[axis]):
bad_ids.append((sec.id, [p0, p1]))
return CheckResult(len(bad_ids) == 0, bad_ids)
| 427,049
|
Check if neurites have a narrow start
Arguments:
neuron(Neuron): The neuron object to test
frac(float): Ratio that the second point must be smaller than the first
Returns:
CheckResult with a list of all first segments of neurites with a narrow start
|
def has_no_narrow_start(neuron, frac=0.9):
bad_ids = [(neurite.root_node.id, [neurite.root_node.points[1]])
for neurite in neuron.neurites
if neurite.root_node.points[1][COLS.R] < frac * neurite.root_node.points[2][COLS.R]]
return CheckResult(len(bad_ids) == 0, bad_ids)
| 427,051
|
Check if the neuron has dendrites with narrow sections
Arguments:
neuron(Neuron): The neuron object to test
neurite_filter(callable): filter the neurites by this callable
radius_threshold(float): radii below this are considered narrow
considered_section_min_length(float): sections with length below
this are not taken into account
Returns:
CheckResult with result. result.info contains the narrow section ids and their
first point
|
def has_no_narrow_neurite_section(neuron,
neurite_filter,
radius_threshold=0.05,
considered_section_min_length=50):
considered_sections = (sec for sec in iter_sections(neuron, neurite_filter=neurite_filter)
if sec.length > considered_section_min_length)
def narrow_section(section):
return section.points[:, COLS.R].mean() < radius_threshold
bad_ids = [(section.id, section.points[1])
for section in considered_sections if narrow_section(section)]
return CheckResult(len(bad_ids) == 0, bad_ids)
| 427,053
|
Rotation around unit vector following the right hand rule
Parameters:
obj : obj to be rotated (e.g. neurite, neuron).
Must implement a transform method.
axis : unit vector for the axis of rotation
angle : rotation angle in rads
Returns:
A copy of the object with the applied rotation.
|
def rotate(obj, axis, angle, origin=None):
R = _rodrigues_to_dcm(axis, angle)
try:
return obj.transform(PivotRotation(R, origin))
except AttributeError:
raise NotImplementedError
| 427,064
|
Generates a 3d figure of the soma.
Args:
ax(matplotlib axes): on what to plot
soma(neurom.core.Soma): plotted soma
color(str or None): Color of plotted values, None corresponds to default choice
alpha(float): Transparency of plotted values
|
def plot_soma3d(ax, soma, color=None, alpha=_ALPHA):
color = _get_color(color, tree_type=NeuriteType.soma)
if isinstance(soma, SomaCylinders):
for start, end in zip(soma.points, soma.points[1:]):
common.plot_cylinder(ax,
start=start[COLS.XYZ], end=end[COLS.XYZ],
start_radius=start[COLS.R], end_radius=end[COLS.R],
color=color, alpha=alpha)
else:
common.plot_sphere(ax, center=soma.center[COLS.XYZ], radius=soma.radius,
color=color, alpha=alpha)
# unlike w/ 2d Axes, the dataLim isn't set by collections, so it has to be updated manually
_update_3d_datalim(ax, soma)
| 427,121
|
Dendrogram of `obj`
Args:
obj: Neuron or tree \
neurom.Neuron, neurom.Tree
show_diameters : boolean \
Determines if node diameters will \
be shown or not.
|
def plot_dendrogram(ax, obj, show_diameters=True):
# create dendrogram and generate rectangle collection
dnd = Dendrogram(obj, show_diameters=show_diameters)
dnd.generate()
# render dendrogram and take into account neurite displacement which
# starts as zero. It is important to avoid overlapping of neurites
# and to determine the limits of the figure.
_render_dendrogram(dnd, ax, 0.)
ax.set_title('Morphology Dendrogram')
ax.set_xlabel('micrometers (um)')
ax.set_ylabel('micrometers (um)')
ax.set_aspect('auto')
ax.legend()
| 427,125
|
compute vector between two 3D points
Args:
p1, p2: indexable objects with
indices 0, 1, 2 corresponding to 3D cartesian coordinates.
Returns:
3-vector from p1 - p2
|
def vector(p1, p2):
return np.subtract(p1[COLS.XYZ], p2[COLS.XYZ])
| 427,133
|
Find the segment which corresponds to the fraction
of the path length along the piecewise linear curve which
is constructed from the set of points.
Args:
points: an iterable of indexable objects with indices
0, 1, 2 corresponding to 3D cartesian coordinates
fraction: path length fraction (0.0 <= fraction <= 1.0)
relative_offset: return absolute or relative segment distance
Returns:
(segment ID, segment offset) pair.
|
def path_fraction_id_offset(points, fraction, relative_offset=False):
if not (0. <= fraction <= 1.0):
raise ValueError("Invalid fraction: %.3f" % fraction)
pts = np.array(points)[:, COLS.XYZ]
lengths = np.linalg.norm(np.diff(pts, axis=0), axis=1)
cum_lengths = np.cumsum(lengths)
offset = cum_lengths[-1] * fraction
seg_id = np.argmin(cum_lengths < offset)
if seg_id > 0:
offset -= cum_lengths[seg_id - 1]
if relative_offset:
offset /= lengths[seg_id]
return seg_id, offset
| 427,136
|
Computes the point which corresponds to the fraction
of the path length along the piecewise linear curve which
is constructed from the set of points.
Args:
points: an iterable of indexable objects with indices
0, 1, 2 corresponding to 3D cartesian coordinates
fraction: path length fraction (0 <= fraction <= 1)
Returns:
The 3D coordinates of the aforementioned point
|
def path_fraction_point(points, fraction):
seg_id, offset = path_fraction_id_offset(points, fraction, relative_offset=True)
return linear_interpolate(points[seg_id], points[seg_id + 1], offset)
| 427,137
|
compute the scalar projection of v1 upon v2
Args:
v1, v2: iterable
indices 0, 1, 2 corresponding to cartesian coordinates
Returns:
The scalar projection of v1 onto v2 (a scalar, not a vector).
|
def scalar_projection(v1, v2):
return np.dot(v1, v2) / np.linalg.norm(v2)
| 427,138
|
compute the vector projection of v1 upon v2
Args:
v1, v2: iterable
indices 0, 1, 2 corresponding to cartesian coordinates
Returns:
3-vector of the projection of v1 onto the direction of v2
|
def vector_projection(v1, v2):
return scalar_projection(v1, v2) * v2 / np.linalg.norm(v2)
| 427,139
|
compute the orthogonal distance from the point p to the line that goes
through the points l1 and l2
Args:
p, l1, l2 : iterable
point
indices 0, 1, 2 corresponding to cartesian coordinates
|
def dist_point_line(p, l1, l2):
cross_prod = np.cross(l2 - l1, p - l1)
return np.linalg.norm(cross_prod) / np.linalg.norm(l2 - l1)
| 427,140
|
compute the square of the euclidean distance between two 3D points
Args:
p1, p2: indexable objects with
indices 0, 1, 2 corresponding to 3D cartesian coordinates.
Returns:
The square of the euclidean distance between the points.
|
def point_dist2(p1, p2):
v = vector(p1, p2)
return np.dot(v, v)
| 427,141
|
compute the angle in radians between three 3D points
Calculated as the angle between p1-p0 and p2-p0.
Args:
p0, p1, p2: indexable objects with
indices 0, 1, 2 corresponding to 3D cartesian coordinates.
Returns:
Angle in radians between (p1-p0) and (p2-p0).
0.0 if p0==p1 or p0==p2.
|
def angle_3points(p0, p1, p2):
vec1 = vector(p1, p0)
vec2 = vector(p2, p0)
return math.atan2(np.linalg.norm(np.cross(vec1, vec2)),
np.dot(vec1, vec2))
| 427,142
|
Return the radial distance of a tree segment to a given point
The radial distance is the euclidean distance between the mid-point of
the segment and the point in question.
Parameters:
seg: tree segment
pos: origin to which distances are measured. It must have at least 3
components. The first 3 components are (x, y, z).
|
def segment_radial_dist(seg, pos):
return point_dist(pos, np.divide(np.add(seg[0], seg[1]), 2.0))
| 427,147
|
Compute the taper rate between points p0 and p1
Args:
p0, p1: iterables with first 4 components containing (x, y, z, r)
Returns:
The taper rate, defined as the absolute value of the difference in
the diameters of p0 and p1 divided by the euclidean distance
between them.
|
def taper_rate(p0, p1):
return 2 * abs(p0[COLS.R] - p1[COLS.R]) / point_dist(p0, p1)
| 427,150
|
Plot a histogram of the selected feature for the population of neurons.
Parameters:
neuron : neuron or neuron list
feature : str
The feature of interest.
bins : int
Number of bins for the histogram.
normed : bool
Normalize the histogram.
cumulative : bool
Sets cumulative histogram on.
ax : axes object
the axes in which the plot is taking place
def histogram(neuron, feature, ax, bins=15, normed=True, cumulative=False):
feature_values = nm.get(feature, neuron)
# generate histogram
ax.hist(feature_values, bins=bins, cumulative=cumulative, normed=normed)
| 427,153
|
Register a feature to be applied to neurites
Parameters:
name: name of the feature, used for access via get() function.
func: single parameter function of a neurite.
|
def register_neurite_feature(name, func):
if name in NEURITEFEATURES:
raise NeuroMError('Attempt to hide registered feature %s' % name)
def _fun(neurites, neurite_type=_ntype.all):
return list(func(n) for n in _ineurites(neurites, filt=_is_type(neurite_type)))
NEURONFEATURES[name] = _fun
| 427,175
|
Obtain a feature from a set of morphology objects
Parameters:
feature(string): feature to extract
obj: a neuron, population or neurite tree
**kwargs: parameters to forward to underlying worker functions
Returns:
features as a 1D or 2D numpy array.
|
def get(feature, obj, **kwargs):
feature = (NEURITEFEATURES[feature] if feature in NEURITEFEATURES
else NEURONFEATURES[feature])
return _np.array(list(feature(obj, **kwargs)))
| 427,176
|
Read a file and return the data wrapped in a `data_wrapper`
* Tries to guess the format and the H5 version.
* Unpacks the first block it finds out of ('repaired', 'unraveled', 'raw')
Parameters:
remove_duplicates: boolean, If True removes duplicate points
from the beginning of each section.
|
def read(filename, remove_duplicates=False, data_wrapper=DataWrapper):
with h5py.File(filename, mode='r') as h5file:
version = get_version(h5file)
if version == 'H5V1':
points, groups = _unpack_v1(h5file)
elif version == 'H5V2':
stg = next(s for s in ('repaired', 'unraveled', 'raw')
if s in h5file['neuron1'])
points, groups = _unpack_v2(h5file, stage=stg)
if remove_duplicates:
points, groups = _remove_duplicate_points(points, groups)
neuron_builder = BlockNeuronBuilder()
points[:, POINT_DIAMETER] /= 2 # Store radius, not diameter
for id_, row in enumerate(zip_longest(groups,
groups[1:, GPFIRST],
fillvalue=len(points))):
(point_start, section_type, parent_id), point_end = row
neuron_builder.add_section(id_, int(parent_id), int(section_type),
points[point_start:point_end])
return neuron_builder.get_datawrapper(version, data_wrapper=data_wrapper)
| 427,179
|
Calculate the parameters of a fit of a distribution to a data set
Parameters:
data: array of data points to be fitted
Options:
distribution (str): type of distribution to fit. Default 'norm'.
Returns:
FitResults object with fitted parameters, errors and distribution type
Note:
Uses Kolmogorov-Smirnov test to estimate distance and p-value.
|
def fit(data, distribution='norm'):
params = getattr(_st, distribution).fit(data)
return FitResults(params, _st.kstest(data, distribution, params), distribution)
| 427,185
|