docstring stringlengths 52 499 | function stringlengths 67 35.2k | __index_level_0__ int64 52.6k 1.16M |
|---|---|---|
Lower the ReshapeOperation.
Reshaping can require collective communication between processors.
We haven't yet implemented all possible reshapes. We try to handle the
common cases here - otherwise we raise a NotImplementedError.
Args:
lowering: a Lowering
Raises:
NotImplementedError: i... | def lower(self, lowering):
old_shape = self.inputs[0].shape
new_shape = self.outputs[0].shape
mesh_impl = lowering.mesh_impl(self)
slices = lowering.tensors[self.inputs[0]]
mesh_axis_to_cumprod_old = mesh_impl.mesh_axis_to_cumprod(old_shape)
mesh_axis_to_cumprod_new = mesh_impl.mesh_axis_to... | 213,822 |
def get_variable_dtype(
    master_dtype=tf.bfloat16,
    slice_dtype=tf.float32,
    activation_dtype=tf.float32):
  """Build the VariableDType bundle used for a run.

  Args:
    master_dtype: string/dtype, datatype for checkpoints;
      keep this the same between training and eval/inference.
    slice_dtype: string/dtype, datatype for variables in memory;
      must be tf.float32 for training.
    activation_dtype: string/dtype, datatype for activations.

  Returns:
    an mtf.VariableDType.
  """
  master, sliced, activation = (
      tf.as_dtype(d) for d in (master_dtype, slice_dtype, activation_dtype))
  return mtf.VariableDType(
      master_dtype=master,
      slice_dtype=sliced,
      activation_dtype=activation)
Decode from a text file.
Args:
estimator: a TPUEstimator
vocabulary: a mtf.transformer.vocabulary.Vocabulary
model_type: a string
batch_size: an integer
sequence_length: an integer (maximum decode length)
checkpoint_path: an optional string
input_filename: a string
output_filename: a ... | def decode_from_file(estimator,
vocabulary,
model_type,
batch_size,
sequence_length,
checkpoint_path="",
input_filename=gin.REQUIRED,
output_filename=gin.REQUIRED,
... | 213,832 |
def clean_decodes(ids, vocab_size, eos_id=1):
  """Truncate a decoded sequence at EOS or the first out-of-vocabulary id.

  Args:
    ids: a list of integers.
    vocab_size: an integer; ids >= vocab_size terminate the output.
    eos_id: EOS id (default 1).

  Returns:
    a list of integers (the kept prefix, cast to int).
  """
  cleaned = []
  for token in ids:
    # Stop at EOS or at any id outside the vocabulary (padding/OOV).
    if token == eos_id or token >= vocab_size:
      break
    cleaned.append(int(token))
  return cleaned
Automatically compute batch size.
Args:
sequence_length: an integer
mesh_shape: an input to mtf.convert_to_shape()
layout_rules: an input to mtf.convert_to_layout_rules()
tokens_per_split: an integer
Returns:
an integer | def auto_batch_size(sequence_length,
mesh_shape,
layout_rules,
tokens_per_split=2048):
num_splits = mtf.tensor_dim_to_mesh_dim_size(
layout_rules, mesh_shape, mtf.Dimension("batch", 0))
ret = max(1, tokens_per_split // sequence_length) * num_split... | 213,834 |
Ring-order of a mxn mesh.
Args:
m: an integer
n: an integer
Returns:
a list of mxn pairs | def _ring_2d(m, n):
if m == 1:
return [(0, i) for i in range(n)]
if n == 1:
return [(i, 0) for i in range(m)]
if m % 2 != 0:
tf.logging.warning("Odd dimension")
return [(i % m, i // m) for i in range(n * m)]
ret = [(0, 0)]
for i in range(m // 2):
for j in range(1, n):
ret.append((... | 213,837 |
Grouped allreduce, (summed across the given dimensions).
Args:
x: a LaidOutTensor
mesh_axes: a list of integers
reduction_fn_string: "SUM"
Returns:
a LaidOutTensor
Raises:
ValueError: if the reduction is not yet implemented. | def allreduce(self, x, mesh_axes, reduction_fn_string):
if not mesh_axes:
return x
x = x.to_laid_out_tensor()
if reduction_fn_string == "SUM":
group_assignment = self._create_group_assignment(mesh_axes)
group_size = len(group_assignment[0])
tf_in = x.one_slice
dtype = tf_i... | 213,842 |
Grouped allconcat (like MPI allgather followed by concat).
TODO(noam): inefficient - replace with a XLA allconcat when available
Args:
x: a LaidOutTensor
mesh_axis: an integer - the mesh axis along which to group
concat_axis: an integer (the Tensor axis along which to concatenate)
stac... | def allconcat(self, x, mesh_axis, concat_axis, stack=False):
x = x.to_laid_out_tensor()
coord = self.laid_out_pcoord(mesh_axis)
t = x.one_slice
old_shape = t.shape.as_list()
num_parts = self.shape[mesh_axis].size
t = tf.expand_dims(t, concat_axis)
t *= tf.reshape(
tf.one_hot(coo... | 213,843 |
Grouped alltoall (like MPI alltoall with splitting and concatenation).
Args:
x: a LaidOutTensor
mesh_axis: an integer the mesh axis along which to group
split_axis: an integer (the Tensor axis along which to split)
concat_axis: an integer (the Tensor axis along which to concatenate)
Ret... | def alltoall(self, x, mesh_axis, split_axis, concat_axis):
x = x.to_laid_out_tensor()
t = x.one_slice
group_assignment = self._create_group_assignment([mesh_axis])
dtype = t.dtype
if dtype == tf.float32:
# There seems to be a bug with float32 alltoall.
# Do it in bfloat16 until the ... | 213,844 |
Execute a function in parallel on all slices.
Args:
fn: a function from tf.Tensors to tf.Tensor or a tuple of tf.Tensors.
*inputs: a list of inputs. Each input is either a LaidOutTensor or
is convertible to a tf.Tensor.
Returns:
a LaidOutTensor, or a tuple of LaidOutTensors if fn ret... | def slicewise(self, fn, *inputs):
if fn == tf.add:
assert len(inputs) == 2
if isinstance(inputs[0], mtf.LazyAllreduceSum):
# sum of LazyAllreduceSum (keep delaying the allreduce)
return inputs[0] + inputs[1]
# convert all inputs to LaidOutTensor where possible
inputs = mtf.c... | 213,847 |
Call a random tf operation (e.g. random_uniform).
Args:
shape: a Shape
tf_fn: a function such as tf.random.uniform
kwargs: kwargs to pass to tf_fn, except for seed
Returns:
a LaidOutTensor | def random(self, shape, tf_fn, kwargs):
# TODO(noam): can we make things better with stateless_random?
slice_shape = self.slice_shape(shape)
x = tf_fn(slice_shape, **kwargs)
# TPU does not have seeds enabled. Sync up the
# random choices by zeroing out all but the first core per group of
#... | 213,848 |
Turn a Tensor into a tf.Tensor.
Args:
x: a Tensor
laid_out_x: a LaidOutTensor
Returns:
a tf.Tensor | def export_to_tf_tensor(self, x, laid_out_x):
tensor_layout = self.tensor_layout(x.shape)
if not tensor_layout.is_fully_replicated:
raise NotImplementedError(
"SimdMeshImpl only supports export_to_tf_tensor of fully-replicated "
"Tensors. Try reshaping to new dimension names. "
... | 213,849 |
def predict(self, X, with_noise=True):
    """Predict with the model: posterior means and standard deviations at X.

    Note this differs from GPy, where variances are returned instead.

    Parameters:
        X (np.ndarray) - points to run the prediction for.
        with_noise (bool) - whether to add noise to the prediction. Default is True.
    """
    mean, variance = self._predict(X, False, with_noise)
    # variance holds a diagonal of per-point variances, so an elementwise
    # square root yields standard deviations.
    return mean, np.sqrt(variance)
def predict_covariance(self, X, with_noise=True):
    """Predict the covariance matrix for points in X.

    Parameters:
        X (np.ndarray) - points to run the prediction for.
        with_noise (bool) - whether to add noise to the prediction. Default is True.
    """
    # Second element of _predict(..., True, ...) is the full covariance.
    covariance = self._predict(X, True, with_noise)[1]
    return covariance
Generates samples.
Parameters:
n_samples - number of samples to generate
log_p_function - a function that returns log density for a specific sample
burn_in_steps - number of burn-in steps for sampling
Returns a tuple of two array: (samples, log_p_function values for... | def get_samples(self, n_samples, log_p_function, burn_in_steps=50):
restarts = initial_design('random', self.space, n_samples)
sampler = emcee.EnsembleSampler(n_samples, self.space.input_dim(), log_p_function)
samples, samples_log, _ = sampler.run_mcmc(restarts, burn_in_steps)
... | 214,937 |
def session_ended(self, f):
    """Decorator that routes Alexa SessionEndedRequest to *f*.

    The wrapped function is registered as the session_ended view function
    and renders the response for requests at the end of the session.

        @ask.session_ended
        def session_ended():
            return "{}", 200
    """
    self._session_ended_view_func = f

    @wraps(f)
    def wrapper(*args, **kwargs):
        self._flask_view_func(*args, **kwargs)

    # The original view function is returned unchanged, as decorators do.
    return f
def display_element_selected(self, f):
    """Decorator that routes Alexa Display.ElementSelected requests to *f*.

    The wrapped function is registered as the display_element_selected
    view function and renders the response for such requests.

        @ask.display_element_selected
        def eval_element():
            return "", 200
    """
    self._display_element_selected_func = f

    @wraps(f)
    def wrapper(*args, **kwargs):
        self._flask_view_func(*args, **kwargs)

    # The original view function is returned unchanged, as decorators do.
    return f
Returns a string that is valid JSON or YAML and contains all the
variables in every extra_vars_opt inside of extra_vars_list.
Args:
parse_kv (bool): whether to allow key=value syntax.
force_json (bool): if True, always output json. | def process_extra_vars(extra_vars_list, force_json=True):
# Read from all the different sources and put into dictionary
extra_vars = {}
extra_vars_yaml = ""
for extra_vars_opt in extra_vars_list:
# Load file content if necessary
if extra_vars_opt.startswith("@"):
with op... | 216,641 |
Expand PyYAML's built-in dumper to support parsing OrderedDict. Return
a string as parse result of the original data structure, which includes
OrderedDict.
Args:
data: the data structure to be dumped(parsed) which is supposed to
contain OrderedDict.
Dumper: the yaml serializer to be... | def ordered_dump(data, Dumper=yaml.Dumper, **kws):
class OrderedDumper(Dumper):
pass
def _dict_representer(dumper, data):
return dumper.represent_mapping(
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
data.items())
OrderedDumper.add_representer(OrderedDict,
... | 216,642 |
Extract a tarfile described by a file object to a specified path.
Args:
fileobj (file): File object wrapping the target tarfile.
dest_path (str): Path to extract the contents of the tarfile to. | def tarfile_extract(fileobj, dest_path):
# Though this method doesn't fit cleanly into the TarPartition object,
# tarballs are only ever extracted for partitions so the logic jives
# for the most part.
tar = tarfile.open(mode='r|', fileobj=fileobj,
buf... | 216,785 |
Return Blobstore instance for a given storage layout
Args:
layout (StorageLayout): Target storage layout. | def get_blobstore(layout):
if layout.is_s3:
from wal_e.blobstore import s3
blobstore = s3
elif layout.is_wabs:
from wal_e.blobstore import wabs
blobstore = wabs
elif layout.is_swift:
from wal_e.blobstore import swift
blobstore = swift
elif layout.is_g... | 216,832 |
All functions are replaced with the same `new` function.
Args:
vroot (:obj:`Variable`): NNabla Variable
entry_variables (:obj:`Variable`): Entry variable from which the conversion starts. | def convert(self, vroot, entry_variables):
self.graph_info = GraphInfo(vroot)
self.entry_variables = entry_variables
cnt = 0
with nn.parameter_scope(self.name):
# Function loop in the forward order
for t, func in enumerate(self.graph_info.funcs):
... | 217,044 |
All functions are replaced with the same `new` function.
Args:
vroot (:obj:`Variable`): NNabla Variable
entry_variables (:obj:`Variable`): Entry variable from which the conversion starts. | def convert(self, vroot, entry_variables):
self.graph_info = GraphInfo(vroot)
self.entry_variables = entry_variables
with nn.parameter_scope(self.name):
# Function loop in the forward order
for func in self.graph_info.funcs:
o = self._identity_co... | 217,049 |
def pop_parameter(key):
    """Remove and return a parameter by key.

    Args:
        key(str): Key of parameter; '/' separates nested scopes.

    Returns: ~nnabla.Variable
        Parameter if key found, otherwise None.
    """
    scope, sep, rest = key.partition('/')
    if sep:
        # Descend into the leading scope and pop the remainder of the path.
        with parameter_scope(scope):
            return pop_parameter(rest)
    global current_scope
    param = current_scope.get(key, None)
    if param is not None:
        del current_scope[key]
    return param
Get parameter Variables under the current parameter scope.
Args:
params (dict): Internal use. User doesn't set it manually.
path (str): Internal use. User doesn't set it manually.
grad_only (bool): Retrieve all parameters under the current scope if
False, while only parameters ... | def get_parameters(params=None, path='', grad_only=True):
global current_scope
if params is None:
params = OrderedDict()
for k, v in iteritems(current_scope):
if isinstance(v, dict):
with parameter_scope(k):
params = get_parameters(
param... | 217,066 |
Load parameters from a file with the specified format.
Args:
path : path or file object | def load_parameters(path, proto=None, needs_proto=False):
_, ext = os.path.splitext(path)
if ext == '.h5':
# TODO temporary work around to suppress FutureWarning message.
import warnings
warnings.simplefilter('ignore', category=FutureWarning)
import h5py
with h5py.F... | 217,068 |
Save all parameters into a file with the specified format.
Currently hdf5 and protobuf formats are supported.
Args:
path : path or file object
params (dict, optional): Parameters to be saved. Dictionary is of a parameter name (:obj:`str`) to :obj:`~nnabla.Variable`. | def save_parameters(path, params=None):
_, ext = os.path.splitext(path)
params = get_parameters(grad_only=False) if params is None else params
if ext == '.h5':
# TODO temporary work around to suppress FutureWarning message.
import warnings
warnings.simplefilter('ignore', categor... | 217,069 |
All functions are replaced with the same `new` function.
Args:
vroot (:obj:`Variable`): NNabla Variable
entry_variables (:obj:`Variable`): Entry variable from which the conversion starts. | def convert(self, vroot, entry_variables):
self.graph_info = GraphInfo(vroot)
self.entry_variables = entry_variables
with nn.parameter_scope(self.name):
# Function loop in the forward order
for t, func in enumerate(self.graph_info.funcs):
if func... | 217,255 |
load
Load network information from files.
Args:
filenames (list): List of filenames.
Returns:
dict: Network information. | def load(filenames, prepare_data_iterator=True, batch_size=None, exclude_parameter=False, parameter_only=False):
class Info:
pass
info = Info()
proto = nnabla_pb2.NNablaProtoBuf()
for filename in filenames:
_, ext = os.path.splitext(filename)
# TODO: Here is some known pro... | 217,274 |
def convert(self, vroot, entry_variables):
    """Convert a given graph.

    Applies each converter in `self.converters` sequentially, in
    registration order, feeding each converter's output to the next.

    Args:
        vroot (:obj:`Variable`): NNabla Variable
        entry_variables (:obj:`Variable`): Entry variable from which the
            conversion starts.
    """
    result = vroot
    for cv in self.converters:
        result = cv.convert(result, entry_variables)
    return result
Get a unique function name.
Args:
function_type(str): Name of Function. Ex) Convolution, Affine
functions(OrderedDict of (str, Function)
Returns: str
A unique function name | def _get_unique_function_name(function_type, functions):
function_name = function_name_base = function_type
count = 2
while function_name in functions:
function_name = '{}_{}'.format(function_name_base, count)
count += 1
return function_name | 217,375 |
Get a unique variable name.
Args:
vname(str): A candidate name.
variable(OrderedDict of str and Variable)
Returns: str
A unique variable name | def _get_unique_variable_name(vname, variables):
count = 2
vname_base = vname
while vname in variables:
vname = '{}_{}'.format(vname_base, count)
count += 1
return vname | 217,376 |
def sum(x, axis=None, keepdims=False):
    """Reduction along axes with sum operation.

    Args:
        x (Variable): An input variable.
        axis (None, int or tuple of ints): Axis or axes along which the sum
            is calculated. The default `None` reduces all dimensions.
        keepdims (bool): Flag whether the reduced axes are kept.
    """
    from .function_bases import sum as sum_base
    # Normalize `axis` into an iterable of axes.
    if axis is None:
        reduce_axes = range(x.ndim)
    elif hasattr(axis, '__iter__'):
        reduce_axes = axis
    else:
        reduce_axes = [axis]
    return sum_base(x, reduce_axes, keepdims)
def mean(x, axis=None, keepdims=False):
    """Reduction along axes with mean operation.

    Args:
        x (Variable): An input variable.
        axis (None, int or tuple of ints): Axis or axes along which mean is
            calculated. The default `None` reduces all dimensions.
        keepdims (bool): Flag whether the reduced axes are kept.
    """
    from .function_bases import mean as mean_base
    # Normalize `axis` into an iterable of axes.
    if axis is None:
        reduce_axes = range(x.ndim)
    elif hasattr(axis, '__iter__'):
        reduce_axes = axis
    else:
        reduce_axes = [axis]
    return mean_base(x, reduce_axes, keepdims)
def prod(x, axis=None, keepdims=False):
    """Reduction along axes with product operation.

    Args:
        x (Variable): An input variable.
        axis (None, int or tuple of ints): Axis or axes along which product
            is calculated. The default `None` reduces all dimensions.
        keepdims (bool): Flag whether the reduced axes are kept.
    """
    from .function_bases import prod as prod_base
    # Normalize `axis` into an iterable of axes.
    if axis is None:
        reduce_axes = range(x.ndim)
    elif hasattr(axis, '__iter__'):
        reduce_axes = axis
    else:
        reduce_axes = [axis]
    return prod_base(x, reduce_axes, keepdims)
Reduction function with given operation.
Args:
x (Variable): An input.
op (str): 'sum' or 'mean'.
Note:
This is deprecated. Use ``mean`` or ``sum`` instead. | def reduce(x, op='sum'):
import warnings
warnings.warn(
"Deprecated API. Use ``sum`` or ``mean`` instead.", DeprecationWarning)
from .function_bases import reduce_sum, reduce_mean
if op == 'sum':
return reduce_sum(x)
elif op == 'mean':
return reduce_mean(x)
raise Val... | 217,407 |
def split(x, axis=0):
    """Split arrays at the specified axis.

    Returns a number of :obj:`~nnabla.Variable` s corresponding to the size
    of the given axis (i.e ``x.shape[axis]``).

    Args:
        x(~nnabla.Variable): N-D array
        axis(int): Axis

    Returns: A :obj:`tuple` of :obj:`~nnabla.Variable` s
    """
    from .function_bases import split as split_base
    n_outputs = x.shape[axis]
    return split_base(x, axis, n_outputs)
Download a file from URL.
Args:
url (str): URL.
output_file (str, optional): If given, the downloaded file is written to the given path.
open_file (bool): If True, it returns an opened file stream of the downloaded file.
allow_overwrite (bool): If True, it overwrites an existing fil... | def download(url, output_file=None, open_file=True, allow_overwrite=False):
filename = url.split('/')[-1]
if output_file is None:
cache = os.path.join(get_data_home(), filename)
else:
cache = output_file
if os.path.exists(cache) and not allow_overwrite:
logger.info("> {} alr... | 217,430 |
def get_learning_rate(self, iter):
    """Get learning rate with polynomial decay based on current iteration.

    Args:
        iter (int): current iteration (starting with 0).

    Returns:
        float: Learning rate
    """
    progress = iter * 1.0 / self.max_iter
    return self.init_lr * ((1.0 - progress) ** self.power)
def get_learning_rate(self, iter):
    """Get learning rate with cosine decay based on current iteration.

    Args:
        iter (int): Current iteration (starting with 0).

    Returns:
        float: Learning rate
    """
    phase = iter * 1.0 / self.max_iter * math.pi
    # Cosine annealing: init_lr at iter 0, approaching 0 at max_iter.
    return self.init_lr * (math.cos(phase) + 1.0) * 0.5
def get_learning_rate(self, iter):
    """Get learning rate with exponential decay based on current iteration.

    Args:
        iter (int): Current iteration (starting with 0).

    Returns:
        float: Learning rate
    """
    # One decay step every `iter_interval` iterations.
    decay_steps = iter // self.iter_interval
    return self.init_lr * (self.gamma ** decay_steps)
def get_learning_rate(self, iter):
    """Get learning rate with stepwise decay based on current iteration.

    The rate is multiplied by `gamma` once for each boundary in
    `iter_steps` that has been passed.

    Args:
        iter (int): Current iteration (starting with 0).

    Returns:
        float: Learning rate
    """
    lr = self.init_lr
    for boundary in self.iter_steps:
        if iter < boundary:
            continue
        lr *= self.gamma
    return lr
def get_learning_rate(self, iter):
    """Get learning rate with linear warmup applied over a base scheduler.

    Args:
        iter (int): Current iteration (starting with 0).

    Returns:
        float: Learning rate
    """
    base_lr = self.scheduler.get_learning_rate(iter)
    if iter >= self.warmup_iter:
        return base_lr
    # Linear warmup: scale by the (1-indexed) fraction of warmup completed.
    return base_lr * (iter + 1) * 1.0 / self.warmup_iter
def create_inputs(inspecs):
    """Create input :obj:`nnabla.Variable` s from :obj:`Inspec` s.

    Args:
        inspecs (:obj:`list` of :obj:`Inspec`): A list of ``Inspec``.

    Returns:
        :obj:`list` of :obj:`nnabla.Variable`: Input variables, each
        initialized with data produced by its spec's ``init`` callable.
    """
    variables = []
    for spec in inspecs:
        var = nn.Variable(spec.shape, need_grad=spec.need_grad)
        var.d = spec.init(var.shape)
        variables.append(var)
    return variables
Write a single function benchmark.
Args:
fb (FunctionBenchmark): FunctionBenchmark class instance.
Before passing to this, you should call ``fb.benchmark()``. | def write(self, fb):
print('[{}.{}]'.format(fb.module, fb.func.__name__), file=self.file)
print('class = {}'.format(fb.func_ins.name), file=self.file)
print('inspecs = {}'.format(repr(fb.inspecs)), file=self.file)
print('func_args = {}'.format(repr(fb.func_args)), file=self.file... | 217,520 |
Create a function instance and execute setup.
Args:
delete (bool): Delete buffered variables. | def _setup(self, delete=True):
if delete:
self.clear()
with nn.context_scope(self.ctx):
outputs = self.func(
*(self.inputs_f + self.func_args), **self.func_kwargs)
if not hasattr(outputs, '__iter__'):
self.outputs = [outputs]
... | 217,525 |
Convert an array with shape of (B, C, H, W) into a tiled image.
Args:
data (~numpy.ndarray): An array with shape of (B, C, H, W).
padsize (int): Each tile has padding with this size.
padval (float): Padding pixels are filled with this value.
Returns:
tile_image (~numpy.ndarray)... | def tile_images(data, padsize=1, padval=0):
assert(data.ndim == 4)
data = data.transpose(0, 2, 3, 1)
# force the number of filters to be square
n = int(np.ceil(np.sqrt(data.shape[0])))
padding = (
(0, n ** 2 - data.shape[0]),
(0, padsize),
(0, padsize)
) + ((0, 0),)... | 217,578 |
def plot_series(filename, plot_kwargs=None):
    """Plot series data from a MonitorSeries output text file.

    Args:
        filename (str): Path to *.series.txt file produced by
            :obj:`~nnabla.MonitorSeries` class.
        plot_kwargs (dict, optional): Keyword arguments passed to
            :function:`matplotlib.pyplot.plot`.

    Note:
        The matplotlib package is required.
    """
    import matplotlib.pyplot as plt
    kwargs = {} if plot_kwargs is None else plot_kwargs
    # Each line of the file is "<index> <value>".
    data = np.genfromtxt(filename, dtype='i8,f4', names=['k', 'v'])
    plt.plot(data['k'], data['v'], **kwargs)
Plot series data from MonitorTimeElapsed output text file.
Args:
filename (str): Path to *.series.txt file produced by :obj:`~nnabla.MonitorSeries` class.
elapsed (bool): If ``True``, it plots the total elapsed time.
unit (str):
Time unit chosen from ``'s'``, ``'m'``, ``'h'``, o... | def plot_time_elapsed(filename, elapsed=False, unit='s', plot_kwargs=None):
import matplotlib.pyplot as plt
if plot_kwargs is None:
plot_kwargs = {}
data_column = 3 if elapsed else 1
data = np.genfromtxt(filename, dtype='i8,f4',
usecols=(0, data_column), names=['k... | 217,580 |
Add a value to the series.
Args:
index (int): Index.
value (float): Value. | def add(self, index, value):
self.buf.append(value)
if (index - self.flush_at) < self.interval:
return
value = np.mean(self.buf)
if self.verbose:
logger.info("iter={} {{{}}}={}".format(index, self.name, value))
if self.fd is not None:
... | 217,583 |
Calculate time elapsed from the point previously called
this method or this object is created to this is called.
Args:
index (int): Index to be displayed, and be used to take intervals. | def add(self, index):
if (index - self.flush_at) < self.interval:
return
now = time.time()
elapsed = now - self.lap
elapsed_total = now - self.start
it = index - self.flush_at
self.lap = now
if self.verbose:
logger.info("iter={} {{... | 217,585 |
Add a minibatch of images to the monitor.
Args:
index (int): Index.
var (:obj:`~nnabla.Variable`, :obj:`~nnabla.NdArray`, or :obj:`~numpy.ndarray`):
A minibatch of images with ``(N, ..., C, H, W)`` format.
If C == 2, blue channel is appended with ones. If... | def add(self, index, var):
import nnabla as nn
from nnabla.utils.image_utils import imsave
if index != 0 and (index + 1) % self.interval != 0:
return
if isinstance(var, nn.Variable):
data = var.d.copy()
elif isinstance(var, nn.NdArray):
... | 217,588 |
All functions are replaced with the same `new` function.
Args:
vroot (:obj:`Variable`): NNabla Variable
entry_variables (:obj:`Variable`): Entry variable from which the conversion starts. | def convert(self, vroot, entry_variables):
self.graph_info = GraphInfo(vroot)
self.entry_variables = entry_variables
with nn.parameter_scope(self.name):
# Function loop in the forward order
for t, func in enumerate(self.graph_info.funcs):
# Activ... | 217,626 |
def auto_forward(auto=True):
    """Context for dynamic graph execution mode.

    Temporarily sets the global auto-forward state (whether forward
    computation is executed during computation graph construction) and
    restores the previous state on exit.

    Args:
        auto (bool): Whether forward computation is executed during a
            computation graph construction.

    Returns: bool
    """
    global __auto_forward_state
    prev = __auto_forward_state
    __auto_forward_state = auto
    try:
        yield
    finally:
        # Restore even if the with-block raises; otherwise an exception
        # would leave the global execution mode permanently flipped.
        __auto_forward_state = prev
Manually print profiling result.
Args:
reset (bool): If False is specified, the profiling statistics so
far is maintained. If ``True`` (default),
:obj:`~reset_stats`
is called to reset the profiling statistics. | def print_stats(self, reset=True):
if not self.ncalls:
return
stats = self.stats
code = self.fn.__code__
print('--- Function Profiling ---')
print('File "{}", line {}, function {}'.format(
code.co_filename,
code.co_firstlineno,
... | 217,640 |
def save(self, vleaf, fpath, cleanup=False, format=None):
    """Save the graph to a given file path.

    Args:
        vleaf (`nnabla.Variable`): End variable. All variables and functions
            reachable from this variable are shown in the result.
        fpath (`str`): The file path used to save.
        cleanup (`bool`): Clean up the source file after rendering.
        format (`str`, optional): Output format passed to graphviz.
    """
    digraph = self.create_graphviz_digraph(vleaf, format=format)
    digraph.render(fpath, cleanup=cleanup)
def view(self, vleaf, fpath=None, cleanup=True, format=None):
    """View the graph.

    Args:
        vleaf (`nnabla.Variable`): End variable. All variables and functions
            reachable from this variable are shown in the result.
        fpath (`str`): The file path used to save.
        cleanup (`bool`): Clean up the source file after rendering.
            Default is True.
        format (`str`, optional): Output format passed to graphviz.
    """
    digraph = self.create_graphviz_digraph(vleaf, format=format)
    digraph.view(fpath, cleanup=cleanup)
Get parameters.
Args:
grad_only (bool, optional): Return parameters with `need_grad` option as `True`.
If you set this option as `False`, All parameters are returned. Default is `True`.
Returns:
dict: The dictionary of parameter name (`str`) to Variable (:obj:`~nnabl... | def get_parameters(self, grad_only=True):
params = OrderedDict()
for v in self.get_modules():
if not isinstance(v, tuple):
continue
prefix, module = v
for k, v in module.__dict__.items():
if not isinstance(v, nn.Variable):
... | 217,661 |
def save_parameters(self, path, grad_only=False):
    """Save all parameters into a file with the specified format.

    Currently hdf5 and protobuf formats are supported.

    Args:
        path : path or file object
        grad_only (bool, optional): If `True`, only save parameters with
            the `need_grad` option set; otherwise save all parameters.
    """
    nn.save_parameters(path, self.get_parameters(grad_only=grad_only))
Load parameters from a file with the specified format.
Args:
path : path or file object | def load_parameters(self, path):
nn.load_parameters(path)
for v in self.get_modules():
if not isinstance(v, tuple):
continue
prefix, module = v
for k, v in module.__dict__.items():
if not isinstance(v, nn.Variable):
... | 217,664 |
All functions are replaced with the same `new` function.
Args:
vroot (:obj:`Variable`): NNabla Variable
entry_variables (:obj:`Variable`): Entry variable from which the conversion starts. | def convert(self, vroot, entry_variables):
self.graph_info = GraphInfo(vroot)
self.entry_variables = entry_variables
with nn.parameter_scope(self.name):
# Function loop in the forward order
for t, func in enumerate(self.graph_info.funcs):
# TODO:... | 217,666 |
Calculate length of a string for a given built-in font.
Args:
fontname: name of the font.
fontsize: size of font in points.
encoding: encoding to use (0=Latin, 1=Greek, 2=Cyrillic).
Returns:
(float) length of text. | def getTextlength(text, fontname="helv", fontsize=11, encoding=0):
fontname = fontname.lower()
basename = Base14_fontdict.get(fontname, None)
glyphs = None
if basename == "Symbol":
glyphs = symbol_glyphs
if basename == "ZapfDingbats":
glyphs = zapf_glyphs
if glyphs is not N... | 218,122 |
Returns the parsed table of a page in a PDF / (open) XPS / EPUB document.
Parameters:
page: fitz.Page object
bbox: containing rectangle, list of numbers [xmin, ymin, xmax, ymax]
columns: optional list of column coordinates. If None, columns are generated
Returns the parsed table as a list of lists o... | def ParseTab(page, bbox, columns = None):
tab_rect = fitz.Rect(bbox).irect
xmin, ymin, xmax, ymax = tuple(tab_rect)
if tab_rect.isEmpty or tab_rect.isInfinite:
print("Warning: incorrect rectangle coordinates!")
return []
if type(columns) is not list or columns == []:
... | 218,367 |
Show page number 'pno' of PDF 'src' in rectangle 'rect'.
Args:
rect: (rect-like) where to place the source image
src: (document) source PDF
pno: (int) source page number
overlay: (bool) put in foreground
keep_proportion: (bool) do not change width-height-ratio
rotate... | def showPDFpage(
page,
rect,
src,
pno=0,
overlay=True,
keep_proportion=True,
rotate=0,
reuse_xref=0,
clip = None,
):
def calc_matrix(sr, tr, keep=True, rotate=0):
# calc center point of source rect
smp = Point... | 218,404 |
Insert an image in a rectangle on the current page.
Notes:
Exactly one of filename, pixmap or stream must be provided.
Args:
rect: (rect-like) where to place the source image
filename: (str) name of an image file
pixmap: (obj) a Pixmap object
stream: (bytes) an image in ... | def insertImage(page, rect, filename=None, pixmap=None, stream=None, rotate=0,
keep_proportion = True,
overlay=True):
def calc_matrix(fw, fh, tr, rotate=0):
# center point of target rect
tmp = Point((tr.x1 + tr.x0) / 2., (tr.y1 + tr.y0) / 2.)
r... | 218,405 |
Search for a string on a page.
Args:
text: string to be searched for
hit_max: maximum hits
quads: return quads instead of rectangles
Returns:
a list of rectangles or quads, each containing one occurrence. | def searchFor(page, text, hit_max = 16, quads = False):
CheckParent(page)
dl = page.getDisplayList() # create DisplayList
tp = dl.getTextPage() # create TextPage
# return list of hitting reactangles
rlist = tp.search(text, hit_max = hit_max, quads = quads)
dl = None
... | 218,406 |
def searchPageFor(doc, pno, text, hit_max=16, quads=False):
    """Search for a string on a page.

    Args:
        pno: page number
        text: string to be searched for
        hit_max: maximum hits
        quads: return quads instead of rectangles

    Returns:
        a list of rectangles or quads, each containing an occurrence.
    """
    page = doc[pno]
    return page.searchFor(text, hit_max=hit_max, quads=quads)
def getTextBlocks(page, images=False):
    """Return the text blocks on a page.

    Notes:
        Lines in a block are concatenated with line breaks.
    Args:
        images: (bool) also return meta data of any images.
                Image data are never returned with this method.
    Returns:
        A list of the blocks.
    """
    CheckParent(page)
    display_list = page.getDisplayList()
    flags = TEXT_PRESERVE_LIGATURES | TEXT_PRESERVE_WHITESPACE
    if images:
        flags |= TEXT_PRESERVE_IMAGES
    text_page = display_list.getTextPage(flags)
    blocks = text_page._extractTextBlocks_AsList()
    # Drop the intermediate MuPDF objects promptly to release resources.
    del text_page
    del display_list
    return blocks
Extract a document page's text.
Args:
output: (str) text, html, dict, json, rawdict, xhtml or xml.
Returns:
the output of TextPage methods extractText, extractHTML, extractDICT, extractJSON, extractRAWDICT, extractXHTML or etractXML respectively. Default and misspelling choice is "text". | def getText(page, output = "text"):
CheckParent(page)
dl = page.getDisplayList()
# available output types
formats = ("text", "html", "json", "xml", "xhtml", "dict", "rawdict")
# choose which of them also include images in the TextPage
images = (0, 1, 1, 0, 1, 1, 1) # controls image inc... | 218,410 |
Create pixmap of page.
Args:
matrix: Matrix for transformation (default: Identity).
colorspace: (str/Colorspace) rgb, rgb, gray - case ignored, default csRGB.
clip: (irect-like) restrict rendering to this area.
alpha: (bool) include alpha channel | def getPixmap(page, matrix = None, colorspace = csRGB, clip = None,
alpha = True):
CheckParent(page)
# determine required colorspace
cs = colorspace
if type(colorspace) is str:
if colorspace.upper() == "GRAY":
cs = csGRAY
elif colorspace.upper() == "CMYK":... | 218,411 |
def getPagePixmap(doc, pno, matrix=None, colorspace=csRGB,
                  clip=None, alpha=True):
    """Create pixmap of document page by page number.

    Notes:
        Convenience function calling page.getPixmap.
    Args:
        pno: (int) page number
        matrix: Matrix for transformation (default: Identity).
        colorspace: (str/Colorspace) rgb, cmyk, gray - case ignored,
            default csRGB.
        clip: (irect-like) restrict rendering to this area.
        alpha: (bool) include alpha channel.
    """
    page = doc[pno]
    return page.getPixmap(matrix=matrix, colorspace=colorspace,
                          clip=clip, alpha=alpha)
Create a table of contents.
Args:
simple: a bool to control output. Returns a list, where each entry consists of outline level, title, page number and link destination (if simple = False). For details see PyMuPDF's documentation. | def getToC(doc, simple = True):
def recurse(olItem, liste, lvl):
while olItem:
if olItem.title:
title = olItem.title
else:
title = " "
if not olItem.isExternal:
if olItem.uri:
page = olIte... | 218,415 |
Draw a circle sector given circle center, one arc end point and the angle of the arc.
Parameters:
center -- center of circle
point -- arc end point
beta -- angle of arc (degrees)
fullSector -- connect arc ends with center | def drawSector(page, center, point, beta, color=None, fill=None,
dashes=None, fullSector=True, morph=None,
width=1, closePath=False, roundCap=False, overlay=True):
img = page.newShape()
Q = img.drawSector(Point(center), Point(point), beta, fullSector=fullSector)
img.finish(c... | 218,434 |
Set a float option.
Args:
option (str): name of option.
value (float): value of the option.
Raises:
TypeError: Value must be a float. | def set_float(self, option, value):
if not isinstance(value, float):
raise TypeError("Value must be a float")
self.options[option] = value | 218,558 |
Set an integer option.
Args:
option (str): name of option.
value (int): value of the option.
Raises:
ValueError: Value must be an integer. | def set_integer(self, option, value):
try:
int_value = int(value)
except ValueError as err:
print(err.args)
self.options[option] = value | 218,559 |
Set a boolean option.
Args:
option (str): name of option.
value (bool): value of the option.
Raises:
TypeError: Value must be a boolean. | def set_boolean(self, option, value):
if not isinstance(value, bool):
raise TypeError("%s must be a boolean" % option)
self.options[option] = str(value).lower() | 218,560 |
Set a string option.
Args:
option (str): name of option.
value (str): value of the option.
Raises:
TypeError: Value must be a string. | def set_string(self, option, value):
if not isinstance(value, str):
raise TypeError("%s must be a string" % option)
self.options[option] = value | 218,561 |
Set the MetricsGraphics chart type.
Allowed charts are: line, histogram, point, and bar
Args:
value (str): chart type.
Raises:
ValueError: Not a valid chart type. | def chart_type(self, value):
if value not in self._allowed_charts:
raise ValueError("Not a valid chart type")
self.options["chart_type"] = value | 218,562 |
Set the custom line color map.
Args:
values (list): list of colors.
Raises:
TypeError: Custom line color map must be a list. | def custom_line_color_map(self, values):
if not isinstance(values, list):
raise TypeError("custom_line_color_map must be a list")
self.options["custom_line_color_map"] = values | 218,563 |
Set the legend labels.
Args:
values (list): list of labels.
Raises:
ValueError: legend must be a list of labels. | def legend(self, values):
if not isinstance(values, list):
raise TypeError("legend must be a list of labels")
self.options["legend"] = values | 218,564 |
Set the markers.
Args:
values (list): list of marker objects.
Raises:
ValueError: Markers must be a list of objects. | def markers(self, values):
if not isinstance(values, list):
raise TypeError("Markers must be a list of objects")
self.options["markers"] = values | 218,565 |
Show confidence band?
See metricsgraphics documentation
Args:
value (list): strings
Raises:
TypeError: show_confidence_band must be a list of strings. | def show_confidence_band(self, value):
if not isinstance(values, list):
raise TypeError("show_confidence_band must be a list of strings")
self.options["show_confidence_band"] = values | 218,566 |
Set margin of the chart.
Args:
top (int): size of top margin in pixels.
bottom (int): size of bottom margin in pixels.
left (int): size of left margin in pixels.
right (int): size of right margin in pixels.
buffer_size (int): b... | def set_margin(self, top=40, bottom=30, left=50, right=10, buffer_size=8):
self.set_integer("top", top)
self.set_integer("bottom", bottom)
self.set_integer("left", left)
self.set_integer("right", right)
self.set_integer("buffer", buffer_size) | 218,588 |
Set the size of the chart.
Args:
height (int): height in pixels.
width (int): width in pixels.
height_threshold (int): height threshold in pixels
width_threshold (int): width threshold in pixesls | def set_size(self, height=220, width=350,
height_threshold=120,
width_threshold=160):
self.set_integer("height", height)
self.set_integer("width", width)
self.set_integer("small_height_threshold", height_threshold)
self.set_integer("small_width_... | 218,589 |
Formats props for the React template.
Args:
props (dict): properties to be written to the template.
Returns:
Two lists, one containing variable names and the other
containing a list of props to be fed to the React template. | def format_props(props, prop_template="{{k}} = { {{v}} }", delim="\n"):
vars_ = []
props_ = []
for k, v in list(props.items()):
vars_.append(Template("var {{k}} = {{v}};").render(k=k,v=json.dumps(v)))
props_.append(Template(prop_template).render(k=k, v=k))
return "\n".join(vars_), d... | 218,591 |
register UILayout with the flask app
create a function that will send props for each UILayout
Args:
layouts (dict): dict of UILayout objects by name
app (object): flask app
url (string): address of props; default is /api/props/ | def register_layouts(layouts, app, url="/api/props/", brand="Pyxley"):
def props(name):
if name not in layouts:
# cast as list for python3
name = list(layouts.keys())[0]
return jsonify({"layouts": layouts[name]["layout"]})
def apps():
paths = []
for ... | 218,605 |
create a mg line plot
Args:
df (pandas.DataFrame): data to plot | def create_line_plot(df):
fig = Figure("/mg/line_plot/", "mg_line_plot")
fig.graphics.transition_on_update(True)
fig.graphics.animate_on_load()
fig.layout.set_size(width=450, height=200)
fig.layout.set_margin(left=40, right=40)
return LineChart(df, fig, "Date", ["value"],
init_param... | 218,630 |
create a mg line plot
Args:
df (pandas.DataFrame): data to plot | def create_histogram(df):
fig = Figure("/mg/histogram/", "mg_histogram")
fig.layout.set_size(width=450, height=200)
fig.layout.set_margin(left=40, right=40)
fig.graphics.animate_on_load()
# Make a histogram with 20 bins
return Histogram(df, fig, "value", 20, init_params={"Data": "Steps"}) | 218,631 |
create a mg line plot
Args:
df (pandas.DataFrame): data to plot | def create_scatterplot(df):
fig = Figure("/mg/scatter/", "mg_scatter")
fig.layout.set_size(width=450, height=200)
fig.layout.set_margin(left=40, right=40)
fig.graphics.animate_on_load()
init_params = {"Data": "Steps"}
def get_data():
y = request.args.get("Data", "Steps")
r... | 218,632 |
Set x-axis limits.
Accepts a two-element list to set the x-axis limits.
Args:
xlim (list): lower and upper bounds
Raises:
ValueError: xlim must contain two elements
ValueError: Min must be less than max | def set_xlim(self, xlim):
if len(xlim) != 2:
raise ValueError("xlim must contain two elements")
if xlim[1] < xlim[0]:
raise ValueError("Min must be less than Max")
self.options["min_x"] = xlim[0]
self.options["max_x"] = xlim[1] | 218,635 |
Set y-axis limits.
Accepts a two-element list to set the y-axis limits.
Args:
ylim (list): lower and upper bounds
Raises:
ValueError: ylim must contain two elements
ValueError: Min must be less than max | def set_ylim(self, ylim):
if len(ylim) != 2:
raise ValueError("ylim must contain two elements")
if ylim[1] < ylim[0]:
raise ValueError("Min must be less than Max")
self.options["min_y"] = ylim[0]
self.options["max_y"] = ylim[1] | 218,636 |
basic line plot
dataframe to json for a line plot
Args:
df (pandas.DataFrame): input dataframe
xypairs (list): list of tuples containing column names
mode (str): plotly.js mode (e.g. lines)
layout (dict): layout parameters
... | def line_plot(df, xypairs, mode, layout={}, config=_BASE_CONFIG):
if df.empty:
return {
"x": [],
"y": [],
"mode": mode
}
_data = []
for x, y in xypairs:
if (x in df.columns) and (y in df.columns):
... | 218,639 |
Return the function call result decoded.
Args:
function_name (str): One of the existing functions described in the
contract interface.
data (bin): The encoded result from calling `function_name`.
Return:
List[object]: The values returned by the call ... | def decode_function_result(self, function_name, data):
description = self.function_data[function_name]
arguments = decode_abi(description['decode_types'], data)
return arguments | 219,309 |
Return a dictionary representation the log.
Note:
This function won't work with anonymous events.
Args:
log_topics (List[bin]): The log's indexed arguments.
log_data (bin): The encoded non-indexed arguments. | def decode_event(self, log_topics, log_data):
# https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI#function-selector-and-argument-encoding
# topics[0]: keccak(EVENT_NAME+"("+EVENT_ARGS.map(canonical_type_of).join(",")+")")
# If the event is declared as anonymous the topics[0] ... | 219,311 |
Return a dictionary representation of the Log instance.
Note:
This function won't work with anonymous events.
Args:
log (processblock.Log): The Log instance that needs to be parsed.
noprint (bool): Flag to turn off priting of the decoded log instance. | def listen(self, log, noprint=True):
try:
result = self.decode_event(log.topics, log.data)
except ValueError:
return # api compatibility
if not noprint:
print(result)
return result | 219,312 |
Return the compile contract code.
Args:
filepath (str): The path to the contract source code.
libraries (dict): A dictionary mapping library name to it's address.
combined (str): The argument for solc's --combined-json.
optimize (bool): Enable/disables compiler optimization.
Re... | def compile_file(filepath, libraries=None, combined='bin,abi',
optimize=True, extra_args=None):
workdir, filename = os.path.split(filepath)
args = solc_arguments(
libraries=libraries,
combined=combined,
optimize=optimize,
extra_args=extra_args)
args.in... | 219,642 |
gpp -- model for the graph partitioning problem
Parameters:
- V: set/list of nodes in the graph
- E: set/list of edges in the graph
Returns a model, ready to be solved. | def gpp(V,E):
model = Model("gpp")
x = {}
y = {}
for i in V:
x[i] = model.addVar(vtype="B", name="x(%s)"%i)
for (i,j) in E:
y[i,j] = model.addVar(vtype="B", name="y(%s,%s)"%(i,j))
model.addCons(quicksum(x[i] for i in V) == len(V)/2, "Partition")
for (i,j) in E:
... | 220,064 |
gpp -- model for the graph partitioning problem in soco
Parameters:
- V: set/list of nodes in the graph
- E: set/list of edges in the graph
Returns a model, ready to be solved. | def gpp_soco(V,E):
model = Model("gpp model -- soco")
x,s,z = {},{},{}
for i in V:
x[i] = model.addVar(vtype="B", name="x(%s)"%i)
for (i,j) in E:
s[i,j] = model.addVar(vtype="C", name="s(%s,%s)"%(i,j))
z[i,j] = model.addVar(vtype="C", name="z(%s,%s)"%(i,j))
model.addCo... | 220,065 |
make_data: prepare data for a random graph
Parameters:
- n: number of vertices
- prob: probability of existence of an edge, for each pair of vertices
Returns a tuple with a list of vertices and a list edges. | def make_data(n,prob):
V = range(1,n+1)
E = [(i,j) for i in V for j in V if i < j and random.random() < prob]
return V,E | 220,066 |
maxflow: maximize flow from source to sink, taking into account arc capacities M
Parameters:
- V: set of vertices
- M[i,j]: dictionary or capacity for arcs (i,j)
- source: flow origin
- sink: flow target
Returns a model, ready to be solved. | def maxflow(V,M,source,sink):
# create max-flow underlying model, on which to find cuts
model = Model("maxflow")
f = {} # flow variable
for (i,j) in M:
f[i,j] = model.addVar(lb=-M[i,j], ub=M[i,j], name="flow(%s,%s)"%(i,j))
cons = {}
for i in V:
if i != source and i != ... | 220,067 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.