INSTRUCTION stringlengths 1 8.43k | RESPONSE stringlengths 75 104k |
|---|---|
freeze module if names is not None set an array of layers that match given names to be freezed: param names: an array of layer names: return: | def freeze(self, names=None):
"""
freeze module, if names is not None, set an array of layers that match given names
to be freezed
:param names: an array of layer names
:return:
"""
callBigDlFunc(self.bigdl_type, "freeze", self.value, names)
return self |
unfreeze module if names is not None unfreeze layers that match given names: param names: an array of layer names: return: | def unfreeze(self, names=None):
"""
unfreeze module, if names is not None, unfreeze layers that match given names
:param names: an array of layer names
:return:
"""
callBigDlFunc(self.bigdl_type, "unFreeze", self.value, names)
return self |
Set this layer in the training mode or in predition mode if is_training = False | def training(self, is_training=True):
'''
Set this layer in the training mode or in predition mode if is_training=False
'''
if is_training:
callJavaFunc(self.value.training)
else:
callJavaFunc(self.value.evaluate)
return self |
Clone self and quantize it at last return a new quantized model.: return: A new quantized model. | def quantize(self):
'''
Clone self and quantize it, at last return a new quantized model.
:return: A new quantized model.
>>> fc = Linear(4, 2)
creating: createLinear
>>> fc.set_weights([np.ones((2, 4)), np.ones((2,))])
>>> input = np.ones((2, 4))
>>> out... |
Load a pre - trained Bigdl model. | def loadModel(modelPath, weightPath =None, bigdl_type="float"):
"""
Load a pre-trained Bigdl model.
:param path: The path containing the pre-trained model.
:return: A pre-trained model.
"""
jmodel = callBigDlFunc(bigdl_type, "loadBigDLModule", modelPath, weightPath)
... |
Load a pre - trained Torch model. | def load_torch(path, bigdl_type="float"):
"""
Load a pre-trained Torch model.
:param path: The path containing the pre-trained model.
:return: A pre-trained model.
"""
jmodel = callBigDlFunc(bigdl_type, "loadTorch", path)
return Layer.of(jmodel) |
Load a pre - trained Keras model. | def load_keras(json_path=None, hdf5_path=None, by_name=False):
"""
Load a pre-trained Keras model.
:param json_path: The json path containing the keras model definition.
:param hdf5_path: The HDF5 path containing the pre-trained keras model weights with or without the model architecture... |
Load a pre - trained Caffe model. | def load_caffe(model, defPath, modelPath, match_all=True, bigdl_type="float"):
"""
Load a pre-trained Caffe model.
:param model: A bigdl model definition \which equivalent to the pre-trained caffe model.
:param defPath: The path containing the caffe model definition.
:param mod... |
Load a pre - trained Caffe model. | def load_caffe_model(defPath, modelPath, bigdl_type="float"):
"""
Load a pre-trained Caffe model.
:param defPath: The path containing the caffe model definition.
:param modelPath: The path containing the pre-trained caffe model.
:return: A pre-trained model.
"""
... |
Load a pre - trained Tensorflow model.: param path: The path containing the pre - trained model.: param inputs: The input node of this graph: param outputs: The output node of this graph: param byte_order: byte_order of the file little_endian or big_endian: param bin_file: the optional bin file produced by bigdl dump_m... | def load_tensorflow(path, inputs, outputs, byte_order = "little_endian",
bin_file = None, generated_backward = True, bigdl_type = "float"):
"""
Load a pre-trained Tensorflow model.
:param path: The path containing the pre-trained model.
:param inputs: The input no... |
stop the input gradient of layers that match the given names their input gradient are not computed. And they will not contributed to the input gradient computation of layers that depend on them.: param stop_layers: an array of layer names: param bigdl_type:: return: | def stop_gradient(self, stop_layers, bigdl_type="float"):
"""
stop the input gradient of layers that match the given ```names```
their input gradient are not computed.
And they will not contributed to the input gradient computation of
layers that depend on them.
:param st... |
Return the corresponding node has the given name. If the given name doesn t match any node an exception will be thrown: param name: node name: param bigdl_type:: return: | def node(self, name, bigdl_type="float"):
"""
Return the corresponding node has the given name. If the given name doesn't match any node,
an exception will be thrown
:param name: node name
:param bigdl_type:
:return:
"""
jnode = callBigDlFunc(bigdl_type,... |
save current model graph to a folder which can be display in tensorboard by running tensorboard -- logdir logPath: param log_path: path to save the model graph: param bigdl_type:: return: | def save_graph_topology(self, log_path, bigdl_type="float"):
"""
save current model graph to a folder, which can be display in tensorboard by running
tensorboard --logdir logPath
:param log_path: path to save the model graph
:param bigdl_type:
:return:
"""
... |
NB: It s for debug only please use optimizer. optimize () in production. Takes an input object and computes the corresponding loss of the criterion compared with target | def forward(self, input, target):
"""
NB: It's for debug only, please use optimizer.optimize() in production.
Takes an input object, and computes the corresponding loss of the criterion,
compared with `target`
:param input: ndarray or list of ndarray
:param target: ndarr... |
Create a python Criterion by a java criterion object | def of(cls, jcriterion, bigdl_type="float"):
"""
Create a python Criterion by a java criterion object
:param jcriterion: A java criterion object which created by Py4j
:return: a criterion.
"""
criterion = Criterion(bigdl_type, jcriterion)
criterion.value = jcrite... |
Read the directory of images into DataFrame from the local or remote source.: param path Directory to the input data files the path can be comma separated paths as the list of inputs. Wildcards path are supported similarly to sc. binaryFiles ( path ).: param min_partitions A suggestion value of the minimal splitting nu... | def readImages(path, sc=None, minParitions = 1, bigdl_type="float"):
"""
Read the directory of images into DataFrame from the local or remote source.
:param path Directory to the input data files, the path can be comma separated paths as the
list of inputs. Wildcards path are sup... |
The file path can be stored in a local file system HDFS S3 or any Hadoop - supported file system. | def load_weights_from_json_hdf5(def_json, weights_hdf5, by_name=False):
"""
The file path can be stored in a local file system, HDFS, S3,
or any Hadoop-supported file system.
"""
bmodel = DefinitionLoader.from_json_path(def_json)
def_value = BCommon.text_from_path(def_jso... |
Loads all layer weights from a HDF5 save file. filepath can be stored in a local file system HDFS S3 or any Hadoop - supported file system. If by_name is False ( default ) weights are loaded based on the network s execution order topology meaning layers in the execution seq should be exactly the same the architecture | def load_weights_from_hdf5(bmodel, kmodel, filepath, by_name=False):
'''Loads all layer weights from a HDF5 save file.
filepath can be stored in a local file system, HDFS, S3,
or any Hadoop-supported file system.
If `by_name` is False (default) weights are loaded
based on the net... |
Convert kmodel s weights to bigdl format. We are supposing the order is the same as the execution order.: param kmodel: keras model: return: list of ndarray | def get_weights_from_kmodel(kmodel):
"""
Convert kmodel's weights to bigdl format.
We are supposing the order is the same as the execution order.
:param kmodel: keras model
:return: list of ndarray
"""
layers_with_weights = [layer for layer in kmodel.layers if lay... |
The result would contain all of the layers including nested layers.: param kmodel: a keras model which can be Sequential or Model: param node_id_to_config_layer: a container to store the result | def __build_node_id_2_klayer(kmodel, node_id_to_config_layer):
"""
The result would contain all of the layers including nested layers.
:param kmodel: a keras model which can be Sequential or Model
:param node_id_to_config_layer: a container to store the result
"""
node_id... |
: param hdf5_path: hdf5 path which can be stored in a local file system HDFS S3 or any Hadoop - supported file system.: return: BigDL Model | def from_hdf5_path(cls, hdf5_path):
"""
:param hdf5_path: hdf5 path which can be stored in a local file system, HDFS, S3, or any Hadoop-supported file system.
:return: BigDL Model
"""
from keras.models import load_model
hdf5_local_path = BCommon.get_local_file(hdf5_path)
... |
: param json_path: definition path which can be stored in a local file system HDFS S3 or any Hadoop - supported file system.: return: BigDL Model | def from_json_path(cls, json_path):
"""
:param json_path: definition path which can be stored in a local file system, HDFS, S3, or any Hadoop-supported file system.
:return: BigDL Model
"""
json_str = BCommon.text_from_path(json_path)
return DefinitionLoader.from_json_str... |
Load IMDB dataset Transform input data into an RDD of Sample | def load_imdb():
"""
Load IMDB dataset
Transform input data into an RDD of Sample
"""
from keras.preprocessing import sequence
from keras.datasets import imdb
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=20000)
X_train = sequence.pad_sequences(X_train, maxlen=100)
X... |
Define a recurrent convolutional model in Keras 1. 2. 2 | def build_keras_model():
"""
Define a recurrent convolutional model in Keras 1.2.2
"""
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import LSTM
from keras.layers import Convolution1D, MaxPooli... |
Functional merge. Only use this method if you are defining a graph model. Used to merge a list of input nodes into a single output node ( NOT layers! ) following some merge mode. | def merge(inputs, mode="sum", concat_axis=-1, name=None):
"""
Functional merge. Only use this method if you are defining a graph model.
Used to merge a list of input nodes into a single output node (NOT layers!),
following some merge mode.
# Arguments
inputs: A list of node instances. Must be m... |
Return a list of shape tuples if there are multiple inputs. Return one shape tuple otherwise. | def get_input_shape(self):
"""
Return a list of shape tuples if there are multiple inputs.
Return one shape tuple otherwise.
"""
input = callBigDlFunc(self.bigdl_type, "getInputShape",
self.value)
return self.__process_shape(input) |
Return a list of shape tuples if there are multiple outputs. Return one shape tuple otherwise. | def get_output_shape(self):
"""
Return a list of shape tuples if there are multiple outputs.
Return one shape tuple otherwise.
"""
output = callBigDlFunc(self.bigdl_type, "getOutputShape",
self.value)
return self.__process_shape(output) |
Get mnist dataset with features and label as ndarray. Data would be downloaded automatically if it doesn t present at the specific location. | def get_mnist(data_type="train", location="/tmp/mnist"):
"""
Get mnist dataset with features and label as ndarray.
Data would be downloaded automatically if it doesn't present at the specific location.
:param data_type: "train" for training data and "test" for testing data.
:param location: Locatio... |
Parse or download movielens 1m data if train_dir is empty. | def read_data_sets(data_dir):
"""
Parse or download movielens 1m data if train_dir is empty.
:param data_dir: The directory storing the movielens data
:return: a 2D numpy array with user index and item index in each row
"""
WHOLE_DATA = 'ml-1m.zip'
local_file = base.maybe_download(WHOLE_D... |
Get and return the jar path for bigdl if exists. | def get_bigdl_classpath():
"""
Get and return the jar path for bigdl if exists.
"""
if os.getenv("BIGDL_CLASSPATH"):
return os.environ["BIGDL_CLASSPATH"]
jar_dir = os.path.abspath(__file__ + "/../../")
jar_paths = glob.glob(os.path.join(jar_dir, "share/lib/*.jar"))
if jar_paths:
... |
Check if spark version is below 2. 2 | def is_spark_below_2_2():
"""
Check if spark version is below 2.2
"""
import pyspark
if(hasattr(pyspark,"version")):
full_version = pyspark.version.__version__
# We only need the general spark version (eg, 1.6, 2.2).
parts = full_version.split(".")
spark_version = par... |
Compare version strings.: param version1 ;: param version2 ;: return: 1 if version1 is after version2 ; - 1 if version1 is before version2 ; 0 if two versions are the same. | def compare_version(version1, version2):
"""
Compare version strings.
:param version1;
:param version2;
:return: 1 if version1 is after version2; -1 if version1 is before version2; 0 if two versions are the same.
"""
v1Arr = version1.split(".")
v2Arr = version2.split(".")
len1 = len(... |
Convert tensorflow model to bigdl model: param input_ops: operation list used for input should be placeholders: param output_ops: operations list used for output: return: bigdl model | def convert(input_ops, output_ops, byte_order, bigdl_type):
"""
Convert tensorflow model to bigdl model
:param input_ops: operation list used for input, should be placeholders
:param output_ops: operations list used for output
:return: bigdl model
"""
input_names = map(lambda x: x.name.spli... |
Export variable tensors from the checkpoint files. | def export_checkpoint(checkpoint_path):
"""
Export variable tensors from the checkpoint files.
:param checkpoint_path: tensorflow checkpoint path
:return: dictionary of tensor. The key is the variable name and the value is the numpy
"""
reader = tf.train.NewCheckpointReader(checkpoint_path)
... |
Save a variable dictionary to a Java object file so it can be read by BigDL | def save_variable_bigdl(tensors, target_path, bigdl_type="float"):
"""
Save a variable dictionary to a Java object file, so it can be read by BigDL
:param tensors: tensor dictionary
:param target_path: where is the Java object file store
:param bigdl_type: model variable numeric type
:return: n... |
Dump a tensorflow model to files. The graph will be dumped to path/ model. pb and the checkpoint will be dumped to path/ model. bin: param path: dump folder path: param sess: if user pass in session we assume that the variable of the graph in the session has been inited: param graph: tensorflow graph. Default use the d... | def dump_model(path, graph=None, sess=None, ckpt_file=None, bigdl_type="float"):
"""
Dump a tensorflow model to files. The graph will be dumped to path/model.pb, and the checkpoint will
be dumped to path/model.bin
:param path: dump folder path
:param sess: if user pass in session, we assume tha... |
Get the variable values from the checkpoint file and merge them to the GraphDef file Args: input_graph: the GraphDef file doesn t contain variable values checkpoint: the checkpoint file output_node_names: A list of string the output names output_graph: String of the location and the name of the output graph | def merge_checkpoint(input_graph,
checkpoint,
output_node_names,
output_graph,
sess):
"""
Get the variable values from the checkpoint file, and merge them to the GraphDef file
Args:
input_graph: the GraphDef file, do... |
Processes batch of utterances and returns corresponding responses batch. | def _call(self, utterances_batch: list, utterances_ids: Optional[list]=None) -> list:
"""
Processes batch of utterances and returns corresponding responses batch.
Each call of Agent passes incoming utterances batch through skills filter,
agent skills, skills processor. Batch of dialog I... |
Expand and tile tensor along given axis | def expand_tile(units, axis):
"""
Expand and tile tensor along given axis
Args:
units: tf tensor with dimensions [batch_size, time_steps, n_input_features]
axis: axis along which expand and tile. Must be 1 or 2
"""
assert axis in (1, 2)
n_time_steps = K.int_shape(units)[1]
... |
Compute additive self attention for time series of vectors ( with batch dimension ) the formula: score ( h_i h_j ) = <v tanh ( W_1 h_i + W_2 h_j ) > v is a learnable vector of n_hidden dimensionality W_1 and W_2 are learnable [ n_hidden n_input_features ] matrices | def additive_self_attention(units, n_hidden=None, n_output_features=None, activation=None):
"""
Compute additive self attention for time series of vectors (with batch dimension)
the formula: score(h_i, h_j) = <v, tanh(W_1 h_i + W_2 h_j)>
v is a learnable vector of n_hidden dimensionality... |
Compute multiplicative self attention for time series of vectors ( with batch dimension ) the formula: score ( h_i h_j ) = <W_1 h_i W_2 h_j > W_1 and W_2 are learnable matrices with dimensionality [ n_hidden n_input_features ] | def multiplicative_self_attention(units, n_hidden=None, n_output_features=None, activation=None):
"""
Compute multiplicative self attention for time series of vectors (with batch dimension)
the formula: score(h_i, h_j) = <W_1 h_i, W_2 h_j>, W_1 and W_2 are learnable matrices
with dimensionality [n_hid... |
Collecting possible continuations of length < = n for every node | def precompute_future_symbols(trie, n, allow_spaces=False):
"""
Collecting possible continuations of length <= n for every node
"""
if n == 0:
return
if trie.is_terminated and trie.precompute_symbols:
# символы уже предпосчитаны
return
for index, final in enumerate(trie.f... |
Сохраняет дерево для дальнейшего использования | def save(self, outfile):
"""
Сохраняет дерево для дальнейшего использования
"""
with open(outfile, "w", encoding="utf8") as fout:
attr_values = [getattr(self, attr) for attr in Trie.ATTRS]
attr_values.append(any(x is not None for x in self.data))
fout.... |
Включает кэширование запросов к descend | def make_cashed(self):
"""
Включает кэширование запросов к descend
"""
self._descendance_cash = [dict() for _ in self.graph]
self.descend = self._descend_cashed |
Добавление строки s в префиксный бор | def add(self, s):
"""
Добавление строки s в префиксный бор
"""
if self.is_terminated:
raise TypeError("Impossible to add string to fitted trie")
if s == "":
self._set_final(self.root)
return
curr = self.root
for i, a in enumerat... |
Возвращает итератор по словам содержащимся в боре | def words(self):
"""
Возвращает итератор по словам, содержащимся в боре
"""
branch, word, indexes = [self.root], [], [0]
letters_with_children = [self._get_children_and_letters(self.root)]
while len(branch) > 0:
if self.is_final(branch[-1]):
yi... |
Находит все разбиения s = s_1... s_m на словарные слова s_1... s_m для m < = max_count | def find_partitions(self, s, max_count=1):
"""
Находит все разбиения s = s_1 ... s_m на словарные слова s_1, ..., s_m
для m <= max_count
"""
curr_agenda = [(self.root, [], 0)]
for i, a in enumerate(s):
next_agenda = []
for curr, borders, cost in cu... |
Добавление ребёнка к вершине parent по символу с кодом code | def _add_empty_child(self, parent, code, final=False):
"""
Добавление ребёнка к вершине parent по символу с кодом code
"""
self.graph[parent][code] = self.nodes_number
self.graph.append(self._make_default_node())
self.data.append(None)
self.final.append(final)
... |
Спуск из вершины curr по строке s | def _descend_simple(self, curr, s):
"""
Спуск из вершины curr по строке s
"""
for a in s:
curr = self.graph[curr][self.alphabet_codes[a]]
if curr == Trie.NO_NODE:
break
return curr |
Спуск из вершины curr по строке s с кэшированием | def _descend_cashed(self, curr, s):
"""
Спуск из вершины curr по строке s с кэшированием
"""
if s == "":
return curr
curr_cash = self._descendance_cash[curr]
answer = curr_cash.get(s, None)
if answer is not None:
return answer
# для... |
Извлекает все метки выходных рёбер вершины с номером index | def _get_letters(self, index, return_indexes=False):
"""
Извлекает все метки выходных рёбер вершины с номером index
"""
if self.dict_storage:
answer = list(self.graph[index].keys())
else:
answer = [i for i, elem in enumerate(self.graph[index])
... |
Извлекает всех потомков вершины с номером index | def _get_children(self, index):
"""
Извлекает всех потомков вершины с номером index
"""
if self.dict_storage:
return list(self.graph[index].values())
else:
return [elem for elem in self.graph[index] if elem != Trie.NO_NODE] |
Обратная топологическая сортировка | def generate_postorder(self, trie):
"""
Обратная топологическая сортировка
"""
order, stack = [], []
stack.append(trie.root)
colors = ['white'] * len(trie)
while len(stack) > 0:
index = stack[-1]
color = colors[index]
if color =... |
Change save and load paths for obtained population save config. json with model config run population via current python executor ( with which evolve. py already run ) and on given devices ( - 1 means CPU other integeres - visible for evolve. py GPUs ) Args: population: list of dictionaries - configs of current populat... | def run_population(population, evolution, gpus):
"""
Change save and load paths for obtained population, save config.json with model config,
run population via current python executor (with which evolve.py already run)
and on given devices (-1 means CPU, other integeres - visible for evolve.py GPUs)
... |
Computes attention vector for each item in inputs: attention vector is a weighted sum of memory items. Dot product between input and memory vector is used as similarity measure. | def dot_attention(inputs, memory, mask, att_size, keep_prob=1.0, scope="dot_attention"):
"""Computes attention vector for each item in inputs:
attention vector is a weighted sum of memory items.
Dot product between input and memory vector is used as similarity measure.
Gate mechanism is applie... |
Simple attention without any conditions. | def simple_attention(memory, att_size, mask, keep_prob=1.0, scope="simple_attention"):
"""Simple attention without any conditions.
Computes weighted sum of memory elements.
"""
with tf.variable_scope(scope):
BS, ML, MH = tf.unstack(tf.shape(memory))
memory_do = tf.nn.dropout(memory, ... |
Computes weighted sum of inputs conditioned on state | def attention(inputs, state, att_size, mask, scope="attention"):
"""Computes weighted sum of inputs conditioned on state"""
with tf.variable_scope(scope):
u = tf.concat([tf.tile(tf.expand_dims(state, axis=1), [1, tf.shape(inputs)[1], 1]), inputs], axis=2)
logits = tf.layers.dense(tf.layers.dense... |
Computes BLEU score of translated segments against one or more references. | def compute_bleu(reference_corpus, translation_corpus, max_order=4,
smooth=False):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of lists of references for each translation. Each
reference should be tokenized into a list of t... |
Returns opened file object for writing dialog logs. | def _get_log_file(self):
"""Returns opened file object for writing dialog logs.
Returns:
log_file: opened Python file object.
"""
log_dir: Path = Path(self.config['log_path']).expanduser().resolve() / self.agent_name
log_dir.mkdir(parents=True, exist_ok=True)
... |
Logs single dialog utterance to current dialog log file. | def _log(self, utterance: Any, direction: str, dialog_id: Optional[Hashable]=None):
"""Logs single dialog utterance to current dialog log file.
Args:
utterance: Dialog utterance.
direction: 'in' or 'out' utterance direction.
dialog_id: Dialog ID.
"""
... |
Wraps _log method for all input utterances. Args: utterance: Dialog utterance. dialog_id: Dialog ID. | def log_in(self, utterance: Any, dialog_id: Optional[Hashable] = None) -> None:
"""Wraps _log method for all input utterances.
Args:
utterance: Dialog utterance.
dialog_id: Dialog ID.
"""
if self.enabled:
self._log(utterance, 'in', dialog_id) |
get summary ops for the magnitude of gradient updates | def summary_gradient_updates(grads, opt, lr):
"""get summary ops for the magnitude of gradient updates"""
# strategy:
# make a dict of variable name -> [variable, grad, adagrad slot]
vars_grads = {}
for v in tf.trainable_variables():
vars_grads[v.name] = [v, None, None]
for g, v in grad... |
Sums values associated with any non - unique indices. Args: values: A Tensor with rank > = 1. indices: A one - dimensional integer Tensor indexing into the first dimension of values ( as in an IndexedSlices object ). Returns: A tuple of ( summed_values unique_indices ) where unique_indices is a de - duplicated version ... | def _deduplicate_indexed_slices(values, indices):
"""Sums `values` associated with any non-unique `indices`.
Args:
values: A `Tensor` with rank >= 1.
indices: A one-dimensional integer `Tensor`, indexing into the first
dimension of `values` (as in an IndexedSlices object).
Returns:
A... |
Dump the trained weights from a model to a HDF5 file. | def dump_weights(tf_save_dir, outfile, options):
"""
Dump the trained weights from a model to a HDF5 file.
"""
def _get_outname(tf_name):
outname = re.sub(':0$', '', tf_name)
outname = outname.lstrip('lm/')
outname = re.sub('/rnn/', '/RNN/', outname)
outname = re.sub('/m... |
Read data by dataset_reader from specified config. | def read_data_by_config(config: dict):
"""Read data by dataset_reader from specified config."""
dataset_config = config.get('dataset', None)
if dataset_config:
config.pop('dataset')
ds_type = dataset_config['type']
if ds_type == 'classification':
reader = {'class_name': ... |
Create iterator ( from config ) for specified data. | def get_iterator_from_config(config: dict, data: dict):
"""Create iterator (from config) for specified data."""
iterator_config = config['dataset_iterator']
iterator: Union[DataLearningIterator, DataFittingIterator] = from_params(iterator_config,
... |
Make training and evaluation of the model described in corresponding configuration file. | def train_evaluate_model_from_config(config: Union[str, Path, dict],
iterator: Union[DataLearningIterator, DataFittingIterator] = None, *,
to_train: bool = True,
evaluation_targets: Optional[Iterable[str]] = N... |
Exchange messages between basic pipelines and the Yandex. Dialogs service. If the pipeline returns multiple values only the first one is forwarded to Yandex. | def interact_alice(agent: Agent):
"""
Exchange messages between basic pipelines and the Yandex.Dialogs service.
If the pipeline returns multiple values, only the first one is forwarded to Yandex.
"""
data = request.get_json()
text = data['request'].get('command', '').strip()
payload = data['... |
Convert labels to one - hot vectors for multi - class multi - label classification | def labels2onehot(labels: [List[str], List[List[str]], np.ndarray], classes: [list, np.ndarray]) -> np.ndarray:
"""
Convert labels to one-hot vectors for multi-class multi-label classification
Args:
labels: list of samples where each sample is a class or a list of classes which sample belongs with... |
Convert vectors of probabilities to labels using confident threshold ( if probability to belong with the class is bigger than confident_threshold sample belongs with the class ; if no probabilities bigger than confident threshold sample belongs with the class with the biggest probability ) | def proba2labels(proba: [list, np.ndarray], confident_threshold: float, classes: [list, np.ndarray]) -> List[List]:
"""
Convert vectors of probabilities to labels using confident threshold
(if probability to belong with the class is bigger than confident_threshold, sample belongs with the class;
if no ... |
Convert vectors of probabilities to one - hot representations using confident threshold | def proba2onehot(proba: [list, np.ndarray], confident_threshold: float, classes: [list, np.ndarray]) -> np.ndarray:
"""
Convert vectors of probabilities to one-hot representations using confident threshold
Args:
proba: samples where each sample is a vector of probabilities to belong with given cla... |
Configure session for particular device | def _config_session():
"""
Configure session for particular device
Returns:
tensorflow.Session
"""
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = '0'
return tf.Session(config=config) |
Process event after epoch Args: event_name: whether event is send after epoch or batch. Set of values: after_epoch after_batch data: event data ( dictionary ) | def process_event(self, event_name: str, data: dict) -> None:
"""
Process event after epoch
Args:
event_name: whether event is send after epoch or batch.
Set of values: ``"after_epoch", "after_batch"``
data: event data (dictionary)
Returns:
... |
Checks existence of the model file loads the model if the file exists | def load(self) -> None:
"""Checks existence of the model file, loads the model if the file exists"""
# Checks presence of the model files
if self.load_path.exists():
path = str(self.load_path.resolve())
log.info('[loading model from {}]'.format(path))
self._n... |
Saves model to the save_path provided in config. The directory is already created by super (). __init__ which is called in __init__ of this class | def save(self) -> None:
"""Saves model to the save_path, provided in config. The directory is
already created by super().__init__, which is called in __init__ of this class"""
path = str(self.save_path.absolute())
log.info('[saving model to {}]'.format(path))
self._net.save(path) |
Trains the model on a single batch. | def train_on_batch(self, *args) -> None:
"""Trains the model on a single batch.
Args:
*args: the list of network inputs.
Last element of `args` is the batch of targets,
all previous elements are training data batches
"""
*data, labels = args
s... |
Extract values of momentum variables from optimizer | def get_momentum_variable(self):
"""
Extract values of momentum variables from optimizer
Returns:
optimizer's `rho` or `beta_1`
"""
optimizer = self.get_optimizer()
if hasattr(optimizer, 'rho'):
return optimizer.rho
elif hasattr(optimizer,... |
Update graph variables setting giving learning_rate and momentum | def _update_graph_variables(self, learning_rate: float = None, momentum: float = None):
"""
Update graph variables setting giving `learning_rate` and `momentum`
Args:
learning_rate: learning rate value to be set in graph (set if not None)
momentum: momentum value to be s... |
Process event after epoch Args: event_name: whether event is send after epoch or batch. Set of values: after_epoch after_batch data: event data ( dictionary ) | def process_event(self, event_name: str, data: dict):
"""
Process event after epoch
Args:
event_name: whether event is send after epoch or batch.
Set of values: ``"after_epoch", "after_batch"``
data: event data (dictionary)
Returns:
... |
Calculates F1 ( binary ) measure. | def round_f1(y_true, y_predicted):
"""
Calculates F1 (binary) measure.
Args:
y_true: list of true values
y_predicted: list of predicted values
Returns:
F1 score
"""
try:
predictions = [np.round(x) for x in y_predicted]
except TypeError:
predictions =... |
Calculates F1 macro measure. | def round_f1_macro(y_true, y_predicted):
"""
Calculates F1 macro measure.
Args:
y_true: list of true values
y_predicted: list of predicted values
Returns:
F1 score
"""
try:
predictions = [np.round(x) for x in y_predicted]
except TypeError:
prediction... |
Converts word to a tuple of symbols optionally converts it to lowercase and adds capitalization label. | def process_word(word: str, to_lower: bool = False,
append_case: Optional[str] = None) -> Tuple[str]:
"""Converts word to a tuple of symbols, optionally converts it to lowercase
and adds capitalization label.
Args:
word: input word
to_lower: whether to lowercase
app... |
Number of convolutional layers stacked on top of each other | def stacked_cnn(units: tf.Tensor,
n_hidden_list: List,
filter_width=3,
use_batch_norm=False,
use_dilation=False,
training_ph=None,
add_l2_losses=False):
""" Number of convolutional layers stacked on top of each other
... |
Densely connected convolutional layers. Based on the paper: [ Gao 17 ] https:// arxiv. org/ abs/ 1608. 06993 | def dense_convolutional_network(units: tf.Tensor,
n_hidden_list: List,
filter_width=3,
use_dilation=False,
use_batch_norm=False,
training_ph=None):
""" Dens... |
Bi directional recurrent neural network. GRU or LSTM | def bi_rnn(units: tf.Tensor,
n_hidden: List,
cell_type='gru',
seq_lengths=None,
trainable_initial_states=False,
use_peepholes=False,
name='Bi-'):
""" Bi directional recurrent neural network. GRU or LSTM
Args:
units: a tensorflow ... |
Stacked recurrent neural networks GRU or LSTM | def stacked_bi_rnn(units: tf.Tensor,
n_hidden_list: List,
cell_type='gru',
seq_lengths=None,
use_peepholes=False,
name='RNN_layer'):
    """ Stacked recurrent neural networks GRU or LSTM
Args:
units: a t... |
Network architecture inspired by One Hundred layer Tiramisu. https:// arxiv. org/ abs/ 1611. 09326. U - Net like. | def u_shape(units: tf.Tensor,
n_hidden_list: List,
filter_width=7,
use_batch_norm=False,
training_ph=None):
""" Network architecture inspired by One Hundred layer Tiramisu.
https://arxiv.org/abs/1611.09326. U-Net like.
Args:
units: a tenso... |
Highway convolutional network. Skip connection with gating mechanism. | def stacked_highway_cnn(units: tf.Tensor,
n_hidden_list: List,
filter_width=3,
use_batch_norm=False,
use_dilation=False,
training_ph=None):
""" Highway convolutional network. Skip connection with ... |
Token embedding layer. Create matrix for token embeddings. Can be initialized with given matrix ( for example pre - trained with word2vec algorithm | def embedding_layer(token_indices=None,
token_embedding_matrix=None,
n_tokens=None,
token_embedding_dim=None,
name: str = None,
trainable=True):
    """ Token embedding layer. Create matrix for token embeddings.
... |
Characters to vector. Every sequence of characters ( token ) is embedded to vector space with dimensionality char_embedding_dim Convolution plus max_pooling is used to obtain vector representations of words. | def character_embedding_network(char_placeholder: tf.Tensor,
n_characters: int = None,
emb_mat: np.array = None,
char_embedding_dim: int = None,
filter_widths=(3, 4, 5, 7),
... |
Expand and tile tensor along given axis Args: units: tf tensor with dimensions [ batch_size time_steps n_input_features ] axis: axis along which expand and tile. Must be 1 or 2 | def expand_tile(units, axis):
"""Expand and tile tensor along given axis
Args:
units: tf tensor with dimensions [batch_size, time_steps, n_input_features]
axis: axis along which expand and tile. Must be 1 or 2
"""
assert axis in (1, 2)
n_time_steps = tf.shape(units)[1]
repetitio... |
Computes additive self attention for time series of vectors ( with batch dimension ) the formula: score ( h_i h_j ) = <v tanh ( W_1 h_i + W_2 h_j ) > v is a learnable vector of n_hidden dimensionality W_1 and W_2 are learnable [ n_hidden n_input_features ] matrices | def additive_self_attention(units, n_hidden=None, n_output_features=None, activation=None):
""" Computes additive self attention for time series of vectors (with batch dimension)
the formula: score(h_i, h_j) = <v, tanh(W_1 h_i + W_2 h_j)>
v is a learnable vector of n_hidden dimensionality,
W... |
Computes multiplicative self attention for time series of vectors ( with batch dimension ) the formula: score ( h_i h_j ) = <W_1 h_i W_2 h_j > W_1 and W_2 are learnable matrices with dimensionality [ n_hidden n_input_features ] where <a b > stands for a and b dot product | def multiplicative_self_attention(units, n_hidden=None, n_output_features=None, activation=None):
""" Computes multiplicative self attention for time series of vectors (with batch dimension)
the formula: score(h_i, h_j) = <W_1 h_i, W_2 h_j>, W_1 and W_2 are learnable matrices
with dimensionality [... |
Fast CuDNN GRU implementation | def cudnn_gru(units, n_hidden, n_layers=1, trainable_initial_states=False,
seq_lengths=None, input_initial_h=None, name='cudnn_gru', reuse=False):
""" Fast CuDNN GRU implementation
Args:
units: tf.Tensor with dimensions [B x T x F], where
B - batch size
T - number ... |
CuDNN Compatible GRU implementation. It should be used to load models saved with CudnnGRUCell to run on CPU. | def cudnn_compatible_gru(units, n_hidden, n_layers=1, trainable_initial_states=False,
seq_lengths=None, input_initial_h=None, name='cudnn_gru', reuse=False):
""" CuDNN Compatible GRU implementation.
It should be used to load models saved with CudnnGRUCell to run on CPU.
Arg... |
Fast CuDNN LSTM implementation | def cudnn_lstm(units, n_hidden, n_layers=1, trainable_initial_states=None, seq_lengths=None, initial_h=None,
initial_c=None, name='cudnn_lstm', reuse=False):
""" Fast CuDNN LSTM implementation
Args:
units: tf.Tensor with dimensions [B x T x F], where
B - batch siz... |
CuDNN Compatible LSTM implementation. It should be used to load models saved with CudnnLSTMCell to run on CPU. | def cudnn_compatible_lstm(units, n_hidden, n_layers=1, trainable_initial_states=None, seq_lengths=None, initial_h=None,
initial_c=None, name='cudnn_lstm', reuse=False):
""" CuDNN Compatible LSTM implementation.
It should be used to load models saved with CudnnLSTMCell to run on CPU... |
Fast CuDNN Bi - GRU implementation | def cudnn_bi_gru(units,
n_hidden,
seq_lengths=None,
n_layers=1,
trainable_initial_states=False,
name='cudnn_bi_gru',
reuse=False):
""" Fast CuDNN Bi-GRU implementation
Args:
units: tf.Tensor with dimen... |
Fast CuDNN Bi - LSTM implementation | def cudnn_bi_lstm(units,
n_hidden,
seq_lengths=None,
n_layers=1,
trainable_initial_states=False,
name='cudnn_bi_gru',
reuse=False):
""" Fast CuDNN Bi-LSTM implementation
Args:
units: tf.Tensor wi... |
Fast CuDNN Stacked Bi - GRU implementation | def cudnn_stacked_bi_gru(units,
n_hidden,
seq_lengths=None,
n_stacks=2,
keep_prob=1.0,
concat_stacked_outputs=False,
trainable_initial_states=False,
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.