_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q266900
get_activation_by_name
test
def get_activation_by_name(activation_name, activation_id=None):
    """
    Convert an activation name string to the corresponding BigDL layer.

    :param activation_name: case-insensitive activation name, e.g. "relu"
    :param activation_id: optional name to assign to the created layer
    :return: a bigdl.nn.layer activation layer
    :raises Exception: if the activation name is not supported
    """
    import bigdl.nn.layer as BLayer
    activation = None
    activation_name = activation_name.lower()
    if activation_name == "tanh":
        activation = BLayer.Tanh()
    elif activation_name == "sigmoid":
        activation = BLayer.Sigmoid()
    elif activation_name == "hard_sigmoid":
        activation = BLayer.HardSigmoid()
    elif activation_name == "relu":
        activation = BLayer.ReLU()
    elif activation_name == "softmax":
        activation = BLayer.SoftMax()
    elif activation_name == "softplus":
        activation = BLayer.SoftPlus(beta=1.0)
    elif activation_name == "softsign":
        activation = BLayer.SoftSign()
    elif activation_name == "linear":
        activation = BLayer.Identity()
    else:
        raise Exception("Unsupported activation type: %s" % activation_name)
    # Bug fix: the original tested `if not activation_id`, which only called
    # set_name when NO id was supplied (and then passed None to it).
    if activation_id:
        activation.set_name(activation_id)
    return activation
python
{ "resource": "" }
q266901
JTensor.from_ndarray
test
def from_ndarray(cls, a_ndarray, bigdl_type="float"):
    """
    Convert a ndarray to a DenseTensor which would be used in Java side.

    >>> import numpy as np
    >>> from bigdl.util.common import JTensor
    >>> from bigdl.util.common import callBigDlFunc
    >>> np.random.seed(123)
    >>> data = np.random.uniform(0, 1, (2, 3)).astype("float32")
    >>> result = JTensor.from_ndarray(data)
    >>> expected_storage = np.array([[0.69646919, 0.28613934, 0.22685145], [0.55131477, 0.71946895, 0.42310646]])
    >>> expected_shape = np.array([2, 3])
    >>> np.testing.assert_allclose(result.storage, expected_storage, rtol=1e-6, atol=1e-6)
    >>> np.testing.assert_allclose(result.shape, expected_shape)
    >>> data_back = result.to_ndarray()
    >>> (data == data_back).all()
    True
    >>> tensor1 = callBigDlFunc("float", "testTensor", JTensor.from_ndarray(data))  # noqa
    >>> array_from_tensor = tensor1.to_ndarray()
    >>> (array_from_tensor == data).all()
    True
    """
    if a_ndarray is None:
        return None
    assert isinstance(a_ndarray, np.ndarray), \
        "input should be a np.ndarray, not %s" % type(a_ndarray)
    # Bug fix: for a 0-d array `shape` is () (falsy) and the original fell
    # back to `(a_ndarray.size)` -- a plain int, not a tuple.  Use a 1-tuple.
    shape = a_ndarray.shape if a_ndarray.shape else (a_ndarray.size,)
    return cls(a_ndarray, shape, bigdl_type)
python
{ "resource": "" }
q266902
ImageFeature.get_label
test
def get_label(self):
    """
    Fetch the label of this ImageFeature as a numpy ndarray.
    """
    jlabel = callBigDlFunc(self.bigdl_type, "imageFeatureToLabelTensor", self.value)
    return jlabel.to_ndarray()
python
{ "resource": "" }
q266903
ImageFrame.read_parquet
test
def read_parquet(cls, path, sc, bigdl_type="float"):
    """
    Load a parquet file from `path` as a DistributedImageFrame.
    """
    jvalue = callBigDlFunc(bigdl_type, "readParquet", path, sc)
    return DistributedImageFrame(jvalue=jvalue)
python
{ "resource": "" }
q266904
ImageFrame.write_parquet
test
def write_parquet(cls, path, output, sc, partition_num = 1, bigdl_type="float"):
    """
    Persist an ImageFrame to `output` as a parquet file.
    """
    result = callBigDlFunc(bigdl_type, "writeParquet",
                           path, output, sc, partition_num)
    return result
python
{ "resource": "" }
q266905
ImageFrame.get_image
test
def get_image(self, float_key="floats", to_chw=True):
    """
    Retrieve the images by delegating to the wrapped local/distributed frame.
    """
    frame = self.image_frame
    return frame.get_image(float_key, to_chw)
python
{ "resource": "" }
q266906
LocalImageFrame.get_image
test
def get_image(self, float_key="floats", to_chw=True):
    """
    Retrieve the image list of a local ImageFrame as an iterable of ndarrays.
    """
    jtensors = callBigDlFunc(self.bigdl_type, "localImageFrameToImageTensor",
                             self.value, float_key, to_chw)
    # Keep `map` (lazy in Python 3) to mirror the original return type.
    return map(lambda jt: jt.to_ndarray(), jtensors)
python
{ "resource": "" }
q266907
DistributedImageFrame.get_label
test
def get_label(self):
    """
    Retrieve the labels of a distributed ImageFrame as an RDD of ndarrays.
    """
    jtensor_rdd = callBigDlFunc(self.bigdl_type,
                                "distributedImageFrameToLabelTensorRdd",
                                self.value)
    return jtensor_rdd.map(lambda jt: jt.to_ndarray())
python
{ "resource": "" }
q266908
DistributedImageFrame.get_predict
test
def get_predict(self, key="predict"):
    """
    Retrieve the prediction RDD of a distributed ImageFrame.

    Each element is a (uri, prediction ndarray) pair; the prediction is
    None when the feature carries no tensor under `key`.
    """
    predicts = callBigDlFunc(self.bigdl_type, "distributedImageFrameToPredict",
                             self.value, key)

    def _unpack(pair):
        uri, tensor = pair[0], pair[1]
        return (uri, tensor.to_ndarray()) if tensor else (uri, None)

    return predicts.map(_unpack)
python
{ "resource": "" }
q266909
KerasModelWrapper.predict
test
def predict(self, x, batch_size=None, verbose=None, is_distributed=False):
    """Generates output predictions for the input samples,
    processing the samples in a batched way.

    # Arguments
    x: the input data, as a Numpy array or list of Numpy array for local mode.
        as RDD[Sample] for distributed mode
    is_distributed: used to control run in local or cluster. the default value is False
    # Returns
    A Numpy array or RDD[Sample] of predictions.
    """
    if batch_size or verbose:
        raise Exception("we don't support batch_size or verbose for now")
    if is_distributed:
        if isinstance(x, np.ndarray):
            # Dummy zero labels: only the features matter for prediction.
            data_rdd = to_sample_rdd(x, np.zeros([x.shape[0]]))
        elif isinstance(x, RDD):
            data_rdd = x
        else:
            # Bug fix: the original fell through with its local variable
            # unbound here, raising NameError instead of a clear message.
            raise Exception("not supported type: %s" % x)
        return self.bmodel.predict(data_rdd)
    else:
        if isinstance(x, np.ndarray):
            return self.bmodel.predict_local(x)
        raise Exception("not supported type: %s" % x)
python
{ "resource": "" }
q266910
KerasModelWrapper.fit
test
def fit(self, x, y=None, batch_size=32, nb_epoch=10, verbose=1, callbacks=None,
        validation_split=0., validation_data=None, shuffle=True,
        class_weight=None, sample_weight=None, initial_epoch=0, is_distributed=False):
    """Optimize the model with the given options.

    :param x: ndarray or list of ndarray for local mode; RDD[Sample] for
        distributed mode (in which case y must be None)
    :param y: ndarray or list of ndarray for local mode; None for cluster mode
    :param is_distributed: whether to train in local or distributed mode
        (default False)
    :return: A Numpy array or RDD[Sample] of predictions.
    """
    if callbacks:
        raise Exception("We don't support callbacks in fit for now")
    # Reject every Keras option the BigDL backend cannot honour yet.
    for arg_name, rejected in (("class_weight", class_weight),
                               ("sample_weight", sample_weight)):
        if rejected:
            unsupport_exp(arg_name)
    if initial_epoch != 0:
        unsupport_exp("initial_epoch")
    if shuffle != True:  # noqa: E712 -- deliberate, matches only the literal True
        unsupport_exp("shuffle")
    if validation_split != 0.:
        unsupport_exp("validation_split")
    bopt = self.__create_optimizer(x=x, y=y, batch_size=batch_size,
                                   nb_epoch=nb_epoch,
                                   validation_data=validation_data,
                                   is_distributed=is_distributed)
    bopt.optimize()
python
{ "resource": "" }
q266911
DLImageTransformer.transform
test
def transform(self, dataset):
    """
    Apply the transformer to the images in "inputCol" and store the
    transformed result into "outputCols".
    """
    # Sync python-side params to the JVM before invoking the transform.
    self._transfer_params_to_java()
    return callBigDlFunc(self.bigdl_type, "dlImageTransform",
                         self.value, dataset)
python
{ "resource": "" }
q266912
save_keras_definition
test
def save_keras_definition(keras_model, path):
    """
    Serialize a Keras model's architecture to a JSON file at `path`.
    """
    definition = keras_model.to_json()
    with open(path, "w") as fp:
        fp.write(definition)
python
{ "resource": "" }
q266913
build_keras_model
test
def build_keras_model():
    """
    Define the convnet model of this example in Keras 1.2.2.

    NOTE(review): relies on a module-level `input_shape` variable defined
    elsewhere in the script -- confirm it is set before calling.
    """
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Activation, Flatten
    from keras.layers import Convolution2D, MaxPooling2D
    model = Sequential()
    stack = [
        Convolution2D(32, 3, 3, border_mode='valid', input_shape=input_shape),
        Activation('relu'),
        Convolution2D(32, 3, 3),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Flatten(),
        Dense(128),
        Activation('relu'),
        Dropout(0.5),
        Dense(10),
        Activation('softmax'),
    ]
    for layer in stack:
        model.add(layer)
    return model
python
{ "resource": "" }
q266914
Layer.predict_class_distributed
test
def predict_class_distributed(self, data_rdd):
    """
    Module predict returning the predicted label.

    :param data_rdd: the data to be predicted.
    :return: An RDD representing the predicted labels.
    """
    return callBigDlFunc(self.bigdl_type, "modelPredictClass",
                         self.value, data_rdd)
python
{ "resource": "" }
q266915
Layer.set_weights
test
def set_weights(self, weights):
    """
    Set weights for this layer

    :param weights: a list of numpy arrays which represent weight and bias
    :return:

    >>> linear = Linear(3,2)
    creating: createLinear
    >>> linear.set_weights([np.array([[1,2,3],[4,5,6]]), np.array([7,8])])
    >>> weights = linear.get_weights()
    >>> weights[0].shape == (2,3)
    True
    >>> np.testing.assert_allclose(weights[0][0], np.array([1., 2., 3.]))
    >>> np.testing.assert_allclose(weights[1], np.array([7., 8.]))
    >>> relu = ReLU()
    creating: createReLU
    >>> from py4j.protocol import Py4JJavaError
    >>> try:
    ...     relu.set_weights([np.array([[1,2,3],[4,5,6]]), np.array([7,8])])
    ... except Py4JJavaError as err:
    ...     print(err.java_exception)
    ...
    java.lang.IllegalArgumentException: requirement failed: this layer does not have weight/bias
    >>> relu.get_weights()
    The layer does not have weight/bias
    >>> add = Add(2)
    creating: createAdd
    >>> try:
    ...     add.set_weights([np.array([7,8]), np.array([1,2])])
    ... except Py4JJavaError as err:
    ...     print(err.java_exception)
    ...
    java.lang.IllegalArgumentException: requirement failed: the number of input weight/bias is not consistant with number of weight/bias of this layer, number of input 1, number of output 2
    >>> cAdd = CAdd([4, 1])
    creating: createCAdd
    >>> cAdd.set_weights(np.ones([4, 1]))
    >>> (cAdd.get_weights()[0] == np.ones([4, 1])).all()
    True
    """
    # Wrap each numpy array in a JTensor before handing them to the JVM side;
    # a single ndarray argument is normalized to a one-element list first.
    tensors = [JTensor.from_ndarray(param, self.bigdl_type) for param in to_list(weights)]
    callBigDlFunc(self.bigdl_type, "setWeights", self.value, tensors)
python
{ "resource": "" }
q266916
Layer.get_weights
test
def get_weights(self):
    """
    Get this layer's weight and bias.

    :return: list of numpy ndarrays (weight and bias), or None when the
        layer has no parameters.
    """
    jtensors = callBigDlFunc(self.bigdl_type, "getWeights", self.value)
    if jtensors is None:
        print("The layer does not have weight/bias")
        return None
    return [jt.to_ndarray() for jt in jtensors]
python
{ "resource": "" }
q266917
Layer.save_tensorflow
test
def save_tensorflow(self, inputs, path, byte_order="little_endian", data_format="nhwc"):
    """
    Export this model as TensorFlow protobuf files usable for inference.

    Placeholders described by `inputs` are added to the tf graph as input
    nodes, because a BigDL model carries no placeholder information of its
    own.  The entries must be in the same order as the graph model's inputs.

    :param inputs: placeholder information -- an array of
        (input_name, shape) tuples where input_name is a string and shape
        an array of integers
    :param path: the path to save to
    :param byte_order: model byte order
    :param data_format: model data format, "nhwc" or "nchw"
    """
    callBigDlFunc(self.bigdl_type, "saveTF", self.value,
                  inputs, path, byte_order, data_format)
python
{ "resource": "" }
q266918
Layer.training
test
def training(self, is_training=True):
    """
    Put this layer in training mode, or in prediction mode when
    is_training=False.  Returns self for chaining.
    """
    mode_switch = self.value.training if is_training else self.value.evaluate
    callJavaFunc(mode_switch)
    return self
python
{ "resource": "" }
q266919
Model.load_torch
test
def load_torch(path, bigdl_type="float"):
    """
    Load a pre-trained Torch model.

    :param path: The path containing the pre-trained model.
    :return: A pre-trained model.
    """
    jmodel = callBigDlFunc(bigdl_type, "loadTorch", path)
    model = Layer.of(jmodel)
    return model
python
{ "resource": "" }
q266920
Model.load_keras
test
def load_keras(json_path=None, hdf5_path=None, by_name=False):
    """
    Load a pre-trained Keras model.

    Exactly one of three loading modes is used, depending on which paths
    are given: definition only (json), definition + weights (json + hdf5),
    or a self-contained hdf5 holding both architecture and weights.

    :param json_path: The json path containing the keras model definition.
    :param hdf5_path: The HDF5 path containing the pre-trained keras model
        weights with or without the model architecture.
    :param by_name: forwarded to the weight loader when both paths are given.
    :return: A bigdl model.
    """
    import os
    # Pick a Keras backend: prefer tensorflow; fall back to theano by
    # setting KERAS_BACKEND before anything imports keras.
    try:
        import tensorflow
    except ImportError:
        os.environ['KERAS_BACKEND'] = "theano"
        try:
            # Make theano backend compatible with Python3
            from theano import ifelse
        except ImportError:
            raise Exception("No backend is found for Keras. "
                            "Please install either tensorflow or theano.")
    from bigdl.keras.converter import DefinitionLoader, WeightLoader
    if json_path and not hdf5_path:
        return DefinitionLoader.from_json_path(json_path)
    elif json_path and hdf5_path:
        return WeightLoader.load_weights_from_json_hdf5(json_path, hdf5_path, by_name=by_name)
    elif hdf5_path and not json_path:
        kmodel, bmodel = DefinitionLoader.from_hdf5_path(hdf5_path)
        WeightLoader.load_weights_from_kmodel(bmodel, kmodel)
        return bmodel
    # NOTE(review): when neither path is given this falls through and
    # returns None -- confirm whether an explicit error would be preferable.
python
{ "resource": "" }
q266921
Criterion.of
test
def of(cls, jcriterion, bigdl_type="float"):
    """
    Wrap a Py4J java criterion object in a python Criterion.

    :param jcriterion: A java criterion object which created by Py4j
    :return: a criterion.
    """
    wrapper = Criterion(bigdl_type, jcriterion)
    wrapper.value = jcriterion
    wrapper.bigdl_type = bigdl_type
    return wrapper
python
{ "resource": "" }
q266922
WeightLoader.load_weights_from_json_hdf5
test
def load_weights_from_json_hdf5(def_json, weights_hdf5, by_name=False):
    """
    Build a bigdl model from a JSON definition and load HDF5 weights into it.

    The file path can be stored in a local file system, HDFS, S3,
    or any Hadoop-supported file system.
    """
    bmodel = DefinitionLoader.from_json_path(def_json)
    # A parallel Keras model is needed so weights can be mapped by layer.
    kmodel = model_from_json(BCommon.text_from_path(def_json))
    WeightLoader.load_weights_from_hdf5(bmodel, kmodel, weights_hdf5, by_name)
    return bmodel
python
{ "resource": "" }
q266923
load_imdb
test
def load_imdb():
    """
    Load the IMDB dataset via Keras, padding every review to length 100.

    :return: (X_train, y_train, X_test, y_test) as numpy arrays.
        (The original docstring claimed an RDD of Sample was produced,
        but this function only returns the padded arrays.)
    """
    from keras.preprocessing import sequence
    from keras.datasets import imdb
    # Keep only the 20000 most frequent words.
    (X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=20000)
    X_train = sequence.pad_sequences(X_train, maxlen=100)
    X_test = sequence.pad_sequences(X_test, maxlen=100)
    return X_train, y_train, X_test, y_test
python
{ "resource": "" }
q266924
build_keras_model
test
def build_keras_model():
    """
    Define the recurrent convolutional model of this example in Keras 1.2.2.
    """
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Activation
    from keras.layers import Embedding
    from keras.layers import LSTM
    from keras.layers import Convolution1D, MaxPooling1D
    model = Sequential()
    stack = [
        Embedding(20000, 128, input_length=100),
        Dropout(0.25),
        Convolution1D(nb_filter=64, filter_length=5, border_mode='valid',
                      activation='relu', subsample_length=1),
        MaxPooling1D(pool_length=4),
        LSTM(70),
        Dense(1),
        Activation('sigmoid'),
    ]
    for layer in stack:
        model.add(layer)
    return model
python
{ "resource": "" }
q266925
InferShape.get_input_shape
test
def get_input_shape(self):
    """
    Return the input shape: a list of shape tuples when there are
    multiple inputs, a single shape tuple otherwise.
    """
    raw_shape = callBigDlFunc(self.bigdl_type, "getInputShape", self.value)
    return self.__process_shape(raw_shape)
python
{ "resource": "" }
q266926
InferShape.get_output_shape
test
def get_output_shape(self):
    """
    Return the output shape: a list of shape tuples when there are
    multiple outputs, a single shape tuple otherwise.
    """
    raw_shape = callBigDlFunc(self.bigdl_type, "getOutputShape", self.value)
    return self.__process_shape(raw_shape)
python
{ "resource": "" }
q266927
get_mnist
test
def get_mnist(data_type="train", location="/tmp/mnist"):
    """
    Get the mnist dataset with features and label as ndarray.
    Data is downloaded automatically when not present at `location`.

    :param data_type: "train" for training data and "test" for testing data.
    :param location: Location to store mnist dataset.
    :return: (features: ndarray, label: ndarray)
    """
    images, labels = mnist.read_data_sets(location, data_type)
    # Shift labels to 1-based indexing as expected downstream.
    return images, labels + 1
python
{ "resource": "" }
q266928
read_data_sets
test
def read_data_sets(data_dir):
    """
    Parse or download movielens 1m data if train_dir is empty.

    :param data_dir: The directory storing the movielens data
    :return: a 2D numpy array with user index and item index in each row
    """
    WHOLE_DATA = 'ml-1m.zip'
    local_file = base.maybe_download(WHOLE_DATA, data_dir, SOURCE_URL + WHOLE_DATA)
    extracted_to = os.path.join(data_dir, "ml-1m")
    if not os.path.exists(extracted_to):
        print("Extracting %s to %s" % (local_file, data_dir))
        # Bug fix: the original opened the zip unconditionally and only
        # closed it on this branch, leaking the handle when the data was
        # already extracted; a context manager closes it in all cases.
        with zipfile.ZipFile(local_file, 'r') as zip_ref:
            zip_ref.extractall(data_dir)
    rating_files = os.path.join(extracted_to, "ratings.dat")
    # Bug fix: the ratings file handle was never closed in the original.
    with open(rating_files, "r") as f:
        rating_list = [line.strip().split("::") for line in f]
    movielens_data = np.array(rating_list).astype(int)
    return movielens_data
python
{ "resource": "" }
q266929
get_bigdl_classpath
test
def get_bigdl_classpath():
    """
    Get and return the jar path for bigdl if exists.

    The BIGDL_CLASSPATH environment variable takes precedence over the
    jar bundled under share/lib.
    """
    env_path = os.getenv("BIGDL_CLASSPATH")
    if env_path:
        return env_path
    jar_dir = os.path.abspath(__file__ + "/../../")
    jars = glob.glob(os.path.join(jar_dir, "share/lib/*.jar"))
    if not jars:
        return ""
    assert len(jars) == 1, "Expecting one jar: %s" % len(jars)
    return jars[0]
python
{ "resource": "" }
q266930
is_spark_below_2_2
test
def is_spark_below_2_2():
    """
    Check if the installed spark version is below 2.2.

    Returns True when the version is older than 2.2 or cannot be determined.
    """
    import pyspark
    if hasattr(pyspark, "version"):
        # Only the major.minor part matters for the comparison (eg, 1.6, 2.2).
        major, minor = pyspark.version.__version__.split(".")[:2]
        if compare_version(major + "." + minor, "2.2") >= 0:
            return False
    return True
python
{ "resource": "" }
q266931
export_checkpoint
test
def export_checkpoint(checkpoint_path):
    """
    Export variable tensors from the checkpoint files.

    :param checkpoint_path: tensorflow checkpoint path
    :return: dictionary of tensors. The key is the variable name and the
        value is the numpy value
    """
    reader = tf.train.NewCheckpointReader(checkpoint_path)
    # Skip the training bookkeeping variable; export everything else.
    return {name: reader.get_tensor(name)
            for name in reader.get_variable_to_shape_map()
            if name != 'global_step'}
python
{ "resource": "" }
q266932
save_variable_bigdl
test
def save_variable_bigdl(tensors, target_path, bigdl_type="float"):
    """
    Save a variable dictionary to a Java object file, so it can be read by BigDL.

    :param tensors: tensor dictionary
    :param target_path: where the Java object file is stored
    :param bigdl_type: model variable numeric type
    :return: nothing
    """
    import numpy as np
    jtensors = {}
    for name, value in tensors.items():
        arr = value if isinstance(value, np.ndarray) else np.array(value)
        jtensors[name] = JTensor.from_ndarray(arr)
    callBigDlFunc(bigdl_type, "saveTensorDictionary", jtensors, target_path)
python
{ "resource": "" }
q266933
expand_tile
test
def expand_tile(units, axis):
    """
    Expand and tile a tensor along the given axis.

    Args:
        units: tf tensor with dimensions [batch_size, time_steps, n_input_features]
        axis: axis along which to expand and tile. Must be 1 or 2
    """
    assert axis in (1, 2)
    static_shape = K.int_shape(units)
    n_time_steps = static_shape[1]
    repetitions = [1, 1, 1, 1]
    repetitions[axis] = n_time_steps
    if axis == 1:
        target = (1,) + static_shape[1:]
    else:
        target = static_shape[1:2] + (1,) + static_shape[2:]
    expanded = Reshape(target_shape=target)(units)
    return K.tile(expanded, repetitions)
python
{ "resource": "" }
q266934
precompute_future_symbols
test
def precompute_future_symbols(trie, n, allow_spaces=False):
    """
    Collecting possible continuations of length <= n for every node
    """
    if n == 0:
        return
    if trie.is_terminated and trie.precompute_symbols:
        # symbols have already been precomputed
        return
    # Reset per-node data: one set of reachable symbols per lookahead depth.
    for index, final in enumerate(trie.final):
        trie.data[index] = [set() for i in range(n)]
    # Depth 0: the letters available directly from each node.
    for index, (node_data, final) in enumerate(zip(trie.data, trie.final)):
        node_data[0] = set(trie._get_letters(index))
        if allow_spaces and final:
            node_data[0].add(" ")
    # Depth d: union of children's depth d-1 sets (dynamic programming).
    for d in range(1, n):
        for index, (node_data, final) in enumerate(zip(trie.data, trie.final)):
            children = set(trie._get_children(index))
            for child in children:
                node_data[d] |= trie.data[child][d - 1]
            # in case a space is allowed to return to the start state
            if allow_spaces and final:
                node_data[d] |= trie.data[trie.root][d - 1]
    # NOTE(review): the early-exit above reads `trie.is_terminated` but this
    # sets `trie.terminated` -- possibly inconsistent attribute names; confirm
    # against the Trie class definition.
    trie.terminated = True
python
{ "resource": "" }
q266935
simple_attention
test
def simple_attention(memory, att_size, mask, keep_prob=1.0, scope="simple_attention"):
    """Simple attention without any conditions.
    Computes a weighted sum of the memory elements.
    """
    with tf.variable_scope(scope):
        batch, mem_len, mem_hidden = tf.unstack(tf.shape(memory))
        # Dropout noise is shared across the time dimension.
        dropped = tf.nn.dropout(memory, keep_prob=keep_prob,
                                noise_shape=[batch, 1, mem_hidden])
        hidden = tf.layers.dense(dropped, att_size, activation=tf.nn.tanh)
        logits = tf.layers.dense(hidden, 1, use_bias=False)
        logits = softmax_mask(tf.squeeze(logits, [2]), mask)
        weights = tf.expand_dims(tf.nn.softmax(logits), axis=2)
        return tf.reduce_sum(weights * memory, axis=1)
python
{ "resource": "" }
q266936
attention
test
def attention(inputs, state, att_size, mask, scope="attention"):
    """Computes a weighted sum of inputs conditioned on state."""
    with tf.variable_scope(scope):
        # Broadcast the state over the time axis and concatenate with inputs.
        tiled_state = tf.tile(tf.expand_dims(state, axis=1),
                              [1, tf.shape(inputs)[1], 1])
        u = tf.concat([tiled_state, inputs], axis=2)
        hidden = tf.layers.dense(u, att_size, activation=tf.nn.tanh)
        logits = tf.layers.dense(hidden, 1, use_bias=False)
        logits = softmax_mask(tf.squeeze(logits, [2]), mask)
        weights = tf.expand_dims(tf.nn.softmax(logits), axis=2)
        res = tf.reduce_sum(weights * inputs, axis=1)
        return res, logits
python
{ "resource": "" }
q266937
compute_bleu
test
def compute_bleu(reference_corpus, translation_corpus, max_order=4, smooth=False):
    """Computes BLEU score of translated segments against one or more references.

    Args:
        reference_corpus: list of lists of references for each translation. Each
            reference should be tokenized into a list of tokens.
        translation_corpus: list of translations to score. Each translation
            should be tokenized into a list of tokens.
        max_order: Maximum n-gram order to use when computing BLEU score.
        smooth: Whether or not to apply Lin et al. 2004 smoothing.

    Returns:
        3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram
        precisions and brevity penalty.
    """
    matches_by_order = [0] * max_order
    possible_matches_by_order = [0] * max_order
    reference_length = 0
    translation_length = 0
    for (references, translation) in zip(reference_corpus, translation_corpus):
        # Brevity penalty uses the closest (shortest) reference length.
        reference_length += min(len(r) for r in references)
        translation_length += len(translation)
        # Clip counts against the per-ngram maximum over all references.
        merged_ref_ngram_counts = collections.Counter()
        for reference in references:
            merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
        translation_ngram_counts = _get_ngrams(translation, max_order)
        overlap = translation_ngram_counts & merged_ref_ngram_counts
        for ngram in overlap:
            matches_by_order[len(ngram)-1] += overlap[ngram]
        for order in range(1, max_order+1):
            possible_matches = len(translation) - order + 1
            if possible_matches > 0:
                possible_matches_by_order[order-1] += possible_matches
    precisions = [0] * max_order
    for i in range(0, max_order):
        if smooth:
            # Add-one smoothing keeps zero-count orders from zeroing the score.
            precisions[i] = ((matches_by_order[i] + 1.) /
                             (possible_matches_by_order[i] + 1.))
        else:
            if possible_matches_by_order[i] > 0:
                precisions[i] = (float(matches_by_order[i]) /
                                 possible_matches_by_order[i])
            else:
                precisions[i] = 0.0
    if min(precisions) > 0:
        p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
        geo_mean = math.exp(p_log_sum)
    else:
        geo_mean = 0
    # NOTE(review): an empty translation corpus makes reference_length 0 and
    # this division raises ZeroDivisionError -- confirm callers never pass one.
    ratio = float(translation_length) / reference_length
    if ratio > 1.0:
        bp = 1.
    else:
        bp = math.exp(1 - 1. / ratio)
    bleu = geo_mean * bp
    return (bleu, precisions, bp, ratio, translation_length, reference_length)
python
{ "resource": "" }
q266938
DialogLogger._get_log_file
test
def _get_log_file(self): """Returns opened file object for writing dialog logs. Returns: log_file: opened Python file object. """ log_dir: Path = Path(self.config['log_path']).expanduser().resolve() / self.agent_name log_dir.mkdir(parents=True, exist_ok=True) log_file_path = Path(log_dir, f'{self._get_timestamp_utc_str()}_{self.agent_name}.log') log_file = open(log_file_path, 'a', buffering=1, encoding='utf8') return log_file
python
{ "resource": "" }
q266939
DialogLogger._log
test
def _log(self, utterance: Any, direction: str, dialog_id: Optional[Hashable]=None): """Logs single dialog utterance to current dialog log file. Args: utterance: Dialog utterance. direction: 'in' or 'out' utterance direction. dialog_id: Dialog ID. """ if isinstance(utterance, str): pass elif isinstance(utterance, RichMessage): utterance = utterance.json() elif isinstance(utterance, (list, dict)): utterance = jsonify_data(utterance) else: utterance = str(utterance) dialog_id = str(dialog_id) if not isinstance(dialog_id, str) else dialog_id if self.log_file.tell() >= self.log_max_size * 1024: self.log_file.close() self.log_file = self._get_log_file() else: try: log_msg = {} log_msg['timestamp'] = self._get_timestamp_utc_str() log_msg['dialog_id'] = dialog_id log_msg['direction'] = direction log_msg['message'] = utterance log_str = json.dumps(log_msg, ensure_ascii=self.config['ensure_ascii']) self.log_file.write(f'{log_str}\n') except IOError: log.error('Failed to write dialog log.')
python
{ "resource": "" }
q266940
summary_gradient_updates
test
def summary_gradient_updates(grads, opt, lr):
    """Build summary ops for the relative magnitude of gradient updates.

    For each trainable variable with a gradient, emits a scalar summary
    'UPDATE/<name>' of ||update|| / ||variable||, where the update is
    lr * grad scaled by the adagrad accumulator slot when present.
    """
    # variable name -> [variable, gradient, adagrad accumulator slot]
    slots = {}
    for v in tf.trainable_variables():
        slots[v.name] = [v, None, None]
    for g, v in grads:
        slots[v.name][1] = g
        slots[v.name][2] = opt.get_slot(v, 'accumulator')
    summaries = []
    for vname, (v, g, a) in slots.items():
        if g is None:
            continue
        if isinstance(g, tf.IndexedSlices):
            # Sparse gradient: only measure the rows that are actually updated.
            updates = lr * g.values
            if a is not None:
                updates /= tf.sqrt(tf.gather(a, g.indices))
        else:
            updates = lr * g
            if a is not None:
                updates /= tf.sqrt(a)
        values_norm = tf.sqrt(tf.reduce_sum(v * v)) + 1.0e-7
        updates_norm = tf.sqrt(tf.reduce_sum(updates * updates))
        summaries.append(tf.summary.scalar('UPDATE/' + vname.replace(":", "_"),
                                           updates_norm / values_norm))
    return summaries
python
{ "resource": "" }
q266941
dump_weights
test
def dump_weights(tf_save_dir, outfile, options):
    """
    Dump the trained weights from a model to a HDF5 file.

    Restores the latest checkpoint under `tf_save_dir` into a freshly built
    graph and writes every trainable variable (except softmax weights) into
    `outfile`, renaming each variable via _get_outname.
    """
    def _get_outname(tf_name):
        # Map tensorflow variable names to the naming scheme expected in
        # the HDF5 file (RNN/MultiRNNCell/Cell*/LSTMCell/W_0, B, W_P_0...).
        outname = re.sub(':0$', '', tf_name)
        # NOTE(review): lstrip('lm/') strips any run of the characters
        # 'l', 'm', '/' from the left, not just the 'lm/' prefix -- a name
        # like 'lm/lstm...' loses the leading 'l' of 'lstm'.  Left as-is
        # because downstream loaders may depend on the produced names;
        # confirm before changing to a prefix removal.
        outname = outname.lstrip('lm/')
        outname = re.sub('/rnn/', '/RNN/', outname)
        outname = re.sub('/multi_rnn_cell/', '/MultiRNNCell/', outname)
        outname = re.sub('/cell_', '/Cell', outname)
        outname = re.sub('/lstm_cell/', '/LSTMCell/', outname)
        if '/RNN/' in outname:
            if 'projection' in outname:
                outname = re.sub('projection/kernel', 'W_P_0', outname)
            else:
                outname = re.sub('/kernel', '/W_0', outname)
        outname = re.sub('/bias', '/B', outname)
        return outname

    ckpt_file = tf.train.latest_checkpoint(tf_save_dir)
    config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Graph().as_default():
        with tf.Session(config=config) as sess:
            with tf.variable_scope('lm'):
                LanguageModel(options, False)  # Create graph
                # we use the "Saver" class to load the variables
                loader = tf.train.Saver()
                loader.restore(sess, ckpt_file)
            with h5py.File(outfile, 'w') as fout:
                for v in tf.trainable_variables():
                    if v.name.find('softmax') >= 0:
                        # don't dump these
                        continue
                    outname = _get_outname(v.name)
                    shape = v.get_shape().as_list()
                    dset = fout.create_dataset(outname, shape, dtype='float32')
                    values = sess.run([v])[0]
                    dset[...] = values
python
{ "resource": "" }
q266942
read_data_by_config
test
def read_data_by_config(config: dict):
    """Read data by dataset_reader from specified config."""
    dataset_config = config.get('dataset', None)
    if dataset_config:
        # Legacy 'dataset' section: expand it into reader/iterator sections.
        config.pop('dataset')
        ds_type = dataset_config['type']
        if ds_type != 'classification':
            raise Exception("Unsupported dataset type: {}".format(ds_type))
        config['dataset_reader'] = {**dataset_config,
                                    'class_name': 'basic_classification_reader'}
        config['dataset_iterator'] = {**dataset_config,
                                      'class_name': 'basic_classification_iterator'}
    try:
        reader_config = dict(config['dataset_reader'])
    except KeyError:
        raise ConfigError("No dataset reader is provided in the JSON config.")
    reader = get_model(reader_config.pop('class_name'))()
    data_path = reader_config.pop('data_path', '')
    if isinstance(data_path, list):
        data_path = [expand_path(x) for x in data_path]
    else:
        data_path = expand_path(data_path)
    return reader.read(data_path, **reader_config)
python
{ "resource": "" }
q266943
train_evaluate_model_from_config
test
def train_evaluate_model_from_config(config: Union[str, Path, dict],
                                     iterator: Union[DataLearningIterator, DataFittingIterator] = None, *,
                                     to_train: bool = True,
                                     evaluation_targets: Optional[Iterable[str]] = None,
                                     to_validate: Optional[bool] = None,
                                     download: bool = False,
                                     start_epoch_num: Optional[int] = None,
                                     recursive: bool = False) -> Dict[str, Dict[str, float]]:
    """Make training and evaluation of the model described in corresponding configuration file."""
    config = parse_config(config)

    if download:
        deep_download(config)

    if to_train and recursive:
        for subconfig in get_all_elems_from_json(config['chainer'], 'config_path'):
            log.info(f'Training "{subconfig}"')
            train_evaluate_model_from_config(subconfig, download=False, recursive=True)

    import_packages(config.get('metadata', {}).get('imports', []))

    if iterator is None:
        try:
            data = read_data_by_config(config)
        except ConfigError as e:
            to_train = False
            log.warning(f'Skipping training. {e.message}')
        else:
            iterator = get_iterator_from_config(config, data)

    if 'train' not in config:
        log.warning('Train config is missing. Populating with default values')
    # Bug fix: the original used `config.get('train')`, which yields None when
    # the key is missing (despite the warning above claiming defaults are
    # used) and then crashes on the first subscript below.
    train_config = config.get('train', {})

    if start_epoch_num is not None:
        train_config['start_epoch_num'] = start_epoch_num

    if 'evaluation_targets' not in train_config and ('validate_best' in train_config
                                                     or 'test_best' in train_config):
        log.warning('"validate_best" and "test_best" parameters are deprecated.'
                    ' Please, use "evaluation_targets" list instead')
        train_config['evaluation_targets'] = []
        if train_config.pop('validate_best', True):
            train_config['evaluation_targets'].append('valid')
        if train_config.pop('test_best', True):
            train_config['evaluation_targets'].append('test')

    trainer_class = get_model(train_config.pop('class_name', 'nn_trainer'))
    trainer = trainer_class(config['chainer'], **train_config)

    if to_train:
        trainer.train(iterator)

    res = {}
    if iterator is not None:
        if to_validate is not None:
            if evaluation_targets is None:
                log.warning('"to_validate" parameter is deprecated and will be removed in future versions.'
                            ' Please, use "evaluation_targets" list instead')
                evaluation_targets = ['test']
                if to_validate:
                    evaluation_targets.append('valid')
            else:
                # `log.warn` is deprecated in the stdlib; use `warning`.
                log.warning('Both "evaluation_targets" and "to_validate" parameters are specified.'
                            ' "to_validate" is deprecated and will be ignored')
        res = trainer.evaluate(iterator, evaluation_targets, print_reports=True)
        trainer.get_chainer().destroy()
    res = {k: v['metrics'] for k, v in res.items()}
    return res
python
{ "resource": "" }
q266944
interact_alice
test
def interact_alice(agent: Agent):
    """ Exchange messages between basic pipelines and the Yandex.Dialogs service.
    If the pipeline returns multiple values, only the first one is forwarded to Yandex.
    """
    request_body = request.get_json()

    text = request_body['request'].get('command', '').strip()
    payload = request_body['request'].get('payload')

    session = request_body['session']
    session_id = session['session_id']
    user_id = session['user_id']
    message_id = session['message_id']

    dialog_id = DialogID(user_id, session_id)

    # Skeleton of the Yandex.Dialogs protocol answer; `text` is filled below.
    response = {
        'response': {
            'end_session': True,
            'text': ''
        },
        "session": {
            'session_id': session_id,
            'message_id': message_id,
            'user_id': user_id
        },
        'version': '1.0'
    }

    # The payload (button press data) takes precedence over the typed command.
    agent_response: Union[str, RichMessage] = agent([payload or text], [dialog_id])[0]
    if isinstance(agent_response, RichMessage):
        plain_parts = [part['content'] for part in agent_response.json()
                       if part['type'] == 'plain_text']
        response['response']['text'] = '\n'.join(plain_parts)
    else:
        response['response']['text'] = str(agent_response)

    return jsonify(response), 200
python
{ "resource": "" }
q266945
labels2onehot
test
def labels2onehot(labels: [List[str], List[List[str]], np.ndarray],
                  classes: [list, np.ndarray]) -> np.ndarray:
    """
    Convert labels to one-hot vectors for multi-class multi-label classification

    Args:
        labels: list of samples where each sample is a class or a list of classes
            which sample belongs with
        classes: array of classes' names (assumed unique)

    Returns:
        2d array with one-hot representation of given samples
    """
    n_classes = len(classes)
    # Build the class index once; the original re-created ``np.array(classes)``
    # and scanned it for every single label inside the loop.
    class_to_idx = {c: i for i, c in enumerate(classes)}
    y = []
    for sample in labels:
        curr = np.zeros(n_classes)
        if isinstance(sample, list):
            for intent in sample:
                idx = class_to_idx.get(intent)
                if idx is None:
                    log.warning('Unknown intent {} detected. Assigning no class'.format(intent))
                else:
                    curr[idx] = 1
        else:
            # single-label sample: unknown classes silently yield a zero row,
            # matching the original behavior
            idx = class_to_idx.get(sample)
            if idx is not None:
                curr[idx] = 1
        y.append(curr)
    return np.asarray(y)
python
{ "resource": "" }
q266946
proba2onehot
test
def proba2onehot(proba: [list, np.ndarray],
                 confident_threshold: float,
                 classes: [list, np.ndarray]) -> np.ndarray:
    """
    Convert vectors of probabilities to one-hot representations using confident threshold

    Args:
        proba: samples where each sample is a vector of probabilities to belong with given classes
        confident_threshold: boundary of probability to belong with a class
        classes: array of classes' names

    Returns:
        2d array with one-hot representation of given samples
    """
    # First threshold the probabilities into label sets, then one-hot encode them.
    predicted_labels = proba2labels(proba, confident_threshold, classes)
    return labels2onehot(predicted_labels, classes)
python
{ "resource": "" }
q266947
KerasModel._config_session
test
def _config_session():
    """
    Configure session for particular device

    Returns:
        tensorflow.Session
    """
    session_config = tf.ConfigProto()
    # Grow GPU memory usage on demand instead of grabbing it all at once,
    # and restrict the process to the first visible GPU.
    session_config.gpu_options.allow_growth = True
    session_config.gpu_options.visible_device_list = '0'
    return tf.Session(config=session_config)
python
{ "resource": "" }
q266948
KerasWrapper.load
test
def load(self) -> None:
    """Checks existence of the model file, loads the model if the file exists"""
    # Nothing to do when no saved model is present.
    if not self.load_path.exists():
        return
    weights_path = str(self.load_path.resolve())
    log.info('[loading model from {}]'.format(weights_path))
    self._net.load(weights_path)
python
{ "resource": "" }
q266949
LRScheduledKerasModel.get_momentum_variable
test
def get_momentum_variable(self):
    """
    Extract values of momentum variables from optimizer

    Returns:
        optimizer's `rho` or `beta_1`
    """
    optimizer = self.get_optimizer()
    # Different Keras optimizers store their momentum-like coefficient under
    # different attribute names: `rho` (e.g. RMSprop) or `beta_1` (e.g. Adam).
    for attr_name in ('rho', 'beta_1'):
        if hasattr(optimizer, attr_name):
            return getattr(optimizer, attr_name)
    return None
python
{ "resource": "" }
q266950
LRScheduledKerasModel._update_graph_variables
test
def _update_graph_variables(self, learning_rate: float = None, momentum: float = None): """ Update graph variables setting giving `learning_rate` and `momentum` Args: learning_rate: learning rate value to be set in graph (set if not None) momentum: momentum value to be set in graph (set if not None) Returns: None """ if learning_rate is not None: K.set_value(self.get_learning_rate_variable(), learning_rate) # log.info(f"Learning rate = {learning_rate}") if momentum is not None: K.set_value(self.get_momentum_variable(), momentum)
python
{ "resource": "" }
q266951
round_f1_macro
test
def round_f1_macro(y_true, y_predicted):
    """
    Calculates F1 macro measure.

    Args:
        y_true: list of true values
        y_predicted: list of predicted values

    Returns:
        F1 score
    """
    try:
        rounded_predictions = [np.round(value) for value in y_predicted]
    except TypeError:
        # Predictions are not numeric (e.g. already labels) -- use them as-is.
        rounded_predictions = y_predicted
    return f1_score(np.array(y_true), np.array(rounded_predictions), average="macro")
python
{ "resource": "" }
q266952
process_word
test
def process_word(word: str, to_lower: bool = False,
                 append_case: Optional[str] = None) -> Tuple[str]:
    """Converts word to a tuple of symbols, optionally converts it to lowercase
    and adds capitalization label.

    Args:
        word: input word
        to_lower: whether to lowercase
        append_case: whether to add case mark
            ('<FIRST_UPPER>' for first capital and '<ALL_UPPER>' for all caps)

    Returns:
        a preprocessed word (the empty string yields an empty tuple)
    """
    if all(x.isupper() for x in word) and len(word) > 1:
        uppercase = "<ALL_UPPER>"
    # BUGFIX: guard against the empty string -- the original `word[0]`
    # raised IndexError on empty input.
    elif word and word[0].isupper():
        uppercase = "<FIRST_UPPER>"
    else:
        uppercase = None
    if to_lower:
        word = word.lower()
    if word.isdigit():
        answer = ["<DIGIT>"]
    elif word.startswith("http://") or word.startswith("www."):
        answer = ["<HTTP>"]
    else:
        answer = list(word)
    # The case mark is only attached when lowercasing destroyed the
    # original case information.
    if to_lower and uppercase is not None:
        if append_case == "first":
            answer = [uppercase] + answer
        elif append_case == "last":
            answer = answer + [uppercase]
    return tuple(answer)
python
{ "resource": "" }
q266953
stacked_cnn
test
def stacked_cnn(units: tf.Tensor,
                n_hidden_list: List,
                filter_width=3,
                use_batch_norm=False,
                use_dilation=False,
                training_ph=None,
                add_l2_losses=False):
    """ Stack of 1-D convolutional layers applied one after another.

        Args:
            units: a tensorflow tensor with dimensionality [None, n_tokens, n_features]
            n_hidden_list: number of output channels for every stacked layer
            filter_width: kernel width in tokens
            use_batch_norm: insert batch normalization between convolution and ReLU
            use_dilation: use power-of-2 dilation scheme [1, 2, 4, 8, ...] per layer
            training_ph: boolean placeholder with the training-phase flag,
                required only when batch normalization is used
            add_l2_losses: whether to register l2 losses on the kernels in
                tf.GraphKeys.REGULARIZATION_LOSSES

        Returns:
            units: tensor at the output of the last convolutional layer
    """
    kernel_regularizer = tf.nn.l2_loss if add_l2_losses else None
    for layer_index, layer_size in enumerate(n_hidden_list):
        dilation = 2 ** layer_index if use_dilation else 1
        units = tf.layers.conv1d(units,
                                 layer_size,
                                 filter_width,
                                 padding='same',
                                 dilation_rate=dilation,
                                 kernel_initializer=INITIALIZER(),
                                 kernel_regularizer=kernel_regularizer)
        if use_batch_norm:
            # batch norm needs to know whether we are training or predicting
            assert training_ph is not None
            units = tf.layers.batch_normalization(units, training=training_ph)
        units = tf.nn.relu(units)
    return units
python
{ "resource": "" }
q266954
bi_rnn
test
def bi_rnn(units: tf.Tensor,
           n_hidden: List,
           cell_type='gru',
           seq_lengths=None,
           trainable_initial_states=False,
           use_peepholes=False,
           name='Bi-'):
    """ Bi directional recurrent neural network. GRU or LSTM

        Args:
            units: a tensorflow tensor with dimensionality [None, n_tokens, n_features]
            n_hidden: number of hidden units in each direction
            seq_lengths: length of sequences for different length sequences in batch
                can be None for maximum length as a length for every sample in the batch
            cell_type: 'lstm' or 'gru'
            trainable_initial_states: whether to create a special trainable variable
                to initialize the hidden states of the network or use just zeros
            use_peepholes: whether to use peephole connections (only 'lstm' case affected)
            name: what variable_scope to use for the network parameters

        Returns:
            (rnn_output_fw, rnn_output_bw): forward and backward output sequences,
                each with dimensionality [B x T x H]
            (fw, bw): final states of the forward and backward cells; a hidden-state
                tensor per direction for GRU and an LSTMStateTuple per direction for LSTM
    """
    with tf.variable_scope(name + '_' + cell_type.upper()):
        if cell_type == 'gru':
            forward_cell = tf.nn.rnn_cell.GRUCell(n_hidden, kernel_initializer=INITIALIZER())
            backward_cell = tf.nn.rnn_cell.GRUCell(n_hidden, kernel_initializer=INITIALIZER())
            if trainable_initial_states:
                # learn a single [1, n_hidden] state and broadcast it over the batch
                initial_state_fw = tf.tile(tf.get_variable('init_fw_h', [1, n_hidden]), (tf.shape(units)[0], 1))
                initial_state_bw = tf.tile(tf.get_variable('init_bw_h', [1, n_hidden]), (tf.shape(units)[0], 1))
            else:
                initial_state_fw = initial_state_bw = None
        elif cell_type == 'lstm':
            forward_cell = tf.nn.rnn_cell.LSTMCell(n_hidden, use_peepholes=use_peepholes, initializer=INITIALIZER())
            backward_cell = tf.nn.rnn_cell.LSTMCell(n_hidden, use_peepholes=use_peepholes, initializer=INITIALIZER())
            if trainable_initial_states:
                # LSTM state is a (cell, hidden) pair; tile each part over the batch
                initial_state_fw = tf.nn.rnn_cell.LSTMStateTuple(
                    tf.tile(tf.get_variable('init_fw_c', [1, n_hidden]), (tf.shape(units)[0], 1)),
                    tf.tile(tf.get_variable('init_fw_h', [1, n_hidden]), (tf.shape(units)[0], 1)))
                initial_state_bw = tf.nn.rnn_cell.LSTMStateTuple(
                    tf.tile(tf.get_variable('init_bw_c', [1, n_hidden]), (tf.shape(units)[0], 1)),
                    tf.tile(tf.get_variable('init_bw_h', [1, n_hidden]), (tf.shape(units)[0], 1)))
            else:
                initial_state_fw = initial_state_bw = None
        else:
            raise RuntimeError('cell_type must be either "gru" or "lstm"s')
        (rnn_output_fw, rnn_output_bw), (fw, bw) = \
            tf.nn.bidirectional_dynamic_rnn(forward_cell,
                                            backward_cell,
                                            units,
                                            dtype=tf.float32,
                                            sequence_length=seq_lengths,
                                            initial_state_fw=initial_state_fw,
                                            initial_state_bw=initial_state_bw)
        # register l2 regularization on the recurrent kernel matrices only
        # (biases and initial states are excluded by the 'kernel' name filter)
        kernels = [var for var in forward_cell.trainable_variables + backward_cell.trainable_variables
                   if 'kernel' in var.name]
        for kernel in kernels:
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, tf.nn.l2_loss(kernel))
        return (rnn_output_fw, rnn_output_bw), (fw, bw)
python
{ "resource": "" }
q266955
stacked_bi_rnn
test
def stacked_bi_rnn(units: tf.Tensor,
                   n_hidden_list: List,
                   cell_type='gru',
                   seq_lengths=None,
                   use_peepholes=False,
                   name='RNN_layer'):
    """ Stacked recurrent neural networks GRU or LSTM

        Args:
            units: a tensorflow tensor with dimensionality [None, n_tokens, n_features]
            n_hidden_list: list with number of hidden units at the ouput of each layer
            seq_lengths: length of sequences for different length sequences in batch
                can be None for maximum length as a length for every sample in the batch
            cell_type: 'lstm' or 'gru'
            use_peepholes: whether to use peephole connections (only 'lstm' case affected)
            name: what variable_scope to use for the network parameters

        Returns:
            units: tensor at the output of the last recurrent layer
                with dimensionality [None, n_tokens, 2 * n_hidden_list[-1]]
            last_units: tensor of last hidden states for GRU and tuple of last hidden
                states and last cell states for LSTM; both concatenate the two
                directions, giving shape [B x 2 * H]
    """
    # Each stacked layer gets its own variable scope '<name>_<n>'; the layer
    # output (fw/bw concatenation) becomes the input of the next layer.
    for n, n_hidden in enumerate(n_hidden_list):
        with tf.variable_scope(name + '_' + str(n)):
            if cell_type == 'gru':
                forward_cell = tf.nn.rnn_cell.GRUCell(n_hidden)
                backward_cell = tf.nn.rnn_cell.GRUCell(n_hidden)
            elif cell_type == 'lstm':
                forward_cell = tf.nn.rnn_cell.LSTMCell(n_hidden, use_peepholes=use_peepholes)
                backward_cell = tf.nn.rnn_cell.LSTMCell(n_hidden, use_peepholes=use_peepholes)
            else:
                raise RuntimeError('cell_type must be either gru or lstm')
            (rnn_output_fw, rnn_output_bw), (fw, bw) = \
                tf.nn.bidirectional_dynamic_rnn(forward_cell,
                                                backward_cell,
                                                units,
                                                dtype=tf.float32,
                                                sequence_length=seq_lengths)
            # concatenate the two directions along the feature axis
            units = tf.concat([rnn_output_fw, rnn_output_bw], axis=2)
            if cell_type == 'gru':
                last_units = tf.concat([fw, bw], axis=1)
            else:
                # LSTMStateTuple is ordered (cell_state, hidden_state)
                (c_fw, h_fw), (c_bw, h_bw) = fw, bw
                c = tf.concat([c_fw, c_bw], axis=1)
                h = tf.concat([h_fw, h_bw], axis=1)
                last_units = (h, c)
    # `last_units` from the final stacked layer is returned
    return units, last_units
python
{ "resource": "" }
q266956
stacked_highway_cnn
test
def stacked_highway_cnn(units: tf.Tensor,
                        n_hidden_list: List,
                        filter_width=3,
                        use_batch_norm=False,
                        use_dilation=False,
                        training_ph=None):
    """ Highway convolutional network: every layer mixes its convolutional
        transform with a (possibly projected) skip connection via a sigmoid gate.

        Args:
            units: a tensorflow tensor with dimensionality [None, n_tokens, n_features]
            n_hidden_list: number of output channels for every stacked layer
            filter_width: kernel width in tokens
            use_batch_norm: insert batch normalization after each convolution
            use_dilation: use power-of-2 dilation scheme [1, 2, 4, 8, ...] per layer
            training_ph: boolean placeholder with the training-phase flag,
                used only by batch normalization

        Returns:
            units: tensor at the output of the last layer
                with dimensionality [None, n_tokens, n_hidden_list[-1]]
    """
    for layer_index, layer_size in enumerate(n_hidden_list):
        carry = units
        # Project the skip branch when its feature dimension does not match
        if carry.get_shape().as_list()[-1] != layer_size:
            carry = tf.layers.dense(carry, layer_size)
        dilation = 2 ** layer_index if use_dilation else 1
        transform = tf.layers.conv1d(units,
                                     layer_size,
                                     filter_width,
                                     padding='same',
                                     dilation_rate=dilation,
                                     kernel_initializer=INITIALIZER())
        if use_batch_norm:
            transform = tf.layers.batch_normalization(transform, training=training_ph)
        # gate in [0, 1] decides how much of the skip branch to keep
        gate = tf.layers.dense(carry, 1, activation=tf.sigmoid, kernel_initializer=INITIALIZER())
        units = tf.nn.relu(gate * carry + (1 - gate) * transform)
    return units
python
{ "resource": "" }
q266957
embedding_layer
test
def embedding_layer(token_indices=None,
                    token_embedding_matrix=None,
                    n_tokens=None,
                    token_embedding_dim=None,
                    name: str = None,
                    trainable=True):
    """ Token embedding layer. Create matrix of for token embeddings.
        Can be initialized with given matrix (for example pre-trained
        with word2ve algorithm

    Args:
        token_indices: token indices tensor of type tf.int32
        token_embedding_matrix: matrix of embeddings with dimensionality
            [n_tokens, embeddings_dimension]
        n_tokens: total number of unique tokens
        token_embedding_dim: dimensionality of embeddings, typical 100..300
        name: embedding matrix name (variable name)
        trainable: whether to set the matrix trainable or not

    Returns:
        embedded_tokens: tf tensor of size [B, T, E], where B - batch size
            T - number of tokens, E - token_embedding_dim
    """
    import warnings  # local import: only needed for the trainable-matrix warning

    if token_embedding_matrix is not None:
        tok_mat = token_embedding_matrix
        if trainable:
            # BUGFIX: the original code instantiated a bare ``Warning(...)``
            # object and discarded it, so the message was never emitted.
            warnings.warn('Matrix of embeddings is passed to the embedding_layer, '
                          'possibly there is a pre-trained embedding matrix. '
                          'Embeddings parameters are set to Trainable!')
    else:
        # random init scaled by 1/sqrt(dim) to keep activations in range
        tok_mat = np.random.randn(n_tokens, token_embedding_dim).astype(np.float32) / np.sqrt(token_embedding_dim)
    tok_emb_mat = tf.Variable(tok_mat, name=name, trainable=trainable)
    embedded_tokens = tf.nn.embedding_lookup(tok_emb_mat, token_indices)
    return embedded_tokens
python
{ "resource": "" }
q266958
cudnn_gru
test
def cudnn_gru(units, n_hidden, n_layers=1, trainable_initial_states=False,
              seq_lengths=None, input_initial_h=None, name='cudnn_gru', reuse=False):
    """ Fast CuDNN GRU implementation

    Args:
        units: tf.Tensor with dimensions [B x T x F], where
            B - batch size
            T - number of tokens
            F - features
        n_hidden: dimensionality of hidden state
        trainable_initial_states: whether to create a special trainable variable
            to initialize the hidden states of the network or use just zeros
        seq_lengths: tensor of sequence lengths with dimension [B]
        n_layers: number of layers
        input_initial_h: initial hidden state, tensor
        name: name of the variable scope to use
        reuse: whether to reuse already initialized variable

    Returns:
        h - all hidden states along T dimension,
            tf.Tensor with dimensionality [B x T x F]
        h_last - last hidden state, tf.Tensor with dimensionality [B x H]
    """
    with tf.variable_scope(name, reuse=reuse):
        gru = tf.contrib.cudnn_rnn.CudnnGRU(num_layers=n_layers, num_units=n_hidden)

        if trainable_initial_states:
            # learn a single [n_layers, 1, n_hidden] state, tiled over the batch
            init_h = tf.get_variable('init_h', [n_layers, 1, n_hidden])
            init_h = tf.tile(init_h, (1, tf.shape(units)[0], 1))
        else:
            init_h = tf.zeros([n_layers, tf.shape(units)[0], n_hidden])

        # BUGFIX: `input_initial_h or init_h` evaluated bool() on a tf.Tensor,
        # which raises TypeError whenever an initial state tensor is passed.
        initial_h = init_h if input_initial_h is None else input_initial_h

        # CudnnGRU expects time-major input [T x B x F]
        h, h_last = gru(tf.transpose(units, (1, 0, 2)), (initial_h, ))
        h = tf.transpose(h, (1, 0, 2))
        h_last = tf.squeeze(h_last, axis=0)[-1]  # extract last layer state

        # Extract the state at the true last time step when lengths are provided
        if seq_lengths is not None:
            indices = tf.stack([tf.range(tf.shape(h)[0]), seq_lengths - 1], axis=1)
            h_last = tf.gather_nd(h, indices)

        return h, h_last
python
{ "resource": "" }
q266959
cudnn_compatible_gru
test
def cudnn_compatible_gru(units, n_hidden, n_layers=1, trainable_initial_states=False,
                         seq_lengths=None, input_initial_h=None, name='cudnn_gru', reuse=False):
    """ CuDNN Compatible GRU implementation.
        It should be used to load models saved with CudnnGRUCell to run on CPU.

        Args:
            units: tf.Tensor with dimensions [B x T x F], where
                B - batch size
                T - number of tokens
                F - features
            n_hidden: dimensionality of hidden state
            trainable_initial_states: whether to create a special trainable variable
                to initialize the hidden states of the network or use just zeros
            seq_lengths: tensor of sequence lengths with dimension [B]
            n_layers: number of layers
            input_initial_h: initial hidden state, tensor
            name: name of the variable scope to use
            reuse: whether to reuse already initialized variable

        Returns:
            h - all hidden states along T dimension,
                tf.Tensor with dimensionality [B x T x F]
            h_last - last hidden state, tf.Tensor with dimensionality [B x H]
    """
    with tf.variable_scope(name, reuse=reuse):
        if trainable_initial_states:
            # learn a single [n_layers, 1, n_hidden] state, tiled over the batch
            init_h = tf.get_variable('init_h', [n_layers, 1, n_hidden])
            init_h = tf.tile(init_h, (1, tf.shape(units)[0], 1))
        else:
            init_h = tf.zeros([n_layers, tf.shape(units)[0], n_hidden])

        # NOTE(review): `or` calls bool() on `input_initial_h`; for a tf.Tensor
        # that raises TypeError, so this only works when input_initial_h is None.
        # Should probably be `init_h if input_initial_h is None else input_initial_h`
        # (see the non-compatible cudnn_gru) -- confirm before changing.
        initial_h = input_initial_h or init_h

        with tf.variable_scope('cudnn_gru', reuse=reuse):

            def single_cell():
                return tf.contrib.cudnn_rnn.CudnnCompatibleGRUCell(n_hidden)

            cell = tf.nn.rnn_cell.MultiRNNCell([single_cell() for _ in range(n_layers)])

            # dynamic_rnn is run time-major here: [T x B x F]
            units = tf.transpose(units, (1, 0, 2))

            h, h_last = tf.nn.dynamic_rnn(cell=cell, inputs=units, time_major=True,
                                          initial_state=tuple(tf.unstack(initial_h, axis=0)))
            h = tf.transpose(h, (1, 0, 2))
            # h_last is a tuple with one state per layer; keep the top layer's state
            h_last = h_last[-1]

        # Extract the state at the true last time step when lengths are provided
        if seq_lengths is not None:
            indices = tf.stack([tf.range(tf.shape(h)[0]), seq_lengths - 1], axis=1)
            h_last = tf.gather_nd(h, indices)

        return h, h_last
python
{ "resource": "" }
q266960
cudnn_lstm
test
def cudnn_lstm(units, n_hidden, n_layers=1, trainable_initial_states=None, seq_lengths=None,
               initial_h=None, initial_c=None, name='cudnn_lstm', reuse=False):
    """ Fast CuDNN LSTM implementation

    Args:
        units: tf.Tensor with dimensions [B x T x F], where
            B - batch size
            T - number of tokens
            F - features
        n_hidden: dimensionality of hidden state
        n_layers: number of layers
        trainable_initial_states: whether to create a special trainable variable
            to initialize the hidden states of the network or use just zeros
        seq_lengths: tensor of sequence lengths with dimension [B]
        initial_h: optional initial hidden state, masks trainable_initial_states
            if provided
        initial_c: optional initial cell state, masks trainable_initial_states
            if provided
        name: name of the variable scope to use
        reuse: whether to reuse already initialized variable

    Returns:
        h - all hidden states along T dimension,
            tf.Tensor with dimensionality [B x T x F]
        h_last - last hidden state, tf.Tensor with dimensionality [B x H]
            where H - number of hidden units
        c_last - last cell state, tf.Tensor with dimensionality [B x H]
            where H - number of hidden units
    """
    with tf.variable_scope(name, reuse=reuse):
        lstm = tf.contrib.cudnn_rnn.CudnnLSTM(num_layers=n_layers, num_units=n_hidden)

        if trainable_initial_states:
            # learn single [n_layers, 1, n_hidden] states, tiled over the batch
            init_h = tf.get_variable('init_h', [n_layers, 1, n_hidden])
            init_h = tf.tile(init_h, (1, tf.shape(units)[0], 1))
            init_c = tf.get_variable('init_c', [n_layers, 1, n_hidden])
            init_c = tf.tile(init_c, (1, tf.shape(units)[0], 1))
        else:
            init_h = init_c = tf.zeros([n_layers, tf.shape(units)[0], n_hidden])

        # BUGFIX: `initial_h or init_h` evaluated bool() on a tf.Tensor, which
        # raises TypeError whenever an initial state tensor is actually passed.
        initial_h = init_h if initial_h is None else initial_h
        initial_c = init_c if initial_c is None else initial_c

        # CudnnLSTM expects time-major input [T x B x F]
        h, (h_last, c_last) = lstm(tf.transpose(units, (1, 0, 2)), (initial_h, initial_c))
        h = tf.transpose(h, (1, 0, 2))
        h_last = h_last[-1]
        c_last = c_last[-1]

        # Extract the state at the true last time step when lengths are provided
        if seq_lengths is not None:
            indices = tf.stack([tf.range(tf.shape(h)[0]), seq_lengths - 1], axis=1)
            h_last = tf.gather_nd(h, indices)

        return h, (h_last, c_last)
python
{ "resource": "" }
q266961
cudnn_compatible_lstm
test
def cudnn_compatible_lstm(units, n_hidden, n_layers=1, trainable_initial_states=None,
                          seq_lengths=None, initial_h=None, initial_c=None,
                          name='cudnn_lstm', reuse=False):
    """ CuDNN Compatible LSTM implementation.
        It should be used to load models saved with CudnnLSTMCell to run on CPU.

        Args:
            units: tf.Tensor with dimensions [B x T x F], where
                B - batch size
                T - number of tokens
                F - features
            n_hidden: dimensionality of hidden state
            n_layers: number of layers
            trainable_initial_states: whether to create a special trainable variable
                to initialize the hidden states of the network or use just zeros
            seq_lengths: tensor of sequence lengths with dimension [B]
            initial_h: optional initial hidden state, masks trainable_initial_states
                if provided
            initial_c: optional initial cell state, masks trainable_initial_states
                if provided
            name: name of the variable scope to use
            reuse: whether to reuse already initialized variable

        Returns:
            h - all hidden states along T dimension,
                tf.Tensor with dimensionality [B x T x F]
            h_last - last hidden state, tf.Tensor with dimensionality [B x H]
                where H - number of hidden units
            c_last - last cell state, tf.Tensor with dimensionality [B x H]
                where H - number of hidden units
    """
    with tf.variable_scope(name, reuse=reuse):
        if trainable_initial_states:
            # learn single [n_layers, 1, n_hidden] states, tiled over the batch
            init_h = tf.get_variable('init_h', [n_layers, 1, n_hidden])
            init_h = tf.tile(init_h, (1, tf.shape(units)[0], 1))
            init_c = tf.get_variable('init_c', [n_layers, 1, n_hidden])
            init_c = tf.tile(init_c, (1, tf.shape(units)[0], 1))
        else:
            init_h = init_c = tf.zeros([n_layers, tf.shape(units)[0], n_hidden])

        # NOTE(review): `or` calls bool() on the tensors; for a tf.Tensor that
        # raises TypeError, so these lines only work when initial_h/initial_c
        # are None. Should probably use explicit `is None` checks (see
        # cudnn_lstm) -- confirm before changing.
        initial_h = initial_h or init_h
        initial_c = initial_c or init_c

        with tf.variable_scope('cudnn_lstm', reuse=reuse):

            def single_cell():
                return tf.contrib.cudnn_rnn.CudnnCompatibleLSTMCell(n_hidden)

            cell = tf.nn.rnn_cell.MultiRNNCell([single_cell() for _ in range(n_layers)])

            # dynamic_rnn is run time-major here: [T x B x F]
            units = tf.transpose(units, (1, 0, 2))

            # per-layer LSTMStateTuple expects (cell_state, hidden_state) order
            init = tuple([tf.nn.rnn_cell.LSTMStateTuple(ic, ih)
                          for ih, ic in zip(tf.unstack(initial_h, axis=0),
                                            tf.unstack(initial_c, axis=0))])

            h, state = tf.nn.dynamic_rnn(cell=cell, inputs=units, time_major=True,
                                         initial_state=init)
            h = tf.transpose(h, (1, 0, 2))
            # keep only the top layer's final states
            h_last = state[-1].h
            c_last = state[-1].c

        # Extract the state at the true last time step when lengths are provided
        if seq_lengths is not None:
            indices = tf.stack([tf.range(tf.shape(h)[0]), seq_lengths - 1], axis=1)
            h_last = tf.gather_nd(h, indices)

        return h, (h_last, c_last)
python
{ "resource": "" }
q266962
cudnn_bi_gru
test
def cudnn_bi_gru(units,
                 n_hidden,
                 seq_lengths=None,
                 n_layers=1,
                 trainable_initial_states=False,
                 name='cudnn_bi_gru',
                 reuse=False):
    """ Fast CuDNN Bi-GRU implementation

        Args:
            units: tf.Tensor with dimensions [B x T x F], where
                B - batch size
                T - number of tokens
                F - features
            n_hidden: dimensionality of hidden state
            seq_lengths: number of tokens in each sample in the batch
            n_layers: number of layers
            trainable_initial_states: whether to create a special trainable variable
                to initialize the hidden states of the network or use just zeros
            name: name of the variable scope to use
            reuse: whether to reuse already initialized variable

        Returns:
            (h_fw, h_bw) - forward and backward hidden state sequences along T
            (h_last_fw, h_last_bw) - last forward and backward hidden states
    """
    with tf.variable_scope(name, reuse=reuse):
        if seq_lengths is None:
            # assume every sample uses the full T dimension
            seq_lengths = tf.ones([tf.shape(units)[0]], dtype=tf.int32) * tf.shape(units)[1]
        with tf.variable_scope('Forward'):
            h_fw, h_last_fw = cudnn_gru_wrapper(units, n_hidden, n_layers=n_layers,
                                                trainable_initial_states=trainable_initial_states,
                                                seq_lengths=seq_lengths, reuse=reuse)

        with tf.variable_scope('Backward'):
            # run the forward implementation on length-aware reversed input,
            # then reverse its outputs back to the original time order
            reversed_units = tf.reverse_sequence(units, seq_lengths=seq_lengths, seq_dim=1, batch_dim=0)
            h_bw, h_last_bw = cudnn_gru_wrapper(reversed_units, n_hidden, n_layers=n_layers,
                                                trainable_initial_states=trainable_initial_states,
                                                seq_lengths=seq_lengths, reuse=reuse)
            h_bw = tf.reverse_sequence(h_bw, seq_lengths=seq_lengths, seq_dim=1, batch_dim=0)

    return (h_fw, h_bw), (h_last_fw, h_last_bw)
python
{ "resource": "" }
q266963
cudnn_bi_lstm
test
def cudnn_bi_lstm(units,
                  n_hidden,
                  seq_lengths=None,
                  n_layers=1,
                  trainable_initial_states=False,
                  name='cudnn_bi_gru',
                  reuse=False):
    # NOTE(review): the default scope name 'cudnn_bi_gru' looks copy-pasted
    # from cudnn_bi_gru; changing it would rename saved-checkpoint variable
    # scopes, so it is kept as-is -- confirm against existing checkpoints.
    """ Fast CuDNN Bi-LSTM implementation

        Args:
            units: tf.Tensor with dimensions [B x T x F], where
                B - batch size
                T - number of tokens
                F - features
            n_hidden: dimensionality of hidden state
            seq_lengths: number of tokens in each sample in the batch
            n_layers: number of layers
            trainable_initial_states: whether to create a special trainable variable
                to initialize the hidden states of the network or use just zeros
            name: name of the variable scope to use
            reuse: whether to reuse already initialized variable

        Returns:
            (h_fw, h_bw) - forward and backward hidden state sequences along T
            ((h_fw_last, c_fw_last), (h_bw_last, c_bw_last)) - last hidden and
                cell states for each direction
    """
    with tf.variable_scope(name, reuse=reuse):
        if seq_lengths is None:
            # assume every sample uses the full T dimension
            seq_lengths = tf.ones([tf.shape(units)[0]], dtype=tf.int32) * tf.shape(units)[1]
        with tf.variable_scope('Forward'):
            h_fw, (h_fw_last, c_fw_last) = cudnn_lstm_wrapper(units, n_hidden, n_layers=n_layers,
                                                              trainable_initial_states=trainable_initial_states,
                                                              seq_lengths=seq_lengths)

        with tf.variable_scope('Backward'):
            # run the forward implementation on length-aware reversed input,
            # then reverse its outputs back to the original time order
            reversed_units = tf.reverse_sequence(units, seq_lengths=seq_lengths, seq_dim=1, batch_dim=0)
            h_bw, (h_bw_last, c_bw_last) = cudnn_lstm_wrapper(reversed_units, n_hidden, n_layers=n_layers,
                                                              trainable_initial_states=trainable_initial_states,
                                                              seq_lengths=seq_lengths)
            h_bw = tf.reverse_sequence(h_bw, seq_lengths=seq_lengths, seq_dim=1, batch_dim=0)

    return (h_fw, h_bw), ((h_fw_last, c_fw_last), (h_bw_last, c_bw_last))
python
{ "resource": "" }
q266964
cudnn_stacked_bi_gru
test
def cudnn_stacked_bi_gru(units,
                         n_hidden,
                         seq_lengths=None,
                         n_stacks=2,
                         keep_prob=1.0,
                         concat_stacked_outputs=False,
                         trainable_initial_states=False,
                         name='cudnn_stacked_bi_gru',
                         reuse=False):
    """ Fast CuDNN implementation of several Bi-GRUs stacked on top of each other.

        Args:
            units: tf.Tensor with dimensions [B x T x F], where
                B - batch size
                T - number of tokens
                F - features
            n_hidden: dimensionality of hidden state
            seq_lengths: number of tokens in each sample in the batch
            n_stacks: number of stacked Bi-GRU layers
            keep_prob: dropout keep probability applied between the Bi-GRUs
                (intra-layer dropout)
            concat_stacked_outputs: whether to concatenate outputs of every
                Bi-GRU instead of returning only the last one
            trainable_initial_states: whether to create a special trainable variable
                to initialize the hidden states of the network or use just zeros
            name: name of the variable scope to use
            reuse: whether to reuse already initialized variable

        Returns:
            h - all hidden states along T dimension,
                tf.Tensor with dimensionality [B x T x ((n_hidden * 2) * n_stacks)]
    """
    if seq_lengths is None:
        # assume every sample uses the full T dimension
        seq_lengths = tf.ones([tf.shape(units)[0]], dtype=tf.int32) * tf.shape(units)[1]

    layer_outputs = [units]
    with tf.variable_scope(name, reuse=reuse):
        for stack_index in range(n_stacks):
            layer_input = layer_outputs[-1]
            if stack_index > 0:
                # intra-layer dropout between consecutive Bi-GRUs
                layer_input = variational_dropout(layer_input, keep_prob=keep_prob)

            (h_fw, h_bw), _ = cudnn_bi_gru(layer_input, n_hidden, seq_lengths, n_layers=1,
                                           trainable_initial_states=trainable_initial_states,
                                           name='{}_cudnn_bi_gru'.format(stack_index), reuse=reuse)

            layer_outputs.append(tf.concat([h_fw, h_bw], axis=2))

    if concat_stacked_outputs:
        # drop layer_outputs[0]: it is the raw input, not a Bi-GRU output
        return tf.concat(layer_outputs[1:], axis=2)

    return layer_outputs[-1]
python
{ "resource": "" }
q266965
variational_dropout
test
def variational_dropout(units, keep_prob, fixed_mask_dims=(1,)):
    """ Dropout with the same drop mask for all fixed_mask_dims

        Args:
            units: a tensor, usually with shapes [B x T x F], where
                B - batch size
                T - tokens dimension
                F - feature dimension
            keep_prob: keep probability
            fixed_mask_dims: in these dimensions the mask will be the same

        Returns:
            dropped units tensor
    """
    dynamic_shape = tf.shape(units)
    # Start from the full dynamic shape, then collapse the fixed dimensions
    # to 1 so the same mask is broadcast along them.
    noise_shape = [dynamic_shape[axis] for axis in range(len(units.shape))]
    for axis in fixed_mask_dims:
        noise_shape[axis] = 1
    return tf.nn.dropout(units, keep_prob, noise_shape)
python
{ "resource": "" }
q266966
CharacterTagger.build
test
def build(self):
    """Builds the network using Keras.
    """
    # Character-level input: one row of symbol ids per word (plus BEGIN/END slots).
    char_word_inputs = kl.Input(shape=(None, MAX_WORD_LENGTH + 2), dtype="int32")
    model_inputs = [char_word_inputs]
    word_repr = self._build_word_cnn(char_word_inputs)
    if len(self.word_vectorizers) > 0:
        # Extra dense-projected word-level feature inputs, one per vectorizer.
        extra_inputs = [kl.Input(shape=(None, input_dim), dtype="float32")
                        for input_dim, dense_dim in self.word_vectorizers]
        model_inputs.extend(extra_inputs)
        extra_embeddings = [kl.Dense(dense_dim)(extra_inputs[i])
                            for i, (_, dense_dim) in enumerate(self.word_vectorizers)]
        word_repr = kl.Concatenate()([word_repr] + extra_embeddings)
    outputs, lstm_outputs = self._build_basic_network(word_repr)
    compile_args = {"optimizer": ko.nadam(lr=0.002, clipnorm=5.0),
                    "loss": "categorical_crossentropy", "metrics": ["accuracy"]}
    self.model_ = Model(model_inputs, outputs)
    self.model_.compile(**compile_args)
    if self.verbose > 0:
        self.model_.summary(print_fn=log.info)
    return self
python
{ "resource": "" }
q266967
CharacterTagger._build_word_cnn
test
def _build_word_cnn(self, inputs):
    """Builds word-level network

    Transforms a [B x T x (MAX_WORD_LENGTH+2)] tensor of symbol indices into
    per-word embeddings via parallel character convolutions of several window
    sizes, max-over-time pooling and a stack of highway layers.
    """
    # one-hot encode symbol indices so they can be embedded with a Dense layer
    inputs = kl.Lambda(kb.one_hot, arguments={"num_classes": self.symbols_number_},
                       output_shape=lambda x: tuple(x) + (self.symbols_number_,))(inputs)
    char_embeddings = kl.Dense(self.char_embeddings_size, use_bias=False)(inputs)
    conv_outputs = []
    self.char_output_dim_ = 0
    # one convolutional branch per character window size
    for window_size, filters_number in zip(self.char_window_size, self.char_filters):
        curr_output = char_embeddings
        # default filter count grows with window size but is capped at 200
        curr_filters_number = (min(self.char_filter_multiple * window_size, 200)
                               if filters_number is None else filters_number)
        # all but the last conv layer are followed by optional dropout
        for _ in range(self.char_conv_layers - 1):
            curr_output = kl.Conv2D(curr_filters_number, (1, window_size),
                                    padding="same", activation="relu",
                                    data_format="channels_last")(curr_output)
            if self.conv_dropout > 0.0:
                curr_output = kl.Dropout(self.conv_dropout)(curr_output)
        curr_output = kl.Conv2D(curr_filters_number, (1, window_size),
                                padding="same", activation="relu",
                                data_format="channels_last")(curr_output)
        conv_outputs.append(curr_output)
        self.char_output_dim_ += curr_filters_number
    if len(conv_outputs) > 1:
        # concatenate the branches along the feature axis
        conv_output = kl.Concatenate(axis=-1)(conv_outputs)
    else:
        conv_output = conv_outputs[0]
    # max-over-time pooling across the character axis yields one vector per word
    highway_input = kl.Lambda(kb.max, arguments={"axis": -2})(conv_output)
    if self.intermediate_dropout > 0.0:
        highway_input = kl.Dropout(self.intermediate_dropout)(highway_input)
    # all but the last highway layer are followed by optional dropout
    for i in range(self.char_highway_layers - 1):
        highway_input = Highway(activation="relu")(highway_input)
        if self.highway_dropout > 0.0:
            highway_input = kl.Dropout(self.highway_dropout)(highway_input)
    highway_output = Highway(activation="relu")(highway_input)
    return highway_output
python
{ "resource": "" }
q266968
CharacterTagger._build_basic_network
test
def _build_basic_network(self, word_outputs):
    """
    Creates the basic network architecture,
    transforming word embeddings to intermediate outputs.

    Stacks ``self.word_lstm_layers`` bidirectional LSTMs over the word
    representations and applies a per-timestep softmax over tags.

    Returns:
        pre_outputs: per-word tag probability tensor (layer named "p")
        lstm_outputs: output of the last BiLSTM layer
    """
    if self.word_dropout > 0.0:
        lstm_outputs = kl.Dropout(self.word_dropout)(word_outputs)
    else:
        lstm_outputs = word_outputs
    # all but the last BiLSTM layer
    for j in range(self.word_lstm_layers-1):
        lstm_outputs = kl.Bidirectional(
            kl.LSTM(self.word_lstm_units[j], return_sequences=True,
                    dropout=self.lstm_dropout))(lstm_outputs)
    lstm_outputs = kl.Bidirectional(
        kl.LSTM(self.word_lstm_units[-1], return_sequences=True,
                dropout=self.lstm_dropout))(lstm_outputs)
    pre_outputs = kl.TimeDistributed(
        kl.Dense(self.tags_number_, activation="softmax",
                 activity_regularizer=self.regularizer),
        name="p")(lstm_outputs)
    return pre_outputs, lstm_outputs
python
{ "resource": "" }
q266969
CharacterTagger.train_on_batch
test
def train_on_batch(self, data: List[Iterable], labels: Iterable[list]) -> None:
    """Trains model on a single batch.

    Transforms the raw batch into network inputs/targets and performs
    one gradient step on the underlying Keras model.

    Args:
        data: a batch of word sequences
        labels: a batch of correct tag sequences
    """
    batch_inputs, batch_targets = self._transform_batch(data, labels)
    self.model_.train_on_batch(batch_inputs, batch_targets)
python
{ "resource": "" }
q266970
CharacterTagger.predict_on_batch
test
def predict_on_batch(self, data: Union[list, tuple],
                     return_indexes: bool = False) -> List[List[str]]:
    """
    Makes predictions on a single batch.

    Args:
        data: a batch of word sequences together with additional inputs
        return_indexes: whether to return tag indexes in vocabulary or tags themselves

    Returns:
        a batch of label sequences
    """
    X = self._transform_batch(data)
    objects_number = len(X[0])
    lengths = [len(sent) for sent in data[0]]
    probs = self.model_.predict_on_batch(X)
    # most probable tag index per word
    best_indexes = np.argmax(probs, axis=-1)
    answer: List[List[str]] = [None] * objects_number
    for pos, (indexes, length) in enumerate(zip(best_indexes, lengths)):
        trimmed = indexes[:length]  # drop padding positions
        answer[pos] = trimmed if return_indexes else self.tags.idxs2toks(trimmed)
    return answer
python
{ "resource": "" }
q266971
CharacterTagger._make_sent_vector
test
def _make_sent_vector(self, sent: List, bucket_length: int =None) -> np.ndarray:
    """Transforms a sentence to Numpy array, which will be the network input.

    Args:
        sent: input sentence
        bucket_length: the width of the bucket

    Returns:
        A 3d array, answer[i][j][k] contains the index of k-th letter
        in j-th word of i-th input sentence.
    """
    bucket_length = bucket_length or len(sent)
    answer = np.zeros(shape=(bucket_length, MAX_WORD_LENGTH+2), dtype=np.int32)
    for i, word in enumerate(sent):
        # NOTE(review): the BEGIN/END/PAD sentinels are looked up in the
        # *tags* vocabulary while actual letters use self.symbols — confirm
        # this mix of vocabularies is intended and the indexes agree.
        answer[i, 0] = self.tags.tok2idx("BEGIN")
        # overlong words keep their *last* MAX_WORD_LENGTH characters
        m = min(len(word), MAX_WORD_LENGTH)
        for j, x in enumerate(word[-m:]):
            answer[i, j+1] = self.symbols.tok2idx(x)
        answer[i, m+1] = self.tags.tok2idx("END")
        answer[i, m+2:] = self.tags.tok2idx("PAD")
    return answer
python
{ "resource": "" }
q266972
CharacterTagger._make_tags_vector
test
def _make_tags_vector(self, tags, bucket_length=None) -> np.ndarray:
    """Transforms a sentence of tags into a vector of tag indexes.

    Args:
        tags: input sentence of tags
        bucket_length: padded width of the output (defaults to ``len(tags)``)

    Returns:
        A 1d int32 array where position ``i`` holds the vocabulary index
        of the i-th tag; trailing positions stay zero.
    """
    width = bucket_length or len(tags)
    vector = np.zeros(shape=(width,), dtype=np.int32)
    for pos, tag in enumerate(tags):
        vector[pos] = self.tags.tok2idx(tag)
    return vector
python
{ "resource": "" }
q266973
bleu_advanced
test
def bleu_advanced(y_true: List[Any], y_predicted: List[Any],
                  weights: Tuple=(1,), smoothing_function=SMOOTH.method1,
                  auto_reweigh=False, penalty=True) -> float:
    """Calculate BLEU score

    Parameters:
        y_true: list of reference tokens
        y_predicted: list of query tokens
        weights: n-gram weights
        smoothing_function: SmoothingFunction
        auto_reweigh: Option to re-normalize the weights uniformly
        penalty: either enable brevity penalty or not

    Return:
        BLEU score
    """
    bleu_measure = sentence_bleu([y_true], y_predicted, weights,
                                 smoothing_function, auto_reweigh)
    hyp_len = len(y_predicted)
    hyp_lengths = hyp_len
    ref_lengths = closest_ref_length([y_true], hyp_len)
    # recompute the brevity penalty that sentence_bleu already applied
    bpenalty = brevity_penalty(ref_lengths, hyp_lengths)
    # keep the penalized score when requested, or when the penalty is zero
    # (division would be undefined)
    if penalty is True or bpenalty == 0:
        return bleu_measure
    # otherwise divide the brevity penalty back out of the score
    return bleu_measure/bpenalty
python
{ "resource": "" }
q266974
verify_sc_url
test
def verify_sc_url(url: str) -> bool:
    """Verify signature certificate URL against Amazon Alexa requirements.

    The URL must use the HTTPS scheme, point at the ``s3.amazonaws.com``
    host, have a path beginning with ``/echo.api/`` and use port 443
    (explicitly or by omission).

    Args:
        url: Signature certificate URL from SignatureCertChainUrl HTTP header.

    Returns:
        result: True if verification was successful, False if not.
    """
    parsed = urlsplit(url)
    try:
        port = parsed.port
    except ValueError:
        # malformed port specification counts as "no valid port"
        port = None
    if parsed.scheme.lower() != 'https':
        return False
    if parsed.netloc.lower().split(':')[0] != 's3.amazonaws.com':
        return False
    if not parsed.path.startswith('/echo.api/'):
        return False
    return port == 443 or port is None
python
{ "resource": "" }
q266975
extract_certs
test
def extract_certs(certs_txt: str) -> List[crypto.X509]:
    """Extracts pyOpenSSL X509 objects from an SSL certificates chain string.

    Args:
        certs_txt: SSL certificates chain string (concatenated PEM blocks).

    Returns:
        result: List of pyOpenSSL X509 objects, in order of appearance.
    """
    pem_pattern = r'-----BEGIN CERTIFICATE-----.+?-----END CERTIFICATE-----'
    pem_blocks = re.findall(pem_pattern, certs_txt, flags=re.DOTALL)
    return [crypto.load_certificate(crypto.FILETYPE_PEM, pem_block)
            for pem_block in pem_blocks]
python
{ "resource": "" }
q266976
verify_certs_chain
test
def verify_certs_chain(certs_chain: List[crypto.X509], amazon_cert: crypto.X509) -> bool:
    """Verifies if Amazon and additional certificates creates chain of trust to a root CA.

    Builds an OpenSSL store from the provided intermediates plus the
    platform's default CA certificates, then validates the Amazon
    certificate against that store.

    Args:
        certs_chain: List of pycrypto X509 intermediate certificates from signature chain URL.
        amazon_cert: Pycrypto X509 Amazon certificate.

    Returns:
        result: True if verification was successful, False if not.
    """
    store = crypto.X509Store()

    # add certificates from Amazon provided certs chain
    for cert in certs_chain:
        store.add_cert(cert)

    # add CA certificates
    default_verify_paths = ssl.get_default_verify_paths()

    default_verify_file = default_verify_paths.cafile
    default_verify_file = Path(default_verify_file).resolve() if default_verify_file else None

    default_verify_path = default_verify_paths.capath
    default_verify_path = Path(default_verify_path).resolve() if default_verify_path else None

    ca_files = [ca_file for ca_file in default_verify_path.iterdir()] if default_verify_path else []
    if default_verify_file:
        ca_files.append(default_verify_file)

    for ca_file in ca_files:
        ca_file: Path
        if ca_file.is_file():
            # assumes CA files are ASCII-readable PEM bundles — TODO confirm
            # (a DER file in capath would raise UnicodeDecodeError here)
            with ca_file.open('r', encoding='ascii') as crt_f:
                ca_certs_txt = crt_f.read()
                ca_certs = extract_certs(ca_certs_txt)
                for cert in ca_certs:
                    store.add_cert(cert)

    # add CA certificates (Windows)
    ssl_context = ssl.create_default_context()
    der_certs = ssl_context.get_ca_certs(binary_form=True)
    pem_certs = '\n'.join([ssl.DER_cert_to_PEM_cert(der_cert) for der_cert in der_certs])

    ca_certs = extract_certs(pem_certs)

    for ca_cert in ca_certs:
        store.add_cert(ca_cert)

    store_context = crypto.X509StoreContext(store, amazon_cert)

    try:
        store_context.verify_certificate()
        result = True
    except crypto.X509StoreContextError:
        result = False

    return result
python
{ "resource": "" }
q266977
verify_signature
test
def verify_signature(amazon_cert: crypto.X509, signature: str, request_body: bytes) -> bool:
    """Verifies Alexa request signature.

    Args:
        amazon_cert: Pycrypto X509 Amazon certificate.
        signature: Base64 encoded Alexa request signature from Signature HTTP header.
        request_body: full HTTPS request body

    Returns:
        result: True if verification was successful, False if not.
    """
    decoded_signature = base64.b64decode(signature)
    try:
        # SHA-1 is what the Alexa signing scheme specifies here
        crypto.verify(amazon_cert, decoded_signature, request_body, 'sha1')
    except crypto.Error:
        return False
    return True
python
{ "resource": "" }
q266978
verify_cert
test
def verify_cert(signature_chain_url: str) -> Optional[crypto.X509]:
    """Conducts series of Alexa SSL certificate verifications against Amazon Alexa requirements.

    Downloads the certificate chain, then checks: the chain URL format,
    certificate expiry, subject alternative names, and the chain of trust.
    All checks are run (and logged) even if an earlier one fails.

    Args:
        signature_chain_url: Signature certificate URL from SignatureCertChainUrl HTTP header.

    Returns:
        result: Amazon certificate if verification was successful, None if not.
    """
    try:
        certs_chain_get = requests.get(signature_chain_url)
    except requests.exceptions.ConnectionError as e:
        log.error(f'Amazon signature chain get error: {e}')
        return None

    certs_chain_txt = certs_chain_get.text
    certs_chain = extract_certs(certs_chain_txt)

    # first certificate in the chain is Amazon's own; the rest are intermediates
    amazon_cert: crypto.X509 = certs_chain.pop(0)

    # verify signature chain url
    sc_url_verification = verify_sc_url(signature_chain_url)
    if not sc_url_verification:
        log.error(f'Amazon signature url {signature_chain_url} was not verified')

    # verify not expired
    expired_verification = not amazon_cert.has_expired()
    if not expired_verification:
        log.error(f'Amazon certificate ({signature_chain_url}) expired')

    # verify subject alternative names
    sans_verification = verify_sans(amazon_cert)
    if not sans_verification:
        log.error(f'Subject alternative names verification for ({signature_chain_url}) certificate failed')

    # verify certs chain
    chain_verification = verify_certs_chain(certs_chain, amazon_cert)
    if not chain_verification:
        log.error(f'Certificates chain verification for ({signature_chain_url}) certificate failed')

    result = (sc_url_verification and expired_verification
              and sans_verification and chain_verification)

    return amazon_cert if result else None
python
{ "resource": "" }
q266979
RichMessage.json
test
def json(self) -> list:
    """Returns list of json compatible states of the RichMessage instance
    nested controls.

    Returns:
        json_controls: Json representation of RichMessage instance nested controls.
    """
    json_controls = []
    for control in self.controls:
        json_controls.append(control.json())
    return json_controls
python
{ "resource": "" }
q266980
RichMessage.ms_bot_framework
test
def ms_bot_framework(self) -> list:
    """Returns list of MS Bot Framework compatible states of the RichMessage
    instance nested controls.

    Returns:
        ms_bf_controls: MS Bot Framework representation of RichMessage instance nested controls.
    """
    ms_bf_controls = []
    for control in self.controls:
        ms_bf_controls.append(control.ms_bot_framework())
    return ms_bf_controls
python
{ "resource": "" }
q266981
RichMessage.telegram
test
def telegram(self) -> list:
    """Returns list of Telegram compatible states of the RichMessage instance
    nested controls.

    Returns:
        telegram_controls: Telegram representation of RichMessage instance nested controls.
    """
    telegram_controls = []
    for control in self.controls:
        telegram_controls.append(control.telegram())
    return telegram_controls
python
{ "resource": "" }
q266982
RichMessage.alexa
test
def alexa(self) -> list:
    """Returns list of Amazon Alexa compatible states of the RichMessage
    instance nested controls.

    Returns:
        alexa_controls: Amazon Alexa representation of RichMessage instance nested controls.
    """
    alexa_controls = []
    for control in self.controls:
        alexa_controls.append(control.alexa())
    return alexa_controls
python
{ "resource": "" }
q266983
main
test
def main():
    """DeepPavlov console configuration utility."""
    args = parser.parse_args()
    path = get_settings_path()

    if not args.default:
        # no action requested: just report the current settings location
        print(f'Current DeepPavlov settings path: {path}')
        return

    if populate_settings_dir(force=True):
        print(f'Populated {path} with default settings files')
    else:
        print(f'{path} is already a default settings directory')
python
{ "resource": "" }
q266984
_graph_wrap
test
def _graph_wrap(func, graph):
    """Constructs function encapsulated in the graph.

    Every call of the returned wrapper runs ``func`` with ``graph`` as
    the default TensorFlow graph; metadata is preserved via ``wraps``.
    """
    @wraps(func)
    def _graph_scoped(*args, **kwargs):
        with graph.as_default():
            return func(*args, **kwargs)

    return _graph_scoped
python
{ "resource": "" }
q266985
_keras_wrap
test
def _keras_wrap(func, graph, session):
    """Constructs function encapsulated in the graph and the session.

    Every call of the returned wrapper enters ``graph``, binds ``session``
    as the Keras backend session, then runs ``func``.
    """
    import keras.backend as K

    @wraps(func)
    def _session_scoped(*args, **kwargs):
        with graph.as_default():
            K.set_session(session)
            return func(*args, **kwargs)

    return _session_scoped
python
{ "resource": "" }
q266986
accuracy
test
def accuracy(y_true: [list, np.ndarray], y_predicted: [list, np.ndarray]) -> float:
    """
    Calculate accuracy in terms of absolute coincidence

    Args:
        y_true: array of true values
        y_predicted: array of predicted values

    Returns:
        portion of absolutely coincidental samples (0 for empty input)
    """
    total = len(y_true)
    if not total:
        return 0
    matches = sum(1 for true_val, pred_val in zip(y_true, y_predicted)
                  if true_val == pred_val)
    return matches / total
python
{ "resource": "" }
q266987
round_accuracy
test
def round_accuracy(y_true, y_predicted):
    """
    Rounds predictions and calculates accuracy in terms of absolute coincidence.

    Args:
        y_true: list of true values
        y_predicted: list of predicted values

    Returns:
        portion of absolutely coincidental samples (0 for empty input)
    """
    rounded = [round(pred) for pred in y_predicted]
    total = len(y_true)
    if not total:
        return 0
    return sum(true_val == r for true_val, r in zip(y_true, rounded)) / total
python
{ "resource": "" }
q266988
_pretrained_initializer
test
def _pretrained_initializer(varname, weight_file, embedding_weight_file=None):
    """
    We'll stub out all the initializers in the pretrained LM with
    a function that loads the weights from the file.

    Maps a graph variable name to its name inside the HDF5 checkpoint,
    loads the corresponding array, and returns a TF-style initializer
    callable that validates the requested shape against it.
    """
    # map graph LSTM variable names to the checkpoint's naming scheme
    weight_name_map = {}
    for i in range(2):
        for j in range(8):  # if we decide to add more layers
            root = 'RNN_{}/RNN/MultiRNNCell/Cell{}'.format(i, j)
            weight_name_map[root + '/rnn/lstm_cell/kernel'] = \
                root + '/LSTMCell/W_0'
            weight_name_map[root + '/rnn/lstm_cell/bias'] = \
                root + '/LSTMCell/B'
            weight_name_map[root + '/rnn/lstm_cell/projection/kernel'] = \
                root + '/LSTMCell/W_P_0'

    # convert the graph name to that in the checkpoint
    # (strips a 5-char prefix — presumably a scope like "lm/..."; TODO confirm)
    varname_in_file = varname[5:]
    if varname_in_file.startswith('RNN'):
        varname_in_file = weight_name_map[varname_in_file]

    if varname_in_file == 'embedding':
        with h5py.File(embedding_weight_file, 'r') as fin:
            # Have added a special 0 index for padding not present
            # in the original model.
            embed_weights = fin[varname_in_file][...]
            weights = np.zeros(
                (embed_weights.shape[0] + 1, embed_weights.shape[1]),
                dtype=DTYPE
            )
            weights[1:, :] = embed_weights
    else:
        with h5py.File(weight_file, 'r') as fin:
            if varname_in_file == 'char_embed':
                # Have added a special 0 index for padding not present
                # in the original model.
                char_embed_weights = fin[varname_in_file][...]
                weights = np.zeros(
                    (char_embed_weights.shape[0] + 1,
                     char_embed_weights.shape[1]),
                    dtype=DTYPE
                )
                weights[1:, :] = char_embed_weights
            else:
                weights = fin[varname_in_file][...]

    # Tensorflow initializers are callables that accept a shape parameter
    # and some optional kwargs
    def ret(shape, **kwargs):
        if list(shape) != list(weights.shape):
            raise ValueError(
                "Invalid shape initializing {0}, got {1}, expected {2}".format(
                    varname_in_file, shape, weights.shape)
            )
        return weights

    return ret
python
{ "resource": "" }
q266989
DatasetReader.read
test
def read(self, data_path: str, *args, **kwargs) -> Dict[str, List[Tuple[Any, Any]]]:
    """Reads a file from a path and returns data as a list of tuples of inputs
    and correct outputs for every data type in ``train``, ``valid`` and ``test``.

    Abstract method: subclasses must override it.

    Raises:
        NotImplementedError: always, in this base class.
    """
    raise NotImplementedError
python
{ "resource": "" }
q266990
make_hello_bot_agent
test
def make_hello_bot_agent() -> DefaultAgent:
    """Builds agent based on PatternMatchingSkill and HighestConfidenceSelector.

    This is agent building tutorial. You can use this .py file to check
    how hello-bot agent works.

    Returns:
        agent: Agent capable of handling several simple greetings.
    """
    greeting = PatternMatchingSkill(['Hello world'],
                                    patterns=['hi', 'hello', 'good day'])
    farewell = PatternMatchingSkill(['Goodbye world', 'See you around'],
                                    patterns=['bye', 'chao', 'see you'])
    # fallback skill has no patterns: it answers when nothing else matches
    fallback = PatternMatchingSkill(['I don\'t understand, sorry',
                                     'I can say "Hello world"'])
    return DefaultAgent([greeting, farewell, fallback],
                        skills_processor=HighestConfidenceSelector())
python
{ "resource": "" }
q266991
to_one_hot
test
def to_one_hot(x, k):
    """
    Takes an array of integers and transforms it to an array of
    one-hot encoded vectors of length ``k``.
    """
    # indexing the identity matrix by x yields one-hot rows
    return np.eye(k, dtype=int)[x]
python
{ "resource": "" }
q266992
prettify_metrics
test
def prettify_metrics(metrics: List[Tuple[str, float]], precision: int = 4) -> OrderedDict:
    """Prettifies the dictionary of metrics.

    Rounds every metric value to ``precision`` decimal places while
    preserving the original ordering.
    """
    return OrderedDict((name, round(score, precision)) for name, score in metrics)
python
{ "resource": "" }
q266993
populate_settings_dir
test
def populate_settings_dir(force: bool = False) -> bool:
    """
    Populate settings directory with default settings files

    Args:
        force: if ``True``, replace existing settings files with default ones

    Returns:
        ``True`` if any files were copied and ``False`` otherwise
    """
    copied = False
    if _default_settings_path == _settings_path:
        # settings dir is the defaults dir itself: nothing to do
        return copied
    # snapshot the file list before copying into the target tree
    for src in list(_default_settings_path.glob('**/*.json')):
        dest = _settings_path / src.relative_to(_default_settings_path)
        if dest.exists() and not force:
            continue
        copied = True
        dest.parent.mkdir(parents=True, exist_ok=True)
        shutil.copy(src, dest)
    return copied
python
{ "resource": "" }
q266994
TFModel.load
test
def load(self, exclude_scopes: tuple = ('Optimizer',)) -> None:
    """Load model parameters from self.load_path

    Args:
        exclude_scopes: variable scopes skipped when restoring
            (optimizer state by default).

    Raises:
        RuntimeError: if the model has no ``sess`` attribute.
    """
    if not hasattr(self, 'sess'):
        raise RuntimeError('Your TensorFlow model {} must'
                           ' have sess attribute!'.format(self.__class__.__name__))
    path = str(self.load_path.resolve())
    # Check presence of the model files
    # NOTE: silently does nothing when no checkpoint exists at path
    if tf.train.checkpoint_exists(path):
        log.info('[loading model from {}]'.format(path))
        # Exclude optimizer variables from saved variables
        var_list = self._get_saveable_variables(exclude_scopes)
        saver = tf.train.Saver(var_list)
        saver.restore(self.sess, path)
python
{ "resource": "" }
q266995
TFModel.save
test
def save(self, exclude_scopes: tuple = ('Optimizer',)) -> None:
    """Save model parameters to self.save_path

    Args:
        exclude_scopes: variable scopes skipped when saving
            (optimizer state by default).

    Raises:
        RuntimeError: if the model has no ``sess`` attribute.
    """
    if not hasattr(self, 'sess'):
        raise RuntimeError('Your TensorFlow model {} must'
                           ' have sess attribute!'.format(self.__class__.__name__))
    save_path = str(self.save_path.resolve())
    log.info('[saving model to {}]'.format(save_path))
    savable = self._get_saveable_variables(exclude_scopes)
    tf.train.Saver(savable).save(self.sess, save_path)
python
{ "resource": "" }
q266996
TFModel.get_train_op
test
def get_train_op(self, loss, learning_rate, optimizer=None, clip_norm=None,
                 learnable_scopes=None, optimizer_scope_name=None, **kwargs):
    """ Get train operation for given loss

    Args:
        loss: loss, tf tensor or scalar
        learning_rate: scalar or placeholder.
        clip_norm: clip gradients norm by clip_norm.
        learnable_scopes: which scopes are trainable (None for all).
        optimizer: instance of tf.train.Optimizer, default Adam.
        optimizer_scope_name: name of the variable scope the optimizer
            variables live in ('Optimizer' by default).
        **kwargs: parameters passed to tf.train.Optimizer object
           (scalars or placeholders).

    Returns:
        train_op
    """
    if optimizer_scope_name is None:
        opt_scope = tf.variable_scope('Optimizer')
    else:
        opt_scope = tf.variable_scope(optimizer_scope_name)
    with opt_scope:
        if learnable_scopes is None:
            variables_to_train = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        else:
            variables_to_train = []
            for scope_name in learnable_scopes:
                variables_to_train.extend(
                    tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope_name))

        if optimizer is None:
            optimizer = tf.train.AdamOptimizer

        # For batch norm it is necessary to update running averages
        extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(extra_update_ops):

            def clip_if_not_none(grad):
                # implicitly returns None for None gradients,
                # which apply_gradients tolerates
                if grad is not None:
                    return tf.clip_by_norm(grad, clip_norm)

            opt = optimizer(learning_rate, **kwargs)
            grads_and_vars = opt.compute_gradients(loss, var_list=variables_to_train)
            if clip_norm is not None:
                # per-variable clipping via tf.clip_by_norm (not global norm)
                grads_and_vars = [(clip_if_not_none(grad), var)
                                  for grad, var in grads_and_vars]
            train_op = opt.apply_gradients(grads_and_vars)
    return train_op
python
{ "resource": "" }
q266997
LevenshteinSearcher.search
test
def search(self, word, d, allow_spaces=True, return_cost=True):
    """
    Finds all dictionary words in d-window from word.

    Args:
        word: query string; must consist of alphabet characters
            (plus space, when spaces are enabled)
        d: maximum allowed edit distance
        allow_spaces: forwarded to the trie search
        return_cost: whether results include their edit cost

    Returns:
        matches from ``self._trie_search``; an empty list when ``word``
        contains characters outside the alphabet.
    """
    # NOTE(review): the character check uses self.allow_spaces while the
    # allow_spaces *parameter* is only forwarded below — confirm this
    # asymmetry is intended.
    if not all((c in self.alphabet or (c == " " and self.allow_spaces))
               for c in word):
        return []
        # raise ValueError("{0} contains an incorrect symbol".format(word))
    return self._trie_search(
        word, d, allow_spaces=allow_spaces, return_cost=return_cost)
python
{ "resource": "" }
q266998
SegmentTransducer._make_default_operation_costs
test
def _make_default_operation_costs(self, allow_spaces=False):
    """
    Sets 1.0 cost for every replacement, insertion, deletion
    and transposition (keeping a letter unchanged is free).
    """
    self.operation_costs = dict()
    # insertions: any alphabet letter (or space) may be inserted at cost 1.0
    self.operation_costs[""] = {c: 1.0 for c in list(self.alphabet) + [' ']}
    for letter in self.alphabet:
        letter_costs = {other: 1.0 for other in self.alphabet}
        letter_costs[letter] = 0.0   # identity substitution is free
        letter_costs[""] = 1.0       # deletion
        if allow_spaces:
            letter_costs[" "] = 1.0  # replacing a letter with a space
        self.operation_costs[letter] = letter_costs
    # transpositions of adjacent letters
    for first, second in itertools.permutations(self.alphabet, 2):
        self.operation_costs[first + second] = {second + first: 1.0}
    # spaces: replaceable by any letter or deletable
    if allow_spaces:
        self.operation_costs[" "] = {c: 1.0 for c in self.alphabet}
        self.operation_costs[" "][""] = 1.0
python
{ "resource": "" }
q266999
Conversation._start_timer
test
def _start_timer(self) -> None:
    """Initiates self-destruct timer.

    Schedules ``self.self_destruct_callback`` to fire after the configured
    conversation lifetime and stores the timer on the instance.
    """
    lifetime = self.config['conversation_lifetime']
    self.timer = Timer(lifetime, self.self_destruct_callback)
    self.timer.start()
python
{ "resource": "" }