| body (string, 26 to 98.2k chars) | body_hash (int64) | docstring (string, 1 to 16.8k chars) | path (string, 5 to 230 chars) | name (string, 1 to 96 chars) | repository_name (string, 7 to 89 chars) | lang (1 class: python) | body_without_docstring (string, 20 to 98.2k chars) |
|---|---|---|---|---|---|---|---|
def generator_fn(words_file, tags_file):
'Enumerator to enumerate through words_file and associated tags_file one line at a time\n\n :param words_file: file path of the words file (one sentence per line)\n :param tags_file: file path of tags file (tags corresponding to words file)\n :return enumerator that enumerates over the format (words, len(words)), tags one line at a time from input files.\n '
with Path(words_file).open('r') as f_words, Path(tags_file).open('r') as f_tags:
for (line_words, line_tags) in zip(f_words, f_tags):
(yield parse_fn(line_words, line_tags))
| -4,083,911,911,165,067,300
|
Enumerator to enumerate through words_file and associated tags_file one line at a time
:param words_file: file path of the words file (one sentence per line)
:param tags_file: file path of tags file (tags corresponding to words file)
:return enumerator that enumerates over the format (words, len(words)), tags one line at a time from input files.
|
src/model/lstm_crf/main.py
|
generator_fn
|
vikasbahirwani/SequenceTagging
|
python
|
def generator_fn(words_file, tags_file):
'Enumerator to enumerate through words_file and associated tags_file one line at a time\n\n :param words_file: file path of the words file (one sentence per line)\n :param tags_file: file path of tags file (tags corresponding to words file)\n :return enumerator that enumerates over the format (words, len(words)), tags one line at a time from input files.\n '
with Path(words_file).open('r') as f_words, Path(tags_file).open('r') as f_tags:
for (line_words, line_tags) in zip(f_words, f_tags):
(yield parse_fn(line_words, line_tags))
|
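A minimal usage sketch for the `generator_fn` row above. The `parse_fn` shown here is a hypothetical stand-in (the repository's own `parse_fn` is not included in this row); it splits a sentence line and its tag line into aligned token lists:

```python
def parse_fn(line_words, line_tags):
    # Hypothetical stand-in for the repository's parse_fn: split a
    # sentence line and its tag line into aligned token lists, encoded
    # to bytes to match tf.string inputs downstream.
    words = [w.encode() for w in line_words.strip().split()]
    tags = [t.encode() for t in line_tags.strip().split()]
    assert len(words) == len(tags)
    return (words, len(words)), tags

# Assumed files: one whitespace-tokenized sentence per line in words.txt,
# with the matching tag sequence on the same line of tags.txt.
for (words, n_words), tags in generator_fn('words.txt', 'tags.txt'):
    print(n_words, words, tags)
```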
def input_fn(words_file, tags_file, params=None, shuffle_and_repeat=False):
"Creates tensorflow dataset using the generator_fn\n\n :param words_file: file path of the words file (one sentence per line)\n :param tags_file: file path of tags file (tags corresponding to words file)\n :param params: if not None then model hyperparameters expected - 'buffer' (as in buffer size) and 'epochs'\n :param shuffle_and_repeat: if the input is to be shuffled and repeat-delivered (say per epoch)\n :return: instance of tf.data.Dataset\n "
params = (params if (params is not None) else {})
shapes = (([None], ()), [None])
types = ((tf.string, tf.int32), tf.string)
defaults = (('<pad>', 0), 'O')
generator = functools.partial(generator_fn, words_file, tags_file)
dataset = tf.data.Dataset.from_generator(generator, output_shapes=shapes, output_types=types)
if shuffle_and_repeat:
dataset = dataset.shuffle(params['buffer']).repeat(params['epochs'])
dataset = dataset.padded_batch(params.get('batch_size', 20), shapes, defaults).prefetch(1)
return dataset
| -139,622,785,079,150,910
|
Creates tensorflow dataset using the generator_fn
:param words_file: file path of the words file (one sentence per line)
:param tags_file: file path of tags file (tags corresponding to words file)
:param params: if not None then model hyperparameters expected - 'buffer' (as in buffer size) and 'epochs'
:param shuffle_and_repeat: if the input is to be shuffled and repeat-delivered (say per epoch)
:return: instance of tf.data.Dataset
|
src/model/lstm_crf/main.py
|
input_fn
|
vikasbahirwani/SequenceTagging
|
python
|
def input_fn(words_file, tags_file, params=None, shuffle_and_repeat=False):
"Creates tensorflow dataset using the generator_fn\n\n :param words_file: file path of the words file (one sentence per line)\n :param tags_file: file path of tags file (tags corresponding to words file)\n :param params: if not None then model hyperparameters expected - 'buffer' (as in buffer size) and 'epochs'\n :param shuffle_and_repeat: if the input is to be shuffled and repeat-delivered (say per epoch)\n :return: instance of tf.data.Dataset\n "
params = (params if (params is not None) else {})
shapes = (([None], ()), [None])
types = ((tf.string, tf.int32), tf.string)
defaults = (('<pad>', 0), 'O')
generator = functools.partial(generator_fn, words_file, tags_file)
dataset = tf.data.Dataset.from_generator(generator, output_shapes=shapes, output_types=types)
if shuffle_and_repeat:
dataset = dataset.shuffle(params['buffer']).repeat(params['epochs'])
dataset = dataset.padded_batch(params.get('batch_size', 20), shapes, defaults).prefetch(1)
return dataset
|
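A hedged usage sketch for `input_fn` (TensorFlow 1.x, where `tf.contrib` is still available; the paths and hyperparameter values below are placeholders, not taken from the repository):

```python
params = {'buffer': 15000, 'epochs': 25, 'batch_size': 20}

# Shuffled, repeating dataset for training; plain dataset for evaluation
# (params may be omitted because shuffle_and_repeat is False there).
train_dataset = input_fn('data/train.words.txt', 'data/train.tags.txt',
                         params, shuffle_and_repeat=True)
eval_dataset = input_fn('data/dev.words.txt', 'data/dev.tags.txt')

# Each batch has the structure ((words, nwords), tags), padded per batch
# with '<pad>' / 0 / 'O' as declared in `defaults` above.
```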
def model_fn(features, labels, mode, params):
'\n\n :param features: words from sentence and number of words per sentence\n :param labels: One tag per word\n :param mode: tf.estimator.ModeKeys.TRAIN or tf.estimator.ModeKeys.PREDICT or tf.estimator.ModeKeys.EVAL\n :param params: dictionary of hyper parameters for the model\n :return:\n '
if isinstance(features, dict):
features = (features['words'], features['nwords'])
(words, nwords) = features
training = (mode == tf.estimator.ModeKeys.TRAIN)
vocab_words = tf.contrib.lookup.index_table_from_file(params['vocab_words_file'], num_oov_buckets=params['num_oov_buckets'])
'\n If the file contains the following: \n B-LOC\n B-PER\n O\n I-LOC\n \n then indices = [0, 1, 3] and num_tags = 4\n \n Open Question: The special treatment of tag indices is probably needed for microavg metrics. Why though?\n '
with Path(params['vocab_tags_file']).open('r') as f:
indices = [idx for (idx, tag) in enumerate(f) if (tag.strip() != 'O')]
num_tags = (len(indices) + 1)
word_ids = vocab_words.lookup(words)
glove = np.load(params['glove'])['embeddings']
glove = np.vstack([glove, [([0.0] * params['dim'])]])
variable = tf.Variable(glove, dtype=tf.float32, trainable=False)
embeddings = tf.nn.embedding_lookup(variable, word_ids)
dropout = params['dropout']
embeddings = tf.layers.dropout(embeddings, rate=dropout, training=training)
time_major = tf.transpose(embeddings, perm=[1, 0, 2])
lstm_cell_fw = tf.contrib.rnn.LSTMBlockFusedCell(params['lstm_size'])
lstm_cell_bw = tf.contrib.rnn.LSTMBlockFusedCell(params['lstm_size'])
lstm_cell_bw = tf.contrib.rnn.TimeReversedFusedRNN(lstm_cell_bw)
'\n Any LSTM Cell returns two things: Cell Output (h) and Cell State (c)\n\n Following this, lstm_fw or lstm_bw each return a pair containing:\n\n Cell Output: A 3-D tensor of shape [time_len, batch_size, output_size]\n Final state: a tuple (cell_state, output) produced by the last LSTM Cell in the sequence.\n\n '
(output_fw, _) = lstm_cell_fw(time_major, dtype=tf.float32, sequence_length=nwords)
(output_bw, _) = lstm_cell_bw(time_major, dtype=tf.float32, sequence_length=nwords)
output = tf.concat([output_fw, output_bw], axis=(- 1))
output = tf.transpose(output, perm=[1, 0, 2])
output = tf.layers.dropout(output, rate=dropout, training=training)
logits = tf.layers.dense(output, num_tags)
crf_params = tf.get_variable('crf', shape=[num_tags, num_tags], dtype=tf.float32)
(pred_ids, _) = tf.contrib.crf.crf_decode(logits, crf_params, nwords)
if (mode == tf.estimator.ModeKeys.PREDICT):
reverse_vocab_tags = tf.contrib.lookup.index_to_string_table_from_file(params['vocab_tags_file'])
pred_strings = reverse_vocab_tags.lookup(tf.to_int64(pred_ids))
predictions = {'pred_ids': pred_ids, 'tags': pred_strings}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
vocab_tags = tf.contrib.lookup.index_table_from_file(params['vocab_tags_file'])
label_ids = vocab_tags.lookup(labels)
'\n logits are the same thing as unary potentials,\n checkout https://guillaumegenthial.github.io/sequence-tagging-with-tensorflow.html look for scores s[i]\n '
(log_likelihood, _) = tf.contrib.crf.crf_log_likelihood(logits, label_ids, nwords, crf_params)
loss = tf.reduce_mean((- log_likelihood))
weights = tf.sequence_mask(nwords)
metrics = {'acc': tf.metrics.accuracy(label_ids, pred_ids, weights), 'precision': precision(label_ids, pred_ids, num_tags, indices, weights), 'recall': recall(label_ids, pred_ids, num_tags, indices, weights), 'f1': f1(label_ids, pred_ids, num_tags, indices, weights)}
for (metric_name, op) in metrics.items():
tf.summary.scalar(metric_name, op[1])
if (mode == tf.estimator.ModeKeys.EVAL):
return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)
elif (mode == tf.estimator.ModeKeys.TRAIN):
train_op = tf.train.AdamOptimizer().minimize(loss, global_step=tf.train.get_or_create_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
| 8,180,957,206,196,648,000
|
:param features: words from sentence and number of words per sentence
:param labels: One tag per word
:param mode: tf.estimator.ModeKeys.TRAIN or tf.estimator.ModeKeys.PREDICT or tf.estimator.ModeKeys.EVAL
:param params: dictionary of hyper parameters for the model
:return:
|
src/model/lstm_crf/main.py
|
model_fn
|
vikasbahirwani/SequenceTagging
|
python
|
def model_fn(features, labels, mode, params):
'\n\n :param features: words from sentence and number of words per sentence\n :param labels: One tag per word\n :param mode: tf.estimator.ModeKeys.TRAIN or tf.estimator.ModeKeys.PREDICT or tf.estimator.ModeKeys.EVAL\n :param params: dictionary of hyper parameters for the model\n :return:\n '
if isinstance(features, dict):
features = (features['words'], features['nwords'])
(words, nwords) = features
training = (mode == tf.estimator.ModeKeys.TRAIN)
vocab_words = tf.contrib.lookup.index_table_from_file(params['vocab_words_file'], num_oov_buckets=params['num_oov_buckets'])
'\n If the file contains the following: \n B-LOC\n B-PER\n O\n I-LOC\n \n then indices = [0, 1, 3] and num_tags = 4\n \n Open Question: The special treatment of tag indices is probably needed for microavg metrics. Why though?\n '
with Path(params['vocab_tags_file']).open('r') as f:
indices = [idx for (idx, tag) in enumerate(f) if (tag.strip() != 'O')]
num_tags = (len(indices) + 1)
word_ids = vocab_words.lookup(words)
glove = np.load(params['glove'])['embeddings']
glove = np.vstack([glove, [([0.0] * params['dim'])]])
variable = tf.Variable(glove, dtype=tf.float32, trainable=False)
embeddings = tf.nn.embedding_lookup(variable, word_ids)
dropout = params['dropout']
embeddings = tf.layers.dropout(embeddings, rate=dropout, training=training)
time_major = tf.transpose(embeddings, perm=[1, 0, 2])
lstm_cell_fw = tf.contrib.rnn.LSTMBlockFusedCell(params['lstm_size'])
lstm_cell_bw = tf.contrib.rnn.LSTMBlockFusedCell(params['lstm_size'])
lstm_cell_bw = tf.contrib.rnn.TimeReversedFusedRNN(lstm_cell_bw)
'\n Any LSTM Cell returns two things: Cell Output (h) and Cell State (c)\n\n Following this, lstm_fw or lstm_bw each return a pair containing:\n\n Cell Output: A 3-D tensor of shape [time_len, batch_size, output_size]\n Final state: a tuple (cell_state, output) produced by the last LSTM Cell in the sequence.\n\n '
(output_fw, _) = lstm_cell_fw(time_major, dtype=tf.float32, sequence_length=nwords)
(output_bw, _) = lstm_cell_bw(time_major, dtype=tf.float32, sequence_length=nwords)
output = tf.concat([output_fw, output_bw], axis=(- 1))
output = tf.transpose(output, perm=[1, 0, 2])
output = tf.layers.dropout(output, rate=dropout, training=training)
logits = tf.layers.dense(output, num_tags)
crf_params = tf.get_variable('crf', shape=[num_tags, num_tags], dtype=tf.float32)
(pred_ids, _) = tf.contrib.crf.crf_decode(logits, crf_params, nwords)
if (mode == tf.estimator.ModeKeys.PREDICT):
reverse_vocab_tags = tf.contrib.lookup.index_to_string_table_from_file(params['vocab_tags_file'])
pred_strings = reverse_vocab_tags.lookup(tf.to_int64(pred_ids))
predictions = {'pred_ids': pred_ids, 'tags': pred_strings}
return tf.estimator.EstimatorSpec(mode, predictions=predictions)
vocab_tags = tf.contrib.lookup.index_table_from_file(params['vocab_tags_file'])
label_ids = vocab_tags.lookup(labels)
'\n logits are the same thing as unary potentials,\n checkout https://guillaumegenthial.github.io/sequence-tagging-with-tensorflow.html look for scores s[i]\n '
(log_likelihood, _) = tf.contrib.crf.crf_log_likelihood(logits, label_ids, nwords, crf_params)
loss = tf.reduce_mean((- log_likelihood))
weights = tf.sequence_mask(nwords)
metrics = {'acc': tf.metrics.accuracy(label_ids, pred_ids, weights), 'precision': precision(label_ids, pred_ids, num_tags, indices, weights), 'recall': recall(label_ids, pred_ids, num_tags, indices, weights), 'f1': f1(label_ids, pred_ids, num_tags, indices, weights)}
for (metric_name, op) in metrics.items():
tf.summary.scalar(metric_name, op[1])
if (mode == tf.estimator.ModeKeys.EVAL):
return tf.estimator.EstimatorSpec(mode, loss=loss, eval_metric_ops=metrics)
elif (mode == tf.estimator.ModeKeys.TRAIN):
train_op = tf.train.AdamOptimizer().minimize(loss, global_step=tf.train.get_or_create_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
|
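To show how the three rows above fit together, here is a minimal wiring sketch (TensorFlow 1.x Estimator API; every path and hyperparameter value is an assumption for illustration, not a value from the repository):

```python
import functools
import tensorflow as tf  # TF 1.x assumed; model_fn relies on tf.contrib

# Hypothetical hyperparameters; the keys match what model_fn reads above.
params = {
    'vocab_words_file': 'data/vocab.words.txt',  # one word per line
    'vocab_tags_file': 'data/vocab.tags.txt',    # one tag per line
    'num_oov_buckets': 1,
    'glove': 'data/glove.npz',  # np.savez(..., embeddings=...) archive
    'dim': 300,
    'dropout': 0.5,
    'lstm_size': 100,
    'buffer': 15000,
    'epochs': 25,
    'batch_size': 20,
}

estimator = tf.estimator.Estimator(model_fn, model_dir='results/model',
                                   params=params)
train_inpf = functools.partial(input_fn, 'data/train.words.txt',
                               'data/train.tags.txt', params,
                               shuffle_and_repeat=True)
estimator.train(train_inpf)
```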
def __virtual__():
'\n Load this state if the reg module exists\n '
if ('reg.read_value' not in __utils__):
return (False, 'reg state module failed to load: missing module function: reg.read_value')
if ('reg.set_value' not in __utils__):
return (False, 'reg state module failed to load: missing module function: reg.set_value')
if ('reg.delete_value' not in __utils__):
return (False, 'reg state module failed to load: missing module function: reg.delete_value')
if ('reg.delete_key_recursive' not in __utils__):
return (False, 'reg state module failed to load: missing module function: reg.delete_key_recursive')
return 'reg'
| 8,883,516,520,131,150,000
|
Load this state if the reg module exists
|
salt/states/reg.py
|
__virtual__
|
Feeeenng/salt
|
python
|
def __virtual__():
'\n \n '
if ('reg.read_value' not in __utils__):
return (False, 'reg state module failed to load: missing module function: reg.read_value')
if ('reg.set_value' not in __utils__):
return (False, 'reg state module failed to load: missing module function: reg.set_value')
if ('reg.delete_value' not in __utils__):
return (False, 'reg state module failed to load: missing module function: reg.delete_value')
if ('reg.delete_key_recursive' not in __utils__):
return (False, 'reg state module failed to load: missing module function: reg.delete_key_recursive')
return 'reg'
|
def _parse_key(key):
'\n split the hive from the key\n '
splt = key.split('\\')
hive = splt.pop(0)
key = '\\'.join(splt)
return (hive, key)
| -1,644,809,154,807,784,400
|
split the hive from the key
|
salt/states/reg.py
|
_parse_key
|
Feeeenng/salt
|
python
|
def _parse_key(key):
'\n \n '
splt = key.split('\\')
hive = splt.pop(0)
key = '\\'.join(splt)
return (hive, key)
|
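For example, using the hive/key convention from the docstrings in this file:

```python
hive, key = _parse_key('HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt')
assert hive == 'HKEY_LOCAL_MACHINE'
assert key == 'SOFTWARE\\Salt'
```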
def present(name, vname=None, vdata=None, vtype='REG_SZ', use_32bit_registry=False):
"\n Ensure a registry key or value is present.\n\n :param str name: A string value representing the full path of the key to\n include the HIVE, Key, and all Subkeys. For example:\n\n ``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt``\n\n Valid hive values include:\n - HKEY_CURRENT_USER or HKCU\n - HKEY_LOCAL_MACHINE or HKLM\n - HKEY_USERS or HKU\n\n :param str vname: The name of the value you'd like to create beneath the\n Key. If this parameter is not passed it will assume you want to set the\n (Default) value\n\n :param str vdata: The value you'd like to set. If a value name (vname) is\n passed, this will be the data for that value name. If not, this will be the\n (Default) value for the key.\n\n The type for the (Default) value is always REG_SZ and cannot be changed.\n This parameter is optional. If not passed, the Key will be created with no\n associated item/value pairs.\n\n :param str vtype: The value type for the data you wish to store in the\n registry. Valid values are:\n\n - REG_BINARY\n - REG_DWORD\n - REG_EXPAND_SZ\n - REG_MULTI_SZ\n - REG_SZ (Default)\n\n :param bool use_32bit_registry: Use the 32bit portion of the registry.\n Applies only to 64bit windows. 32bit Windows will ignore this parameter.\n Default is False.\n\n :return: Returns a dictionary showing the results of the registry operation.\n :rtype: dict\n\n The following example will set the ``(Default)`` value for the\n ``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to ``2016.3.1``:\n\n Example:\n\n .. code-block:: yaml\n\n HKEY_CURRENT_USER\\SOFTWARE\\Salt:\n reg.present:\n - vdata: 2016.3.1\n\n The following example will set the value for the ``version`` entry under the\n ``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to ``2016.3.1``. The\n value will be reflected in ``Wow6432Node``:\n\n Example:\n\n .. code-block:: yaml\n\n HKEY_CURRENT_USER\\SOFTWARE\\Salt:\n reg.present:\n - vname: version\n - vdata: 2016.3.1\n\n In the above example the path is interpreted as follows:\n - ``HKEY_CURRENT_USER`` is the hive\n - ``SOFTWARE\\Salt`` is the key\n - ``vname`` is the value name ('version') that will be created under the key\n - ``vdata`` is the data that will be assigned to 'version'\n "
ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''}
(hive, key) = _parse_key(name)
reg_current = __utils__['reg.read_value'](hive=hive, key=key, vname=vname, use_32bit_registry=use_32bit_registry)
if ((vdata == reg_current['vdata']) and reg_current['success']):
ret['comment'] = '{0} in {1} is already configured'.format((salt.utils.stringutils.to_unicode(vname, 'utf-8') if vname else '(Default)'), salt.utils.stringutils.to_unicode(name, 'utf-8'))
return ret
vdata_decoded = __utils__['reg.cast_vdata'](vdata=vdata, vtype=vtype)
add_change = {'Key': '{0}\\{1}'.format(hive, key), 'Entry': '{0}'.format((salt.utils.stringutils.to_unicode(vname, 'utf-8') if vname else '(Default)')), 'Value': vdata_decoded}
if __opts__['test']:
ret['result'] = None
ret['changes'] = {'reg': {'Will add': add_change}}
return ret
ret['result'] = __utils__['reg.set_value'](hive=hive, key=key, vname=vname, vdata=vdata, vtype=vtype, use_32bit_registry=use_32bit_registry)
if (not ret['result']):
ret['changes'] = {}
ret['comment'] = 'Failed to add {0} to {1}\\{2}'.format(name, hive, key)
else:
ret['changes'] = {'reg': {'Added': add_change}}
ret['comment'] = 'Added {0} to {1}\\{2}'.format(name, hive, key)
return ret
| -3,619,361,012,449,428,500
|
Ensure a registry key or value is present.
:param str name: A string value representing the full path of the key to
include the HIVE, Key, and all Subkeys. For example:
``HKEY_LOCAL_MACHINE\SOFTWARE\Salt``
Valid hive values include:
- HKEY_CURRENT_USER or HKCU
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_USERS or HKU
:param str vname: The name of the value you'd like to create beneath the
Key. If this parameter is not passed it will assume you want to set the
(Default) value
:param str vdata: The value you'd like to set. If a value name (vname) is
passed, this will be the data for that value name. If not, this will be the
(Default) value for the key.
The type for the (Default) value is always REG_SZ and cannot be changed.
This parameter is optional. If not passed, the Key will be created with no
associated item/value pairs.
:param str vtype: The value type for the data you wish to store in the
registry. Valid values are:
- REG_BINARY
- REG_DWORD
- REG_EXPAND_SZ
- REG_MULTI_SZ
- REG_SZ (Default)
:param bool use_32bit_registry: Use the 32bit portion of the registry.
Applies only to 64bit windows. 32bit Windows will ignore this parameter.
Default is False.
:return: Returns a dictionary showing the results of the registry operation.
:rtype: dict
The following example will set the ``(Default)`` value for the
``SOFTWARE\Salt`` key in the ``HKEY_CURRENT_USER`` hive to ``2016.3.1``:
Example:
.. code-block:: yaml
HKEY_CURRENT_USER\SOFTWARE\Salt:
reg.present:
- vdata: 2016.3.1
The following example will set the value for the ``version`` entry under the
``SOFTWARE\Salt`` key in the ``HKEY_CURRENT_USER`` hive to ``2016.3.1``. The
value will be reflected in ``Wow6432Node``:
Example:
.. code-block:: yaml
HKEY_CURRENT_USER\SOFTWARE\Salt:
reg.present:
- vname: version
- vdata: 2016.3.1
In the above example the path is interpreted as follows:
- ``HKEY_CURRENT_USER`` is the hive
- ``SOFTWARE\Salt`` is the key
- ``vname`` is the value name ('version') that will be created under the key
- ``vdata`` is the data that will be assigned to 'version'
|
salt/states/reg.py
|
present
|
Feeeenng/salt
|
python
|
def present(name, vname=None, vdata=None, vtype='REG_SZ', use_32bit_registry=False):
"\n Ensure a registry key or value is present.\n\n :param str name: A string value representing the full path of the key to\n include the HIVE, Key, and all Subkeys. For example:\n\n ``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt``\n\n Valid hive values include:\n - HKEY_CURRENT_USER or HKCU\n - HKEY_LOCAL_MACHINE or HKLM\n - HKEY_USERS or HKU\n\n :param str vname: The name of the value you'd like to create beneath the\n Key. If this parameter is not passed it will assume you want to set the\n (Default) value\n\n :param str vdata: The value you'd like to set. If a value name (vname) is\n passed, this will be the data for that value name. If not, this will be the\n (Default) value for the key.\n\n The type for the (Default) value is always REG_SZ and cannot be changed.\n This parameter is optional. If not passed, the Key will be created with no\n associated item/value pairs.\n\n :param str vtype: The value type for the data you wish to store in the\n registry. Valid values are:\n\n - REG_BINARY\n - REG_DWORD\n - REG_EXPAND_SZ\n - REG_MULTI_SZ\n - REG_SZ (Default)\n\n :param bool use_32bit_registry: Use the 32bit portion of the registry.\n Applies only to 64bit windows. 32bit Windows will ignore this parameter.\n Default is False.\n\n :return: Returns a dictionary showing the results of the registry operation.\n :rtype: dict\n\n The following example will set the ``(Default)`` value for the\n ``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to ``2016.3.1``:\n\n Example:\n\n .. code-block:: yaml\n\n HKEY_CURRENT_USER\\SOFTWARE\\Salt:\n reg.present:\n - vdata: 2016.3.1\n\n The following example will set the value for the ``version`` entry under the\n ``SOFTWARE\\Salt`` key in the ``HKEY_CURRENT_USER`` hive to ``2016.3.1``. The\n value will be reflected in ``Wow6432Node``:\n\n Example:\n\n .. code-block:: yaml\n\n HKEY_CURRENT_USER\\SOFTWARE\\Salt:\n reg.present:\n - vname: version\n - vdata: 2016.3.1\n\n In the above example the path is interpreted as follows:\n - ``HKEY_CURRENT_USER`` is the hive\n - ``SOFTWARE\\Salt`` is the key\n - ``vname`` is the value name ('version') that will be created under the key\n - ``vdata`` is the data that will be assigned to 'version'\n "
ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''}
(hive, key) = _parse_key(name)
reg_current = __utils__['reg.read_value'](hive=hive, key=key, vname=vname, use_32bit_registry=use_32bit_registry)
if ((vdata == reg_current['vdata']) and reg_current['success']):
ret['comment'] = '{0} in {1} is already configured'.format((salt.utils.stringutils.to_unicode(vname, 'utf-8') if vname else '(Default)'), salt.utils.stringutils.to_unicode(name, 'utf-8'))
return ret
vdata_decoded = __utils__['reg.cast_vdata'](vdata=vdata, vtype=vtype)
add_change = {'Key': '{0}\\{1}'.format(hive, key), 'Entry': '{0}'.format((salt.utils.stringutils.to_unicode(vname, 'utf-8') if vname else '(Default)')), 'Value': vdata_decoded}
if __opts__['test']:
ret['result'] = None
ret['changes'] = {'reg': {'Will add': add_change}}
return ret
ret['result'] = __utils__['reg.set_value'](hive=hive, key=key, vname=vname, vdata=vdata, vtype=vtype, use_32bit_registry=use_32bit_registry)
if (not ret['result']):
ret['changes'] = {}
ret['comment'] = 'Failed to add {0} to {1}\\{2}'.format(name, hive, key)
else:
ret['changes'] = {'reg': {'Added': add_change}}
ret['comment'] = 'Added {0} to {1}\\{2}'.format(name, hive, key)
return ret
|
def absent(name, vname=None, use_32bit_registry=False):
"\n Ensure a registry value is removed. To remove a key use key_absent.\n\n :param str name: A string value representing the full path of the key to\n include the HIVE, Key, and all Subkeys. For example:\n\n ``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt``\n\n Valid hive values include:\n\n - HKEY_CURRENT_USER or HKCU\n - HKEY_LOCAL_MACHINE or HKLM\n - HKEY_USERS or HKU\n\n :param str vname: The name of the value you'd like to create beneath the\n Key. If this parameter is not passed it will assume you want to set the\n (Default) value\n\n :param bool use_32bit_registry: Use the 32bit portion of the registry.\n Applies only to 64bit windows. 32bit Windows will ignore this parameter.\n Default is False.\n\n :return: Returns a dictionary showing the results of the registry operation.\n :rtype: dict\n\n CLI Example:\n\n .. code-block:: yaml\n\n 'HKEY_CURRENT_USER\\SOFTWARE\\Salt':\n reg.absent\n - vname: version\n\n In the above example the value named ``version`` will be removed from\n the SOFTWARE\\Salt key in the HKEY_CURRENT_USER hive. If ``vname`` was not\n passed, the (Default) value would be deleted.\n "
ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''}
(hive, key) = _parse_key(name)
reg_check = __utils__['reg.read_value'](hive=hive, key=key, vname=vname, use_32bit_registry=use_32bit_registry)
if ((not reg_check['success']) or (reg_check['vdata'] == '(value not set)')):
ret['comment'] = '{0} is already absent'.format(name)
return ret
remove_change = {'Key': '{0}\\{1}'.format(hive, key), 'Entry': '{0}'.format((vname if vname else '(Default)'))}
if __opts__['test']:
ret['result'] = None
ret['changes'] = {'reg': {'Will remove': remove_change}}
return ret
ret['result'] = __utils__['reg.delete_value'](hive=hive, key=key, vname=vname, use_32bit_registry=use_32bit_registry)
if (not ret['result']):
ret['changes'] = {}
ret['comment'] = 'Failed to remove {0} from {1}'.format(key, hive)
else:
ret['changes'] = {'reg': {'Removed': remove_change}}
ret['comment'] = 'Removed {0} from {1}'.format(key, hive)
return ret
| 8,684,500,273,568,656,000
|
Ensure a registry value is removed. To remove a key use key_absent.
:param str name: A string value representing the full path of the key to
include the HIVE, Key, and all Subkeys. For example:
``HKEY_LOCAL_MACHINE\SOFTWARE\Salt``
Valid hive values include:
- HKEY_CURRENT_USER or HKCU
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_USERS or HKU
:param str vname: The name of the value you'd like to create beneath the
Key. If this parameter is not passed it will assume you want to set the
(Default) value
:param bool use_32bit_registry: Use the 32bit portion of the registry.
Applies only to 64bit windows. 32bit Windows will ignore this parameter.
Default is False.
:return: Returns a dictionary showing the results of the registry operation.
:rtype: dict
CLI Example:
.. code-block:: yaml
'HKEY_CURRENT_USER\SOFTWARE\Salt':
reg.absent
- vname: version
In the above example the value named ``version`` will be removed from
the SOFTWARE\Salt key in the HKEY_CURRENT_USER hive. If ``vname`` was not
passed, the (Default) value would be deleted.
|
salt/states/reg.py
|
absent
|
Feeeenng/salt
|
python
|
def absent(name, vname=None, use_32bit_registry=False):
"\n Ensure a registry value is removed. To remove a key use key_absent.\n\n :param str name: A string value representing the full path of the key to\n include the HIVE, Key, and all Subkeys. For example:\n\n ``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt``\n\n Valid hive values include:\n\n - HKEY_CURRENT_USER or HKCU\n - HKEY_LOCAL_MACHINE or HKLM\n - HKEY_USERS or HKU\n\n :param str vname: The name of the value you'd like to create beneath the\n Key. If this parameter is not passed it will assume you want to set the\n (Default) value\n\n :param bool use_32bit_registry: Use the 32bit portion of the registry.\n Applies only to 64bit windows. 32bit Windows will ignore this parameter.\n Default is False.\n\n :return: Returns a dictionary showing the results of the registry operation.\n :rtype: dict\n\n CLI Example:\n\n .. code-block:: yaml\n\n 'HKEY_CURRENT_USER\\SOFTWARE\\Salt':\n reg.absent\n - vname: version\n\n In the above example the value named ``version`` will be removed from\n the SOFTWARE\\Salt key in the HKEY_CURRENT_USER hive. If ``vname`` was not\n passed, the (Default) value would be deleted.\n "
ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''}
(hive, key) = _parse_key(name)
reg_check = __utils__['reg.read_value'](hive=hive, key=key, vname=vname, use_32bit_registry=use_32bit_registry)
if ((not reg_check['success']) or (reg_check['vdata'] == '(value not set)')):
ret['comment'] = '{0} is already absent'.format(name)
return ret
remove_change = {'Key': '{0}\\{1}'.format(hive, key), 'Entry': '{0}'.format((vname if vname else '(Default)'))}
if __opts__['test']:
ret['result'] = None
ret['changes'] = {'reg': {'Will remove': remove_change}}
return ret
ret['result'] = __utils__['reg.delete_value'](hive=hive, key=key, vname=vname, use_32bit_registry=use_32bit_registry)
if (not ret['result']):
ret['changes'] = {}
ret['comment'] = 'Failed to remove {0} from {1}'.format(key, hive)
else:
ret['changes'] = {'reg': {'Removed': remove_change}}
ret['comment'] = 'Removed {0} from {1}'.format(key, hive)
return ret
|
def key_absent(name, use_32bit_registry=False):
"\n .. versionadded:: 2015.5.4\n\n Ensure a registry key is removed. This will remove a key and all value\n entries it contains. It will fail if the key contains subkeys.\n\n :param str name: A string representing the full path to the key to be\n removed to include the hive and the keypath. The hive can be any of the\n following:\n\n - HKEY_LOCAL_MACHINE or HKLM\n - HKEY_CURRENT_USER or HKCU\n - HKEY_USER or HKU\n\n :param bool use_32bit_registry: Use the 32bit portion of the registry.\n Applies only to 64bit windows. 32bit Windows will ignore this parameter.\n Default is False.\n\n :return: Returns a dictionary showing the results of the registry operation.\n :rtype: dict\n\n The following example will delete the ``SOFTWARE\\Salt`` key and all subkeys\n under the ``HKEY_CURRENT_USER`` hive.\n\n Example:\n\n .. code-block:: yaml\n\n 'HKEY_CURRENT_USER\\SOFTWARE\\Salt':\n reg.key_absent:\n - force: True\n\n In the above example the path is interpreted as follows:\n\n - ``HKEY_CURRENT_USER`` is the hive\n - ``SOFTWARE\\Salt`` is the key\n "
ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''}
(hive, key) = _parse_key(name)
if (not __utils__['reg.read_value'](hive=hive, key=key, use_32bit_registry=use_32bit_registry)['success']):
ret['comment'] = '{0} is already absent'.format(name)
return ret
ret['changes'] = {'reg': {'Removed': {'Key': '{0}\\{1}'.format(hive, key)}}}
if __opts__['test']:
ret['result'] = None
return ret
__utils__['reg.delete_key_recursive'](hive=hive, key=key, use_32bit_registry=use_32bit_registry)
if __utils__['reg.read_value'](hive=hive, key=key, use_32bit_registry=use_32bit_registry)['success']:
ret['result'] = False
ret['changes'] = {}
ret['comment'] = 'Failed to remove registry key {0}'.format(name)
return ret
| 7,864,107,645,527,735,000
|
.. versionadded:: 2015.5.4
Ensure a registry key is removed. This will remove a key and all value
entries it contains. It will fail if the key contains subkeys.
:param str name: A string representing the full path to the key to be
removed to include the hive and the keypath. The hive can be any of the
following:
- HKEY_LOCAL_MACHINE or HKLM
- HKEY_CURRENT_USER or HKCU
- HKEY_USER or HKU
:param bool use_32bit_registry: Use the 32bit portion of the registry.
Applies only to 64bit windows. 32bit Windows will ignore this parameter.
Default is False.
:return: Returns a dictionary showing the results of the registry operation.
:rtype: dict
The following example will delete the ``SOFTWARE\Salt`` key and all subkeys
under the ``HKEY_CURRENT_USER`` hive.
Example:
.. code-block:: yaml
'HKEY_CURRENT_USER\SOFTWARE\Salt':
reg.key_absent:
- force: True
In the above example the path is interpreted as follows:
- ``HKEY_CURRENT_USER`` is the hive
- ``SOFTWARE\Salt`` is the key
|
salt/states/reg.py
|
key_absent
|
Feeeenng/salt
|
python
|
def key_absent(name, use_32bit_registry=False):
"\n .. versionadded:: 2015.5.4\n\n Ensure a registry key is removed. This will remove a key and all value\n entries it contains. It will fail if the key contains subkeys.\n\n :param str name: A string representing the full path to the key to be\n removed to include the hive and the keypath. The hive can be any of the\n following:\n\n - HKEY_LOCAL_MACHINE or HKLM\n - HKEY_CURRENT_USER or HKCU\n - HKEY_USER or HKU\n\n :param bool use_32bit_registry: Use the 32bit portion of the registry.\n Applies only to 64bit windows. 32bit Windows will ignore this parameter.\n Default is False.\n\n :return: Returns a dictionary showing the results of the registry operation.\n :rtype: dict\n\n The following example will delete the ``SOFTWARE\\Salt`` key and all subkeys\n under the ``HKEY_CURRENT_USER`` hive.\n\n Example:\n\n .. code-block:: yaml\n\n 'HKEY_CURRENT_USER\\SOFTWARE\\Salt':\n reg.key_absent:\n - force: True\n\n In the above example the path is interpreted as follows:\n\n - ``HKEY_CURRENT_USER`` is the hive\n - ``SOFTWARE\\Salt`` is the key\n "
ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''}
(hive, key) = _parse_key(name)
if (not __utils__['reg.read_value'](hive=hive, key=key, use_32bit_registry=use_32bit_registry)['success']):
ret['comment'] = '{0} is already absent'.format(name)
return ret
ret['changes'] = {'reg': {'Removed': {'Key': '{0}\\{1}'.format(hive, key)}}}
if __opts__['test']:
ret['result'] = None
return ret
__utils__['reg.delete_key_recursive'](hive=hive, key=key, use_32bit_registry=use_32bit_registry)
if __utils__['reg.read_value'](hive=hive, key=key, use_32bit_registry=use_32bit_registry)['success']:
ret['result'] = False
ret['changes'] = {}
ret['comment'] = 'Failed to remove registry key {0}'.format(name)
return ret
|
def test_authenticate_username_superuser(self):
'Test to authenticate as superuser.'
self.user.is_superuser = True
self.user.validated_by_email = False
self.user.validated_by_manager = False
self.user.save()
backend = DakaraModelBackend()
self.assertEqual(backend.authenticate(MagicMock(), username='TestUser', password='pass'), self.user)
| 8,255,383,177,069,123,000
|
Test to authenticate as superuser.
|
dakara_server/users/tests/test_backends.py
|
test_authenticate_username_superuser
|
DakaraProject/dakara-server
|
python
|
def test_authenticate_username_superuser(self):
self.user.is_superuser = True
self.user.validated_by_email = False
self.user.validated_by_manager = False
self.user.save()
backend = DakaraModelBackend()
self.assertEqual(backend.authenticate(MagicMock(), username='TestUser', password='pass'), self.user)
|
def test_authenticate_username_not_active(self):
'Test to authenticate an inactive user.'
self.user.is_active = False
self.user.save()
backend = DakaraModelBackend()
self.assertIsNone(backend.authenticate(MagicMock(), username='TestUser', password='pass'))
| -5,009,936,732,395,484,000
|
Test to authenticate an inactive user.
|
dakara_server/users/tests/test_backends.py
|
test_authenticate_username_not_active
|
DakaraProject/dakara-server
|
python
|
def test_authenticate_username_not_active(self):
self.user.is_active = False
self.user.save()
backend = DakaraModelBackend()
self.assertIsNone(backend.authenticate(MagicMock(), username='TestUser', password='pass'))
|
def test_authenticate_username_not_validated_by_email(self):
'Test to authenticate when not validated by email.'
self.user.validated_by_email = False
self.user.validated_by_manager = True
self.user.save()
backend = DakaraModelBackend()
with self.assertRaisesRegex(ValidationError, 'This user email has not been validated'):
backend.authenticate(MagicMock(), username='TestUser', password='pass')
| 7,254,216,078,541,471,000
|
Test to authenticate when not validated by email.
|
dakara_server/users/tests/test_backends.py
|
test_authenticate_username_not_validated_by_email
|
DakaraProject/dakara-server
|
python
|
def test_authenticate_username_not_validated_by_email(self):
self.user.validated_by_email = False
self.user.validated_by_manager = True
self.user.save()
backend = DakaraModelBackend()
with self.assertRaisesRegex(ValidationError, 'This user email has not been validated'):
backend.authenticate(MagicMock(), username='TestUser', password='pass')
|
@config_email_disabled
def test_authenticate_username_not_validated_by_email_no_email(self):
'Test to authenticate when not validated by email and emails disabled.'
self.user.validated_by_email = False
self.user.validated_by_manager = True
self.user.save()
backend = DakaraModelBackend()
self.assertEqual(backend.authenticate(MagicMock(), username='TestUser', password='pass'), self.user)
| -7,744,552,453,736,005,000
|
Test to authenticate when not validated by email and emails disabled.
|
dakara_server/users/tests/test_backends.py
|
test_authenticate_username_not_validated_by_email_no_email
|
DakaraProject/dakara-server
|
python
|
@config_email_disabled
def test_authenticate_username_not_validated_by_email_no_email(self):
self.user.validated_by_email = False
self.user.validated_by_manager = True
self.user.save()
backend = DakaraModelBackend()
self.assertEqual(backend.authenticate(MagicMock(), username='TestUser', password='pass'), self.user)
|
def test_authenticate_username_not_validated_by_manager(self):
'Test to authenticate when not validated by manager.'
self.user.validated_by_email = True
self.user.validated_by_manager = False
self.user.save()
backend = DakaraModelBackend()
with self.assertRaisesRegex(ValidationError, 'This user account has not been validated by a manager'):
backend.authenticate(MagicMock(), username='TestUser', password='pass')
| 8,615,451,956,186,013,000
|
Test to authenticate when not validated by manager.
|
dakara_server/users/tests/test_backends.py
|
test_authenticate_username_not_validated_by_manager
|
DakaraProject/dakara-server
|
python
|
def test_authenticate_username_not_validated_by_manager(self):
self.user.validated_by_email = True
self.user.validated_by_manager = False
self.user.save()
backend = DakaraModelBackend()
with self.assertRaisesRegex(ValidationError, 'This user account has not been validated by a manager'):
backend.authenticate(MagicMock(), username='TestUser', password='pass')
|
def test_authenticate_username_ok(self):
'Test to authenticate.'
self.user.validated_by_email = True
self.user.validated_by_manager = True
self.user.save()
backend = DakaraModelBackend()
self.assertEqual(backend.authenticate(MagicMock(), username='TestUser', password='pass'), self.user)
| 6,897,010,371,461,581,000
|
Test to authenticate.
|
dakara_server/users/tests/test_backends.py
|
test_authenticate_username_ok
|
DakaraProject/dakara-server
|
python
|
def test_authenticate_username_ok(self):
self.user.validated_by_email = True
self.user.validated_by_manager = True
self.user.save()
backend = DakaraModelBackend()
self.assertEqual(backend.authenticate(MagicMock(), username='TestUser', password='pass'), self.user)
|
def new_admissions_chart(alt, projection_admits: pd.DataFrame, parameters: Parameters) -> Chart:
'docstring'
plot_projection_days = (parameters.n_days - 10)
max_y_axis = parameters.max_y_axis
as_date = parameters.as_date
y_scale = alt.Scale()
if (max_y_axis is not None):
y_scale.domain = (0, max_y_axis)
tooltip_dict = {False: 'day', True: 'date:T'}
if as_date:
projection_admits = add_date_column(projection_admits)
x_kwargs = {'shorthand': 'date:T', 'title': 'Date', 'axis': alt.Axis(format=DATE_FORMAT)}
else:
x_kwargs = {'shorthand': 'day', 'title': 'Days from today'}
ceiled_admits = projection_admits.copy()
ceiled_admits.hospitalized = np.ceil(ceiled_admits.hospitalized)
ceiled_admits.icu = np.ceil(ceiled_admits.icu)
ceiled_admits.ventilated = np.ceil(ceiled_admits.ventilated)
return alt.Chart(ceiled_admits.head(plot_projection_days)).transform_fold(fold=['hospitalized', 'icu', 'ventilated']).mark_line(point=True).encode(x=alt.X(**x_kwargs), y=alt.Y('value:Q', title='Daily admissions', scale=y_scale), color='key:N', tooltip=[tooltip_dict[as_date], alt.Tooltip('value:Q', format='.0f', title='Admissions'), 'key:N']).interactive()
| 8,149,229,652,189,678,000
|
docstring
|
src/penn_chime/charts.py
|
new_admissions_chart
|
degerli/chime-1
|
python
|
def new_admissions_chart(alt, projection_admits: pd.DataFrame, parameters: Parameters) -> Chart:
plot_projection_days = (parameters.n_days - 10)
max_y_axis = parameters.max_y_axis
as_date = parameters.as_date
y_scale = alt.Scale()
if (max_y_axis is not None):
y_scale.domain = (0, max_y_axis)
tooltip_dict = {False: 'day', True: 'date:T'}
if as_date:
projection_admits = add_date_column(projection_admits)
x_kwargs = {'shorthand': 'date:T', 'title': 'Date', 'axis': alt.Axis(format=DATE_FORMAT)}
else:
x_kwargs = {'shorthand': 'day', 'title': 'Days from today'}
ceiled_admits = projection_admits.copy()
ceiled_admits.hospitalized = np.ceil(ceiled_admits.hospitalized)
ceiled_admits.icu = np.ceil(ceiled_admits.icu)
ceiled_admits.ventilated = np.ceil(ceiled_admits.ventilated)
return alt.Chart(ceiled_admits.head(plot_projection_days)).transform_fold(fold=['hospitalized', 'icu', 'ventilated']).mark_line(point=True).encode(x=alt.X(**x_kwargs), y=alt.Y('value:Q', title='Daily admissions', scale=y_scale), color='key:N', tooltip=[tooltip_dict[as_date], alt.Tooltip('value:Q', format='.0f', title='Admissions'), 'key:N']).interactive()
|
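A hedged call sketch for the chart row above. The `SimpleNamespace` stands in for penn_chime's `Parameters` (only the three attributes the function reads are assumed), and `as_date=False` keeps the date helpers out of the path:

```python
from types import SimpleNamespace

import altair as alt
import numpy as np  # used inside new_admissions_chart
import pandas as pd

days = list(range(40))
projection_admits = pd.DataFrame({
    'day': days,
    'hospitalized': [0.3 * d for d in days],
    'icu': [0.1 * d for d in days],
    'ventilated': [0.05 * d for d in days],
})

# Stand-in for penn_chime's Parameters; attribute names assumed from the
# function body above.
parameters = SimpleNamespace(n_days=40, max_y_axis=None, as_date=False)
chart = new_admissions_chart(alt, projection_admits, parameters)
```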
def admitted_patients_chart(alt, census: pd.DataFrame, parameters: Parameters) -> Chart:
'docstring'
plot_projection_days = (parameters.n_days - 10)
max_y_axis = parameters.max_y_axis
as_date = parameters.as_date
if as_date:
census = add_date_column(census)
x_kwargs = {'shorthand': 'date:T', 'title': 'Date', 'axis': alt.Axis(format=DATE_FORMAT)}
idx = 'date:T'
else:
x_kwargs = {'shorthand': 'day', 'title': 'Days from today'}
idx = 'day'
y_scale = alt.Scale()
if max_y_axis:
y_scale.domain = (0, max_y_axis)
return alt.Chart(census.head(plot_projection_days)).transform_fold(fold=['hospitalized', 'icu', 'ventilated']).mark_line(point=True).encode(x=alt.X(**x_kwargs), y=alt.Y('value:Q', title='Census', scale=y_scale), color='key:N', tooltip=[idx, alt.Tooltip('value:Q', format='.0f', title='Census'), 'key:N']).interactive()
| 5,839,135,649,114,293,000
|
docstring
|
src/penn_chime/charts.py
|
admitted_patients_chart
|
degerli/chime-1
|
python
|
def admitted_patients_chart(alt, census: pd.DataFrame, parameters: Parameters) -> Chart:
plot_projection_days = (parameters.n_days - 10)
max_y_axis = parameters.max_y_axis
as_date = parameters.as_date
if as_date:
census = add_date_column(census)
x_kwargs = {'shorthand': 'date:T', 'title': 'Date', 'axis': alt.Axis(format=DATE_FORMAT)}
idx = 'date:T'
else:
x_kwargs = {'shorthand': 'day', 'title': 'Days from today'}
idx = 'day'
y_scale = alt.Scale()
if max_y_axis:
y_scale.domain = (0, max_y_axis)
return alt.Chart(census.head(plot_projection_days)).transform_fold(fold=['hospitalized', 'icu', 'ventilated']).mark_line(point=True).encode(x=alt.X(**x_kwargs), y=alt.Y('value:Q', title='Census', scale=y_scale), color='key:N', tooltip=[idx, alt.Tooltip('value:Q', format='.0f', title='Census'), 'key:N']).interactive()
|
def chart_descriptions(chart: Chart, labels, suffix: str=''):
'\n\n :param chart: Chart: The alt chart to be used in finding max points\n :param suffix: str: The assumption is that the charts have similar column names.\n The census chart adds " Census" to the column names.\n Make sure to include a space or underscore as appropriate\n :return: str: Returns a multi-line string description of the results\n '
messages = []
cols = ['hospitalized', 'icu', 'ventilated']
asterisk = False
day = ('date' if ('date' in chart.data.columns) else 'day')
for col in cols:
if ((chart.data[col].idxmax() + 1) == len(chart.data)):
asterisk = True
on = chart.data[day][chart.data[col].idxmax()]
if (day == 'date'):
on = datetime.datetime.strftime(on, '%b %d')
else:
on += 1
messages.append('{}{} peaks at {:,} on day {}{}'.format(labels[col], suffix, ceil(chart.data[col].max()), on, ('*' if asterisk else '')))
if asterisk:
messages.append('_* The max is at the upper bound of the data, and therefore may not be the actual max_')
return '\n\n'.join(messages)
| -3,031,882,789,968,356,000
|
:param chart: Chart: The alt chart to be used in finding max points
:param suffix: str: The assumption is that the charts have similar column names.
The census chart adds " Census" to the column names.
Make sure to include a space or underscore as appropriate
:return: str: Returns a multi-line string description of the results
|
src/penn_chime/charts.py
|
chart_descriptions
|
degerli/chime-1
|
python
|
def chart_descriptions(chart: Chart, labels, suffix: str=''):
'\n\n :param chart: Chart: The alt chart to be used in finding max points\n :param suffix: str: The assumption is that the charts have similar column names.\n The census chart adds " Census" to the column names.\n Make sure to include a space or underscore as appropriate\n :return: str: Returns a multi-line string description of the results\n '
messages = []
cols = ['hospitalized', 'icu', 'ventilated']
asterisk = False
day = ('date' if ('date' in chart.data.columns) else 'day')
for col in cols:
if ((chart.data[col].idxmax() + 1) == len(chart.data)):
asterisk = True
on = chart.data[day][chart.data[col].idxmax()]
if (day == 'date'):
on = datetime.datetime.strftime(on, '%b %d')
else:
on += 1
messages.append('{}{} peaks at {:,} on day {}{}'.format(labels[col], suffix, ceil(chart.data[col].max()), on, ('*' if asterisk else '')))
if asterisk:
messages.append('_* The max is at the upper bound of the data, and therefore may not be the actual max_')
return '\n\n'.join(messages)
|
@classmethod
def setup(cls: Type[Dataclass], arguments: Optional[str]='', dest: Optional[str]=None, default: Optional[Dataclass]=None, conflict_resolution_mode: ConflictResolution=ConflictResolution.AUTO, add_option_string_dash_variants: DashVariant=DashVariant.AUTO, parse_known_args: bool=False, attempt_to_reorder: bool=False, *, argument_generation_mode: ArgumentGenerationMode=ArgumentGenerationMode.FLAT, nested_mode: NestedMode=NestedMode.DEFAULT) -> Dataclass:
'Basic setup for a test.\n\n Keyword Arguments:\n arguments {Optional[str]} -- The arguments to pass to the parser (default: {""})\n dest {Optional[str]} -- the attribute where the argument should be stored. (default: {None})\n\n Returns:\n {cls}} -- the class\'s type.\n '
parser = simple_parsing.ArgumentParser(conflict_resolution=conflict_resolution_mode, add_option_string_dash_variants=add_option_string_dash_variants, argument_generation_mode=argument_generation_mode, nested_mode=nested_mode)
if (dest is None):
dest = camel_case(cls.__name__)
parser.add_arguments(cls, dest=dest, default=default)
if (arguments is None):
if parse_known_args:
args = parser.parse_known_args(attempt_to_reorder=attempt_to_reorder)
else:
args = parser.parse_args()
else:
splits = shlex.split(arguments)
if parse_known_args:
(args, unknown_args) = parser.parse_known_args(splits, attempt_to_reorder=attempt_to_reorder)
else:
args = parser.parse_args(splits)
assert hasattr(args, dest), f"attribute '{dest}' not found in args {args}"
instance: Dataclass = getattr(args, dest)
delattr(args, dest)
args_dict = vars(args).copy()
args_dict.pop('subgroups', None)
assert (not args_dict), f'Namespace has leftover garbage values (besides subgroups): {args}'
instance = cast(Dataclass, instance)
return instance
| 5,784,539,640,100,981,000
|
Basic setup for a test.
Keyword Arguments:
arguments {Optional[str]} -- The arguments to pass to the parser (default: {""})
dest {Optional[str]} -- the attribute where the argument should be stored. (default: {None})
Returns:
{cls}} -- the class's type.
|
test/testutils.py
|
setup
|
idoby/SimpleParsing
|
python
|
@classmethod
def setup(cls: Type[Dataclass], arguments: Optional[str]='', dest: Optional[str]=None, default: Optional[Dataclass]=None, conflict_resolution_mode: ConflictResolution=ConflictResolution.AUTO, add_option_string_dash_variants: DashVariant=DashVariant.AUTO, parse_known_args: bool=False, attempt_to_reorder: bool=False, *, argument_generation_mode: ArgumentGenerationMode=ArgumentGenerationMode.FLAT, nested_mode: NestedMode=NestedMode.DEFAULT) -> Dataclass:
'Basic setup for a test.\n\n Keyword Arguments:\n arguments {Optional[str]} -- The arguments to pass to the parser (default: {""})\n dest {Optional[str]} -- the attribute where the argument should be stored. (default: {None})\n\n Returns:\n {cls}} -- the class\'s type.\n '
parser = simple_parsing.ArgumentParser(conflict_resolution=conflict_resolution_mode, add_option_string_dash_variants=add_option_string_dash_variants, argument_generation_mode=argument_generation_mode, nested_mode=nested_mode)
if (dest is None):
dest = camel_case(cls.__name__)
parser.add_arguments(cls, dest=dest, default=default)
if (arguments is None):
if parse_known_args:
args = parser.parse_known_args(attempt_to_reorder=attempt_to_reorder)
else:
args = parser.parse_args()
else:
splits = shlex.split(arguments)
if parse_known_args:
(args, unknown_args) = parser.parse_known_args(splits, attempt_to_reorder=attempt_to_reorder)
else:
args = parser.parse_args(splits)
assert hasattr(args, dest), f"attribute '{dest}' not found in args {args}"
instance: Dataclass = getattr(args, dest)
delattr(args, dest)
args_dict = vars(args).copy()
args_dict.pop('subgroups', None)
assert (not args_dict), f'Namespace has leftover garbage values (besides subgroups): {args}'
instance = cast(Dataclass, instance)
return instance
|
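A usage sketch for the `setup` helper above, assuming it lives on a `TestSetup`-style mixin (the mixin name is an assumption; only the classmethod itself appears in this row):

```python
from dataclasses import dataclass

@dataclass
class Options(TestSetup):  # TestSetup is the assumed host of `setup`
    lr: float = 1e-3
    epochs: int = 10

# With the default FLAT argument generation, fields become plain flags.
options = Options.setup("--lr 0.01 --epochs 5")
assert options.lr == 0.01 and options.epochs == 5
```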
@classmethod
def required_components(cls) -> List[Type]:
'Components that should be included in the pipeline before this component.'
return [Featurizer]
| -3,653,919,952,852,145,000
|
Components that should be included in the pipeline before this component.
|
rasa/nlu/classifiers/diet_classifier.py
|
required_components
|
Adarshsng/rasa
|
python
|
@classmethod
def required_components(cls) -> List[Type]:
return [Featurizer]
|
@staticmethod
def get_default_config() -> Dict[(Text, Any)]:
"The component's default config (see parent class for full docstring)."
return {HIDDEN_LAYERS_SIZES: {TEXT: [], LABEL: []}, SHARE_HIDDEN_LAYERS: False, TRANSFORMER_SIZE: DEFAULT_TRANSFORMER_SIZE, NUM_TRANSFORMER_LAYERS: 2, NUM_HEADS: 4, KEY_RELATIVE_ATTENTION: False, VALUE_RELATIVE_ATTENTION: False, MAX_RELATIVE_POSITION: 5, UNIDIRECTIONAL_ENCODER: False, BATCH_SIZES: [64, 256], BATCH_STRATEGY: BALANCED, EPOCHS: 300, RANDOM_SEED: None, LEARNING_RATE: 0.001, EMBEDDING_DIMENSION: 20, DENSE_DIMENSION: {TEXT: 128, LABEL: 20}, CONCAT_DIMENSION: {TEXT: 128, LABEL: 20}, NUM_NEG: 20, SIMILARITY_TYPE: AUTO, LOSS_TYPE: CROSS_ENTROPY, RANKING_LENGTH: LABEL_RANKING_LENGTH, MAX_POS_SIM: 0.8, MAX_NEG_SIM: (- 0.4), USE_MAX_NEG_SIM: True, SCALE_LOSS: False, REGULARIZATION_CONSTANT: 0.002, NEGATIVE_MARGIN_SCALE: 0.8, DROP_RATE: 0.2, DROP_RATE_ATTENTION: 0, CONNECTION_DENSITY: 0.2, SPARSE_INPUT_DROPOUT: True, DENSE_INPUT_DROPOUT: True, EVAL_NUM_EPOCHS: 20, EVAL_NUM_EXAMPLES: 0, INTENT_CLASSIFICATION: True, ENTITY_RECOGNITION: True, MASKED_LM: False, BILOU_FLAG: True, TENSORBOARD_LOG_DIR: None, TENSORBOARD_LOG_LEVEL: 'epoch', CHECKPOINT_MODEL: False, FEATURIZERS: [], SPLIT_ENTITIES_BY_COMMA: True, CONSTRAIN_SIMILARITIES: False, MODEL_CONFIDENCE: SOFTMAX, RENORMALIZE_CONFIDENCES: False}
| -3,262,418,372,835,260,400
|
The component's default config (see parent class for full docstring).
|
rasa/nlu/classifiers/diet_classifier.py
|
get_default_config
|
Adarshsng/rasa
|
python
|
@staticmethod
def get_default_config() -> Dict[(Text, Any)]:
return {HIDDEN_LAYERS_SIZES: {TEXT: [], LABEL: []}, SHARE_HIDDEN_LAYERS: False, TRANSFORMER_SIZE: DEFAULT_TRANSFORMER_SIZE, NUM_TRANSFORMER_LAYERS: 2, NUM_HEADS: 4, KEY_RELATIVE_ATTENTION: False, VALUE_RELATIVE_ATTENTION: False, MAX_RELATIVE_POSITION: 5, UNIDIRECTIONAL_ENCODER: False, BATCH_SIZES: [64, 256], BATCH_STRATEGY: BALANCED, EPOCHS: 300, RANDOM_SEED: None, LEARNING_RATE: 0.001, EMBEDDING_DIMENSION: 20, DENSE_DIMENSION: {TEXT: 128, LABEL: 20}, CONCAT_DIMENSION: {TEXT: 128, LABEL: 20}, NUM_NEG: 20, SIMILARITY_TYPE: AUTO, LOSS_TYPE: CROSS_ENTROPY, RANKING_LENGTH: LABEL_RANKING_LENGTH, MAX_POS_SIM: 0.8, MAX_NEG_SIM: (- 0.4), USE_MAX_NEG_SIM: True, SCALE_LOSS: False, REGULARIZATION_CONSTANT: 0.002, NEGATIVE_MARGIN_SCALE: 0.8, DROP_RATE: 0.2, DROP_RATE_ATTENTION: 0, CONNECTION_DENSITY: 0.2, SPARSE_INPUT_DROPOUT: True, DENSE_INPUT_DROPOUT: True, EVAL_NUM_EPOCHS: 20, EVAL_NUM_EXAMPLES: 0, INTENT_CLASSIFICATION: True, ENTITY_RECOGNITION: True, MASKED_LM: False, BILOU_FLAG: True, TENSORBOARD_LOG_DIR: None, TENSORBOARD_LOG_LEVEL: 'epoch', CHECKPOINT_MODEL: False, FEATURIZERS: [], SPLIT_ENTITIES_BY_COMMA: True, CONSTRAIN_SIMILARITIES: False, MODEL_CONFIDENCE: SOFTMAX, RENORMALIZE_CONFIDENCES: False}
|
def __init__(self, config: Dict[(Text, Any)], model_storage: ModelStorage, resource: Resource, execution_context: ExecutionContext, index_label_id_mapping: Optional[Dict[(int, Text)]]=None, entity_tag_specs: Optional[List[EntityTagSpec]]=None, model: Optional[RasaModel]=None, sparse_feature_sizes: Optional[Dict[(Text, Dict[(Text, List[int])])]]=None) -> None:
'Declare instance variables with default values.'
if (EPOCHS not in config):
rasa.shared.utils.io.raise_warning(f"Please configure the number of '{EPOCHS}' in your configuration file. We will change the default value of '{EPOCHS}' in the future to 1. ")
self.component_config = config
self._model_storage = model_storage
self._resource = resource
self._execution_context = execution_context
self._check_config_parameters()
self.index_label_id_mapping = (index_label_id_mapping or {})
self._entity_tag_specs = entity_tag_specs
self.model = model
self.tmp_checkpoint_dir = None
if self.component_config[CHECKPOINT_MODEL]:
self.tmp_checkpoint_dir = Path(rasa.utils.io.create_temporary_directory())
self._label_data: Optional[RasaModelData] = None
self._data_example: Optional[Dict[(Text, Dict[(Text, List[FeatureArray])])]] = None
self.split_entities_config = rasa.utils.train_utils.init_split_entities(self.component_config[SPLIT_ENTITIES_BY_COMMA], SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE)
self.finetune_mode = self._execution_context.is_finetuning
self._sparse_feature_sizes = sparse_feature_sizes
| -106,568,300,991,616,510
|
Declare instance variables with default values.
|
rasa/nlu/classifiers/diet_classifier.py
|
__init__
|
Adarshsng/rasa
|
python
|
def __init__(self, config: Dict[(Text, Any)], model_storage: ModelStorage, resource: Resource, execution_context: ExecutionContext, index_label_id_mapping: Optional[Dict[(int, Text)]]=None, entity_tag_specs: Optional[List[EntityTagSpec]]=None, model: Optional[RasaModel]=None, sparse_feature_sizes: Optional[Dict[(Text, Dict[(Text, List[int])])]]=None) -> None:
if (EPOCHS not in config):
rasa.shared.utils.io.raise_warning(f"Please configure the number of '{EPOCHS}' in your configuration file. We will change the default value of '{EPOCHS}' in the future to 1. ")
self.component_config = config
self._model_storage = model_storage
self._resource = resource
self._execution_context = execution_context
self._check_config_parameters()
self.index_label_id_mapping = (index_label_id_mapping or {})
self._entity_tag_specs = entity_tag_specs
self.model = model
self.tmp_checkpoint_dir = None
if self.component_config[CHECKPOINT_MODEL]:
self.tmp_checkpoint_dir = Path(rasa.utils.io.create_temporary_directory())
self._label_data: Optional[RasaModelData] = None
self._data_example: Optional[Dict[(Text, Dict[(Text, List[FeatureArray])])]] = None
self.split_entities_config = rasa.utils.train_utils.init_split_entities(self.component_config[SPLIT_ENTITIES_BY_COMMA], SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE)
self.finetune_mode = self._execution_context.is_finetuning
self._sparse_feature_sizes = sparse_feature_sizes
|
@classmethod
def create(cls, config: Dict[(Text, Any)], model_storage: ModelStorage, resource: Resource, execution_context: ExecutionContext) -> DIETClassifier:
'Creates a new untrained component (see parent class for full docstring).'
return cls(config, model_storage, resource, execution_context)
| 3,003,322,216,744,029,000
|
Creates a new untrained component (see parent class for full docstring).
|
rasa/nlu/classifiers/diet_classifier.py
|
create
|
Adarshsng/rasa
|
python
|
@classmethod
def create(cls, config: Dict[(Text, Any)], model_storage: ModelStorage, resource: Resource, execution_context: ExecutionContext) -> DIETClassifier:
return cls(config, model_storage, resource, execution_context)
|
@property
def label_key(self) -> Optional[Text]:
'Return key if intent classification is activated.'
return (LABEL_KEY if self.component_config[INTENT_CLASSIFICATION] else None)
| 1,831,871,081,288,990,200
|
Return key if intent classification is activated.
|
rasa/nlu/classifiers/diet_classifier.py
|
label_key
|
Adarshsng/rasa
|
python
|
@property
def label_key(self) -> Optional[Text]:
return (LABEL_KEY if self.component_config[INTENT_CLASSIFICATION] else None)
|
@property
def label_sub_key(self) -> Optional[Text]:
'Return sub key if intent classification is activated.'
return (LABEL_SUB_KEY if self.component_config[INTENT_CLASSIFICATION] else None)
| 7,158,035,865,042,439,000
|
Return sub key if intent classification is activated.
|
rasa/nlu/classifiers/diet_classifier.py
|
label_sub_key
|
Adarshsng/rasa
|
python
|
@property
def label_sub_key(self) -> Optional[Text]:
return (LABEL_SUB_KEY if self.component_config[INTENT_CLASSIFICATION] else None)
|
@staticmethod
def _label_id_index_mapping(training_data: TrainingData, attribute: Text) -> Dict[(Text, int)]:
'Create label_id dictionary.'
distinct_label_ids = ({example.get(attribute) for example in training_data.intent_examples} - {None})
return {label_id: idx for (idx, label_id) in enumerate(sorted(distinct_label_ids))}
| 4,189,876,990,410,381,300
|
Create label_id dictionary.
|
rasa/nlu/classifiers/diet_classifier.py
|
_label_id_index_mapping
|
Adarshsng/rasa
|
python
|
@staticmethod
def _label_id_index_mapping(training_data: TrainingData, attribute: Text) -> Dict[(Text, int)]:
distinct_label_ids = ({example.get(attribute) for example in training_data.intent_examples} - {None})
return {label_id: idx for (idx, label_id) in enumerate(sorted(distinct_label_ids))}
|
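The mapping above sorts the distinct labels before enumerating them, so the assigned indices are deterministic across runs. A minimal standalone sketch (plain dicts stand in for `Message` objects here, purely for illustration):

    examples = [{'intent': 'greet'}, {'intent': 'goodbye'}, {'intent': 'greet'}, {'intent': None}]
    distinct_label_ids = ({e.get('intent') for e in examples} - {None})
    label_id_index_mapping = {label_id: idx for (idx, label_id) in enumerate(sorted(distinct_label_ids))}
    print(label_id_index_mapping)  # {'goodbye': 0, 'greet': 1}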
def _create_entity_tag_specs(self, training_data: TrainingData) -> List[EntityTagSpec]:
'Create entity tag specifications with their respective tag id mappings.'
_tag_specs = []
for tag_name in POSSIBLE_TAGS:
if self.component_config[BILOU_FLAG]:
tag_id_index_mapping = bilou_utils.build_tag_id_dict(training_data, tag_name)
else:
tag_id_index_mapping = self._tag_id_index_mapping_for(tag_name, training_data)
if tag_id_index_mapping:
_tag_specs.append(EntityTagSpec(tag_name=tag_name, tags_to_ids=tag_id_index_mapping, ids_to_tags=self._invert_mapping(tag_id_index_mapping), num_tags=len(tag_id_index_mapping)))
return _tag_specs
| -2,915,100,119,132,239,000
|
Create entity tag specifications with their respective tag id mappings.
|
rasa/nlu/classifiers/diet_classifier.py
|
_create_entity_tag_specs
|
Adarshsng/rasa
|
python
|
def _create_entity_tag_specs(self, training_data: TrainingData) -> List[EntityTagSpec]:
_tag_specs = []
for tag_name in POSSIBLE_TAGS:
if self.component_config[BILOU_FLAG]:
tag_id_index_mapping = bilou_utils.build_tag_id_dict(training_data, tag_name)
else:
tag_id_index_mapping = self._tag_id_index_mapping_for(tag_name, training_data)
if tag_id_index_mapping:
_tag_specs.append(EntityTagSpec(tag_name=tag_name, tags_to_ids=tag_id_index_mapping, ids_to_tags=self._invert_mapping(tag_id_index_mapping), num_tags=len(tag_id_index_mapping)))
return _tag_specs
|
@staticmethod
def _tag_id_index_mapping_for(tag_name: Text, training_data: TrainingData) -> Optional[Dict[(Text, int)]]:
'Create mapping from tag name to id.'
if (tag_name == ENTITY_ATTRIBUTE_ROLE):
distinct_tags = training_data.entity_roles
elif (tag_name == ENTITY_ATTRIBUTE_GROUP):
distinct_tags = training_data.entity_groups
else:
distinct_tags = training_data.entities
distinct_tags = ((distinct_tags - {NO_ENTITY_TAG}) - {None})
if (not distinct_tags):
return None
tag_id_dict = {tag_id: idx for (idx, tag_id) in enumerate(sorted(distinct_tags), 1)}
tag_id_dict[NO_ENTITY_TAG] = 0
return tag_id_dict
| -7,342,428,663,700,129,000
|
Create mapping from tag name to id.
|
rasa/nlu/classifiers/diet_classifier.py
|
_tag_id_index_mapping_for
|
Adarshsng/rasa
|
python
|
@staticmethod
def _tag_id_index_mapping_for(tag_name: Text, training_data: TrainingData) -> Optional[Dict[(Text, int)]]:
if (tag_name == ENTITY_ATTRIBUTE_ROLE):
distinct_tags = training_data.entity_roles
elif (tag_name == ENTITY_ATTRIBUTE_GROUP):
distinct_tags = training_data.entity_groups
else:
distinct_tags = training_data.entities
distinct_tags = ((distinct_tags - {NO_ENTITY_TAG}) - {None})
if (not distinct_tags):
return None
tag_id_dict = {tag_id: idx for (idx, tag_id) in enumerate(sorted(distinct_tags), 1)}
tag_id_dict[NO_ENTITY_TAG] = 0
return tag_id_dict
|
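Note that real tags are numbered from 1 so that index 0 stays reserved for the no-entity tag. A minimal sketch, assuming `NO_ENTITY_TAG` is the string 'O' as in rasa's shared constants:

    NO_ENTITY_TAG = 'O'
    distinct_tags = {'city', 'name'}
    tag_id_dict = {tag_id: idx for (idx, tag_id) in enumerate(sorted(distinct_tags), 1)}
    tag_id_dict[NO_ENTITY_TAG] = 0
    print(tag_id_dict)  # {'city': 1, 'name': 2, 'O': 0}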
def _check_labels_features_exist(self, labels_example: List[Message], attribute: Text) -> bool:
'Checks if all labels have features set.'
return all((label_example.features_present(attribute, self.component_config[FEATURIZERS]) for label_example in labels_example))
| 7,485,533,832,181,726,000
|
Checks if all labels have features set.
|
rasa/nlu/classifiers/diet_classifier.py
|
_check_labels_features_exist
|
Adarshsng/rasa
|
python
|
def _check_labels_features_exist(self, labels_example: List[Message], attribute: Text) -> bool:
return all((label_example.features_present(attribute, self.component_config[FEATURIZERS]) for label_example in labels_example))
|
def _check_input_dimension_consistency(self, model_data: RasaModelData) -> None:
'Checks if features have same dimensionality if hidden layers are shared.'
if self.component_config.get(SHARE_HIDDEN_LAYERS):
num_text_sentence_features = model_data.number_of_units(TEXT, SENTENCE)
num_label_sentence_features = model_data.number_of_units(LABEL, SENTENCE)
num_text_sequence_features = model_data.number_of_units(TEXT, SEQUENCE)
num_label_sequence_features = model_data.number_of_units(LABEL, SEQUENCE)
if ((0 < num_text_sentence_features != num_label_sentence_features > 0) or (0 < num_text_sequence_features != num_label_sequence_features > 0)):
raise ValueError('If embeddings are shared text features and label features must coincide. Check the output dimensions of previous components.')
| -2,929,522,057,481,884,700
|
Checks if features have same dimensionality if hidden layers are shared.
|
rasa/nlu/classifiers/diet_classifier.py
|
_check_input_dimension_consistency
|
Adarshsng/rasa
|
python
|
def _check_input_dimension_consistency(self, model_data: RasaModelData) -> None:
if self.component_config.get(SHARE_HIDDEN_LAYERS):
num_text_sentence_features = model_data.number_of_units(TEXT, SENTENCE)
num_label_sentence_features = model_data.number_of_units(LABEL, SENTENCE)
num_text_sequence_features = model_data.number_of_units(TEXT, SEQUENCE)
num_label_sequence_features = model_data.number_of_units(LABEL, SEQUENCE)
if ((0 < num_text_sentence_features != num_label_sentence_features > 0) or (0 < num_text_sequence_features != num_label_sequence_features > 0)):
raise ValueError('If embeddings are shared text features and label features must coincide. Check the output dimensions of previous components.')
|
def _extract_labels_precomputed_features(self, label_examples: List[Message], attribute: Text=INTENT) -> Tuple[(List[FeatureArray], List[FeatureArray])]:
'Collects precomputed encodings.'
features = defaultdict(list)
for e in label_examples:
label_features = self._extract_features(e, attribute)
for (feature_key, feature_value) in label_features.items():
features[feature_key].append(feature_value)
sequence_features = []
sentence_features = []
for (feature_name, feature_value) in features.items():
if (SEQUENCE in feature_name):
sequence_features.append(FeatureArray(np.array(feature_value), number_of_dimensions=3))
else:
sentence_features.append(FeatureArray(np.array(feature_value), number_of_dimensions=3))
return (sequence_features, sentence_features)
| -1,011,031,399,489,427,300
|
Collects precomputed encodings.
|
rasa/nlu/classifiers/diet_classifier.py
|
_extract_labels_precomputed_features
|
Adarshsng/rasa
|
python
|
def _extract_labels_precomputed_features(self, label_examples: List[Message], attribute: Text=INTENT) -> Tuple[(List[FeatureArray], List[FeatureArray])]:
features = defaultdict(list)
for e in label_examples:
label_features = self._extract_features(e, attribute)
for (feature_key, feature_value) in label_features.items():
features[feature_key].append(feature_value)
sequence_features = []
sentence_features = []
for (feature_name, feature_value) in features.items():
if (SEQUENCE in feature_name):
sequence_features.append(FeatureArray(np.array(feature_value), number_of_dimensions=3))
else:
sentence_features.append(FeatureArray(np.array(feature_value), number_of_dimensions=3))
return (sequence_features, sentence_features)
|
@staticmethod
def _compute_default_label_features(labels_example: List[Message]) -> List[FeatureArray]:
'Computes one-hot representation for the labels.'
logger.debug('No label features found. Computing default label features.')
eye_matrix = np.eye(len(labels_example), dtype=np.float32)
return [FeatureArray(np.array([np.expand_dims(a, 0) for a in eye_matrix]), number_of_dimensions=3)]
| 8,457,130,274,428,411,000
|
Computes one-hot representation for the labels.
|
rasa/nlu/classifiers/diet_classifier.py
|
_compute_default_label_features
|
Adarshsng/rasa
|
python
|
@staticmethod
def _compute_default_label_features(labels_example: List[Message]) -> List[FeatureArray]:
logger.debug('No label features found. Computing default label features.')
eye_matrix = np.eye(len(labels_example), dtype=np.float32)
return [FeatureArray(np.array([np.expand_dims(a, 0) for a in eye_matrix]), number_of_dimensions=3)]
|
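The fallback features are simply rows of an identity matrix, one per label, with an extra length-1 sequence axis inserted. A standalone sketch of the shape this produces:

    import numpy as np

    eye_matrix = np.eye(3, dtype=np.float32)  # three labels -> 3x3 identity
    features = np.array([np.expand_dims(row, 0) for row in eye_matrix])
    print(features.shape)  # (3, 1, 3): (num_labels, sequence_length=1, feature_dim)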
def _create_label_data(self, training_data: TrainingData, label_id_dict: Dict[(Text, int)], attribute: Text) -> RasaModelData:
'Create matrix with label_ids encoded in rows as bag of words.\n\n Find a training example for each label and get the encoded features\n from the corresponding Message object.\n If the features are already computed, fetch them from the message object\n else compute a one hot encoding for the label as the feature vector.\n '
labels_idx_examples = []
for (label_name, idx) in label_id_dict.items():
label_example = self._find_example_for_label(label_name, training_data.intent_examples, attribute)
labels_idx_examples.append((idx, label_example))
labels_idx_examples = sorted(labels_idx_examples, key=(lambda x: x[0]))
labels_example = [example for (_, example) in labels_idx_examples]
if self._check_labels_features_exist(labels_example, attribute):
(sequence_features, sentence_features) = self._extract_labels_precomputed_features(labels_example, attribute)
else:
sequence_features = None
sentence_features = self._compute_default_label_features(labels_example)
label_data = RasaModelData()
label_data.add_features(LABEL, SEQUENCE, sequence_features)
label_data.add_features(LABEL, SENTENCE, sentence_features)
if (label_data.does_feature_not_exist(LABEL, SENTENCE) and label_data.does_feature_not_exist(LABEL, SEQUENCE)):
raise ValueError('No label features are present. Please check your configuration file.')
label_ids = np.array([idx for (idx, _) in labels_idx_examples])
label_data.add_features(LABEL_KEY, LABEL_SUB_KEY, [FeatureArray(np.expand_dims(label_ids, (- 1)), number_of_dimensions=2)])
label_data.add_lengths(LABEL, SEQUENCE_LENGTH, LABEL, SEQUENCE)
return label_data
| -1,096,988,557,676,506,600
|
Create matrix with label_ids encoded in rows as bag of words.
Find a training example for each label and get the encoded features
from the corresponding Message object.
If the features are already computed, fetch them from the message object
else compute a one hot encoding for the label as the feature vector.
|
rasa/nlu/classifiers/diet_classifier.py
|
_create_label_data
|
Adarshsng/rasa
|
python
|
def _create_label_data(self, training_data: TrainingData, label_id_dict: Dict[(Text, int)], attribute: Text) -> RasaModelData:
'Create matrix with label_ids encoded in rows as bag of words.\n\n Find a training example for each label and get the encoded features\n from the corresponding Message object.\n If the features are already computed, fetch them from the message object\n else compute a one hot encoding for the label as the feature vector.\n '
labels_idx_examples = []
for (label_name, idx) in label_id_dict.items():
label_example = self._find_example_for_label(label_name, training_data.intent_examples, attribute)
labels_idx_examples.append((idx, label_example))
labels_idx_examples = sorted(labels_idx_examples, key=(lambda x: x[0]))
labels_example = [example for (_, example) in labels_idx_examples]
if self._check_labels_features_exist(labels_example, attribute):
(sequence_features, sentence_features) = self._extract_labels_precomputed_features(labels_example, attribute)
else:
sequence_features = None
sentence_features = self._compute_default_label_features(labels_example)
label_data = RasaModelData()
label_data.add_features(LABEL, SEQUENCE, sequence_features)
label_data.add_features(LABEL, SENTENCE, sentence_features)
if (label_data.does_feature_not_exist(LABEL, SENTENCE) and label_data.does_feature_not_exist(LABEL, SEQUENCE)):
raise ValueError('No label features are present. Please check your configuration file.')
label_ids = np.array([idx for (idx, _) in labels_idx_examples])
label_data.add_features(LABEL_KEY, LABEL_SUB_KEY, [FeatureArray(np.expand_dims(label_ids, (- 1)), number_of_dimensions=2)])
label_data.add_lengths(LABEL, SEQUENCE_LENGTH, LABEL, SEQUENCE)
return label_data
|
def _create_model_data(self, training_data: List[Message], label_id_dict: Optional[Dict[(Text, int)]]=None, label_attribute: Optional[Text]=None, training: bool=True) -> RasaModelData:
'Prepare data for training and create a RasaModelData object.'
from rasa.utils.tensorflow import model_data_utils
attributes_to_consider = [TEXT]
if (training and self.component_config[INTENT_CLASSIFICATION]):
attributes_to_consider.append(label_attribute)
if (training and self.component_config[ENTITY_RECOGNITION] and self._entity_tag_specs):
attributes_to_consider.append(ENTITIES)
if (training and (label_attribute is not None)):
training_data = [example for example in training_data if (label_attribute in example.data)]
training_data = [message for message in training_data if message.features_present(attribute=TEXT, featurizers=self.component_config.get(FEATURIZERS))]
if (not training_data):
return RasaModelData()
(features_for_examples, sparse_feature_sizes) = model_data_utils.featurize_training_examples(training_data, attributes_to_consider, entity_tag_specs=self._entity_tag_specs, featurizers=self.component_config[FEATURIZERS], bilou_tagging=self.component_config[BILOU_FLAG])
(attribute_data, _) = model_data_utils.convert_to_data_format(features_for_examples, consider_dialogue_dimension=False)
model_data = RasaModelData(label_key=self.label_key, label_sub_key=self.label_sub_key)
model_data.add_data(attribute_data)
model_data.add_lengths(TEXT, SEQUENCE_LENGTH, TEXT, SEQUENCE)
sparse_feature_sizes = self._remove_label_sparse_feature_sizes(sparse_feature_sizes=sparse_feature_sizes, label_attribute=label_attribute)
model_data.add_sparse_feature_sizes(sparse_feature_sizes)
self._add_label_features(model_data, training_data, label_attribute, label_id_dict, training)
model_data.sort()
return model_data
| 1,932,037,351,382,631,700
|
Prepare data for training and create a RasaModelData object.
|
rasa/nlu/classifiers/diet_classifier.py
|
_create_model_data
|
Adarshsng/rasa
|
python
|
def _create_model_data(self, training_data: List[Message], label_id_dict: Optional[Dict[(Text, int)]]=None, label_attribute: Optional[Text]=None, training: bool=True) -> RasaModelData:
from rasa.utils.tensorflow import model_data_utils
attributes_to_consider = [TEXT]
if (training and self.component_config[INTENT_CLASSIFICATION]):
attributes_to_consider.append(label_attribute)
if (training and self.component_config[ENTITY_RECOGNITION] and self._entity_tag_specs):
attributes_to_consider.append(ENTITIES)
if (training and (label_attribute is not None)):
training_data = [example for example in training_data if (label_attribute in example.data)]
training_data = [message for message in training_data if message.features_present(attribute=TEXT, featurizers=self.component_config.get(FEATURIZERS))]
if (not training_data):
return RasaModelData()
(features_for_examples, sparse_feature_sizes) = model_data_utils.featurize_training_examples(training_data, attributes_to_consider, entity_tag_specs=self._entity_tag_specs, featurizers=self.component_config[FEATURIZERS], bilou_tagging=self.component_config[BILOU_FLAG])
(attribute_data, _) = model_data_utils.convert_to_data_format(features_for_examples, consider_dialogue_dimension=False)
model_data = RasaModelData(label_key=self.label_key, label_sub_key=self.label_sub_key)
model_data.add_data(attribute_data)
model_data.add_lengths(TEXT, SEQUENCE_LENGTH, TEXT, SEQUENCE)
sparse_feature_sizes = self._remove_label_sparse_feature_sizes(sparse_feature_sizes=sparse_feature_sizes, label_attribute=label_attribute)
model_data.add_sparse_feature_sizes(sparse_feature_sizes)
self._add_label_features(model_data, training_data, label_attribute, label_id_dict, training)
model_data.sort()
return model_data
|
def preprocess_train_data(self, training_data: TrainingData) -> RasaModelData:
'Prepares data for training.\n\n Performs sanity checks on training data, extracts encodings for labels.\n '
if self.component_config[BILOU_FLAG]:
bilou_utils.apply_bilou_schema(training_data)
label_id_index_mapping = self._label_id_index_mapping(training_data, attribute=INTENT)
if (not label_id_index_mapping):
return RasaModelData()
self.index_label_id_mapping = self._invert_mapping(label_id_index_mapping)
self._label_data = self._create_label_data(training_data, label_id_index_mapping, attribute=INTENT)
self._entity_tag_specs = self._create_entity_tag_specs(training_data)
label_attribute = (INTENT if self.component_config[INTENT_CLASSIFICATION] else None)
model_data = self._create_model_data(training_data.nlu_examples, label_id_index_mapping, label_attribute=label_attribute)
self._check_input_dimension_consistency(model_data)
return model_data
| 557,687,017,003,223,200
|
Prepares data for training.
Performs sanity checks on training data, extracts encodings for labels.
|
rasa/nlu/classifiers/diet_classifier.py
|
preprocess_train_data
|
Adarshsng/rasa
|
python
|
def preprocess_train_data(self, training_data: TrainingData) -> RasaModelData:
'Prepares data for training.\n\n Performs sanity checks on training data, extracts encodings for labels.\n '
if self.component_config[BILOU_FLAG]:
bilou_utils.apply_bilou_schema(training_data)
label_id_index_mapping = self._label_id_index_mapping(training_data, attribute=INTENT)
if (not label_id_index_mapping):
return RasaModelData()
self.index_label_id_mapping = self._invert_mapping(label_id_index_mapping)
self._label_data = self._create_label_data(training_data, label_id_index_mapping, attribute=INTENT)
self._entity_tag_specs = self._create_entity_tag_specs(training_data)
label_attribute = (INTENT if self.component_config[INTENT_CLASSIFICATION] else None)
model_data = self._create_model_data(training_data.nlu_examples, label_id_index_mapping, label_attribute=label_attribute)
self._check_input_dimension_consistency(model_data)
return model_data
|
def train(self, training_data: TrainingData) -> Resource:
'Train the embedding intent classifier on a data set.'
model_data = self.preprocess_train_data(training_data)
if model_data.is_empty():
logger.debug(f"Cannot train '{self.__class__.__name__}'. No data was provided. Skipping training of the classifier.")
return self._resource
if ((not self.model) and self.finetune_mode):
raise rasa.shared.exceptions.InvalidParameterException(f'{self.__class__.__name__} was instantiated with `model=None` and `finetune_mode=True`. This is not a valid combination as the component needs an already instantiated and trained model to continue training in finetune mode.')
if self.component_config.get(INTENT_CLASSIFICATION):
if (not self._check_enough_labels(model_data)):
logger.error(f"Cannot train '{self.__class__.__name__}'. Need at least 2 different intent classes. Skipping training of classifier.")
return self._resource
if self.component_config.get(ENTITY_RECOGNITION):
self.check_correct_entity_annotations(training_data)
self._data_example = model_data.first_data_example()
if (not self.finetune_mode):
self.model = self._instantiate_model_class(model_data)
self.model.compile(optimizer=tf.keras.optimizers.Adam(self.component_config[LEARNING_RATE]))
else:
self.model.adjust_for_incremental_training(data_example=self._data_example, new_sparse_feature_sizes=model_data.get_sparse_feature_sizes(), old_sparse_feature_sizes=self._sparse_feature_sizes)
self._sparse_feature_sizes = model_data.get_sparse_feature_sizes()
(data_generator, validation_data_generator) = train_utils.create_data_generators(model_data, self.component_config[BATCH_SIZES], self.component_config[EPOCHS], self.component_config[BATCH_STRATEGY], self.component_config[EVAL_NUM_EXAMPLES], self.component_config[RANDOM_SEED])
callbacks = train_utils.create_common_callbacks(self.component_config[EPOCHS], self.component_config[TENSORBOARD_LOG_DIR], self.component_config[TENSORBOARD_LOG_LEVEL], self.tmp_checkpoint_dir)
self.model.fit(data_generator, epochs=self.component_config[EPOCHS], validation_data=validation_data_generator, validation_freq=self.component_config[EVAL_NUM_EPOCHS], callbacks=callbacks, verbose=False, shuffle=False)
self.persist()
return self._resource
| -3,389,418,153,500,108,300
|
Train the embedding intent classifier on a data set.
|
rasa/nlu/classifiers/diet_classifier.py
|
train
|
Adarshsng/rasa
|
python
|
def train(self, training_data: TrainingData) -> Resource:
model_data = self.preprocess_train_data(training_data)
if model_data.is_empty():
logger.debug(f"Cannot train '{self.__class__.__name__}'. No data was provided. Skipping training of the classifier.")
return self._resource
if ((not self.model) and self.finetune_mode):
raise rasa.shared.exceptions.InvalidParameterException(f'{self.__class__.__name__} was instantiated with `model=None` and `finetune_mode=True`. This is not a valid combination as the component needs an already instantiated and trained model to continue training in finetune mode.')
if self.component_config.get(INTENT_CLASSIFICATION):
if (not self._check_enough_labels(model_data)):
logger.error(f"Cannot train '{self.__class__.__name__}'. Need at least 2 different intent classes. Skipping training of classifier.")
return self._resource
if self.component_config.get(ENTITY_RECOGNITION):
self.check_correct_entity_annotations(training_data)
self._data_example = model_data.first_data_example()
if (not self.finetune_mode):
self.model = self._instantiate_model_class(model_data)
self.model.compile(optimizer=tf.keras.optimizers.Adam(self.component_config[LEARNING_RATE]))
else:
self.model.adjust_for_incremental_training(data_example=self._data_example, new_sparse_feature_sizes=model_data.get_sparse_feature_sizes(), old_sparse_feature_sizes=self._sparse_feature_sizes)
self._sparse_feature_sizes = model_data.get_sparse_feature_sizes()
(data_generator, validation_data_generator) = train_utils.create_data_generators(model_data, self.component_config[BATCH_SIZES], self.component_config[EPOCHS], self.component_config[BATCH_STRATEGY], self.component_config[EVAL_NUM_EXAMPLES], self.component_config[RANDOM_SEED])
callbacks = train_utils.create_common_callbacks(self.component_config[EPOCHS], self.component_config[TENSORBOARD_LOG_DIR], self.component_config[TENSORBOARD_LOG_LEVEL], self.tmp_checkpoint_dir)
self.model.fit(data_generator, epochs=self.component_config[EPOCHS], validation_data=validation_data_generator, validation_freq=self.component_config[EVAL_NUM_EPOCHS], callbacks=callbacks, verbose=False, shuffle=False)
self.persist()
return self._resource
|
def _predict_label(self, predict_out: Optional[Dict[(Text, tf.Tensor)]]) -> Tuple[(Dict[(Text, Any)], List[Dict[(Text, Any)]])]:
'Predicts the intent of the provided message.'
label: Dict[(Text, Any)] = {'name': None, 'confidence': 0.0}
label_ranking = []
if (predict_out is None):
return (label, label_ranking)
message_sim = predict_out['i_scores']
message_sim = message_sim.flatten()
if (message_sim.size == 0):
return (label, label_ranking)
ranking_length = self.component_config[RANKING_LENGTH]
renormalize = (self.component_config[RENORMALIZE_CONFIDENCES] and (self.component_config[MODEL_CONFIDENCE] == SOFTMAX))
(ranked_label_indices, message_sim) = train_utils.rank_and_mask(message_sim, ranking_length=ranking_length, renormalize=renormalize)
casted_message_sim: List[float] = message_sim.tolist()
top_label_idx = ranked_label_indices[0]
label = {'name': self.index_label_id_mapping[top_label_idx], 'confidence': casted_message_sim[top_label_idx]}
ranking = [(idx, casted_message_sim[idx]) for idx in ranked_label_indices]
label_ranking = [{'name': self.index_label_id_mapping[label_idx], 'confidence': score} for (label_idx, score) in ranking]
return (label, label_ranking)
| 5,663,114,333,651,413,000
|
Predicts the intent of the provided message.
|
rasa/nlu/classifiers/diet_classifier.py
|
_predict_label
|
Adarshsng/rasa
|
python
|
def _predict_label(self, predict_out: Optional[Dict[(Text, tf.Tensor)]]) -> Tuple[(Dict[(Text, Any)], List[Dict[(Text, Any)]])]:
label: Dict[(Text, Any)] = {'name': None, 'confidence': 0.0}
label_ranking = []
if (predict_out is None):
return (label, label_ranking)
message_sim = predict_out['i_scores']
message_sim = message_sim.flatten()
if (message_sim.size == 0):
return (label, label_ranking)
ranking_length = self.component_config[RANKING_LENGTH]
renormalize = (self.component_config[RENORMALIZE_CONFIDENCES] and (self.component_config[MODEL_CONFIDENCE] == SOFTMAX))
(ranked_label_indices, message_sim) = train_utils.rank_and_mask(message_sim, ranking_length=ranking_length, renormalize=renormalize)
casted_message_sim: List[float] = message_sim.tolist()
top_label_idx = ranked_label_indices[0]
label = {'name': self.index_label_id_mapping[top_label_idx], 'confidence': casted_message_sim[top_label_idx]}
ranking = [(idx, casted_message_sim[idx]) for idx in ranked_label_indices]
label_ranking = [{'name': self.index_label_id_mapping[label_idx], 'confidence': score} for (label_idx, score) in ranking]
return (label, label_ranking)
|
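The heavy lifting is done by `train_utils.rank_and_mask`; as a rough pure-numpy analogue (not rasa's actual implementation, which also masks to `ranking_length` and optionally renormalizes), the ranking step amounts to an argsort over the flattened confidences:

    import numpy as np

    index_label_id_mapping = {0: 'greet', 1: 'goodbye', 2: 'affirm'}  # illustrative
    message_sim = np.array([0.1, 0.7, 0.2])
    ranked_label_indices = np.argsort(message_sim)[::-1]  # [1, 2, 0]
    top = {'name': index_label_id_mapping[ranked_label_indices[0]],
           'confidence': float(message_sim[ranked_label_indices[0]])}
    print(top)  # {'name': 'goodbye', 'confidence': 0.7}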
def process(self, messages: List[Message]) -> List[Message]:
'Augments the message with intents, entities, and diagnostic data.'
for message in messages:
out = self._predict(message)
if self.component_config[INTENT_CLASSIFICATION]:
(label, label_ranking) = self._predict_label(out)
message.set(INTENT, label, add_to_output=True)
message.set('intent_ranking', label_ranking, add_to_output=True)
if self.component_config[ENTITY_RECOGNITION]:
entities = self._predict_entities(out, message)
message.set(ENTITIES, entities, add_to_output=True)
if (out and self._execution_context.should_add_diagnostic_data):
message.add_diagnostic_data(self._execution_context.node_name, out.get(DIAGNOSTIC_DATA))
return messages
| 109,263,573,167,095,840
|
Augments the message with intents, entities, and diagnostic data.
|
rasa/nlu/classifiers/diet_classifier.py
|
process
|
Adarshsng/rasa
|
python
|
def process(self, messages: List[Message]) -> List[Message]:
for message in messages:
out = self._predict(message)
if self.component_config[INTENT_CLASSIFICATION]:
(label, label_ranking) = self._predict_label(out)
message.set(INTENT, label, add_to_output=True)
message.set('intent_ranking', label_ranking, add_to_output=True)
if self.component_config[ENTITY_RECOGNITION]:
entities = self._predict_entities(out, message)
message.set(ENTITIES, entities, add_to_output=True)
if (out and self._execution_context.should_add_diagnostic_data):
message.add_diagnostic_data(self._execution_context.node_name, out.get(DIAGNOSTIC_DATA))
return messages
|
def persist(self) -> None:
'Persist this model into the passed directory.'
if (self.model is None):
return None
with self._model_storage.write_to(self._resource) as model_path:
file_name = self.__class__.__name__
tf_model_file = (model_path / f'{file_name}.tf_model')
rasa.shared.utils.io.create_directory_for_file(tf_model_file)
if (self.component_config[CHECKPOINT_MODEL] and self.tmp_checkpoint_dir):
self.model.load_weights((self.tmp_checkpoint_dir / 'checkpoint.tf_model'))
checkpoint_marker = (model_path / f'{file_name}.from_checkpoint.pkl')
checkpoint_marker.touch()
self.model.save(str(tf_model_file))
io_utils.pickle_dump((model_path / f'{file_name}.data_example.pkl'), self._data_example)
io_utils.pickle_dump((model_path / f'{file_name}.sparse_feature_sizes.pkl'), self._sparse_feature_sizes)
io_utils.pickle_dump((model_path / f'{file_name}.label_data.pkl'), dict(self._label_data.data))
io_utils.json_pickle((model_path / f'{file_name}.index_label_id_mapping.json'), self.index_label_id_mapping)
entity_tag_specs = ([tag_spec._asdict() for tag_spec in self._entity_tag_specs] if self._entity_tag_specs else [])
rasa.shared.utils.io.dump_obj_as_json_to_file((model_path / f'{file_name}.entity_tag_specs.json'), entity_tag_specs)
| 211,940,360,593,332,900
|
Persist this model into the passed directory.
|
rasa/nlu/classifiers/diet_classifier.py
|
persist
|
Adarshsng/rasa
|
python
|
def persist(self) -> None:
if (self.model is None):
return None
with self._model_storage.write_to(self._resource) as model_path:
file_name = self.__class__.__name__
tf_model_file = (model_path / f'{file_name}.tf_model')
rasa.shared.utils.io.create_directory_for_file(tf_model_file)
if (self.component_config[CHECKPOINT_MODEL] and self.tmp_checkpoint_dir):
self.model.load_weights((self.tmp_checkpoint_dir / 'checkpoint.tf_model'))
checkpoint_marker = (model_path / f'{file_name}.from_checkpoint.pkl')
checkpoint_marker.touch()
self.model.save(str(tf_model_file))
io_utils.pickle_dump((model_path / f'{file_name}.data_example.pkl'), self._data_example)
io_utils.pickle_dump((model_path / f'{file_name}.sparse_feature_sizes.pkl'), self._sparse_feature_sizes)
io_utils.pickle_dump((model_path / f'{file_name}.label_data.pkl'), dict(self._label_data.data))
io_utils.json_pickle((model_path / f'{file_name}.index_label_id_mapping.json'), self.index_label_id_mapping)
entity_tag_specs = ([tag_spec._asdict() for tag_spec in self._entity_tag_specs] if self._entity_tag_specs else [])
rasa.shared.utils.io.dump_obj_as_json_to_file((model_path / f'{file_name}.entity_tag_specs.json'), entity_tag_specs)
|
@classmethod
def load(cls, config: Dict[(Text, Any)], model_storage: ModelStorage, resource: Resource, execution_context: ExecutionContext, **kwargs: Any) -> DIETClassifier:
        'Loads a trained component from the storage (see parent class for full docstring).'
try:
with model_storage.read_from(resource) as model_path:
return cls._load(model_path, config, model_storage, resource, execution_context)
except ValueError:
logger.debug(f"Failed to load {cls.__class__.__name__} from model storage. Resource '{resource.name}' doesn't exist.")
return cls(config, model_storage, resource, execution_context)
| 2,106,258,776,940,941,800
|
Loads a trained component from the storage (see parent class for full docstring).
|
rasa/nlu/classifiers/diet_classifier.py
|
load
|
Adarshsng/rasa
|
python
|
@classmethod
def load(cls, config: Dict[(Text, Any)], model_storage: ModelStorage, resource: Resource, execution_context: ExecutionContext, **kwargs: Any) -> DIETClassifier:
try:
with model_storage.read_from(resource) as model_path:
return cls._load(model_path, config, model_storage, resource, execution_context)
except ValueError:
logger.debug(f"Failed to load {cls.__class__.__name__} from model storage. Resource '{resource.name}' doesn't exist.")
return cls(config, model_storage, resource, execution_context)
|
@classmethod
def _load(cls, model_path: Path, config: Dict[(Text, Any)], model_storage: ModelStorage, resource: Resource, execution_context: ExecutionContext) -> 'DIETClassifier':
'Loads the trained model from the provided directory.'
(index_label_id_mapping, entity_tag_specs, label_data, data_example, sparse_feature_sizes) = cls._load_from_files(model_path)
config = train_utils.update_confidence_type(config)
config = train_utils.update_similarity_type(config)
model = cls._load_model(entity_tag_specs, label_data, config, data_example, model_path, finetune_mode=execution_context.is_finetuning)
return cls(config=config, model_storage=model_storage, resource=resource, execution_context=execution_context, index_label_id_mapping=index_label_id_mapping, entity_tag_specs=entity_tag_specs, model=model, sparse_feature_sizes=sparse_feature_sizes)
| 1,848,851,608,624,442,400
|
Loads the trained model from the provided directory.
|
rasa/nlu/classifiers/diet_classifier.py
|
_load
|
Adarshsng/rasa
|
python
|
@classmethod
def _load(cls, model_path: Path, config: Dict[(Text, Any)], model_storage: ModelStorage, resource: Resource, execution_context: ExecutionContext) -> 'DIETClassifier':
(index_label_id_mapping, entity_tag_specs, label_data, data_example, sparse_feature_sizes) = cls._load_from_files(model_path)
config = train_utils.update_confidence_type(config)
config = train_utils.update_similarity_type(config)
model = cls._load_model(entity_tag_specs, label_data, config, data_example, model_path, finetune_mode=execution_context.is_finetuning)
return cls(config=config, model_storage=model_storage, resource=resource, execution_context=execution_context, index_label_id_mapping=index_label_id_mapping, entity_tag_specs=entity_tag_specs, model=model, sparse_feature_sizes=sparse_feature_sizes)
|
@staticmethod
def _ordered_tag_specs(entity_tag_specs: Optional[List[EntityTagSpec]]) -> List[EntityTagSpec]:
'Ensure that order of entity tag specs matches CRF layer order.'
if (entity_tag_specs is None):
return []
crf_order = [ENTITY_ATTRIBUTE_TYPE, ENTITY_ATTRIBUTE_ROLE, ENTITY_ATTRIBUTE_GROUP]
ordered_tag_spec = []
for tag_name in crf_order:
for tag_spec in entity_tag_specs:
if (tag_name == tag_spec.tag_name):
ordered_tag_spec.append(tag_spec)
return ordered_tag_spec
| 7,247,348,547,029,321,000
|
Ensure that order of entity tag specs matches CRF layer order.
|
rasa/nlu/classifiers/diet_classifier.py
|
_ordered_tag_specs
|
Adarshsng/rasa
|
python
|
@staticmethod
def _ordered_tag_specs(entity_tag_specs: Optional[List[EntityTagSpec]]) -> List[EntityTagSpec]:
if (entity_tag_specs is None):
return []
crf_order = [ENTITY_ATTRIBUTE_TYPE, ENTITY_ATTRIBUTE_ROLE, ENTITY_ATTRIBUTE_GROUP]
ordered_tag_spec = []
for tag_name in crf_order:
for tag_spec in entity_tag_specs:
if (tag_name == tag_spec.tag_name):
ordered_tag_spec.append(tag_spec)
return ordered_tag_spec
|
def batch_loss(self, batch_in: Union[(Tuple[tf.Tensor], Tuple[np.ndarray])]) -> tf.Tensor:
'Calculates the loss for the given batch.\n\n Args:\n batch_in: The batch.\n\n Returns:\n The loss of the given batch.\n '
tf_batch_data = self.batch_to_model_data_format(batch_in, self.data_signature)
sequence_feature_lengths = self._get_sequence_feature_lengths(tf_batch_data, TEXT)
(text_transformed, text_in, mask_combined_sequence_sentence, text_seq_ids, mlm_mask_boolean_text, _) = self._tf_layers[f'sequence_layer.{self.text_name}']((tf_batch_data[TEXT][SEQUENCE], tf_batch_data[TEXT][SENTENCE], sequence_feature_lengths), training=self._training)
losses = []
sentence_feature_lengths = self._get_sentence_feature_lengths(tf_batch_data, TEXT)
combined_sequence_sentence_feature_lengths = (sequence_feature_lengths + sentence_feature_lengths)
if self.config[MASKED_LM]:
(loss, acc) = self._mask_loss(text_transformed, text_in, text_seq_ids, mlm_mask_boolean_text, TEXT)
self.mask_loss.update_state(loss)
self.mask_acc.update_state(acc)
losses.append(loss)
if self.config[INTENT_CLASSIFICATION]:
loss = self._batch_loss_intent(combined_sequence_sentence_feature_lengths, text_transformed, tf_batch_data)
losses.append(loss)
if self.config[ENTITY_RECOGNITION]:
losses += self._batch_loss_entities(mask_combined_sequence_sentence, sequence_feature_lengths, text_transformed, tf_batch_data)
return tf.math.add_n(losses)
| 1,193,514,375,686,330,000
|
Calculates the loss for the given batch.
Args:
batch_in: The batch.
Returns:
The loss of the given batch.
|
rasa/nlu/classifiers/diet_classifier.py
|
batch_loss
|
Adarshsng/rasa
|
python
|
def batch_loss(self, batch_in: Union[(Tuple[tf.Tensor], Tuple[np.ndarray])]) -> tf.Tensor:
'Calculates the loss for the given batch.\n\n Args:\n batch_in: The batch.\n\n Returns:\n The loss of the given batch.\n '
tf_batch_data = self.batch_to_model_data_format(batch_in, self.data_signature)
sequence_feature_lengths = self._get_sequence_feature_lengths(tf_batch_data, TEXT)
(text_transformed, text_in, mask_combined_sequence_sentence, text_seq_ids, mlm_mask_boolean_text, _) = self._tf_layers[f'sequence_layer.{self.text_name}']((tf_batch_data[TEXT][SEQUENCE], tf_batch_data[TEXT][SENTENCE], sequence_feature_lengths), training=self._training)
losses = []
sentence_feature_lengths = self._get_sentence_feature_lengths(tf_batch_data, TEXT)
combined_sequence_sentence_feature_lengths = (sequence_feature_lengths + sentence_feature_lengths)
if self.config[MASKED_LM]:
(loss, acc) = self._mask_loss(text_transformed, text_in, text_seq_ids, mlm_mask_boolean_text, TEXT)
self.mask_loss.update_state(loss)
self.mask_acc.update_state(acc)
losses.append(loss)
if self.config[INTENT_CLASSIFICATION]:
loss = self._batch_loss_intent(combined_sequence_sentence_feature_lengths, text_transformed, tf_batch_data)
losses.append(loss)
if self.config[ENTITY_RECOGNITION]:
losses += self._batch_loss_entities(mask_combined_sequence_sentence, sequence_feature_lengths, text_transformed, tf_batch_data)
return tf.math.add_n(losses)
|
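The final reduction is a plain element-wise sum of the per-objective losses. A tiny sketch of `tf.math.add_n` on scalar stand-ins for the MLM, intent, and entity losses (values chosen to sum exactly in float32):

    import tensorflow as tf

    losses = [tf.constant(0.5), tf.constant(1.25), tf.constant(0.25)]
    total_loss = tf.math.add_n(losses)
    print(float(total_loss))  # 2.0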
def prepare_for_predict(self) -> None:
'Prepares the model for prediction.'
if self.config[INTENT_CLASSIFICATION]:
(_, self.all_labels_embed) = self._create_all_labels()
| 1,332,386,574,597,657,600
|
Prepares the model for prediction.
|
rasa/nlu/classifiers/diet_classifier.py
|
prepare_for_predict
|
Adarshsng/rasa
|
python
|
def prepare_for_predict(self) -> None:
if self.config[INTENT_CLASSIFICATION]:
(_, self.all_labels_embed) = self._create_all_labels()
|
def batch_predict(self, batch_in: Union[(Tuple[tf.Tensor], Tuple[np.ndarray])]) -> Dict[(Text, tf.Tensor)]:
'Predicts the output of the given batch.\n\n Args:\n batch_in: The batch.\n\n Returns:\n The output to predict.\n '
tf_batch_data = self.batch_to_model_data_format(batch_in, self.predict_data_signature)
sequence_feature_lengths = self._get_sequence_feature_lengths(tf_batch_data, TEXT)
sentence_feature_lengths = self._get_sentence_feature_lengths(tf_batch_data, TEXT)
(text_transformed, _, _, _, _, attention_weights) = self._tf_layers[f'sequence_layer.{self.text_name}']((tf_batch_data[TEXT][SEQUENCE], tf_batch_data[TEXT][SENTENCE], sequence_feature_lengths), training=self._training)
predictions = {DIAGNOSTIC_DATA: {'attention_weights': attention_weights, 'text_transformed': text_transformed}}
if self.config[INTENT_CLASSIFICATION]:
predictions.update(self._batch_predict_intents((sequence_feature_lengths + sentence_feature_lengths), text_transformed))
if self.config[ENTITY_RECOGNITION]:
predictions.update(self._batch_predict_entities(sequence_feature_lengths, text_transformed))
return predictions
| 1,617,009,859,647,779,300
|
Predicts the output of the given batch.
Args:
batch_in: The batch.
Returns:
The output to predict.
|
rasa/nlu/classifiers/diet_classifier.py
|
batch_predict
|
Adarshsng/rasa
|
python
|
def batch_predict(self, batch_in: Union[(Tuple[tf.Tensor], Tuple[np.ndarray])]) -> Dict[(Text, tf.Tensor)]:
'Predicts the output of the given batch.\n\n Args:\n batch_in: The batch.\n\n Returns:\n The output to predict.\n '
tf_batch_data = self.batch_to_model_data_format(batch_in, self.predict_data_signature)
sequence_feature_lengths = self._get_sequence_feature_lengths(tf_batch_data, TEXT)
sentence_feature_lengths = self._get_sentence_feature_lengths(tf_batch_data, TEXT)
(text_transformed, _, _, _, _, attention_weights) = self._tf_layers[f'sequence_layer.{self.text_name}']((tf_batch_data[TEXT][SEQUENCE], tf_batch_data[TEXT][SENTENCE], sequence_feature_lengths), training=self._training)
predictions = {DIAGNOSTIC_DATA: {'attention_weights': attention_weights, 'text_transformed': text_transformed}}
if self.config[INTENT_CLASSIFICATION]:
predictions.update(self._batch_predict_intents((sequence_feature_lengths + sentence_feature_lengths), text_transformed))
if self.config[ENTITY_RECOGNITION]:
predictions.update(self._batch_predict_entities(sequence_feature_lengths, text_transformed))
return predictions
|
def _create_dictionary(self, document):
'Creates mapping key = word, value = row index'
words = map(self.normalize_word, document.words)
unique_words = frozenset((self.stem_word(w) for w in words if (w not in self._stop_words)))
return dict(((w, i) for (i, w) in enumerate(unique_words)))
| 2,916,173,340,952,540,000
|
Creates mapping key = word, value = row index
|
util_common/nlp/Sumy/summarizers/lsa.py
|
_create_dictionary
|
Sohone-Guo/Pointer-Generator
|
python
|
def _create_dictionary(self, document):
words = map(self.normalize_word, document.words)
unique_words = frozenset((self.stem_word(w) for w in words if (w not in self._stop_words)))
return dict(((w, i) for (i, w) in enumerate(unique_words)))
|
def _create_matrix(self, document, dictionary):
        '\n Creates matrix of shape |unique words|×|sentences| where cells\n contain the number of occurrences of words (rows) in sentences (cols).\n '
sentences = document.sentences
words_count = len(dictionary)
sentences_count = len(sentences)
if (words_count < sentences_count):
message = 'Number of words (%d) is lower than number of sentences (%d). LSA algorithm may not work properly.'
warn((message % (words_count, sentences_count)))
matrix = numpy.zeros((words_count, sentences_count))
for (col, sentence) in enumerate(sentences):
for word in map(self.stem_word, sentence.words):
if (word in dictionary):
row = dictionary[word]
matrix[(row, col)] += 1
return matrix
| -3,011,865,140,669,539,300
|
Creates matrix of shape |unique words|×|sentences| where cells
contain the number of occurrences of words (rows) in sentences (cols).
|
util_common/nlp/Sumy/summarizers/lsa.py
|
_create_matrix
|
Sohone-Guo/Pointer-Generator
|
python
|
def _create_matrix(self, document, dictionary):
        '\n Creates matrix of shape |unique words|×|sentences| where cells\n contain the number of occurrences of words (rows) in sentences (cols).\n '
sentences = document.sentences
words_count = len(dictionary)
sentences_count = len(sentences)
if (words_count < sentences_count):
message = 'Number of words (%d) is lower than number of sentences (%d). LSA algorithm may not work properly.'
warn((message % (words_count, sentences_count)))
matrix = numpy.zeros((words_count, sentences_count))
for (col, sentence) in enumerate(sentences):
for word in map(self.stem_word, sentence.words):
if (word in dictionary):
row = dictionary[word]
matrix[(row, col)] += 1
return matrix
|
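A standalone sketch of the counting above, with pre-tokenised sentences standing in for the `document` object and the stemming/normalisation steps omitted:

    import numpy

    sentences = [['the', 'cat', 'sat'], ['the', 'dog']]
    dictionary = {'the': 0, 'cat': 1, 'sat': 2, 'dog': 3}
    matrix = numpy.zeros((len(dictionary), len(sentences)))
    for (col, sentence) in enumerate(sentences):
        for word in sentence:
            if (word in dictionary):
                matrix[(dictionary[word], col)] += 1
    print(matrix[0])  # [1. 1.]  ('the' occurs once in each sentence)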
def _compute_term_frequency(self, matrix, smooth=0.4):
        '\n Computes TF metrics for each sentence (column) in the given matrix.\n You can read more about the smoothing parameter at the URL below:\n http://nlp.stanford.edu/IR-book/html/htmledition/maximum-tf-normalization-1.html\n '
assert (0.0 <= smooth < 1.0)
max_word_frequencies = numpy.max(matrix, axis=0)
(rows, cols) = matrix.shape
for row in range(rows):
for col in range(cols):
max_word_frequency = max_word_frequencies[col]
if (max_word_frequency != 0):
frequency = (matrix[(row, col)] / max_word_frequency)
matrix[(row, col)] = (smooth + ((1.0 - smooth) * frequency))
return matrix
| 1,475,393,266,677,919,200
|
Computes TF metrics for each sentence (column) in the given matrix.
You can read more about the smoothing parameter at the URL below:
http://nlp.stanford.edu/IR-book/html/htmledition/maximum-tf-normalization-1.html
|
util_common/nlp/Sumy/summarizers/lsa.py
|
_compute_term_frequency
|
Sohone-Guo/Pointer-Generator
|
python
|
def _compute_term_frequency(self, matrix, smooth=0.4):
        '\n Computes TF metrics for each sentence (column) in the given matrix.\n You can read more about the smoothing parameter at the URL below:\n http://nlp.stanford.edu/IR-book/html/htmledition/maximum-tf-normalization-1.html\n '
assert (0.0 <= smooth < 1.0)
max_word_frequencies = numpy.max(matrix, axis=0)
(rows, cols) = matrix.shape
for row in range(rows):
for col in range(cols):
max_word_frequency = max_word_frequencies[col]
if (max_word_frequency != 0):
frequency = (matrix[(row, col)] / max_word_frequency)
matrix[(row, col)] = (smooth + ((1.0 - smooth) * frequency))
return matrix
|
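In formula form each cell becomes smooth + (1 - smooth) * (count / max_count_in_column). A worked example with smooth=0.4: a word occurring 2 times in a sentence whose most frequent word occurs 4 times scores 0.4 + 0.6 * (2/4) = 0.7. The loop above can also be written as a vectorised expression, assuming no column is all zeros (which the original guards against via the max_word_frequency check):

    import numpy

    matrix = numpy.array([[4.0, 1.0],
                          [2.0, 0.0]])
    max_word_frequencies = numpy.max(matrix, axis=0)  # [4., 1.]
    smooth = 0.4
    tf = (smooth + ((1.0 - smooth) * (matrix / max_word_frequencies)))
    print(tf)  # [[1.  1. ]
               #  [0.7 0.4]]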
@skipIfReproducer
def test_read_memory(self):
'Test Python SBProcess.ReadMemory() API.'
self.build()
exe = self.getBuildArtifact('a.out')
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation('main.cpp', self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
process = target.LaunchSimple(None, None, self.get_process_working_directory())
thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint)
self.assertTrue(thread.IsValid(), 'There should be a thread stopped due to breakpoint')
frame = thread.GetFrameAtIndex(0)
val = frame.FindValue('my_char', lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
error = lldb.SBError()
self.assertFalse(val.TypeIsPointerType())
content = process.ReadMemory(val.AddressOf().GetValueAsUnsigned(), 1, error)
if (not error.Success()):
self.fail('SBProcess.ReadMemory() failed')
if self.TraceOn():
print('memory content:', content)
self.expect(content, "Result from SBProcess.ReadMemory() matches our expected output: 'x'", exe=False, startstr=b'x')
val = frame.FindValue('my_char_ptr', lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
cstring = process.ReadCStringFromMemory(val.GetValueAsUnsigned(), 256, error)
if (not error.Success()):
self.fail('SBProcess.ReadCStringFromMemory() failed')
if self.TraceOn():
print('cstring read is:', cstring)
self.expect(cstring, 'Result from SBProcess.ReadCStringFromMemory() matches our expected output', exe=False, startstr='Does it work?')
val = frame.FindValue('my_cstring', lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
self.assertFalse(val.TypeIsPointerType())
cstring = process.ReadCStringFromMemory(val.AddressOf().GetValueAsUnsigned(), 256, error)
if (not error.Success()):
self.fail('SBProcess.ReadCStringFromMemory() failed')
if self.TraceOn():
print('cstring read is:', cstring)
self.expect(cstring, 'Result from SBProcess.ReadCStringFromMemory() matches our expected output', exe=False, startstr='lldb.SBProcess.ReadCStringFromMemory() works!')
val = frame.FindValue('my_uint32', lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
self.assertFalse(val.TypeIsPointerType())
my_uint32 = process.ReadUnsignedFromMemory(val.AddressOf().GetValueAsUnsigned(), 4, error)
if (not error.Success()):
            self.fail('SBProcess.ReadUnsignedFromMemory() failed')
if self.TraceOn():
print('uint32 read is:', my_uint32)
if (my_uint32 != 12345):
self.fail('Result from SBProcess.ReadUnsignedFromMemory() does not match our expected output')
| -3,021,106,652,205,118,500
|
Test Python SBProcess.ReadMemory() API.
|
lldb/test/API/python_api/process/TestProcessAPI.py
|
test_read_memory
|
AaronBallman/llvm
|
python
|
@skipIfReproducer
def test_read_memory(self):
self.build()
exe = self.getBuildArtifact('a.out')
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation('main.cpp', self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
process = target.LaunchSimple(None, None, self.get_process_working_directory())
thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint)
self.assertTrue(thread.IsValid(), 'There should be a thread stopped due to breakpoint')
frame = thread.GetFrameAtIndex(0)
val = frame.FindValue('my_char', lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
error = lldb.SBError()
self.assertFalse(val.TypeIsPointerType())
content = process.ReadMemory(val.AddressOf().GetValueAsUnsigned(), 1, error)
if (not error.Success()):
self.fail('SBProcess.ReadMemory() failed')
if self.TraceOn():
print('memory content:', content)
self.expect(content, "Result from SBProcess.ReadMemory() matches our expected output: 'x'", exe=False, startstr=b'x')
val = frame.FindValue('my_char_ptr', lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
cstring = process.ReadCStringFromMemory(val.GetValueAsUnsigned(), 256, error)
if (not error.Success()):
self.fail('SBProcess.ReadCStringFromMemory() failed')
if self.TraceOn():
print('cstring read is:', cstring)
self.expect(cstring, 'Result from SBProcess.ReadCStringFromMemory() matches our expected output', exe=False, startstr='Does it work?')
val = frame.FindValue('my_cstring', lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
self.assertFalse(val.TypeIsPointerType())
cstring = process.ReadCStringFromMemory(val.AddressOf().GetValueAsUnsigned(), 256, error)
if (not error.Success()):
self.fail('SBProcess.ReadCStringFromMemory() failed')
if self.TraceOn():
print('cstring read is:', cstring)
self.expect(cstring, 'Result from SBProcess.ReadCStringFromMemory() matches our expected output', exe=False, startstr='lldb.SBProcess.ReadCStringFromMemory() works!')
val = frame.FindValue('my_uint32', lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
self.assertFalse(val.TypeIsPointerType())
my_uint32 = process.ReadUnsignedFromMemory(val.AddressOf().GetValueAsUnsigned(), 4, error)
if (not error.Success()):
            self.fail('SBProcess.ReadUnsignedFromMemory() failed')
if self.TraceOn():
print('uint32 read is:', my_uint32)
if (my_uint32 != 12345):
self.fail('Result from SBProcess.ReadUnsignedFromMemory() does not match our expected output')
|
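All of these reads follow the same out-parameter error pattern: construct an `SBError`, pass it to the API call, and check `error.Success()` afterwards instead of catching exceptions. A fragment of the pattern (not self-contained: `process` and `address` are assumed to come from an already attached debug session):

    error = lldb.SBError()
    content = process.ReadMemory(address, 4, error)
    if (not error.Success()):
        print('read failed:', error.GetCString())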
@skipIfReproducer
def test_write_memory(self):
'Test Python SBProcess.WriteMemory() API.'
self.build()
exe = self.getBuildArtifact('a.out')
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation('main.cpp', self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
process = target.LaunchSimple(None, None, self.get_process_working_directory())
thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint)
self.assertTrue(thread.IsValid(), 'There should be a thread stopped due to breakpoint')
frame = thread.GetFrameAtIndex(0)
val = frame.FindValue('my_char', lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
if (not val.GetLocation().startswith('0x')):
return
location = int(val.GetLocation(), 16)
error = lldb.SBError()
result = process.WriteMemory(location, 'a', error)
if ((not error.Success()) or (result != 1)):
self.fail('SBProcess.WriteMemory() failed')
content = process.ReadMemory(location, 1, error)
if (not error.Success()):
self.fail('SBProcess.ReadMemory() failed')
if self.TraceOn():
print('memory content:', content)
self.expect(content, "Result from SBProcess.ReadMemory() matches our expected output: 'a'", exe=False, startstr=b'a')
| -1,242,185,819,707,027,200
|
Test Python SBProcess.WriteMemory() API.
|
lldb/test/API/python_api/process/TestProcessAPI.py
|
test_write_memory
|
AaronBallman/llvm
|
python
|
@skipIfReproducer
def test_write_memory(self):
self.build()
exe = self.getBuildArtifact('a.out')
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation('main.cpp', self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
process = target.LaunchSimple(None, None, self.get_process_working_directory())
thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint)
self.assertTrue(thread.IsValid(), 'There should be a thread stopped due to breakpoint')
frame = thread.GetFrameAtIndex(0)
val = frame.FindValue('my_char', lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
if (not val.GetLocation().startswith('0x')):
return
location = int(val.GetLocation(), 16)
error = lldb.SBError()
result = process.WriteMemory(location, 'a', error)
if ((not error.Success()) or (result != 1)):
self.fail('SBProcess.WriteMemory() failed')
content = process.ReadMemory(location, 1, error)
if (not error.Success()):
self.fail('SBProcess.ReadMemory() failed')
if self.TraceOn():
print('memory content:', content)
self.expect(content, "Result from SBProcess.ReadMemory() matches our expected output: 'a'", exe=False, startstr=b'a')
|
@skipIfReproducer
def test_access_my_int(self):
"Test access 'my_int' using Python SBProcess.GetByteOrder() and other APIs."
self.build()
exe = self.getBuildArtifact('a.out')
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation('main.cpp', self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
process = target.LaunchSimple(None, None, self.get_process_working_directory())
thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint)
self.assertTrue(thread.IsValid(), 'There should be a thread stopped due to breakpoint')
frame = thread.GetFrameAtIndex(0)
val = frame.FindValue('my_int', lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
if (not val.GetLocation().startswith('0x')):
return
location = int(val.GetLocation(), 16)
from lldbsuite.test.lldbutil import int_to_bytearray, bytearray_to_int
byteSize = val.GetByteSize()
bytes = int_to_bytearray(256, byteSize)
byteOrder = process.GetByteOrder()
if (byteOrder == lldb.eByteOrderBig):
bytes.reverse()
elif (byteOrder == lldb.eByteOrderLittle):
pass
else:
return
error = lldb.SBError()
result = process.WriteMemory(location, bytes, error)
if ((not error.Success()) or (result != byteSize)):
self.fail('SBProcess.WriteMemory() failed')
        self.expect(val.GetValue(), "SBProcess.WriteMemory() successfully writes (int)256 to the memory location for 'my_int'", exe=False, startstr='256')
val = frame.FindValue('my_int', lldb.eValueTypeVariableGlobal)
        self.expect(val.GetValue(), "SBProcess.WriteMemory() successfully writes (int)256 to the memory location for 'my_int'", exe=False, startstr='256')
content = process.ReadMemory(location, byteSize, error)
if (not error.Success()):
self.fail('SBProcess.ReadMemory() failed')
if (byteOrder == lldb.eByteOrderBig):
content = bytearray(content, 'ascii')
content.reverse()
new_value = bytearray_to_int(content, byteSize)
if (new_value != 256):
self.fail("Memory content read from 'my_int' does not match (int)256")
if self.TraceOn():
for i in content:
print('byte:', i)
| 2,923,101,617,713,456,000
|
Test access 'my_int' using Python SBProcess.GetByteOrder() and other APIs.
|
lldb/test/API/python_api/process/TestProcessAPI.py
|
test_access_my_int
|
AaronBallman/llvm
|
python
|
@skipIfReproducer
def test_access_my_int(self):
self.build()
exe = self.getBuildArtifact('a.out')
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation('main.cpp', self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
process = target.LaunchSimple(None, None, self.get_process_working_directory())
thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint)
self.assertTrue(thread.IsValid(), 'There should be a thread stopped due to breakpoint')
frame = thread.GetFrameAtIndex(0)
val = frame.FindValue('my_int', lldb.eValueTypeVariableGlobal)
self.DebugSBValue(val)
if (not val.GetLocation().startswith('0x')):
return
location = int(val.GetLocation(), 16)
from lldbsuite.test.lldbutil import int_to_bytearray, bytearray_to_int
byteSize = val.GetByteSize()
bytes = int_to_bytearray(256, byteSize)
byteOrder = process.GetByteOrder()
if (byteOrder == lldb.eByteOrderBig):
bytes.reverse()
elif (byteOrder == lldb.eByteOrderLittle):
pass
else:
return
error = lldb.SBError()
result = process.WriteMemory(location, bytes, error)
if ((not error.Success()) or (result != byteSize)):
self.fail('SBProcess.WriteMemory() failed')
        self.expect(val.GetValue(), "SBProcess.WriteMemory() successfully writes (int)256 to the memory location for 'my_int'", exe=False, startstr='256')
val = frame.FindValue('my_int', lldb.eValueTypeVariableGlobal)
        self.expect(val.GetValue(), "SBProcess.WriteMemory() successfully writes (int)256 to the memory location for 'my_int'", exe=False, startstr='256')
content = process.ReadMemory(location, byteSize, error)
if (not error.Success()):
self.fail('SBProcess.ReadMemory() failed')
if (byteOrder == lldb.eByteOrderBig):
content = bytearray(content, 'ascii')
content.reverse()
new_value = bytearray_to_int(content, byteSize)
if (new_value != 256):
self.fail("Memory content read from 'my_int' does not match (int)256")
if self.TraceOn():
for i in content:
print('byte:', i)
|
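The byte-order juggling done with `int_to_bytearray`/`bytearray_to_int` can be illustrated with the standard library alone; this standalone sketch writes the same (int)256 in both endiannesses and reads it back:

    value, size = 256, 4
    little = value.to_bytes(size, byteorder='little')  # b'\x00\x01\x00\x00'
    big = value.to_bytes(size, byteorder='big')        # b'\x00\x00\x01\x00'
    assert int.from_bytes(little, byteorder='little') == 256
    assert int.from_bytes(big, byteorder='big') == 256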
def test_remote_launch(self):
'Test SBProcess.RemoteLaunch() API with a process not in eStateConnected, and it should fail.'
self.build()
exe = self.getBuildArtifact('a.out')
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
process = target.LaunchSimple(None, None, self.get_process_working_directory())
if self.TraceOn():
print('process state:', state_type_to_str(process.GetState()))
self.assertTrue((process.GetState() != lldb.eStateConnected))
error = lldb.SBError()
success = process.RemoteLaunch(None, None, None, None, None, None, 0, False, error)
self.assertTrue((not success), 'RemoteLaunch() should fail for process state != eStateConnected')
| 323,723,085,478,817,100
|
Test SBProcess.RemoteLaunch() API with a process not in eStateConnected, and it should fail.
|
lldb/test/API/python_api/process/TestProcessAPI.py
|
test_remote_launch
|
AaronBallman/llvm
|
python
|
def test_remote_launch(self):
self.build()
exe = self.getBuildArtifact('a.out')
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
process = target.LaunchSimple(None, None, self.get_process_working_directory())
if self.TraceOn():
print('process state:', state_type_to_str(process.GetState()))
self.assertTrue((process.GetState() != lldb.eStateConnected))
error = lldb.SBError()
success = process.RemoteLaunch(None, None, None, None, None, None, 0, False, error)
self.assertTrue((not success), 'RemoteLaunch() should fail for process state != eStateConnected')
|
def test_get_num_supported_hardware_watchpoints(self):
'Test SBProcess.GetNumSupportedHardwareWatchpoints() API with a process.'
self.build()
exe = self.getBuildArtifact('a.out')
self.runCmd(('file ' + exe), CURRENT_EXECUTABLE_SET)
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation('main.cpp', self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
process = target.LaunchSimple(None, None, self.get_process_working_directory())
error = lldb.SBError()
num = process.GetNumSupportedHardwareWatchpoints(error)
if (self.TraceOn() and error.Success()):
print(('Number of supported hardware watchpoints: %d' % num))
| -8,014,264,649,563,344,000
|
Test SBProcess.GetNumSupportedHardwareWatchpoints() API with a process.
|
lldb/test/API/python_api/process/TestProcessAPI.py
|
test_get_num_supported_hardware_watchpoints
|
AaronBallman/llvm
|
python
|
def test_get_num_supported_hardware_watchpoints(self):
self.build()
exe = self.getBuildArtifact('a.out')
self.runCmd(('file ' + exe), CURRENT_EXECUTABLE_SET)
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
breakpoint = target.BreakpointCreateByLocation('main.cpp', self.line)
self.assertTrue(breakpoint, VALID_BREAKPOINT)
process = target.LaunchSimple(None, None, self.get_process_working_directory())
error = lldb.SBError()
num = process.GetNumSupportedHardwareWatchpoints(error)
if (self.TraceOn() and error.Success()):
print(('Number of supported hardware watchpoints: %d' % num))
|
@no_debug_info_test
def test_get_process_info(self):
'Test SBProcess::GetProcessInfo() API with a locally launched process.'
self.build()
exe = self.getBuildArtifact('a.out')
self.runCmd(('file ' + exe), CURRENT_EXECUTABLE_SET)
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
launch_info = target.GetLaunchInfo()
launch_info.SetWorkingDirectory(self.get_process_working_directory())
launch_flags = launch_info.GetLaunchFlags()
launch_flags |= lldb.eLaunchFlagStopAtEntry
launch_info.SetLaunchFlags(launch_flags)
error = lldb.SBError()
process = target.Launch(launch_info, error)
if (not error.Success()):
self.fail('Failed to launch process')
process_info = process.GetProcessInfo()
self.assertTrue(process_info.IsValid())
file_spec = process_info.GetExecutableFile()
self.assertTrue(file_spec.IsValid())
process_name = process_info.GetName()
self.assertIsNotNone(process_name, 'Process has a name')
self.assertGreater(len(process_name), 0, "Process name isn't blank")
self.assertEqual(file_spec.GetFilename(), 'a.out')
self.assertNotEqual(process_info.GetProcessID(), lldb.LLDB_INVALID_PROCESS_ID, 'Process ID is valid')
triple = process_info.GetTriple()
self.assertIsNotNone(triple, 'Process has a triple')
if process_info.UserIDIsValid():
self.assertNotEqual(process_info.GetUserID(), lldb.UINT32_MAX, 'Process user ID is valid')
else:
self.assertEqual(process_info.GetUserID(), lldb.UINT32_MAX, 'Process user ID is invalid')
if process_info.GroupIDIsValid():
self.assertNotEqual(process_info.GetGroupID(), lldb.UINT32_MAX, 'Process group ID is valid')
else:
self.assertEqual(process_info.GetGroupID(), lldb.UINT32_MAX, 'Process group ID is invalid')
if process_info.EffectiveUserIDIsValid():
self.assertNotEqual(process_info.GetEffectiveUserID(), lldb.UINT32_MAX, 'Process effective user ID is valid')
else:
self.assertEqual(process_info.GetEffectiveUserID(), lldb.UINT32_MAX, 'Process effective user ID is invalid')
if process_info.EffectiveGroupIDIsValid():
self.assertNotEqual(process_info.GetEffectiveGroupID(), lldb.UINT32_MAX, 'Process effective group ID is valid')
else:
self.assertEqual(process_info.GetEffectiveGroupID(), lldb.UINT32_MAX, 'Process effective group ID is invalid')
process_info.GetParentProcessID()
| 7,278,161,170,177,846,000
|
Test SBProcess::GetProcessInfo() API with a locally launched process.
|
lldb/test/API/python_api/process/TestProcessAPI.py
|
test_get_process_info
|
AaronBallman/llvm
|
python
|
@no_debug_info_test
def test_get_process_info(self):
self.build()
exe = self.getBuildArtifact('a.out')
self.runCmd(('file ' + exe), CURRENT_EXECUTABLE_SET)
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
launch_info = target.GetLaunchInfo()
launch_info.SetWorkingDirectory(self.get_process_working_directory())
launch_flags = launch_info.GetLaunchFlags()
launch_flags |= lldb.eLaunchFlagStopAtEntry
launch_info.SetLaunchFlags(launch_flags)
error = lldb.SBError()
process = target.Launch(launch_info, error)
if (not error.Success()):
self.fail('Failed to launch process')
process_info = process.GetProcessInfo()
self.assertTrue(process_info.IsValid())
file_spec = process_info.GetExecutableFile()
self.assertTrue(file_spec.IsValid())
process_name = process_info.GetName()
self.assertIsNotNone(process_name, 'Process has a name')
self.assertGreater(len(process_name), 0, "Process name isn't blank")
self.assertEqual(file_spec.GetFilename(), 'a.out')
self.assertNotEqual(process_info.GetProcessID(), lldb.LLDB_INVALID_PROCESS_ID, 'Process ID is valid')
triple = process_info.GetTriple()
self.assertIsNotNone(triple, 'Process has a triple')
if process_info.UserIDIsValid():
self.assertNotEqual(process_info.GetUserID(), lldb.UINT32_MAX, 'Process user ID is valid')
else:
self.assertEqual(process_info.GetUserID(), lldb.UINT32_MAX, 'Process user ID is invalid')
if process_info.GroupIDIsValid():
self.assertNotEqual(process_info.GetGroupID(), lldb.UINT32_MAX, 'Process group ID is valid')
else:
self.assertEqual(process_info.GetGroupID(), lldb.UINT32_MAX, 'Process group ID is invalid')
if process_info.EffectiveUserIDIsValid():
self.assertNotEqual(process_info.GetEffectiveUserID(), lldb.UINT32_MAX, 'Process effective user ID is valid')
else:
self.assertEqual(process_info.GetEffectiveUserID(), lldb.UINT32_MAX, 'Process effective user ID is invalid')
if process_info.EffectiveGroupIDIsValid():
self.assertNotEqual(process_info.GetEffectiveGroupID(), lldb.UINT32_MAX, 'Process effective group ID is valid')
else:
self.assertEqual(process_info.GetEffectiveGroupID(), lldb.UINT32_MAX, 'Process effective group ID is invalid')
process_info.GetParentProcessID()
|
def test_allocate_deallocate_memory(self):
'Test Python SBProcess.AllocateMemory() and SBProcess.DeallocateMemory() APIs.'
self.build()
(target, process, main_thread, main_breakpoint) = lldbutil.run_to_source_breakpoint(self, '// Set break point at this line', lldb.SBFileSpec('main.cpp'))
error = lldb.SBError()
addr = process.AllocateMemory(16384, lldb.ePermissionsReadable, error)
if ((not error.Success()) or (addr == lldb.LLDB_INVALID_ADDRESS)):
self.fail('SBProcess.AllocateMemory() failed')
result = process.WriteMemory(addr, 'a', error)
if ((not error.Success()) or (result != 1)):
self.fail('SBProcess.WriteMemory() failed')
content = process.ReadMemory(addr, 1, error)
if (not error.Success()):
self.fail('SBProcess.ReadMemory() failed')
if self.TraceOn():
print('memory content:', content)
self.expect(content, "Result from SBProcess.ReadMemory() matches our expected output: 'a'", exe=False, startstr=b'a')
frame = main_thread.GetFrameAtIndex(0)
val = frame.EvaluateExpression('test_read(reinterpret_cast<char *>({:#x}))'.format(addr))
self.expect(val.GetValue(), "Result of test_read() matches expected output 'a'", exe=False, startstr="'a'")
val = frame.EvaluateExpression("test_write(reinterpret_cast<char *>({:#x}), 'b')".format(addr))
if val.GetError().Success():
self.fail('test_write() to allocated memory without write permission unexpectedly succeeded')
error = process.DeallocateMemory(addr)
if (not error.Success()):
self.fail('SBProcess.DeallocateMemory() failed')
| -1,831,053,923,018,552,000
|
Test Python SBProcess.AllocateMemory() and SBProcess.DeallocateMemory() APIs.
|
lldb/test/API/python_api/process/TestProcessAPI.py
|
test_allocate_deallocate_memory
|
AaronBallman/llvm
|
python
|
def test_allocate_deallocate_memory(self):
self.build()
(target, process, main_thread, main_breakpoint) = lldbutil.run_to_source_breakpoint(self, '// Set break point at this line', lldb.SBFileSpec('main.cpp'))
error = lldb.SBError()
addr = process.AllocateMemory(16384, lldb.ePermissionsReadable, error)
if ((not error.Success()) or (addr == lldb.LLDB_INVALID_ADDRESS)):
self.fail('SBProcess.AllocateMemory() failed')
result = process.WriteMemory(addr, 'a', error)
if ((not error.Success()) or (result != 1)):
self.fail('SBProcess.WriteMemory() failed')
content = process.ReadMemory(addr, 1, error)
if (not error.Success()):
self.fail('SBProcess.ReadMemory() failed')
if self.TraceOn():
print('memory content:', content)
self.expect(content, "Result from SBProcess.ReadMemory() matches our expected output: 'a'", exe=False, startstr=b'a')
frame = main_thread.GetFrameAtIndex(0)
val = frame.EvaluateExpression('test_read(reinterpret_cast<char *>({:#x}))'.format(addr))
self.expect(val.GetValue(), "Result of test_read() matches expected output 'a'", exe=False, startstr="'a'")
val = frame.EvaluateExpression("test_write(reinterpret_cast<char *>({:#x}), 'b')".format(addr))
if val.GetError().Success():
self.fail('test_write() to allocated memory without write permission unexpectedly succeeded')
error = process.DeallocateMemory(addr)
if (not error.Success()):
self.fail('SBProcess.DeallocateMemory() failed')
|
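The test above allocates a read-only region; as a hedged sketch (an assumption, not part of the test), lldb's permission constants can be OR-ed together for a read-write allocation:

# Assumes a live `process` and `error`, as in the test body above.
rw = lldb.ePermissionsReadable | lldb.ePermissionsWritable
addr = process.AllocateMemory(16384, rw, error)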
def plot_gll(x, y, z):
' Plots values on 2D unstructured GLL mesh\n '
r = ((max(x) - min(x)) / (max(y) - min(y)))
rx = (r / np.sqrt((1 + (r ** 2))))
ry = (1 / np.sqrt((1 + (r ** 2))))
f = plt.figure(figsize=((10 * rx), (10 * ry)))
p = plt.tricontourf(x, y, z, 125)
plt.axis('image')
return (f, p)
| -6,270,445,130,626,942,000
|
Plots values on 2D unstructured GLL mesh
|
seisflows/tools/graphics.py
|
plot_gll
|
fanwu8/SeisFlowsQ
|
python
|
def plot_gll(x, y, z):
' \n '
r = ((max(x) - min(x)) / (max(y) - min(y)))
rx = (r / np.sqrt((1 + (r ** 2))))
ry = (1 / np.sqrt((1 + (r ** 2))))
f = plt.figure(figsize=((10 * rx), (10 * ry)))
p = plt.tricontourf(x, y, z, 125)
plt.axis('image')
return (f, p)
|
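A usage sketch for plot_gll() with synthetic node data (the coordinates and field values below are made up, not SeisFlows output):

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
x = rng.uniform(0.0, 2.0, 500)              # mesh node x-coordinates
y = rng.uniform(0.0, 1.0, 500)              # mesh node y-coordinates
z = np.sin(np.pi * x) * np.cos(np.pi * y)   # field sampled at the nodes

f, p = plot_gll(x, y, z)
plt.colorbar(p)
plt.show()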
def plot_vector(t, v, xlabel='', ylabel='', title=''):
' Plots a vector or time series.\n\n Parameters\n ----------\n v: ndarray, ndims = 1/2\n Vector or time series to plot\n xlabel: str\n x axis label\n ylabel: str\n y axis label\n title: str\n plot title\n\n Raises\n ------\n ValueError\n If dimensions of v are greater than 2\n '
if (v.ndim > 2):
raise ValueError('v must be a vector or a time series')
if (v.ndim == 1):
x = list(range(len(v)))
y = v
else:
x = v[:, 0]
y = v[:, 1]
plt.plot(t, v)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.show()
| 8,343,828,763,393,565,000
|
Plots a vector or time series.
Parameters
----------
v: ndarray, ndims = 1/2
Vector or time series to plot
xlabel: str
x axis label
ylabel: str
y axis label
title: str
plot title
Raises
------
ValueError
If dimensions of v are greater than 2
|
seisflows/tools/graphics.py
|
plot_vector
|
fanwu8/SeisFlowsQ
|
python
|
def plot_vector(t, v, xlabel='', ylabel='', title=''):
' Plots a vector or time series.\n\n Parameters\n ----------\n v: ndarray, ndims = 1/2\n Vector or time series to plot\n xlabel: str\n x axis label\n ylabel: str\n y axis label\n title: str\n plot title\n\n Raises\n ------\n ValueError\n If dimensions of v are greater than 2\n '
if (v.ndim > 2):
raise ValueError('v must be a vector or a time series')
if (v.ndim == 1):
x = list(range(len(v)))
y = v
else:
x = v[:, 0]
y = v[:, 1]
plt.plot(t, v)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.show()
|
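A usage sketch for plot_vector() with a synthetic time series:

import numpy as np

t = np.linspace(0.0, 1.0, 200)
v = np.sin(2.0 * np.pi * 5.0 * t)   # a 5 Hz sine sampled at 200 points

plot_vector(t, v, xlabel='Time [s]', ylabel='Amplitude', title='5 Hz sine')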
def plot_section(stream, ax=None, cmap='seismic', clip=100, title='', x_interval=1.0, y_interval=1.0):
    ' Plots a seismic section from an obspy stream.\n\n    Parameters\n    ----------\n    stream: Obspy stream object\n        Obspy stream object created from a SU data file\n    ax: Matplotlib Axes object\n        Optional axis object\n    cmap: str\n        Matplotlib colormap option.\n    clip: float\n        Percentage value (0-100) for amplitude clipping\n    title: str\n        plot title\n    x_interval: float\n        Offset axis tick interval in km\n    y_interval: float\n        Time axis tick interval in s\n\n    Raises\n    ------\n    NotImplementedError\n        If stream object does not have SU format\n    '
if (stream[0].stats._format != 'SU'):
        raise NotImplementedError('plot_section currently only supports streams for SU data files.')
nr = len(stream)
nt = len(stream[0].data)
dt = stream[0].stats.delta
d_aspect = (nr / float(nt))
data = _convert_to_array(stream)
fsize = 6
scale_factor = 1.5
if (ax is None):
(fig, ax) = plt.subplots(figsize=(fsize, (scale_factor * fsize)))
im = ax.imshow(data, aspect=(scale_factor * d_aspect), clim=_cscale(data, clip=clip))
im.set_cmap(cmap)
ax.set_title(title)
ax.set_xlabel('Offset [km]')
ax.set_ylabel('Time [s]')
t = _get_time(stream)
(yticks, ytick_labels) = get_regular_ticks(t, y_interval)
ax.set_yticks(yticks)
ax.set_yticklabels(ytick_labels)
offsets = _get_offsets(stream)
(xticks, xtick_labels) = get_regular_ticks(offsets, x_interval)
ax.set_xticks(xticks)
ax.set_xticklabels(xtick_labels)
return ax
| -203,811,772,725,341,600
|
Plots a seismic section from an obspy stream.
Parameters
----------
stream: Obspy stream object
Obspy stream object created from a SU data file
ax: Matplotlib Axes object
Optional axis object
cmap: str
Matplotlib colormap option.
clip: float
Percentage value (0-100) for amplitude clipping
title: str
plot title
x_interval: float
Offset axis tick interval in km
y_interval: float
Time axis tick interval in s
Raises
------
NotImplementedError
If stream object does not have SU format
|
seisflows/tools/graphics.py
|
plot_section
|
fanwu8/SeisFlowsQ
|
python
|
def plot_section(stream, ax=None, cmap='seismic', clip=100, title='', x_interval=1.0, y_interval=1.0):
    ' Plots a seismic section from an obspy stream.\n\n    Parameters\n    ----------\n    stream: Obspy stream object\n        Obspy stream object created from a SU data file\n    ax: Matplotlib Axes object\n        Optional axis object\n    cmap: str\n        Matplotlib colormap option.\n    clip: float\n        Percentage value (0-100) for amplitude clipping\n    title: str\n        plot title\n    x_interval: float\n        Offset axis tick interval in km\n    y_interval: float\n        Time axis tick interval in s\n\n    Raises\n    ------\n    NotImplementedError\n        If stream object does not have SU format\n    '
if (stream[0].stats._format != 'SU'):
        raise NotImplementedError('plot_section currently only supports streams for SU data files.')
nr = len(stream)
nt = len(stream[0].data)
dt = stream[0].stats.delta
d_aspect = (nr / float(nt))
data = _convert_to_array(stream)
fsize = 6
scale_factor = 1.5
if (ax is None):
(fig, ax) = plt.subplots(figsize=(fsize, (scale_factor * fsize)))
im = ax.imshow(data, aspect=(scale_factor * d_aspect), clim=_cscale(data, clip=clip))
im.set_cmap(cmap)
ax.set_title(title)
ax.set_xlabel('Offset [km]')
ax.set_ylabel('Time [s]')
t = _get_time(stream)
(yticks, ytick_labels) = get_regular_ticks(t, y_interval)
ax.set_yticks(yticks)
ax.set_yticklabels(ytick_labels)
offsets = _get_offsets(stream)
(xticks, xtick_labels) = get_regular_ticks(offsets, x_interval)
ax.set_xticks(xticks)
ax.set_xticklabels(xtick_labels)
return ax
|
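A hedged usage sketch for plot_section(); 'shot_gather.su' is a hypothetical file name, and the function requires SU-format input:

from obspy import read
import matplotlib.pyplot as plt

stream = read('shot_gather.su', format='SU')
ax = plot_section(stream, clip=90, title='Shot gather',
                  x_interval=0.5, y_interval=0.5)
plt.show()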
def _convert_to_array(stream):
' Extracts trace data from an obspy stream and returns a 2D array.\n\n Parameters\n ----------\n stream: Obspy stream object\n Stream storing trace data\n\n Returns\n -------\n output: ndarray, ndim=2\n Returns an (nt*nr) array. nt and nr are the number of sample points\n and number of traces respectively. Assumes trace lengths are equal\n for all traces.\n\n Raises\n ------\n TypeError\n If stream is not an obspy stream\n '
if (not isinstance(stream, Stream)):
raise TypeError('Input object should be an obspy stream.')
nt = len(stream.traces[0].data)
nr = len(stream)
output = np.zeros((nt, nr))
for (i, trace) in enumerate(stream):
output[:, i] = trace.data[:]
return output
| 5,514,239,679,669,107,000
|
Extracts trace data from an obspy stream and returns a 2D array.
Parameters
----------
stream: Obspy stream object
Stream storing trace data
Returns
-------
output: ndarray, ndim=2
Returns an (nt*nr) array. nt and nr are the number of sample points
and number of traces respectively. Assumes trace lengths are equal
for all traces.
Raises
------
TypeError
If stream is not an obspy stream
|
seisflows/tools/graphics.py
|
_convert_to_array
|
fanwu8/SeisFlowsQ
|
python
|
def _convert_to_array(stream):
' Extracts trace data from an obspy stream and returns a 2D array.\n\n Parameters\n ----------\n stream: Obspy stream object\n Stream storing trace data\n\n Returns\n -------\n output: ndarray, ndim=2\n Returns an (nt*nr) array. nt and nr are the number of sample points\n and number of traces respectively. Assumes trace lengths are equal\n for all traces.\n\n Raises\n ------\n TypeError\n If stream is not an obspy stream\n '
if (not isinstance(stream, Stream)):
raise TypeError('Input object should be an obspy stream.')
nt = len(stream.traces[0].data)
nr = len(stream)
output = np.zeros((nt, nr))
for (i, trace) in enumerate(stream):
output[:, i] = trace.data[:]
return output
|
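The layout _convert_to_array() produces is equivalent to column-stacking the traces; a small numpy sketch of the same (nt, nr) shape contract:

import numpy as np

nt, nr = 1000, 24                  # samples per trace, number of traces
traces = [np.random.randn(nt) for _ in range(nr)]
data = np.column_stack(traces)     # one trace per column, as in the function
assert data.shape == (nt, nr)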
def _cscale(v, clip=100):
' Return limits for colormap.\n '
perc = (clip / 100.0)
return (((- perc) * abs(v).max()), (perc * abs(v).max()))
| 1,486,020,659,555,331,300
|
Return limits for colormap.
|
seisflows/tools/graphics.py
|
_cscale
|
fanwu8/SeisFlowsQ
|
python
|
def _cscale(v, clip=100):
' \n '
perc = (clip / 100.0)
return (((- perc) * abs(v).max()), (perc * abs(v).max()))
|
def _get_time(stream):
' Get fixed time vector for stream object.\n '
dt = stream[0].stats.delta
nt = len(stream[0].data)
return np.arange(0, (nt * dt), dt)
| 3,518,869,647,590,698,500
|
Get fixed time vector for stream object.
|
seisflows/tools/graphics.py
|
_get_time
|
fanwu8/SeisFlowsQ
|
python
|
def _get_time(stream):
' \n '
dt = stream[0].stats.delta
nt = len(stream[0].data)
return np.arange(0, (nt * dt), dt)
|
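One caveat on the np.arange call above: with a floating-point step it can occasionally yield nt + 1 samples; np.linspace sidesteps this (a suggested alternative, not the module's code):

import numpy as np

dt, nt = 0.004, 1000
t = np.linspace(0.0, (nt - 1) * dt, nt)   # always exactly nt samples
assert len(t) == nt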
def _get_offsets(stream):
' Return offsets.\n '
nr = len(stream)
offsets = np.zeros(nr)
scalco = stream[0].stats.su.trace_header.scalar_to_be_applied_to_all_coordinates
if (scalco == 0):
scalco = 0.001
else:
scalco = (0.001 / scalco)
for (i, tr) in enumerate(stream):
offsets[i] = ((tr.stats.su.trace_header.group_coordinate_x - tr.stats.su.trace_header.source_coordinate_x) * scalco)
return offsets
| 8,875,202,697,701,741,000
|
Return offsets.
|
seisflows/tools/graphics.py
|
_get_offsets
|
fanwu8/SeisFlowsQ
|
python
|
def _get_offsets(stream):
' \n '
nr = len(stream)
offsets = np.zeros(nr)
scalco = stream[0].stats.su.trace_header.scalar_to_be_applied_to_all_coordinates
if (scalco == 0):
scalco = 0.001
else:
scalco = (0.001 / scalco)
for (i, tr) in enumerate(stream):
offsets[i] = ((tr.stats.su.trace_header.group_coordinate_x - tr.stats.su.trace_header.source_coordinate_x) * scalco)
return offsets
|
def get_regular_ticks(v, interval):
' Returns regular tick intervals.\n '
f = interp1d(v, list(range(len(v))))
begin = (int((v[0] / interval)) * interval)
end = v[(- 1)]
tick_labels = np.arange(begin, end, interval)
ticks = f(tick_labels)
return (ticks, tick_labels)
| -4,995,927,191,784,999,000
|
Returns regular tick intervals.
|
seisflows/tools/graphics.py
|
get_regular_ticks
|
fanwu8/SeisFlowsQ
|
python
|
def get_regular_ticks(v, interval):
' \n '
f = interp1d(v, list(range(len(v))))
begin = (int((v[0] / interval)) * interval)
end = v[(- 1)]
tick_labels = np.arange(begin, end, interval)
ticks = f(tick_labels)
return (ticks, tick_labels)
|
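A usage sketch for get_regular_ticks(): mapping labels spaced every 0.5 s onto fractional sample positions of a synthetic 4 s time vector:

import numpy as np

t = np.arange(0.0, 4.0, 0.004)                  # dt = 4 ms
ticks, tick_labels = get_regular_ticks(t, 0.5)  # labels 0.0, 0.5, ..., 3.5
# `ticks` holds the fractional sample indices where those labels belong.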
def generate_kernel_pod_yaml(keywords):
'Return the kubernetes pod spec as a yaml string.\n\n - load jinja2 template from this file directory.\n - substitute template variables with keywords items.\n '
j_env = Environment(loader=FileSystemLoader(os.path.dirname(__file__)), trim_blocks=True, lstrip_blocks=True)
k8s_yaml = j_env.get_template(KERNEL_POD_TEMPLATE_PATH).render(**keywords)
return k8s_yaml
| 5,852,460,280,651,974,000
|
Return the kubernetes pod spec as a yaml string.
- load jinja2 template from this file directory.
- substitute template variables with keywords items.
|
tools/kernelspecs/kernels/R_kubernetes/scripts/launch_kubernetes.py
|
generate_kernel_pod_yaml
|
spotinst/wave-operator
|
python
|
def generate_kernel_pod_yaml(keywords):
'Return the kubernetes pod spec as a yaml string.\n\n - load jinja2 template from this file directory.\n - substitute template variables with keywords items.\n '
j_env = Environment(loader=FileSystemLoader(os.path.dirname(__file__)), trim_blocks=True, lstrip_blocks=True)
k8s_yaml = j_env.get_template(KERNEL_POD_TEMPLATE_PATH).render(**keywords)
return k8s_yaml
|
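A hedged usage sketch for generate_kernel_pod_yaml(); the keyword names below are illustrative placeholders, not necessarily the variables the bundled template declares:

keywords = {
    'kernel_pod_name': 'r-kernel-abc123',   # hypothetical template variable
    'kernel_namespace': 'jupyter',          # hypothetical template variable
    'docker_image': 'elyra/kernel-r:dev',   # hypothetical template variable
}
print(generate_kernel_pod_yaml(keywords))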
def error_run(arguments: list[str], message: bytes) -> None:
'Run command that should fail and check error message.'
with Popen((['map-machine'] + arguments), stderr=PIPE) as pipe:
(_, error) = pipe.communicate()
assert (pipe.returncode != 0)
assert (error == message)
| 8,372,083,281,896,209,000
|
Run command that should fail and check error message.
|
tests/test_command_line.py
|
error_run
|
LaoshuBaby/map-machine
|
python
|
def error_run(arguments: list[str], message: bytes) -> None:
with Popen((['map-machine'] + arguments), stderr=PIPE) as pipe:
(_, error) = pipe.communicate()
assert (pipe.returncode != 0)
assert (error == message)
|
def run(arguments: list[str], message: bytes) -> None:
    'Run command that should succeed and check its log output.'
with Popen((['map-machine'] + arguments), stderr=PIPE) as pipe:
(_, error) = pipe.communicate()
assert (pipe.returncode == 0)
assert (error == message)
| 1,844,572,730,295,534,300
|
Run command that should succeed and check its log output.
|
tests/test_command_line.py
|
run
|
LaoshuBaby/map-machine
|
python
|
def run(arguments: list[str], message: bytes) -> None:
with Popen((['map-machine'] + arguments), stderr=PIPE) as pipe:
(_, error) = pipe.communicate()
assert (pipe.returncode == 0)
assert (error == message)
|
def test_wrong_render_arguments() -> None:
'Test `render` command with wrong arguments.'
error_run(['render', '-z', '17'], b'CRITICAL Specify either --input, or --boundary-box, or --coordinates and --size.\n')
| 5,530,455,509,936,444,000
|
Test `render` command with wrong arguments.
|
tests/test_command_line.py
|
test_wrong_render_arguments
|
LaoshuBaby/map-machine
|
python
|
def test_wrong_render_arguments() -> None:
error_run(['render', '-z', '17'], b'CRITICAL Specify either --input, or --boundary-box, or --coordinates and --size.\n')
|
def test_render() -> None:
'Test `render` command.'
run((COMMAND_LINES['render'] + ['--cache', 'tests/data']), (LOG + b'INFO Writing output SVG to out/map.svg...\n'))
with Path('out/map.svg').open(encoding='utf-8') as output_file:
root: Element = ElementTree.parse(output_file).getroot()
assert (len(root) == 8)
assert (len(root[3][0]) == 0)
assert (root.get('width') == '186.0')
assert (root.get('height') == '198.0')
| -8,102,067,962,734,609,000
|
Test `render` command.
|
tests/test_command_line.py
|
test_render
|
LaoshuBaby/map-machine
|
python
|
def test_render() -> None:
run((COMMAND_LINES['render'] + ['--cache', 'tests/data']), (LOG + b'INFO Writing output SVG to out/map.svg...\n'))
with Path('out/map.svg').open(encoding='utf-8') as output_file:
root: Element = ElementTree.parse(output_file).getroot()
assert (len(root) == 8)
assert (len(root[3][0]) == 0)
assert (root.get('width') == '186.0')
assert (root.get('height') == '198.0')
|
def test_render_with_tooltips() -> None:
'Test `render` command.'
run((COMMAND_LINES['render_with_tooltips'] + ['--cache', 'tests/data']), (LOG + b'INFO Writing output SVG to out/map.svg...\n'))
with Path('out/map.svg').open(encoding='utf-8') as output_file:
root: Element = ElementTree.parse(output_file).getroot()
assert (len(root) == 8)
assert (len(root[3][0]) == 1)
assert (root[3][0][0].text == 'natural: tree')
assert (root.get('width') == '186.0')
assert (root.get('height') == '198.0')
| -3,097,467,188,967,107,000
|
Test `render` command.
|
tests/test_command_line.py
|
test_render_with_tooltips
|
LaoshuBaby/map-machine
|
python
|
def test_render_with_tooltips() -> None:
run((COMMAND_LINES['render_with_tooltips'] + ['--cache', 'tests/data']), (LOG + b'INFO Writing output SVG to out/map.svg...\n'))
with Path('out/map.svg').open(encoding='utf-8') as output_file:
root: Element = ElementTree.parse(output_file).getroot()
assert (len(root) == 8)
assert (len(root[3][0]) == 1)
assert (root[3][0][0].text == 'natural: tree')
assert (root.get('width') == '186.0')
assert (root.get('height') == '198.0')
|
def test_icons() -> None:
'Test `icons` command.'
run(COMMAND_LINES['icons'], b'INFO Icons are written to out/icons_by_name and out/icons_by_id.\nINFO Icon grid is written to out/icon_grid.svg.\nINFO Icon grid is written to doc/grid.svg.\n')
assert (Path('out') / 'icon_grid.svg').is_file()
assert (Path('out') / 'icons_by_name').is_dir()
assert (Path('out') / 'icons_by_id').is_dir()
assert ((Path('out') / 'icons_by_name') / 'Röntgen apple.svg').is_file()
assert ((Path('out') / 'icons_by_id') / 'apple.svg').is_file()
| -1,171,357,103,594,110,700
|
Test `icons` command.
|
tests/test_command_line.py
|
test_icons
|
LaoshuBaby/map-machine
|
python
|
def test_icons() -> None:
run(COMMAND_LINES['icons'], b'INFO Icons are written to out/icons_by_name and out/icons_by_id.\nINFO Icon grid is written to out/icon_grid.svg.\nINFO Icon grid is written to doc/grid.svg.\n')
assert (Path('out') / 'icon_grid.svg').is_file()
assert (Path('out') / 'icons_by_name').is_dir()
assert (Path('out') / 'icons_by_id').is_dir()
assert ((Path('out') / 'icons_by_name') / 'Röntgen apple.svg').is_file()
assert ((Path('out') / 'icons_by_id') / 'apple.svg').is_file()
|
def test_mapcss() -> None:
'Test `mapcss` command.'
run(COMMAND_LINES['mapcss'], b'INFO MapCSS 0.2 scheme is written to out/map_machine_mapcss.\n')
assert (Path('out') / 'map_machine_mapcss').is_dir()
assert ((Path('out') / 'map_machine_mapcss') / 'icons').is_dir()
assert (((Path('out') / 'map_machine_mapcss') / 'icons') / 'apple.svg').is_file()
assert ((Path('out') / 'map_machine_mapcss') / 'map_machine.mapcss').is_file()
| -34,753,347,926,657,268
|
Test `mapcss` command.
|
tests/test_command_line.py
|
test_mapcss
|
LaoshuBaby/map-machine
|
python
|
def test_mapcss() -> None:
run(COMMAND_LINES['mapcss'], b'INFO MapCSS 0.2 scheme is written to out/map_machine_mapcss.\n')
assert (Path('out') / 'map_machine_mapcss').is_dir()
assert ((Path('out') / 'map_machine_mapcss') / 'icons').is_dir()
assert (((Path('out') / 'map_machine_mapcss') / 'icons') / 'apple.svg').is_file()
assert ((Path('out') / 'map_machine_mapcss') / 'map_machine.mapcss').is_file()
|
def test_element() -> None:
'Test `element` command.'
run(COMMAND_LINES['element'], b'INFO Element is written to out/element.svg.\n')
assert (Path('out') / 'element.svg').is_file()
| 3,954,329,336,101,989,000
|
Test `element` command.
|
tests/test_command_line.py
|
test_element
|
LaoshuBaby/map-machine
|
python
|
def test_element() -> None:
run(COMMAND_LINES['element'], b'INFO Element is written to out/element.svg.\n')
assert (Path('out') / 'element.svg').is_file()
|
def test_tile() -> None:
'Test `tile` command.'
run((COMMAND_LINES['tile'] + ['--cache', 'tests/data']), (LOG + b'INFO Tile is drawn to out/tiles/tile_18_160199_88904.svg.\nINFO SVG file is rasterized to out/tiles/tile_18_160199_88904.png.\n'))
assert ((Path('out') / 'tiles') / 'tile_18_160199_88904.svg').is_file()
assert ((Path('out') / 'tiles') / 'tile_18_160199_88904.png').is_file()
| -5,938,328,673,918,745,000
|
Test `tile` command.
|
tests/test_command_line.py
|
test_tile
|
LaoshuBaby/map-machine
|
python
|
def test_tile() -> None:
run((COMMAND_LINES['tile'] + ['--cache', 'tests/data']), (LOG + b'INFO Tile is drawn to out/tiles/tile_18_160199_88904.svg.\nINFO SVG file is rasterized to out/tiles/tile_18_160199_88904.png.\n'))
assert ((Path('out') / 'tiles') / 'tile_18_160199_88904.svg').is_file()
assert ((Path('out') / 'tiles') / 'tile_18_160199_88904.png').is_file()
|
def run(path: str, output_file: str='', mongo=False) -> Union[(None, List[dict])]:
    'Invokes the `isis2json` utility with the parameters adapted for reading\n    MST files, according to the default settings used by the __main__ of the\n    `isis2json` tool.\n\n    The output can be written directly to a file on disk, or a list containing\n    the lines that can be converted to JSON is returned.\n\n    Example:\n    >>> run("file.mst")\n    >>> [{"mfn": 1}, {"mfn": 2}]\n\n    >>> run("file.mst", output_file="/tmp/output.json")\n    >>> None\n    '
if (not os.path.exists(path)):
        raise FileNotFoundError("File '%s' does not exist." % path)
if (len(output_file) > 0):
output_file = open(output_file, 'wb')
else:
output_file = OutputContainer()
isis2json.writeJsonArray(iterRecords=isis2json.iterMstRecords, file_name=path, output=output_file, qty=isis2json.DEFAULT_QTY, skip=0, id_tag=0, gen_uuid=False, mongo=mongo, mfn=True, isis_json_type=3, prefix='v', constant='')
output_file.close()
if isinstance(output_file, OutputContainer):
return output_file.lines
| 5,417,742,210,112,840,000
|
Invokes the `isis2json` utility with the parameters adapted for reading MST files, according to the default settings used by the __main__ of the `isis2json` tool.
The output can be written directly to a file on disk, or a list containing the lines that can be converted to JSON is returned.
Example:
>>> run("file.mst")
>>> [{"mfn": 1}, {"mfn": 2}]
>>> run("file.mst", output_file="/tmp/output.json")
>>> None
|
documentstore_migracao/utils/extract_isis.py
|
run
|
patymori/document-store-migracao
|
python
|
def run(path: str, output_file: str='', mongo=False) -> Union[(None, List[dict])]:
    'Invokes the `isis2json` utility with the parameters adapted for reading\n    MST files, according to the default settings used by the __main__ of the\n    `isis2json` tool.\n\n    The output can be written directly to a file on disk, or a list containing\n    the lines that can be converted to JSON is returned.\n\n    Example:\n    >>> run("file.mst")\n    >>> [{"mfn": 1}, {"mfn": 2}]\n\n    >>> run("file.mst", output_file="/tmp/output.json")\n    >>> None\n    '
if (not os.path.exists(path)):
        raise FileNotFoundError("File '%s' does not exist." % path)
if (len(output_file) > 0):
output_file = open(output_file, 'wb')
else:
output_file = OutputContainer()
    isis2json.writeJsonArray(iterRecords=isis2json.iterMstRecords, file_name=path, output=output_file, qty=isis2json.DEFAULT_QTY, skip=0, id_tag=0, gen_uuid=False, mongo=mongo, mfn=True, isis_json_type=3, prefix='v', constant='')
output_file.close()
if isinstance(output_file, OutputContainer):
return output_file.lines
|
def __init__(__self__, *, host_account_names: pulumi.Input[Sequence[pulumi.Input[str]]], host_group_id: pulumi.Input[str], instance_id: pulumi.Input[str], user_group_id: pulumi.Input[str]):
        '\n        The set of arguments for constructing a HostGroupAccountUserGroupAttachment resource.\n        :param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list of names of the host account.\n        :param pulumi.Input[str] host_group_id: The ID of the host group.\n        :param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.\n        :param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.\n        '
pulumi.set(__self__, 'host_account_names', host_account_names)
pulumi.set(__self__, 'host_group_id', host_group_id)
pulumi.set(__self__, 'instance_id', instance_id)
pulumi.set(__self__, 'user_group_id', user_group_id)
| 8,620,752,537,416,545,000
|
The set of arguments for constructing a HostGroupAccountUserGroupAttachment resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list of names of the host account.
:param pulumi.Input[str] host_group_id: The ID of the host group.
:param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
:param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
|
sdk/python/pulumi_alicloud/bastionhost/host_group_account_user_group_attachment.py
|
__init__
|
pulumi/pulumi-alicloud
|
python
|
def __init__(__self__, *, host_account_names: pulumi.Input[Sequence[pulumi.Input[str]]], host_group_id: pulumi.Input[str], instance_id: pulumi.Input[str], user_group_id: pulumi.Input[str]):
        '\n        The set of arguments for constructing a HostGroupAccountUserGroupAttachment resource.\n        :param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list of names of the host account.\n        :param pulumi.Input[str] host_group_id: The ID of the host group.\n        :param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.\n        :param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.\n        '
pulumi.set(__self__, 'host_account_names', host_account_names)
pulumi.set(__self__, 'host_group_id', host_group_id)
pulumi.set(__self__, 'instance_id', instance_id)
pulumi.set(__self__, 'user_group_id', user_group_id)
|
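A construction sketch for the args class above; every ID string is a placeholder value:

args = HostGroupAccountUserGroupAttachmentArgs(
    host_account_names=['example_value-0'],    # placeholder account name
    host_group_id='example_host_group_id',     # placeholder
    instance_id='bastionhost-cn-tl3xxxxxxx',   # placeholder
    user_group_id='example_user_group_id',     # placeholder
)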
@property
@pulumi.getter(name='hostAccountNames')
def host_account_names(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        '\n        A list of names of the host account.\n        '
return pulumi.get(self, 'host_account_names')
| 8,201,247,303,138,980,000
|
A list of names of the host account.
|
sdk/python/pulumi_alicloud/bastionhost/host_group_account_user_group_attachment.py
|
host_account_names
|
pulumi/pulumi-alicloud
|
python
|
@property
@pulumi.getter(name='hostAccountNames')
def host_account_names(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
'\n \n '
return pulumi.get(self, 'host_account_names')
|
@property
@pulumi.getter(name='hostGroupId')
def host_group_id(self) -> pulumi.Input[str]:
'\n The ID of the host group.\n '
return pulumi.get(self, 'host_group_id')
| 3,970,179,039,349,197,000
|
The ID of the host group.
|
sdk/python/pulumi_alicloud/bastionhost/host_group_account_user_group_attachment.py
|
host_group_id
|
pulumi/pulumi-alicloud
|
python
|
@property
@pulumi.getter(name='hostGroupId')
def host_group_id(self) -> pulumi.Input[str]:
'\n \n '
return pulumi.get(self, 'host_group_id')
|
@property
@pulumi.getter(name='instanceId')
def instance_id(self) -> pulumi.Input[str]:
'\n The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.\n '
return pulumi.get(self, 'instance_id')
| -3,452,605,385,748,844,000
|
The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
|
sdk/python/pulumi_alicloud/bastionhost/host_group_account_user_group_attachment.py
|
instance_id
|
pulumi/pulumi-alicloud
|
python
|
@property
@pulumi.getter(name='instanceId')
def instance_id(self) -> pulumi.Input[str]:
'\n \n '
return pulumi.get(self, 'instance_id')
|
@property
@pulumi.getter(name='userGroupId')
def user_group_id(self) -> pulumi.Input[str]:
'\n The ID of the user group that you want to authorize to manage the specified hosts and host accounts.\n '
return pulumi.get(self, 'user_group_id')
| -8,345,498,789,096,243,000
|
The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
|
sdk/python/pulumi_alicloud/bastionhost/host_group_account_user_group_attachment.py
|
user_group_id
|
pulumi/pulumi-alicloud
|
python
|
@property
@pulumi.getter(name='userGroupId')
def user_group_id(self) -> pulumi.Input[str]:
'\n \n '
return pulumi.get(self, 'user_group_id')
|
def __init__(__self__, *, host_account_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None, host_group_id: Optional[pulumi.Input[str]]=None, instance_id: Optional[pulumi.Input[str]]=None, user_group_id: Optional[pulumi.Input[str]]=None):
        '\n        Input properties used for looking up and filtering HostGroupAccountUserGroupAttachment resources.\n        :param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list of names of the host account.\n        :param pulumi.Input[str] host_group_id: The ID of the host group.\n        :param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.\n        :param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.\n        '
if (host_account_names is not None):
pulumi.set(__self__, 'host_account_names', host_account_names)
if (host_group_id is not None):
pulumi.set(__self__, 'host_group_id', host_group_id)
if (instance_id is not None):
pulumi.set(__self__, 'instance_id', instance_id)
if (user_group_id is not None):
pulumi.set(__self__, 'user_group_id', user_group_id)
| 4,413,283,507,895,842,300
|
Input properties used for looking up and filtering HostGroupAccountUserGroupAttachment resources.
:param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list of names of the host account.
:param pulumi.Input[str] host_group_id: The ID of the host group.
:param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
:param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
|
sdk/python/pulumi_alicloud/bastionhost/host_group_account_user_group_attachment.py
|
__init__
|
pulumi/pulumi-alicloud
|
python
|
def __init__(__self__, *, host_account_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None, host_group_id: Optional[pulumi.Input[str]]=None, instance_id: Optional[pulumi.Input[str]]=None, user_group_id: Optional[pulumi.Input[str]]=None):
        '\n        Input properties used for looking up and filtering HostGroupAccountUserGroupAttachment resources.\n        :param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list of names of the host account.\n        :param pulumi.Input[str] host_group_id: The ID of the host group.\n        :param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.\n        :param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.\n        '
if (host_account_names is not None):
pulumi.set(__self__, 'host_account_names', host_account_names)
if (host_group_id is not None):
pulumi.set(__self__, 'host_group_id', host_group_id)
if (instance_id is not None):
pulumi.set(__self__, 'instance_id', instance_id)
if (user_group_id is not None):
pulumi.set(__self__, 'user_group_id', user_group_id)
|
@property
@pulumi.getter(name='hostAccountNames')
def host_account_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        '\n        A list of names of the host account.\n        '
return pulumi.get(self, 'host_account_names')
| -6,218,258,946,766,746,000
|
A list of names of the host account.
|
sdk/python/pulumi_alicloud/bastionhost/host_group_account_user_group_attachment.py
|
host_account_names
|
pulumi/pulumi-alicloud
|
python
|
@property
@pulumi.getter(name='hostAccountNames')
def host_account_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
'\n \n '
return pulumi.get(self, 'host_account_names')
|
@property
@pulumi.getter(name='hostGroupId')
def host_group_id(self) -> Optional[pulumi.Input[str]]:
'\n The ID of the host group.\n '
return pulumi.get(self, 'host_group_id')
| -1,706,495,266,037,631,500
|
The ID of the host group.
|
sdk/python/pulumi_alicloud/bastionhost/host_group_account_user_group_attachment.py
|
host_group_id
|
pulumi/pulumi-alicloud
|
python
|
@property
@pulumi.getter(name='hostGroupId')
def host_group_id(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'host_group_id')
|
@property
@pulumi.getter(name='instanceId')
def instance_id(self) -> Optional[pulumi.Input[str]]:
'\n The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.\n '
return pulumi.get(self, 'instance_id')
| 1,680,588,397,272,546,600
|
The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
|
sdk/python/pulumi_alicloud/bastionhost/host_group_account_user_group_attachment.py
|
instance_id
|
pulumi/pulumi-alicloud
|
python
|
@property
@pulumi.getter(name='instanceId')
def instance_id(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'instance_id')
|
@property
@pulumi.getter(name='userGroupId')
def user_group_id(self) -> Optional[pulumi.Input[str]]:
'\n The ID of the user group that you want to authorize to manage the specified hosts and host accounts.\n '
return pulumi.get(self, 'user_group_id')
| -1,121,814,425,541,299,700
|
The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
|
sdk/python/pulumi_alicloud/bastionhost/host_group_account_user_group_attachment.py
|
user_group_id
|
pulumi/pulumi-alicloud
|
python
|
@property
@pulumi.getter(name='userGroupId')
def user_group_id(self) -> Optional[pulumi.Input[str]]:
'\n \n '
return pulumi.get(self, 'user_group_id')
|
@overload
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, host_account_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None, host_group_id: Optional[pulumi.Input[str]]=None, instance_id: Optional[pulumi.Input[str]]=None, user_group_id: Optional[pulumi.Input[str]]=None, __props__=None):
'\n Provides a Bastion Host Host Account Attachment resource to add list host accounts into one user group and one host group.\n\n > **NOTE:** Available in v1.135.0+.\n\n ## Example Usage\n\n Basic Usage\n\n ```python\n import pulumi\n import pulumi_alicloud as alicloud\n\n default_host = alicloud.bastionhost.Host("defaultHost",\n instance_id="bastionhost-cn-tl3xxxxxxx",\n host_name=var["name"],\n active_address_type="Private",\n host_private_address="172.16.0.10",\n os_type="Linux",\n source="Local")\n default_host_account = []\n for range in [{"value": i} for i in range(0, 3)]:\n default_host_account.append(alicloud.bastionhost.HostAccount(f"defaultHostAccount-{range[\'value\']}",\n instance_id=default_host.instance_id,\n host_account_name=f"example_value-{range[\'value\']}",\n host_id=default_host.host_id,\n protocol_name="SSH",\n password="YourPassword12345"))\n default_user_group = alicloud.bastionhost.UserGroup("defaultUserGroup",\n instance_id=default_host.instance_id,\n user_group_name="my-local-user")\n default_host_group = alicloud.bastionhost.HostGroup("defaultHostGroup",\n host_group_name="example_value",\n instance_id="bastionhost-cn-tl3xxxxxxx")\n default_host_group_account_user_group_attachment = alicloud.bastionhost.HostGroupAccountUserGroupAttachment("defaultHostGroupAccountUserGroupAttachment",\n instance_id=default_host.instance_id,\n user_group_id=default_user_group.user_group_id,\n host_group_id=default_host_group.host_group_id,\n host_account_names=[__item.host_account_name for __item in default_host_account])\n ```\n\n ## Import\n\n Bastion Host Host Account can be imported using the id, e.g.\n\n ```sh\n $ pulumi import alicloud:bastionhost/hostGroupAccountUserGroupAttachment:HostGroupAccountUserGroupAttachment example <instance_id>:<user_group_id>:<host_group_id>\n ```\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list names of the host account.\n :param pulumi.Input[str] host_group_id: The ID of the host group.\n :param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.\n :param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.\n '
...
| 8,111,317,736,864,197,000
|
Provides a Bastion Host Host Account Attachment resource to add a list of host accounts into one user group and one host group.
> **NOTE:** Available in v1.135.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
default_host = alicloud.bastionhost.Host("defaultHost",
instance_id="bastionhost-cn-tl3xxxxxxx",
host_name=var["name"],
active_address_type="Private",
host_private_address="172.16.0.10",
os_type="Linux",
source="Local")
default_host_account = []
for range in [{"value": i} for i in range(0, 3)]:
default_host_account.append(alicloud.bastionhost.HostAccount(f"defaultHostAccount-{range['value']}",
instance_id=default_host.instance_id,
host_account_name=f"example_value-{range['value']}",
host_id=default_host.host_id,
protocol_name="SSH",
password="YourPassword12345"))
default_user_group = alicloud.bastionhost.UserGroup("defaultUserGroup",
instance_id=default_host.instance_id,
user_group_name="my-local-user")
default_host_group = alicloud.bastionhost.HostGroup("defaultHostGroup",
host_group_name="example_value",
instance_id="bastionhost-cn-tl3xxxxxxx")
default_host_group_account_user_group_attachment = alicloud.bastionhost.HostGroupAccountUserGroupAttachment("defaultHostGroupAccountUserGroupAttachment",
instance_id=default_host.instance_id,
user_group_id=default_user_group.user_group_id,
host_group_id=default_host_group.host_group_id,
host_account_names=[__item.host_account_name for __item in default_host_account])
```
## Import
Bastion Host Host Account can be imported using the id, e.g.
```sh
$ pulumi import alicloud:bastionhost/hostGroupAccountUserGroupAttachment:HostGroupAccountUserGroupAttachment example <instance_id>:<user_group_id>:<host_group_id>
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list of names of the host account.
:param pulumi.Input[str] host_group_id: The ID of the host group.
:param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
:param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
|
sdk/python/pulumi_alicloud/bastionhost/host_group_account_user_group_attachment.py
|
__init__
|
pulumi/pulumi-alicloud
|
python
|
@overload
def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, host_account_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None, host_group_id: Optional[pulumi.Input[str]]=None, instance_id: Optional[pulumi.Input[str]]=None, user_group_id: Optional[pulumi.Input[str]]=None, __props__=None):
'\n Provides a Bastion Host Host Account Attachment resource to add list host accounts into one user group and one host group.\n\n > **NOTE:** Available in v1.135.0+.\n\n ## Example Usage\n\n Basic Usage\n\n ```python\n import pulumi\n import pulumi_alicloud as alicloud\n\n default_host = alicloud.bastionhost.Host("defaultHost",\n instance_id="bastionhost-cn-tl3xxxxxxx",\n host_name=var["name"],\n active_address_type="Private",\n host_private_address="172.16.0.10",\n os_type="Linux",\n source="Local")\n default_host_account = []\n for range in [{"value": i} for i in range(0, 3)]:\n default_host_account.append(alicloud.bastionhost.HostAccount(f"defaultHostAccount-{range[\'value\']}",\n instance_id=default_host.instance_id,\n host_account_name=f"example_value-{range[\'value\']}",\n host_id=default_host.host_id,\n protocol_name="SSH",\n password="YourPassword12345"))\n default_user_group = alicloud.bastionhost.UserGroup("defaultUserGroup",\n instance_id=default_host.instance_id,\n user_group_name="my-local-user")\n default_host_group = alicloud.bastionhost.HostGroup("defaultHostGroup",\n host_group_name="example_value",\n instance_id="bastionhost-cn-tl3xxxxxxx")\n default_host_group_account_user_group_attachment = alicloud.bastionhost.HostGroupAccountUserGroupAttachment("defaultHostGroupAccountUserGroupAttachment",\n instance_id=default_host.instance_id,\n user_group_id=default_user_group.user_group_id,\n host_group_id=default_host_group.host_group_id,\n host_account_names=[__item.host_account_name for __item in default_host_account])\n ```\n\n ## Import\n\n Bastion Host Host Account can be imported using the id, e.g.\n\n ```sh\n $ pulumi import alicloud:bastionhost/hostGroupAccountUserGroupAttachment:HostGroupAccountUserGroupAttachment example <instance_id>:<user_group_id>:<host_group_id>\n ```\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list names of the host account.\n :param pulumi.Input[str] host_group_id: The ID of the host group.\n :param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.\n :param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.\n '
...
|
@overload
def __init__(__self__, resource_name: str, args: HostGroupAccountUserGroupAttachmentArgs, opts: Optional[pulumi.ResourceOptions]=None):
'\n Provides a Bastion Host Host Account Attachment resource to add list host accounts into one user group and one host group.\n\n > **NOTE:** Available in v1.135.0+.\n\n ## Example Usage\n\n Basic Usage\n\n ```python\n import pulumi\n import pulumi_alicloud as alicloud\n\n default_host = alicloud.bastionhost.Host("defaultHost",\n instance_id="bastionhost-cn-tl3xxxxxxx",\n host_name=var["name"],\n active_address_type="Private",\n host_private_address="172.16.0.10",\n os_type="Linux",\n source="Local")\n default_host_account = []\n for range in [{"value": i} for i in range(0, 3)]:\n default_host_account.append(alicloud.bastionhost.HostAccount(f"defaultHostAccount-{range[\'value\']}",\n instance_id=default_host.instance_id,\n host_account_name=f"example_value-{range[\'value\']}",\n host_id=default_host.host_id,\n protocol_name="SSH",\n password="YourPassword12345"))\n default_user_group = alicloud.bastionhost.UserGroup("defaultUserGroup",\n instance_id=default_host.instance_id,\n user_group_name="my-local-user")\n default_host_group = alicloud.bastionhost.HostGroup("defaultHostGroup",\n host_group_name="example_value",\n instance_id="bastionhost-cn-tl3xxxxxxx")\n default_host_group_account_user_group_attachment = alicloud.bastionhost.HostGroupAccountUserGroupAttachment("defaultHostGroupAccountUserGroupAttachment",\n instance_id=default_host.instance_id,\n user_group_id=default_user_group.user_group_id,\n host_group_id=default_host_group.host_group_id,\n host_account_names=[__item.host_account_name for __item in default_host_account])\n ```\n\n ## Import\n\n Bastion Host Host Account can be imported using the id, e.g.\n\n ```sh\n $ pulumi import alicloud:bastionhost/hostGroupAccountUserGroupAttachment:HostGroupAccountUserGroupAttachment example <instance_id>:<user_group_id>:<host_group_id>\n ```\n\n :param str resource_name: The name of the resource.\n :param HostGroupAccountUserGroupAttachmentArgs args: The arguments to use to populate this resource\'s properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n '
...
| 8,345,629,500,227,280,000
|
Provides a Bastion Host Host Account Attachment resource to add a list of host accounts into one user group and one host group.
> **NOTE:** Available in v1.135.0+.
## Example Usage
Basic Usage
```python
import pulumi
import pulumi_alicloud as alicloud
default_host = alicloud.bastionhost.Host("defaultHost",
instance_id="bastionhost-cn-tl3xxxxxxx",
host_name=var["name"],
active_address_type="Private",
host_private_address="172.16.0.10",
os_type="Linux",
source="Local")
default_host_account = []
for range in [{"value": i} for i in range(0, 3)]:
default_host_account.append(alicloud.bastionhost.HostAccount(f"defaultHostAccount-{range['value']}",
instance_id=default_host.instance_id,
host_account_name=f"example_value-{range['value']}",
host_id=default_host.host_id,
protocol_name="SSH",
password="YourPassword12345"))
default_user_group = alicloud.bastionhost.UserGroup("defaultUserGroup",
instance_id=default_host.instance_id,
user_group_name="my-local-user")
default_host_group = alicloud.bastionhost.HostGroup("defaultHostGroup",
host_group_name="example_value",
instance_id="bastionhost-cn-tl3xxxxxxx")
default_host_group_account_user_group_attachment = alicloud.bastionhost.HostGroupAccountUserGroupAttachment("defaultHostGroupAccountUserGroupAttachment",
instance_id=default_host.instance_id,
user_group_id=default_user_group.user_group_id,
host_group_id=default_host_group.host_group_id,
host_account_names=[__item.host_account_name for __item in default_host_account])
```
## Import
Bastion Host Host Account can be imported using the id, e.g.
```sh
$ pulumi import alicloud:bastionhost/hostGroupAccountUserGroupAttachment:HostGroupAccountUserGroupAttachment example <instance_id>:<user_group_id>:<host_group_id>
```
:param str resource_name: The name of the resource.
:param HostGroupAccountUserGroupAttachmentArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
|
sdk/python/pulumi_alicloud/bastionhost/host_group_account_user_group_attachment.py
|
__init__
|
pulumi/pulumi-alicloud
|
python
|
@overload
def __init__(__self__, resource_name: str, args: HostGroupAccountUserGroupAttachmentArgs, opts: Optional[pulumi.ResourceOptions]=None):
'\n Provides a Bastion Host Host Account Attachment resource to add list host accounts into one user group and one host group.\n\n > **NOTE:** Available in v1.135.0+.\n\n ## Example Usage\n\n Basic Usage\n\n ```python\n import pulumi\n import pulumi_alicloud as alicloud\n\n default_host = alicloud.bastionhost.Host("defaultHost",\n instance_id="bastionhost-cn-tl3xxxxxxx",\n host_name=var["name"],\n active_address_type="Private",\n host_private_address="172.16.0.10",\n os_type="Linux",\n source="Local")\n default_host_account = []\n for range in [{"value": i} for i in range(0, 3)]:\n default_host_account.append(alicloud.bastionhost.HostAccount(f"defaultHostAccount-{range[\'value\']}",\n instance_id=default_host.instance_id,\n host_account_name=f"example_value-{range[\'value\']}",\n host_id=default_host.host_id,\n protocol_name="SSH",\n password="YourPassword12345"))\n default_user_group = alicloud.bastionhost.UserGroup("defaultUserGroup",\n instance_id=default_host.instance_id,\n user_group_name="my-local-user")\n default_host_group = alicloud.bastionhost.HostGroup("defaultHostGroup",\n host_group_name="example_value",\n instance_id="bastionhost-cn-tl3xxxxxxx")\n default_host_group_account_user_group_attachment = alicloud.bastionhost.HostGroupAccountUserGroupAttachment("defaultHostGroupAccountUserGroupAttachment",\n instance_id=default_host.instance_id,\n user_group_id=default_user_group.user_group_id,\n host_group_id=default_host_group.host_group_id,\n host_account_names=[__item.host_account_name for __item in default_host_account])\n ```\n\n ## Import\n\n Bastion Host Host Account can be imported using the id, e.g.\n\n ```sh\n $ pulumi import alicloud:bastionhost/hostGroupAccountUserGroupAttachment:HostGroupAccountUserGroupAttachment example <instance_id>:<user_group_id>:<host_group_id>\n ```\n\n :param str resource_name: The name of the resource.\n :param HostGroupAccountUserGroupAttachmentArgs args: The arguments to use to populate this resource\'s properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n '
...
|
@staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None, host_account_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None, host_group_id: Optional[pulumi.Input[str]]=None, instance_id: Optional[pulumi.Input[str]]=None, user_group_id: Optional[pulumi.Input[str]]=None) -> 'HostGroupAccountUserGroupAttachment':
"\n Get an existing HostGroupAccountUserGroupAttachment resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list names of the host account.\n :param pulumi.Input[str] host_group_id: The ID of the host group.\n :param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.\n :param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.\n "
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _HostGroupAccountUserGroupAttachmentState.__new__(_HostGroupAccountUserGroupAttachmentState)
__props__.__dict__['host_account_names'] = host_account_names
__props__.__dict__['host_group_id'] = host_group_id
__props__.__dict__['instance_id'] = instance_id
__props__.__dict__['user_group_id'] = user_group_id
return HostGroupAccountUserGroupAttachment(resource_name, opts=opts, __props__=__props__)
| -4,569,158,122,378,688,500
|
Get an existing HostGroupAccountUserGroupAttachment resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list of names of the host accounts.
:param pulumi.Input[str] host_group_id: The ID of the host group.
:param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
:param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
|
sdk/python/pulumi_alicloud/bastionhost/host_group_account_user_group_attachment.py
|
get
|
pulumi/pulumi-alicloud
|
python
|
@staticmethod
def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None, host_account_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]=None, host_group_id: Optional[pulumi.Input[str]]=None, instance_id: Optional[pulumi.Input[str]]=None, user_group_id: Optional[pulumi.Input[str]]=None) -> 'HostGroupAccountUserGroupAttachment':
"\n Get an existing HostGroupAccountUserGroupAttachment resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n :param pulumi.Input[Sequence[pulumi.Input[str]]] host_account_names: A list names of the host account.\n :param pulumi.Input[str] host_group_id: The ID of the host group.\n :param pulumi.Input[str] instance_id: The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.\n :param pulumi.Input[str] user_group_id: The ID of the user group that you want to authorize to manage the specified hosts and host accounts.\n "
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _HostGroupAccountUserGroupAttachmentState.__new__(_HostGroupAccountUserGroupAttachmentState)
__props__.__dict__['host_account_names'] = host_account_names
__props__.__dict__['host_group_id'] = host_group_id
__props__.__dict__['instance_id'] = instance_id
__props__.__dict__['user_group_id'] = user_group_id
return HostGroupAccountUserGroupAttachment(resource_name, opts=opts, __props__=__props__)
|
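As a hedged illustration of the `get` lookup above: the composite resource ID follows the `<instance_id>:<user_group_id>:<host_group_id>` format shown in the import section of the `__init__` docstring, so a state lookup might look like the following sketch. All IDs here are hypothetical placeholders, not real resources.

```python
import pulumi
import pulumi_alicloud as alicloud

# Hypothetical composite ID: <instance_id>:<user_group_id>:<host_group_id>
existing = alicloud.bastionhost.HostGroupAccountUserGroupAttachment.get(
    'example-lookup',
    id='bastionhost-cn-tl3xxxxxxx:10:5')

# The resolved state exposes the outputs defined by the properties below.
pulumi.export('hostAccountNames', existing.host_account_names)
```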
@property
@pulumi.getter(name='hostAccountNames')
def host_account_names(self) -> pulumi.Output[Sequence[str]]:
'\n A list of names of the host accounts.\n '
return pulumi.get(self, 'host_account_names')
| 8,301,108,355,006,178,000
|
A list of names of the host accounts.
|
sdk/python/pulumi_alicloud/bastionhost/host_group_account_user_group_attachment.py
|
host_account_names
|
pulumi/pulumi-alicloud
|
python
|
@property
@pulumi.getter(name='hostAccountNames')
def host_account_names(self) -> pulumi.Output[Sequence[str]]:
'\n \n '
return pulumi.get(self, 'host_account_names')
|
@property
@pulumi.getter(name='hostGroupId')
def host_group_id(self) -> pulumi.Output[str]:
'\n The ID of the host group.\n '
return pulumi.get(self, 'host_group_id')
| -2,142,794,302,333,448,700
|
The ID of the host group.
|
sdk/python/pulumi_alicloud/bastionhost/host_group_account_user_group_attachment.py
|
host_group_id
|
pulumi/pulumi-alicloud
|
python
|
@property
@pulumi.getter(name='hostGroupId')
def host_group_id(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'host_group_id')
|
@property
@pulumi.getter(name='instanceId')
def instance_id(self) -> pulumi.Output[str]:
'\n The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.\n '
return pulumi.get(self, 'instance_id')
| 6,237,722,831,590,990,000
|
The ID of the Bastionhost instance where you want to authorize the user to manage the specified hosts and host accounts.
|
sdk/python/pulumi_alicloud/bastionhost/host_group_account_user_group_attachment.py
|
instance_id
|
pulumi/pulumi-alicloud
|
python
|
@property
@pulumi.getter(name='instanceId')
def instance_id(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'instance_id')
|
@property
@pulumi.getter(name='userGroupId')
def user_group_id(self) -> pulumi.Output[str]:
'\n The ID of the user group that you want to authorize to manage the specified hosts and host accounts.\n '
return pulumi.get(self, 'user_group_id')
| -7,346,775,687,300,818,000
|
The ID of the user group that you want to authorize to manage the specified hosts and host accounts.
|
sdk/python/pulumi_alicloud/bastionhost/host_group_account_user_group_attachment.py
|
user_group_id
|
pulumi/pulumi-alicloud
|
python
|
@property
@pulumi.getter(name='userGroupId')
def user_group_id(self) -> pulumi.Output[str]:
'\n \n '
return pulumi.get(self, 'user_group_id')
|
def is_start_piece(piece):
'Check if the current word piece is the starting piece (BERT).'
return (not piece.startswith('##'))
| 6,410,489,472,930,046,000
|
Check if the current word piece is the starting piece (BERT).
|
libai/data/datasets/bert_dataset.py
|
is_start_piece
|
Oneflow-Inc/libai
|
python
|
def is_start_piece(piece):
return (not piece.startswith('##'))
|
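A quick sketch of the `##` convention this check relies on: BERT's WordPiece tokenizer prefixes every non-initial piece of a word with `##`, so the predicate is just a prefix test. The example tokens below are illustrative.

```python
# "playing" tokenizes to ["play", "##ing"]; only the first piece starts a word.
assert is_start_piece('play')
assert not is_start_piece('##ing')
```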
def truncate_seq_pair(self, tokens_a, tokens_b, max_num_tokens, np_rng):
'Truncate sequence pair to a maximum sequence length.'
(len_a, len_b) = (len(tokens_a), len(tokens_b))
while True:
total_length = (len_a + len_b)
if (total_length <= max_num_tokens):
break
if (len_a > len_b):
trunc_tokens = tokens_a
len_a -= 1
else:
trunc_tokens = tokens_b
len_b -= 1
if (np_rng.random() < 0.5):
trunc_tokens.pop(0)
else:
trunc_tokens.pop()
return (tokens_a, tokens_b)
| -3,613,704,199,210,650,000
|
Truncate sequence pair to a maximum sequence length.
|
libai/data/datasets/bert_dataset.py
|
truncate_seq_pair
|
Oneflow-Inc/libai
|
python
|
def truncate_seq_pair(self, tokens_a, tokens_b, max_num_tokens, np_rng):
(len_a, len_b) = (len(tokens_a), len(tokens_b))
while True:
total_length = (len_a + len_b)
if (total_length <= max_num_tokens):
break
if (len_a > len_b):
trunc_tokens = tokens_a
len_a -= 1
else:
trunc_tokens = tokens_b
len_b -= 1
if (np_rng.random() < 0.5):
trunc_tokens.pop(0)
else:
trunc_tokens.pop()
return (tokens_a, tokens_b)
|
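A minimal sketch of the truncation behavior, assuming `dataset` is an instance of the class this method belongs to: tokens are always removed from the longer side, and each removal is taken from the front or the back of that side with equal probability.

```python
import numpy as np

rng = np.random.default_rng(0)
tokens_a, tokens_b = list(range(10)), list(range(8))
a, b = dataset.truncate_seq_pair(tokens_a, tokens_b, max_num_tokens=12, np_rng=rng)
# Six tokens were dropped in total (the lists are mutated in place),
# and the pair now exactly fits the budget.
assert len(a) + len(b) == 12
```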
def create_tokens_and_token_types(self, tokens_a, tokens_b):
'Merge segments A and B, add [CLS] and [SEP] and build token types.'
tokens = (([self.cls_id] + tokens_a) + [self.sep_id])
token_types = ([0] * (len(tokens_a) + 2))
if (len(tokens_b) > 0):
tokens = ((tokens + tokens_b) + [self.sep_id])
token_types = (token_types + ([1] * (len(tokens_b) + 1)))
return (tokens, token_types)
| -7,094,127,822,114,704,000
|
Merge segments A and B, add [CLS] and [SEP] and build token types.
|
libai/data/datasets/bert_dataset.py
|
create_tokens_and_token_types
|
Oneflow-Inc/libai
|
python
|
def create_tokens_and_token_types(self, tokens_a, tokens_b):
tokens = (([self.cls_id] + tokens_a) + [self.sep_id])
token_types = ([0] * (len(tokens_a) + 2))
if (len(tokens_b) > 0):
tokens = ((tokens + tokens_b) + [self.sep_id])
token_types = (token_types + ([1] * (len(tokens_b) + 1)))
return (tokens, token_types)
|
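Concretely, using the standard bert-base-uncased special-token ids as an assumed example (`cls_id=101`, `sep_id=102`), two one-token segments merge as follows:

```python
cls_id, sep_id = 101, 102          # assumed bert-base-uncased special-token ids
tokens_a, tokens_b = [7592], [2088]

tokens = [cls_id] + tokens_a + [sep_id] + tokens_b + [sep_id]
token_types = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)

assert tokens == [101, 7592, 102, 2088, 102]   # [CLS] A [SEP] B [SEP]
assert token_types == [0, 0, 0, 1, 1]          # segment 0 vs. segment 1
```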
def mask_token(self, idx, tokens, np_rng):
'\n Helper function to mask the token at index `idx` in `tokens` according to\n section 3.3.1 of https://arxiv.org/pdf/1810.04805.pdf\n '
label = tokens[idx]
if (np_rng.random() < 0.8):
new_label = self.mask_id
elif (np_rng.random() < 0.5):
new_label = label
else:
new_label = np_rng.choice(self.vocab_id_list)
tokens[idx] = new_label
return label
| 5,268,266,264,196,588,000
|
Helper function to mask the token at index `idx` in `tokens` according to
section 3.3.1 of https://arxiv.org/pdf/1810.04805.pdf
|
libai/data/datasets/bert_dataset.py
|
mask_token
|
Oneflow-Inc/libai
|
python
|
def mask_token(self, idx, tokens, np_rng):
'\n Helper function to mask the token at index `idx` in `tokens` according to\n section 3.3.1 of https://arxiv.org/pdf/1810.04805.pdf\n '
label = tokens[idx]
if (np_rng.random() < 0.8):
new_label = self.mask_id
elif (np_rng.random() < 0.5):
new_label = label
else:
new_label = np_rng.choice(self.vocab_id_list)
tokens[idx] = new_label
return label
|
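The branches compose to the 80/10/10 split from the BERT paper: the first draw masks with probability 0.8, and the remaining 0.2 is split evenly between keeping the token and sampling a random vocab id. A rough empirical check, assuming `dataset` is an instance with `mask_id` and `vocab_id_list` set (random draws can coincidentally hit `mask_id` or the original label, so the counts are approximate):

```python
import numpy as np

rng = np.random.default_rng(42)
counts = {'mask': 0, 'keep': 0, 'random': 0}
for _ in range(10_000):
    tokens = [1234]                          # any non-special token id
    label = dataset.mask_token(0, tokens, rng)
    if tokens[0] == dataset.mask_id:
        counts['mask'] += 1
    elif tokens[0] == label:
        counts['keep'] += 1
    else:
        counts['random'] += 1
# counts is approximately {'mask': 8000, 'keep': 1000, 'random': 1000}
```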
def create_masked_lm_predictions(self, tokens, np_rng, max_ngrams=3, do_whole_word_mask=True, favor_longer_ngram=False, geometric_dist=False):
'Creates the predictions for the masked LM objective.\n Note: Tokens here are vocab ids and not text tokens.'
cand_indexes = []
token_boundary = ([0] * len(tokens))
new_tokens = []
for (i, token) in enumerate(tokens):
new_tokens.append((token % len(self.tokenizer)))
if ((token == self.cls_id) or (token == self.sep_id)):
token_boundary[i] = 1
continue
if (do_whole_word_mask and (len(cand_indexes) >= 1) and (not is_start_piece(self.tokenizer._convert_id_to_token(token)))):
cand_indexes[(- 1)].append(i)
else:
cand_indexes.append([i])
if is_start_piece(self.tokenizer._convert_id_to_token(token)):
token_boundary[i] = 1
tokens = new_tokens
masked_positions = []
masked_labels = []
output_tokens = list(tokens)
if (self.mask_lm_prob == 0):
return (output_tokens, masked_positions, masked_labels)
cand_indexes = []
for (i, token) in enumerate(tokens):
if ((token == self.cls_id) or (token == self.sep_id)):
continue
if (do_whole_word_mask and (len(cand_indexes) >= 1) and (token_boundary[i] == 0)):
cand_indexes[(- 1)].append(i)
else:
cand_indexes.append([i])
num_to_predict = min(self.max_preds_per_seq, max(1, int(round((len(tokens) * self.mask_lm_prob)))))
ngrams = np.arange(1, (max_ngrams + 1), dtype=np.int64)
if (not geometric_dist):
pvals = (1.0 / np.arange(1, (max_ngrams + 1)))
pvals /= pvals.sum(keepdims=True)
if favor_longer_ngram:
pvals = pvals[::(- 1)]
ngram_indexes = []
for idx in range(len(cand_indexes)):
ngram_index = []
for n in ngrams:
ngram_index.append(cand_indexes[idx:(idx + n)])
ngram_indexes.append(ngram_index)
np_rng.shuffle(ngram_indexes)
masked_lms = []
covered_indexes = set()
for cand_index_set in ngram_indexes:
if (len(masked_lms) >= num_to_predict):
break
if (not cand_index_set):
continue
for index_set in cand_index_set[0]:
for index in index_set:
if (index in covered_indexes):
continue
if (not geometric_dist):
n = np_rng.choice(ngrams[:len(cand_index_set)], p=(pvals[:len(cand_index_set)] / pvals[:len(cand_index_set)].sum(keepdims=True)))
else:
n = min(np_rng.geometric(0.2), max_ngrams)
index_set = sum(cand_index_set[(n - 1)], [])
n -= 1
while ((len(masked_lms) + len(index_set)) > num_to_predict):
if (n == 0):
break
index_set = sum(cand_index_set[(n - 1)], [])
n -= 1
if ((len(masked_lms) + len(index_set)) > num_to_predict):
continue
is_any_index_covered = False
for index in index_set:
if (index in covered_indexes):
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
covered_indexes.add(index)
label = self.mask_token(index, output_tokens, np_rng)
masked_lms.append(MaskedLmInstance(index=index, label=label))
masked_lms = sorted(masked_lms, key=(lambda x: x.index))
for p in masked_lms:
masked_positions.append(p.index)
masked_labels.append(p.label)
return (output_tokens, masked_positions, masked_labels)
| -3,165,146,543,570,706,400
|
Creates the predictions for the masked LM objective.
Note: Tokens here are vocab ids and not text tokens.
|
libai/data/datasets/bert_dataset.py
|
create_masked_lm_predictions
|
Oneflow-Inc/libai
|
python
|
def create_masked_lm_predictions(self, tokens, np_rng, max_ngrams=3, do_whole_word_mask=True, favor_longer_ngram=False, geometric_dist=False):
'Creates the predictions for the masked LM objective.\n Note: Tokens here are vocab ids and not text tokens.'
cand_indexes = []
token_boundary = ([0] * len(tokens))
new_tokens = []
for (i, token) in enumerate(tokens):
new_tokens.append((token % len(self.tokenizer)))
if ((token == self.cls_id) or (token == self.sep_id)):
token_boundary[i] = 1
continue
if (do_whole_word_mask and (len(cand_indexes) >= 1) and (not is_start_piece(self.tokenizer._convert_id_to_token(token)))):
cand_indexes[(- 1)].append(i)
else:
cand_indexes.append([i])
if is_start_piece(self.tokenizer._convert_id_to_token(token)):
token_boundary[i] = 1
tokens = new_tokens
masked_positions = []
masked_labels = []
output_tokens = list(tokens)
if (self.mask_lm_prob == 0):
return (output_tokens, masked_positions, masked_labels)
cand_indexes = []
for (i, token) in enumerate(tokens):
if ((token == self.cls_id) or (token == self.sep_id)):
continue
if (do_whole_word_mask and (len(cand_indexes) >= 1) and (token_boundary[i] == 0)):
cand_indexes[(- 1)].append(i)
else:
cand_indexes.append([i])
num_to_predict = min(self.max_preds_per_seq, max(1, int(round((len(tokens) * self.mask_lm_prob)))))
ngrams = np.arange(1, (max_ngrams + 1), dtype=np.int64)
if (not geometric_dist):
pvals = (1.0 / np.arange(1, (max_ngrams + 1)))
pvals /= pvals.sum(keepdims=True)
if favor_longer_ngram:
pvals = pvals[::(- 1)]
ngram_indexes = []
for idx in range(len(cand_indexes)):
ngram_index = []
for n in ngrams:
ngram_index.append(cand_indexes[idx:(idx + n)])
ngram_indexes.append(ngram_index)
np_rng.shuffle(ngram_indexes)
masked_lms = []
covered_indexes = set()
for cand_index_set in ngram_indexes:
if (len(masked_lms) >= num_to_predict):
break
if (not cand_index_set):
continue
for index_set in cand_index_set[0]:
for index in index_set:
if (index in covered_indexes):
continue
if (not geometric_dist):
n = np_rng.choice(ngrams[:len(cand_index_set)], p=(pvals[:len(cand_index_set)] / pvals[:len(cand_index_set)].sum(keepdims=True)))
else:
n = min(np_rng.geometric(0.2), max_ngrams)
index_set = sum(cand_index_set[(n - 1)], [])
n -= 1
while ((len(masked_lms) + len(index_set)) > num_to_predict):
if (n == 0):
break
index_set = sum(cand_index_set[(n - 1)], [])
n -= 1
if ((len(masked_lms) + len(index_set)) > num_to_predict):
continue
is_any_index_covered = False
for index in index_set:
if (index in covered_indexes):
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
covered_indexes.add(index)
label = self.mask_token(index, output_tokens, np_rng)
masked_lms.append(MaskedLmInstance(index=index, label=label))
masked_lms = sorted(masked_lms, key=(lambda x: x.index))
for p in masked_lms:
masked_positions.append(p.index)
masked_labels.append(p.label)
return (output_tokens, masked_positions, masked_labels)
|
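The n-gram length distribution used when `geometric_dist` is false is worth seeing in isolation: probabilities are proportional to 1/n, so shorter spans are preferred, and `favor_longer_ngram` simply reverses the array. A standalone sketch of that computation:

```python
import numpy as np

max_ngrams = 3
pvals = 1.0 / np.arange(1, max_ngrams + 1)   # [1.0, 0.5, 0.333...]
pvals /= pvals.sum(keepdims=True)            # approx. [0.545, 0.273, 0.182]
# With favor_longer_ngram=True the array is reversed, making 3-grams the most
# likely choice; with geometric_dist=True, n is instead drawn as
# min(Geometric(p=0.2), max_ngrams).
```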
def pad_and_convert_to_tensor(self, tokens, token_types, masked_positions, masked_labels):
'Pad sequences and convert them to tensors.'
num_tokens = len(tokens)
num_pad = (self.max_seq_length - num_tokens)
assert (num_pad >= 0)
assert (len(token_types) == num_tokens)
assert (len(masked_positions) == len(masked_labels))
filler = ([self.pad_id] * num_pad)
tokens = flow.tensor((tokens + filler), dtype=flow.long)
token_types = flow.tensor((token_types + filler), dtype=flow.long)
padding_mask = flow.tensor((([1] * num_tokens) + ([0] * num_pad)), dtype=flow.long)
labels = ([(- 1)] * self.max_seq_length)
loss_mask = ([0] * self.max_seq_length)
for (idx, label) in zip(masked_positions, masked_labels):
assert (idx < num_tokens)
labels[idx] = label
loss_mask[idx] = 1
labels = flow.tensor(labels, dtype=flow.long)
loss_mask = flow.tensor(loss_mask, dtype=flow.long)
return (tokens, token_types, labels, padding_mask, loss_mask)
| -7,624,780,470,859,637,000
|
Pad sequences and convert them to tensors.
|
libai/data/datasets/bert_dataset.py
|
pad_and_convert_to_tensor
|
Oneflow-Inc/libai
|
python
|
def pad_and_convert_to_tensor(self, tokens, token_types, masked_positions, masked_labels):
num_tokens = len(tokens)
num_pad = (self.max_seq_length - num_tokens)
assert (num_pad >= 0)
assert (len(token_types) == num_tokens)
assert (len(masked_positions) == len(masked_labels))
filler = ([self.pad_id] * num_pad)
tokens = flow.tensor((tokens + filler), dtype=flow.long)
token_types = flow.tensor((token_types + filler), dtype=flow.long)
padding_mask = flow.tensor((([1] * num_tokens) + ([0] * num_pad)), dtype=flow.long)
labels = ([(- 1)] * self.max_seq_length)
loss_mask = ([0] * self.max_seq_length)
for (idx, label) in zip(masked_positions, masked_labels):
assert (idx < num_tokens)
labels[idx] = label
loss_mask[idx] = 1
labels = flow.tensor(labels, dtype=flow.long)
loss_mask = flow.tensor(loss_mask, dtype=flow.long)
return (tokens, token_types, labels, padding_mask, loss_mask)
|
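A small worked example of the padding layout, assuming `max_seq_length=8`, `pad_id=0`, and one masked position (index 1 with original label 7592); the values of the resulting oneflow tensors are shown as comments:

```python
# tokens       = [101, 7592, 102, 0, 0, 0, 0, 0]     # padded to max_seq_length
# token_types  = [0,   0,    0,   0, 0, 0, 0, 0]
# padding_mask = [1,   1,    1,   0, 0, 0, 0, 0]     # 1 = real token
# labels       = [-1,  7592, -1, -1, -1, -1, -1, -1] # -1 = not a prediction target
# loss_mask    = [0,   1,    0,   0, 0, 0, 0, 0]     # loss only at masked positions
```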
def escape(s):
'\n Do the standard xml escapes, and replace newlines and tabs.\n '
return saxutils.escape(s, {'\n': '<br />', '\t': ' '})
| 776,089,565,378,620,500
|
Do the standard xml escapes, and replace newlines and tabs.
|
leo/plugins/leowapp.py
|
escape
|
Anu082000/leo-editor
|
python
|
def escape(s):
'\n \n '
return saxutils.escape(s, {'\n': '<br />', '\t': ' '})
|
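Because `saxutils.escape` applies the standard XML entity replacements before the extra mapping, angle brackets and the custom newline/tab substitutions compose as below; this is a standalone check mirroring the plugin's helper, not Leo's own test code.

```python
from xml.sax import saxutils

s = saxutils.escape('a < b\n\tc', {'\n': '<br />', '\t': ' '})
assert s == 'a &lt; b<br /> c'
```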
def init():
'Return True if the plugin has loaded successfully.'
if (not websockets):
return False
g.plugin_signon(__name__)
return True
| 1,146,193,736,146,613,200
|
Return True if the plugin has loaded successfully.
|
leo/plugins/leowapp.py
|
init
|
Anu082000/leo-editor
|
python
|
def init():
if (not websockets):
return False
g.plugin_signon(__name__)
return True
|
def __getattr__(self, attr):
'Handle a missing attribute.'
if (attr in ('frameFactory', 'set_minibuffer_label')):
raise AttributeError
return self.message(attr)
| -3,310,694,605,964,671,000
|
Handle a missing attribute.
|
leo/plugins/leowapp.py
|
__getattr__
|
Anu082000/leo-editor
|
python
|
def __getattr__(self, attr):
if (attr in ('frameFactory', 'set_minibuffer_label')):
raise AttributeError
return self.message(attr)
|
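The pattern here is attribute-based delegation: any lookup other than the two excluded names falls through to `message`, while the exclusions keep Python's attribute protocol (e.g. `hasattr` checks on those names) working correctly. A toy standalone sketch of the same pattern (this class is not Leo's actual GUI class):

```python
class Delegator:
    """Toy sketch of the delegation pattern used above."""

    def __getattr__(self, attr):
        if attr in ('frameFactory', 'set_minibuffer_label'):
            raise AttributeError(attr)
        return self.message(attr)

    def message(self, func):
        print('=====', func)

Delegator().anything_at_all   # prints: ===== anything_at_all
```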
def message(self, func):
'\n Send a message to the framework.\n '
g.trace('=====', func, g.callers())
| 1,021,533,959,370,079,700
|
Send a message to the framework.
|
leo/plugins/leowapp.py
|
message
|
Anu082000/leo-editor
|
python
|
def message(self, func):
'\n \n '
g.trace('=====', func, g.callers())
|