| repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition |
|---|---|---|---|---|---|---|---|---|---|---|---|
tensorflow/cleverhans | cleverhans/utils_tfe.py | train | def train(model, X_train=None, Y_train=None, save=False,
predictions_adv=None, evaluate=None,
args=None, rng=None, var_list=None,
attack=None, attack_args=None):
"""
Train a TF Eager model
:param model: cleverhans.model.Model
:param X_train: numpy array with training inputs
:param Y_train: numpy array with training outputs
:param save: boolean controlling the save operation
:param predictions_adv: if set with the adversarial example tensor,
will run adversarial training
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy).
:param args: dict or argparse `Namespace` object.
Should contain `nb_epochs`, `learning_rate`,
`batch_size`
If save is True, should also contain 'train_dir'
and 'filename'
:param rng: Instance of numpy.random.RandomState
:param var_list: List of variables to train.
:param attack: Instance of the class cleverhans.attacks.attacks_eager
:param attack_args: Parameters required for the attack.
:return: True if model trained
"""
assert isinstance(model, Model)
args = _ArgsWrapper(args or {})
if ((attack is None) != (attack_args is None)):
raise ValueError("attack and attack_args must be "
"passed together.")
if X_train is None or Y_train is None:
raise ValueError("X_train argument and Y_train argument "
"must be supplied.")
# Check that necessary arguments were given (see doc above)
assert args.nb_epochs, "Number of epochs was not given in args dict"
assert args.learning_rate, "Learning rate was not given in args dict"
assert args.batch_size, "Batch size was not given in args dict"
if save:
assert args.train_dir, "Directory for save was not given in args dict"
assert args.filename, "Filename for save was not given in args dict"
if rng is None:
rng = np.random.RandomState()
# Optimizer
tfe = tf.contrib.eager
optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
batch_x = tfe.Variable(X_train[0:args.batch_size], dtype=tf.float32)
batch_y = tfe.Variable(Y_train[0:args.batch_size], dtype=tf.float32)
# One epoch of training.
for epoch in xrange(args.nb_epochs):
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_train)) / args.batch_size))
assert nb_batches * args.batch_size >= len(X_train)
# Indices to shuffle training set
index_shuf = list(range(len(X_train)))
rng.shuffle(index_shuf)
prev = time.time()
for batch in range(nb_batches):
# Compute batch start and end indices
start, end = batch_indices(
batch, len(X_train), args.batch_size)
# Perform one training step
tf.assign(batch_x, X_train[index_shuf[start:end]])
tf.assign(batch_y, Y_train[index_shuf[start:end]])
# Compute grads
with tf.GradientTape() as tape:
# Define loss
loss_clean_obj = LossCrossEntropy(model, smoothing=0.)
loss_clean = loss_clean_obj.fprop(x=batch_x, y=batch_y)
loss = loss_clean
# Adversarial training
if attack is not None:
batch_adv_x = attack.generate(batch_x, **attack_args)
loss_adv_obj = LossCrossEntropy(model, smoothing=0.)
loss_adv = loss_adv_obj.fprop(x=batch_adv_x, y=batch_y)
loss = (loss_clean + loss_adv) / 2.0
# Apply grads
model_variables = model.get_params()
grads = tape.gradient(loss, model_variables)
optimizer.apply_gradients(zip(grads, model_variables))
assert end >= len(X_train) # Check that all examples were used
cur = time.time()
_logger.info("Epoch " + str(epoch) + " took " +
str(cur - prev) + " seconds")
if evaluate is not None:
evaluate()
if save:
save_path = os.path.join(args.train_dir, args.filename)
saver = tfe.Saver(model_variables)
saver.save(save_path)
_logger.info("Completed model training and saved at: " +
str(save_path))
else:
_logger.info("Completed model training.")
return True | python | def train(model, X_train=None, Y_train=None, save=False,
predictions_adv=None, evaluate=None,
args=None, rng=None, var_list=None,
attack=None, attack_args=None):
"""
Train a TF Eager model
:param model: cleverhans.model.Model
:param X_train: numpy array with training inputs
:param Y_train: numpy array with training outputs
:param save: boolean controlling the save operation
:param predictions_adv: if set with the adversarial example tensor,
will run adversarial training
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy).
:param args: dict or argparse `Namespace` object.
Should contain `nb_epochs`, `learning_rate`,
`batch_size`
If save is True, should also contain 'train_dir'
and 'filename'
:param rng: Instance of numpy.random.RandomState
:param var_list: List of variables to train.
:param attack: Instance of the class cleverhans.attacks.attacks_eager
:param attack_args: Parameters required for the attack.
:return: True if model trained
"""
assert isinstance(model, Model)
args = _ArgsWrapper(args or {})
if ((attack is None) != (attack_args is None)):
raise ValueError("attack and attack_args must be "
"passed together.")
if X_train is None or Y_train is None:
raise ValueError("X_train argument and Y_train argument "
"must be supplied.")
# Check that necessary arguments were given (see doc above)
assert args.nb_epochs, "Number of epochs was not given in args dict"
assert args.learning_rate, "Learning rate was not given in args dict"
assert args.batch_size, "Batch size was not given in args dict"
if save:
assert args.train_dir, "Directory for save was not given in args dict"
assert args.filename, "Filename for save was not given in args dict"
if rng is None:
rng = np.random.RandomState()
# Optimizer
tfe = tf.contrib.eager
optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
batch_x = tfe.Variable(X_train[0:args.batch_size], dtype=tf.float32)
batch_y = tfe.Variable(Y_train[0:args.batch_size], dtype=tf.float32)
# One epoch of training.
for epoch in xrange(args.nb_epochs):
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_train)) / args.batch_size))
assert nb_batches * args.batch_size >= len(X_train)
# Indices to shuffle training set
index_shuf = list(range(len(X_train)))
rng.shuffle(index_shuf)
prev = time.time()
for batch in range(nb_batches):
# Compute batch start and end indices
start, end = batch_indices(
batch, len(X_train), args.batch_size)
# Perform one training step
tf.assign(batch_x, X_train[index_shuf[start:end]])
tf.assign(batch_y, Y_train[index_shuf[start:end]])
# Compute grads
with tf.GradientTape() as tape:
# Define loss
loss_clean_obj = LossCrossEntropy(model, smoothing=0.)
loss_clean = loss_clean_obj.fprop(x=batch_x, y=batch_y)
loss = loss_clean
# Adversarial training
if attack is not None:
batch_adv_x = attack.generate(batch_x, **attack_args)
loss_adv_obj = LossCrossEntropy(model, smoothing=0.)
loss_adv = loss_adv_obj.fprop(x=batch_adv_x, y=batch_y)
loss = (loss_clean + loss_adv) / 2.0
# Apply grads
model_variables = model.get_params()
grads = tape.gradient(loss, model_variables)
optimizer.apply_gradients(zip(grads, model_variables))
assert end >= len(X_train) # Check that all examples were used
cur = time.time()
_logger.info("Epoch " + str(epoch) + " took " +
str(cur - prev) + " seconds")
if evaluate is not None:
evaluate()
if save:
save_path = os.path.join(args.train_dir, args.filename)
saver = tfe.Saver(model_variables)
saver.save(save_path)
_logger.info("Completed model training and saved at: " +
str(save_path))
else:
_logger.info("Completed model training.")
return True | [
"def",
"train",
"(",
"model",
",",
"X_train",
"=",
"None",
",",
"Y_train",
"=",
"None",
",",
"save",
"=",
"False",
",",
"predictions_adv",
"=",
"None",
",",
"evaluate",
"=",
"None",
",",
"args",
"=",
"None",
",",
"rng",
"=",
"None",
",",
"var_list",
"=",
"None",
",",
"attack",
"=",
"None",
",",
"attack_args",
"=",
"None",
")",
":",
"assert",
"isinstance",
"(",
"model",
",",
"Model",
")",
"args",
"=",
"_ArgsWrapper",
"(",
"args",
"or",
"{",
"}",
")",
"if",
"(",
"(",
"attack",
"is",
"None",
")",
"!=",
"(",
"attack_args",
"is",
"None",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"attack and attack_args must be \"",
"\"passed together.\"",
")",
"if",
"X_train",
"is",
"None",
"or",
"Y_train",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"X_train argument and Y_train argument \"",
"\"must be supplied.\"",
")",
"# Check that necessary arguments were given (see doc above)",
"assert",
"args",
".",
"nb_epochs",
",",
"\"Number of epochs was not given in args dict\"",
"assert",
"args",
".",
"learning_rate",
",",
"\"Learning rate was not given in args dict\"",
"assert",
"args",
".",
"batch_size",
",",
"\"Batch size was not given in args dict\"",
"if",
"save",
":",
"assert",
"args",
".",
"train_dir",
",",
"\"Directory for save was not given in args dict\"",
"assert",
"args",
".",
"filename",
",",
"\"Filename for save was not given in args dict\"",
"if",
"rng",
"is",
"None",
":",
"rng",
"=",
"np",
".",
"random",
".",
"RandomState",
"(",
")",
"# Optimizer",
"tfe",
"=",
"tf",
".",
"contrib",
".",
"eager",
"optimizer",
"=",
"tf",
".",
"train",
".",
"AdamOptimizer",
"(",
"learning_rate",
"=",
"args",
".",
"learning_rate",
")",
"batch_x",
"=",
"tfe",
".",
"Variable",
"(",
"X_train",
"[",
"0",
":",
"args",
".",
"batch_size",
"]",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"batch_y",
"=",
"tfe",
".",
"Variable",
"(",
"Y_train",
"[",
"0",
":",
"args",
".",
"batch_size",
"]",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"# One epoch of training.",
"for",
"epoch",
"in",
"xrange",
"(",
"args",
".",
"nb_epochs",
")",
":",
"# Compute number of batches",
"nb_batches",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"float",
"(",
"len",
"(",
"X_train",
")",
")",
"/",
"args",
".",
"batch_size",
")",
")",
"assert",
"nb_batches",
"*",
"args",
".",
"batch_size",
">=",
"len",
"(",
"X_train",
")",
"# Indices to shuffle training set",
"index_shuf",
"=",
"list",
"(",
"range",
"(",
"len",
"(",
"X_train",
")",
")",
")",
"rng",
".",
"shuffle",
"(",
"index_shuf",
")",
"prev",
"=",
"time",
".",
"time",
"(",
")",
"for",
"batch",
"in",
"range",
"(",
"nb_batches",
")",
":",
"# Compute batch start and end indices",
"start",
",",
"end",
"=",
"batch_indices",
"(",
"batch",
",",
"len",
"(",
"X_train",
")",
",",
"args",
".",
"batch_size",
")",
"# Perform one training step",
"tf",
".",
"assign",
"(",
"batch_x",
",",
"X_train",
"[",
"index_shuf",
"[",
"start",
":",
"end",
"]",
"]",
")",
"tf",
".",
"assign",
"(",
"batch_y",
",",
"Y_train",
"[",
"index_shuf",
"[",
"start",
":",
"end",
"]",
"]",
")",
"# Compute grads",
"with",
"tf",
".",
"GradientTape",
"(",
")",
"as",
"tape",
":",
"# Define loss",
"loss_clean_obj",
"=",
"LossCrossEntropy",
"(",
"model",
",",
"smoothing",
"=",
"0.",
")",
"loss_clean",
"=",
"loss_clean_obj",
".",
"fprop",
"(",
"x",
"=",
"batch_x",
",",
"y",
"=",
"batch_y",
")",
"loss",
"=",
"loss_clean",
"# Adversarial training",
"if",
"attack",
"is",
"not",
"None",
":",
"batch_adv_x",
"=",
"attack",
".",
"generate",
"(",
"batch_x",
",",
"*",
"*",
"attack_args",
")",
"loss_adv_obj",
"=",
"LossCrossEntropy",
"(",
"model",
",",
"smoothing",
"=",
"0.",
")",
"loss_adv",
"=",
"loss_adv_obj",
".",
"fprop",
"(",
"x",
"=",
"batch_adv_x",
",",
"y",
"=",
"batch_y",
")",
"loss",
"=",
"(",
"loss_clean",
"+",
"loss_adv",
")",
"/",
"2.0",
"# Apply grads",
"model_variables",
"=",
"model",
".",
"get_params",
"(",
")",
"grads",
"=",
"tape",
".",
"gradient",
"(",
"loss",
",",
"model_variables",
")",
"optimizer",
".",
"apply_gradients",
"(",
"zip",
"(",
"grads",
",",
"model_variables",
")",
")",
"assert",
"end",
">=",
"len",
"(",
"X_train",
")",
"# Check that all examples were used",
"cur",
"=",
"time",
".",
"time",
"(",
")",
"_logger",
".",
"info",
"(",
"\"Epoch \"",
"+",
"str",
"(",
"epoch",
")",
"+",
"\" took \"",
"+",
"str",
"(",
"cur",
"-",
"prev",
")",
"+",
"\" seconds\"",
")",
"if",
"evaluate",
"is",
"not",
"None",
":",
"evaluate",
"(",
")",
"if",
"save",
":",
"save_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"args",
".",
"train_dir",
",",
"args",
".",
"filename",
")",
"saver",
"=",
"tf",
".",
"train",
".",
"Saver",
"(",
")",
"saver",
".",
"save",
"(",
"save_path",
",",
"model_variables",
")",
"_logger",
".",
"info",
"(",
"\"Completed model training and saved at: \"",
"+",
"str",
"(",
"save_path",
")",
")",
"else",
":",
"_logger",
".",
"info",
"(",
"\"Completed model training.\"",
")",
"return",
"True"
] | Train a TF Eager model
:param model: cleverhans.model.Model
:param X_train: numpy array with training inputs
:param Y_train: numpy array with training outputs
:param save: boolean controlling the save operation
:param predictions_adv: if set with the adversarial example tensor,
will run adversarial training
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy).
:param args: dict or argparse `Namespace` object.
Should contain `nb_epochs`, `learning_rate`,
`batch_size`
If save is True, should also contain 'train_dir'
and 'filename'
:param rng: Instance of numpy.random.RandomState
:param var_list: List of variables to train.
:param attack: Instance of the class cleverhans.attacks.attacks_eager
:param attack_args: Parameters required for the attack.
:return: True if model trained | [
"Train",
"a",
"TF",
"Eager",
"model",
":",
"param",
"model",
":",
"cleverhans",
".",
"model",
".",
"Model",
":",
"param",
"X_train",
":",
"numpy",
"array",
"with",
"training",
"inputs",
":",
"param",
"Y_train",
":",
"numpy",
"array",
"with",
"training",
"outputs",
":",
"param",
"save",
":",
"boolean",
"controlling",
"the",
"save",
"operation",
":",
"param",
"predictions_adv",
":",
"if",
"set",
"with",
"the",
"adversarial",
"example",
"tensor",
"will",
"run",
"adversarial",
"training",
":",
"param",
"evaluate",
":",
"function",
"that",
"is",
"run",
"after",
"each",
"training",
"iteration",
"(",
"typically",
"to",
"display",
"the",
"test",
"/",
"validation",
"accuracy",
")",
".",
":",
"param",
"args",
":",
"dict",
"or",
"argparse",
"Namespace",
"object",
".",
"Should",
"contain",
"nb_epochs",
"learning_rate",
"batch_size",
"If",
"save",
"is",
"True",
"should",
"also",
"contain",
"train_dir",
"and",
"filename",
":",
"param",
"rng",
":",
"Instance",
"of",
"numpy",
".",
"random",
".",
"RandomState",
":",
"param",
"var_list",
":",
"List",
"of",
"variables",
"to",
"train",
".",
":",
"param",
"attack",
":",
"Instance",
"of",
"the",
"class",
"cleverhans",
".",
"attacks",
".",
"attacks_eager",
":",
"param",
"attack_args",
":",
"Parameters",
"required",
"for",
"the",
"attack",
".",
":",
"return",
":",
"True",
"if",
"model",
"trained"
] | 97488e215760547b81afc53f5e5de8ba7da5bd98 | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_tfe.py#L24-L128 | train |
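The training loop above shuffles the index list once per epoch and slices batches with `batch_indices`; the `model_eval` row below warns against reusing that helper for evaluation because it repeats examples. A minimal sketch of the assumed behavior (cleverhans defines the real helper in `cleverhans/utils.py`; this reconstruction is an assumption, not the verbatim source): when the dataset size is not a multiple of the batch size, the last window slides back so every batch stays full, re-reading a few examples.

```python
def batch_indices(batch_nb, data_length, batch_size):
    """Assumed behavior: keep every batch full by sliding the last one back."""
    start = int(batch_nb * batch_size)
    end = int((batch_nb + 1) * batch_size)
    if end > data_length:
        shift = end - data_length  # how far the window overruns the data
        start -= shift
        end -= shift
    return start, end

print(batch_indices(2, 250, 100))  # -> (150, 250): items 150-199 are repeated
```

This also explains the `assert end >= len(X_train)` check in the loop: with the shift, the final batch always reaches the end of the shuffled index list.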
tensorflow/cleverhans | cleverhans/utils_tfe.py | model_eval | def model_eval(model, X_test=None, Y_test=None, args=None,
attack=None, attack_args=None):
"""
Compute the accuracy of a TF Eager model on some data
:param model: instance of cleverhans.model.Model_Eager
with pretrained weights for evaluation.
:param X_test: numpy array with training inputs
:param Y_test: numpy array with training outputs
:param args: dict or argparse `Namespace` object.
Should contain `batch_size`
:param attack: instance of the class cleverhans.attacks.attacks_eager
:param attack_args: parameters required for the attack.
:return: a float with the accuracy value
"""
args = _ArgsWrapper(args or {})
if ((attack is None) != (attack_args is None)):
raise ValueError("attack and attack_args must be "
"passed together.")
assert args.batch_size, "Batch size was not given in args dict"
if X_test is None or Y_test is None:
raise ValueError("X_test argument and Y_test argument "
"must be supplied.")
# Init result var
accuracy = 0.0
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))
assert nb_batches * args.batch_size >= len(X_test)
X_cur = np.zeros((args.batch_size,) + X_test.shape[1:],
dtype=X_test.dtype)
Y_cur = np.zeros((args.batch_size,) + Y_test.shape[1:],
dtype=Y_test.dtype)
tfe = tf.contrib.eager
batch_x = tfe.Variable(X_test[0:args.batch_size], dtype=tf.float32)
batch_y = tfe.Variable(Y_test[0:args.batch_size], dtype=tf.float32)
for batch in range(nb_batches):
if batch % 100 == 0 and batch > 0:
_logger.debug("Batch " + str(batch))
# Must not use the `batch_indices` function here, because it
# repeats some examples.
# It's acceptable to repeat during training, but not eval.
start = batch * args.batch_size
end = min(len(X_test), start + args.batch_size)
# The last batch may be smaller than all others. This should not
# affect the accuracy disproportionately.
cur_batch_size = end - start
X_cur[:cur_batch_size] = X_test[start:end]
Y_cur[:cur_batch_size] = Y_test[start:end]
tf.assign(batch_x, X_cur)
tf.assign(batch_y, Y_cur)
if attack is not None:
batch_adv_x = attack.generate(batch_x, **attack_args)
predictions = model.get_probs(batch_adv_x)
else:
predictions = model.get_probs(batch_x)
cur_corr_preds = tf.equal(tf.argmax(batch_y, axis=-1),
tf.argmax(predictions, axis=-1))
accuracy += cur_corr_preds.numpy()[:cur_batch_size].sum()
assert end >= len(X_test)
# Divide by number of examples to get final value
accuracy /= len(X_test)
return accuracy | python | def model_eval(model, X_test=None, Y_test=None, args=None,
attack=None, attack_args=None):
"""
Compute the accuracy of a TF Eager model on some data
:param model: instance of cleverhans.model.Model_Eager
with pretrained weights for evaluation.
:param X_test: numpy array with training inputs
:param Y_test: numpy array with training outputs
:param args: dict or argparse `Namespace` object.
Should contain `batch_size`
:param attack: instance of the class cleverhans.attacks.attacks_eager
:param attack_args: parameters required for the attack.
:return: a float with the accuracy value
"""
args = _ArgsWrapper(args or {})
if ((attack is None) != (attack_args is None)):
raise ValueError("attack and attack_args must be "
"passed together.")
assert args.batch_size, "Batch size was not given in args dict"
if X_test is None or Y_test is None:
raise ValueError("X_test argument and Y_test argument "
"must be supplied.")
# Init result var
accuracy = 0.0
# Compute number of batches
nb_batches = int(math.ceil(float(len(X_test)) / args.batch_size))
assert nb_batches * args.batch_size >= len(X_test)
X_cur = np.zeros((args.batch_size,) + X_test.shape[1:],
dtype=X_test.dtype)
Y_cur = np.zeros((args.batch_size,) + Y_test.shape[1:],
dtype=Y_test.dtype)
tfe = tf.contrib.eager
batch_x = tfe.Variable(X_test[0:args.batch_size], dtype=tf.float32)
batch_y = tfe.Variable(Y_test[0:args.batch_size], dtype=tf.float32)
for batch in range(nb_batches):
if batch % 100 == 0 and batch > 0:
_logger.debug("Batch " + str(batch))
# Must not use the `batch_indices` function here, because it
# repeats some examples.
# It's acceptable to repeat during training, but not eval.
start = batch * args.batch_size
end = min(len(X_test), start + args.batch_size)
# The last batch may be smaller than all others. This should not
# affect the accuracy disproportionately.
cur_batch_size = end - start
X_cur[:cur_batch_size] = X_test[start:end]
Y_cur[:cur_batch_size] = Y_test[start:end]
tf.assign(batch_x, X_cur)
tf.assign(batch_y, Y_cur)
if attack is not None:
batch_adv_x = attack.generate(batch_x, **attack_args)
predictions = model.get_probs(batch_adv_x)
else:
predictions = model.get_probs(batch_x)
cur_corr_preds = tf.equal(tf.argmax(batch_y, axis=-1),
tf.argmax(predictions, axis=-1))
accuracy += cur_corr_preds.numpy()[:cur_batch_size].sum()
assert end >= len(X_test)
# Divide by number of examples to get final value
accuracy /= len(X_test)
return accuracy | [
"def",
"model_eval",
"(",
"model",
",",
"X_test",
"=",
"None",
",",
"Y_test",
"=",
"None",
",",
"args",
"=",
"None",
",",
"attack",
"=",
"None",
",",
"attack_args",
"=",
"None",
")",
":",
"args",
"=",
"_ArgsWrapper",
"(",
"args",
"or",
"{",
"}",
")",
"if",
"(",
"(",
"attack",
"is",
"None",
")",
"!=",
"(",
"attack_args",
"is",
"None",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"attack and attack_args must be \"",
"\"passed together.\"",
")",
"assert",
"args",
".",
"batch_size",
",",
"\"Batch size was not given in args dict\"",
"if",
"X_test",
"is",
"None",
"or",
"Y_test",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"X_test argument and Y_test argument \"",
"\"must be supplied.\"",
")",
"# Init result var",
"accuracy",
"=",
"0.0",
"# Compute number of batches",
"nb_batches",
"=",
"int",
"(",
"math",
".",
"ceil",
"(",
"float",
"(",
"len",
"(",
"X_test",
")",
")",
"/",
"args",
".",
"batch_size",
")",
")",
"assert",
"nb_batches",
"*",
"args",
".",
"batch_size",
">=",
"len",
"(",
"X_test",
")",
"X_cur",
"=",
"np",
".",
"zeros",
"(",
"(",
"args",
".",
"batch_size",
",",
")",
"+",
"X_test",
".",
"shape",
"[",
"1",
":",
"]",
",",
"dtype",
"=",
"X_test",
".",
"dtype",
")",
"Y_cur",
"=",
"np",
".",
"zeros",
"(",
"(",
"args",
".",
"batch_size",
",",
")",
"+",
"Y_test",
".",
"shape",
"[",
"1",
":",
"]",
",",
"dtype",
"=",
"Y_test",
".",
"dtype",
")",
"tfe",
"=",
"tf",
".",
"contrib",
".",
"eager",
"batch_x",
"=",
"tfe",
".",
"Variable",
"(",
"X_test",
"[",
"0",
":",
"args",
".",
"batch_size",
"]",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"batch_y",
"=",
"tfe",
".",
"Variable",
"(",
"Y_test",
"[",
"0",
":",
"args",
".",
"batch_size",
"]",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
"for",
"batch",
"in",
"range",
"(",
"nb_batches",
")",
":",
"if",
"batch",
"%",
"100",
"==",
"0",
"and",
"batch",
">",
"0",
":",
"_logger",
".",
"debug",
"(",
"\"Batch \"",
"+",
"str",
"(",
"batch",
")",
")",
"# Must not use the `batch_indices` function here, because it",
"# repeats some examples.",
"# It's acceptable to repeat during training, but not eval.",
"start",
"=",
"batch",
"*",
"args",
".",
"batch_size",
"end",
"=",
"min",
"(",
"len",
"(",
"X_test",
")",
",",
"start",
"+",
"args",
".",
"batch_size",
")",
"# The last batch may be smaller than all others. This should not",
"# affect the accuarcy disproportionately.",
"cur_batch_size",
"=",
"end",
"-",
"start",
"X_cur",
"[",
":",
"cur_batch_size",
"]",
"=",
"X_test",
"[",
"start",
":",
"end",
"]",
"Y_cur",
"[",
":",
"cur_batch_size",
"]",
"=",
"Y_test",
"[",
"start",
":",
"end",
"]",
"tf",
".",
"assign",
"(",
"batch_x",
",",
"X_cur",
")",
"tf",
".",
"assign",
"(",
"batch_y",
",",
"Y_cur",
")",
"if",
"attack",
"is",
"not",
"None",
":",
"batch_adv_x",
"=",
"attack",
".",
"generate",
"(",
"batch_x",
",",
"*",
"*",
"attack_args",
")",
"predictions",
"=",
"model",
".",
"get_probs",
"(",
"batch_adv_x",
")",
"else",
":",
"predictions",
"=",
"model",
".",
"get_probs",
"(",
"batch_x",
")",
"cur_corr_preds",
"=",
"tf",
".",
"equal",
"(",
"tf",
".",
"argmax",
"(",
"batch_y",
",",
"axis",
"=",
"-",
"1",
")",
",",
"tf",
".",
"argmax",
"(",
"predictions",
",",
"axis",
"=",
"-",
"1",
")",
")",
"accuracy",
"+=",
"cur_corr_preds",
".",
"numpy",
"(",
")",
"[",
":",
"cur_batch_size",
"]",
".",
"sum",
"(",
")",
"assert",
"end",
">=",
"len",
"(",
"X_test",
")",
"# Divide by number of examples to get final value",
"accuracy",
"/=",
"len",
"(",
"X_test",
")",
"return",
"accuracy"
] | Compute the accuracy of a TF Eager model on some data
:param model: instance of cleverhans.model.Model_Eager
with pretrained weights for evaluation.
:param X_test: numpy array with training inputs
:param Y_test: numpy array with training outputs
:param args: dict or argparse `Namespace` object.
Should contain `batch_size`
:param attack: instance of the class cleverhans.attacks.attacks_eager
:param attack_args: parameters required for the attack.
:return: a float with the accuracy value | [
"Compute",
"the",
"accuracy",
"of",
"a",
"TF",
"Eager",
"model",
"on",
"some",
"data",
":",
"param",
"model",
":",
"instance",
"of",
"cleverhans",
".",
"model",
".",
"Model_Eager",
"with",
"pretrained",
"weights",
"for",
"evaluation",
".",
":",
"param",
"X_test",
":",
"numpy",
"array",
"with",
"training",
"inputs",
":",
"param",
"Y_test",
":",
"numpy",
"array",
"with",
"training",
"outputs",
":",
"param",
"args",
":",
"dict",
"or",
"argparse",
"Namespace",
"object",
".",
"Should",
"contain",
"batch_size",
":",
"param",
"attack",
":",
"instance",
"of",
"the",
"class",
"cleverhans",
".",
"attacks",
".",
"attacks_eager",
":",
"param",
"attack_args",
":",
"parameters",
"required",
"for",
"the",
"attack",
".",
":",
"return",
":",
"a",
"float",
"with",
"the",
"accuracy",
"value"
] | 97488e215760547b81afc53f5e5de8ba7da5bd98 | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_tfe.py#L131-L202 | train |
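A framework-free sketch of the same evaluation logic, assuming a hypothetical `predict_fn` that returns per-class probabilities. Like the code above, it truncates the final batch rather than padding it, so a short last batch does not distort the accuracy:

```python
import math
import numpy as np

def accuracy_sketch(predict_fn, X, Y, batch_size):
    """Fraction of one-hot labels in Y matched by argmax of predict_fn(X)."""
    correct = 0
    nb_batches = int(math.ceil(float(len(X)) / batch_size))
    for b in range(nb_batches):
        start = b * batch_size
        end = min(len(X), start + batch_size)  # last batch may be smaller
        probs = predict_fn(X[start:end])
        correct += np.sum(np.argmax(probs, axis=-1)
                          == np.argmax(Y[start:end], axis=-1))
    return correct / float(len(X))
```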
tensorflow/cleverhans | cleverhans/utils_tfe.py | model_argmax | def model_argmax(model, samples):
"""
Helper function that computes the current class prediction
:param samples: numpy array with input samples (dims must match x)
:return: the argmax output of predictions, i.e. the current predicted class
"""
tfe = tf.contrib.eager
tf_samples = tfe.Variable(samples)
probabilities = model.get_probs(tf_samples)
if samples.shape[0] == 1:
return tf.argmax(probabilities)
else:
return tf.argmax(probabilities, axis=1) | python | def model_argmax(model, samples):
"""
Helper function that computes the current class prediction
:param samples: numpy array with input samples (dims must match x)
:return: the argmax output of predictions, i.e. the current predicted class
"""
tfe = tf.contrib.eager
tf_samples = tfe.Variable(samples)
probabilities = model.get_probs(tf_samples)
if samples.shape[0] == 1:
return tf.argmax(probabilities)
else:
return tf.argmax(probabilities, axis=1) | [
"def",
"model_argmax",
"(",
"model",
",",
"samples",
")",
":",
"tfe",
"=",
"tf",
".",
"contrib",
".",
"eager",
"tf_samples",
"=",
"tfe",
".",
"Variable",
"(",
"samples",
")",
"probabilities",
"=",
"model",
".",
"get_probs",
"(",
"tf_samples",
")",
"if",
"samples",
".",
"shape",
"[",
"0",
"]",
"==",
"1",
":",
"return",
"tf",
".",
"argmax",
"(",
"probabilities",
")",
"else",
":",
"return",
"tf",
".",
"argmax",
"(",
"probabilities",
",",
"axis",
"=",
"1",
")"
] | Helper function that computes the current class prediction
:param samples: numpy array with input samples (dims must match x)
:return: the argmax output of predictions, i.e. the current predicted class | [
"Helper",
"function",
"that",
"computes",
"the",
"current",
"class",
"prediction",
":",
"param",
"samples",
":",
"numpy",
"array",
"with",
"input",
"samples",
"(",
"dims",
"must",
"match",
"x",
")",
":",
"return",
":",
"the",
"argmax",
"output",
"of",
"predictions",
"i",
".",
"e",
".",
"the",
"current",
"predicted",
"class"
] | 97488e215760547b81afc53f5e5de8ba7da5bd98 | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/utils_tfe.py#L205-L218 | train |
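The prediction rule itself is just an argmax over the class axis; in plain NumPy, with made-up probabilities:

```python
import numpy as np

probs = np.array([[0.1, 0.7, 0.2],   # sample 0 -> class 1
                  [0.6, 0.3, 0.1]])  # sample 1 -> class 0
print(np.argmax(probs, axis=1))      # [1 0], the predicted classes
```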
tensorflow/cleverhans | cleverhans/attacks/noise.py | Noise.generate | def generate(self, x, **kwargs):
"""
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param kwargs: See `parse_params`
"""
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)
if self.ord != np.inf:
raise NotImplementedError(self.ord)
eta = tf.random_uniform(tf.shape(x), -self.eps, self.eps,
dtype=self.tf_dtype)
adv_x = x + eta
if self.clip_min is not None or self.clip_max is not None:
assert self.clip_min is not None and self.clip_max is not None
adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
return adv_x | python | def generate(self, x, **kwargs):
"""
Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param kwargs: See `parse_params`
"""
# Parse and save attack-specific parameters
assert self.parse_params(**kwargs)
if self.ord != np.inf:
raise NotImplementedError(self.ord)
eta = tf.random_uniform(tf.shape(x), -self.eps, self.eps,
dtype=self.tf_dtype)
adv_x = x + eta
if self.clip_min is not None or self.clip_max is not None:
assert self.clip_min is not None and self.clip_max is not None
adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max)
return adv_x | [
"def",
"generate",
"(",
"self",
",",
"x",
",",
"*",
"*",
"kwargs",
")",
":",
"# Parse and save attack-specific parameters",
"assert",
"self",
".",
"parse_params",
"(",
"*",
"*",
"kwargs",
")",
"if",
"self",
".",
"ord",
"!=",
"np",
".",
"inf",
":",
"raise",
"NotImplementedError",
"(",
"self",
".",
"ord",
")",
"eta",
"=",
"tf",
".",
"random_uniform",
"(",
"tf",
".",
"shape",
"(",
"x",
")",
",",
"-",
"self",
".",
"eps",
",",
"self",
".",
"eps",
",",
"dtype",
"=",
"self",
".",
"tf_dtype",
")",
"adv_x",
"=",
"x",
"+",
"eta",
"if",
"self",
".",
"clip_min",
"is",
"not",
"None",
"or",
"self",
".",
"clip_max",
"is",
"not",
"None",
":",
"assert",
"self",
".",
"clip_min",
"is",
"not",
"None",
"and",
"self",
".",
"clip_max",
"is",
"not",
"None",
"adv_x",
"=",
"tf",
".",
"clip_by_value",
"(",
"adv_x",
",",
"self",
".",
"clip_min",
",",
"self",
".",
"clip_max",
")",
"return",
"adv_x"
] | Generate symbolic graph for adversarial examples and return.
:param x: The model's symbolic inputs.
:param kwargs: See `parse_params` | [
"Generate",
"symbolic",
"graph",
"for",
"adversarial",
"examples",
"and",
"return",
"."
] | 97488e215760547b81afc53f5e5de8ba7da5bd98 | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/noise.py#L38-L57 | train |
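The attack above is uniform L-infinity noise followed by optional clipping; a plain-NumPy sketch of the same computation (not the cleverhans API):

```python
import numpy as np

def noise_attack(x, eps, clip_min=None, clip_max=None, rng=np.random):
    """Perturb x by uniform noise in [-eps, eps]; clip to the valid range."""
    eta = rng.uniform(-eps, eps, size=x.shape).astype(x.dtype)
    adv_x = x + eta
    if clip_min is not None and clip_max is not None:
        adv_x = np.clip(adv_x, clip_min, clip_max)
    return adv_x

x = np.full((2, 3), 0.5, dtype=np.float32)
print(noise_attack(x, eps=0.3, clip_min=0.0, clip_max=1.0))
```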
tensorflow/cleverhans | cleverhans/experimental/certification/utils.py | initialize_dual | def initialize_dual(neural_net_params_object, init_dual_file=None,
random_init_variance=0.01, init_nu=200.0):
"""Function to initialize the dual variables of the class.
Args:
neural_net_params_object: Object with the neural net weights, biases
and types
init_dual_file: Path to a file containing dual variables; if the path
is empty, random initialization is performed
Expects numpy dictionary with
lambda_pos_0, lambda_pos_1, ..
lambda_neg_0, lambda_neg_1, ..
lambda_quad_0, lambda_quad_1, ..
lambda_lu_0, lambda_lu_1, ..
random_init_variance: variance for random initialization
init_nu: Value to initialize nu variable with
Returns:
dual_var: dual variables initialized appropriately.
"""
lambda_pos = []
lambda_neg = []
lambda_quad = []
lambda_lu = []
if init_dual_file is None:
for i in range(0, neural_net_params_object.num_hidden_layers + 1):
initializer = (np.random.uniform(0, random_init_variance, size=(
neural_net_params_object.sizes[i], 1))).astype(np.float32)
lambda_pos.append(tf.get_variable('lambda_pos_' + str(i),
initializer=initializer,
dtype=tf.float32))
initializer = (np.random.uniform(0, random_init_variance, size=(
neural_net_params_object.sizes[i], 1))).astype(np.float32)
lambda_neg.append(tf.get_variable('lambda_neg_' + str(i),
initializer=initializer,
dtype=tf.float32))
initializer = (np.random.uniform(0, random_init_variance, size=(
neural_net_params_object.sizes[i], 1))).astype(np.float32)
lambda_quad.append(tf.get_variable('lambda_quad_' + str(i),
initializer=initializer,
dtype=tf.float32))
initializer = (np.random.uniform(0, random_init_variance, size=(
neural_net_params_object.sizes[i], 1))).astype(np.float32)
lambda_lu.append(tf.get_variable('lambda_lu_' + str(i),
initializer=initializer,
dtype=tf.float32))
nu = tf.get_variable('nu', initializer=init_nu)
else:
# Loading from file
dual_var_init_val = np.load(init_dual_file).item()
for i in range(0, neural_net_params_object.num_hidden_layers + 1):
lambda_pos.append(
tf.get_variable('lambda_pos_' + str(i),
initializer=dual_var_init_val['lambda_pos'][i],
dtype=tf.float32))
lambda_neg.append(
tf.get_variable('lambda_neg_' + str(i),
initializer=dual_var_init_val['lambda_neg'][i],
dtype=tf.float32))
lambda_quad.append(
tf.get_variable('lambda_quad_' + str(i),
initializer=dual_var_init_val['lambda_quad'][i],
dtype=tf.float32))
lambda_lu.append(
tf.get_variable('lambda_lu_' + str(i),
initializer=dual_var_init_val['lambda_lu'][i],
dtype=tf.float32))
nu = tf.get_variable('nu', initializer=1.0*dual_var_init_val['nu'])
dual_var = {'lambda_pos': lambda_pos, 'lambda_neg': lambda_neg,
'lambda_quad': lambda_quad, 'lambda_lu': lambda_lu, 'nu': nu}
return dual_var | python | def initialize_dual(neural_net_params_object, init_dual_file=None,
random_init_variance=0.01, init_nu=200.0):
"""Function to initialize the dual variables of the class.
Args:
neural_net_params_object: Object with the neural net weights, biases
and types
init_dual_file: Path to a file containing dual variables; if the path
is empty, random initialization is performed
Expects numpy dictionary with
lambda_pos_0, lambda_pos_1, ..
lambda_neg_0, lambda_neg_1, ..
lambda_quad_0, lambda_quad_1, ..
lambda_lu_0, lambda_lu_1, ..
random_init_variance: variance for random initialization
init_nu: Value to initialize nu variable with
Returns:
dual_var: dual variables initialized appropriately.
"""
lambda_pos = []
lambda_neg = []
lambda_quad = []
lambda_lu = []
if init_dual_file is None:
for i in range(0, neural_net_params_object.num_hidden_layers + 1):
initializer = (np.random.uniform(0, random_init_variance, size=(
neural_net_params_object.sizes[i], 1))).astype(np.float32)
lambda_pos.append(tf.get_variable('lambda_pos_' + str(i),
initializer=initializer,
dtype=tf.float32))
initializer = (np.random.uniform(0, random_init_variance, size=(
neural_net_params_object.sizes[i], 1))).astype(np.float32)
lambda_neg.append(tf.get_variable('lambda_neg_' + str(i),
initializer=initializer,
dtype=tf.float32))
initializer = (np.random.uniform(0, random_init_variance, size=(
neural_net_params_object.sizes[i], 1))).astype(np.float32)
lambda_quad.append(tf.get_variable('lambda_quad_' + str(i),
initializer=initializer,
dtype=tf.float32))
initializer = (np.random.uniform(0, random_init_variance, size=(
neural_net_params_object.sizes[i], 1))).astype(np.float32)
lambda_lu.append(tf.get_variable('lambda_lu_' + str(i),
initializer=initializer,
dtype=tf.float32))
nu = tf.get_variable('nu', initializer=init_nu)
else:
# Loading from file
dual_var_init_val = np.load(init_dual_file).item()
for i in range(0, neural_net_params_object.num_hidden_layers + 1):
lambda_pos.append(
tf.get_variable('lambda_pos_' + str(i),
initializer=dual_var_init_val['lambda_pos'][i],
dtype=tf.float32))
lambda_neg.append(
tf.get_variable('lambda_neg_' + str(i),
initializer=dual_var_init_val['lambda_neg'][i],
dtype=tf.float32))
lambda_quad.append(
tf.get_variable('lambda_quad_' + str(i),
initializer=dual_var_init_val['lambda_quad'][i],
dtype=tf.float32))
lambda_lu.append(
tf.get_variable('lambda_lu_' + str(i),
initializer=dual_var_init_val['lambda_lu'][i],
dtype=tf.float32))
nu = tf.get_variable('nu', initializer=1.0*dual_var_init_val['nu'])
dual_var = {'lambda_pos': lambda_pos, 'lambda_neg': lambda_neg,
'lambda_quad': lambda_quad, 'lambda_lu': lambda_lu, 'nu': nu}
return dual_var | [
"def",
"initialize_dual",
"(",
"neural_net_params_object",
",",
"init_dual_file",
"=",
"None",
",",
"random_init_variance",
"=",
"0.01",
",",
"init_nu",
"=",
"200.0",
")",
":",
"lambda_pos",
"=",
"[",
"]",
"lambda_neg",
"=",
"[",
"]",
"lambda_quad",
"=",
"[",
"]",
"lambda_lu",
"=",
"[",
"]",
"if",
"init_dual_file",
"is",
"None",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"neural_net_params_object",
".",
"num_hidden_layers",
"+",
"1",
")",
":",
"initializer",
"=",
"(",
"np",
".",
"random",
".",
"uniform",
"(",
"0",
",",
"random_init_variance",
",",
"size",
"=",
"(",
"neural_net_params_object",
".",
"sizes",
"[",
"i",
"]",
",",
"1",
")",
")",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"lambda_pos",
".",
"append",
"(",
"tf",
".",
"get_variable",
"(",
"'lambda_pos_'",
"+",
"str",
"(",
"i",
")",
",",
"initializer",
"=",
"initializer",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
")",
"initializer",
"=",
"(",
"np",
".",
"random",
".",
"uniform",
"(",
"0",
",",
"random_init_variance",
",",
"size",
"=",
"(",
"neural_net_params_object",
".",
"sizes",
"[",
"i",
"]",
",",
"1",
")",
")",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"lambda_neg",
".",
"append",
"(",
"tf",
".",
"get_variable",
"(",
"'lambda_neg_'",
"+",
"str",
"(",
"i",
")",
",",
"initializer",
"=",
"initializer",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
")",
"initializer",
"=",
"(",
"np",
".",
"random",
".",
"uniform",
"(",
"0",
",",
"random_init_variance",
",",
"size",
"=",
"(",
"neural_net_params_object",
".",
"sizes",
"[",
"i",
"]",
",",
"1",
")",
")",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"lambda_quad",
".",
"append",
"(",
"tf",
".",
"get_variable",
"(",
"'lambda_quad_'",
"+",
"str",
"(",
"i",
")",
",",
"initializer",
"=",
"initializer",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
")",
"initializer",
"=",
"(",
"np",
".",
"random",
".",
"uniform",
"(",
"0",
",",
"random_init_variance",
",",
"size",
"=",
"(",
"neural_net_params_object",
".",
"sizes",
"[",
"i",
"]",
",",
"1",
")",
")",
")",
".",
"astype",
"(",
"np",
".",
"float32",
")",
"lambda_lu",
".",
"append",
"(",
"tf",
".",
"get_variable",
"(",
"'lambda_lu_'",
"+",
"str",
"(",
"i",
")",
",",
"initializer",
"=",
"initializer",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
")",
"nu",
"=",
"tf",
".",
"get_variable",
"(",
"'nu'",
",",
"initializer",
"=",
"init_nu",
")",
"else",
":",
"# Loading from file",
"dual_var_init_val",
"=",
"np",
".",
"load",
"(",
"init_dual_file",
")",
".",
"item",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"neural_net_params_object",
".",
"num_hidden_layers",
"+",
"1",
")",
":",
"lambda_pos",
".",
"append",
"(",
"tf",
".",
"get_variable",
"(",
"'lambda_pos_'",
"+",
"str",
"(",
"i",
")",
",",
"initializer",
"=",
"dual_var_init_val",
"[",
"'lambda_pos'",
"]",
"[",
"i",
"]",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
")",
"lambda_neg",
".",
"append",
"(",
"tf",
".",
"get_variable",
"(",
"'lambda_neg_'",
"+",
"str",
"(",
"i",
")",
",",
"initializer",
"=",
"dual_var_init_val",
"[",
"'lambda_neg'",
"]",
"[",
"i",
"]",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
")",
"lambda_quad",
".",
"append",
"(",
"tf",
".",
"get_variable",
"(",
"'lambda_quad_'",
"+",
"str",
"(",
"i",
")",
",",
"initializer",
"=",
"dual_var_init_val",
"[",
"'lambda_quad'",
"]",
"[",
"i",
"]",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
")",
"lambda_lu",
".",
"append",
"(",
"tf",
".",
"get_variable",
"(",
"'lambda_lu_'",
"+",
"str",
"(",
"i",
")",
",",
"initializer",
"=",
"dual_var_init_val",
"[",
"'lambda_lu'",
"]",
"[",
"i",
"]",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
")",
"nu",
"=",
"tf",
".",
"get_variable",
"(",
"'nu'",
",",
"initializer",
"=",
"1.0",
"*",
"dual_var_init_val",
"[",
"'nu'",
"]",
")",
"dual_var",
"=",
"{",
"'lambda_pos'",
":",
"lambda_pos",
",",
"'lambda_neg'",
":",
"lambda_neg",
",",
"'lambda_quad'",
":",
"lambda_quad",
",",
"'lambda_lu'",
":",
"lambda_lu",
",",
"'nu'",
":",
"nu",
"}",
"return",
"dual_var"
] | Function to initialize the dual variables of the class.
Args:
neural_net_params_object: Object with the neural net weights, biases
and types
init_dual_file: Path to a file containing dual variables; if the path
is empty, random initialization is performed
Expects numpy dictionary with
lambda_pos_0, lambda_pos_1, ..
lambda_neg_0, lambda_neg_1, ..
lambda_quad_0, lambda_quad_1, ..
lambda_lu_0, lambda_lu_1, ..
random_init_variance: variance for random initialization
init_nu: Value to initialize nu variable with
Returns:
dual_var: dual variables initialized appropriately. | [
"Function",
"to",
"initialize",
"the",
"dual",
"variables",
"of",
"the",
"class",
"."
] | 97488e215760547b81afc53f5e5de8ba7da5bd98 | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/experimental/certification/utils.py#L22-L93 | train |
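For the file-loading branch above, the `.npy` file must hold one pickled dict of per-layer lists keyed `lambda_pos`, `lambda_neg`, `lambda_quad`, `lambda_lu`, plus a scalar `nu` (the docstring's `lambda_pos_0, lambda_pos_1, ..` naming describes the TF variables that get created, not the file layout). A sketch of writing such a file, with hypothetical layer sizes:

```python
import numpy as np

sizes = [784, 100, 10]  # hypothetical per-layer sizes
rand = lambda n: np.random.uniform(0, 0.01, size=(n, 1)).astype(np.float32)
dual = {key: [rand(n) for n in sizes]
        for key in ('lambda_pos', 'lambda_neg', 'lambda_quad', 'lambda_lu')}
dual['nu'] = 200.0
np.save('init_dual.npy', dual)
# np.load('init_dual.npy').item() recovers the dict; recent NumPy versions
# additionally require allow_pickle=True.
```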
tensorflow/cleverhans | cleverhans/experimental/certification/utils.py | eig_one_step | def eig_one_step(current_vector, learning_rate, vector_prod_fn):
"""Function that performs one step of gd (variant) for min eigen value.
Args:
current_vector: current estimate of the eigen vector with minimum eigen
value.
learning_rate: learning rate.
vector_prod_fn: function which returns product H*x, where H is a matrix for
which we are computing the eigenvector.
Returns:
updated vector after one step
"""
grad = 2*vector_prod_fn(current_vector)
# Current objective = (1/2)*v^T (2*M*v); v = current_vector
# grad = 2*M*v
current_objective = tf.reshape(tf.matmul(tf.transpose(current_vector),
grad) / 2., shape=())
# Project the gradient into the tangent space of the constraint region.
# This way we do not waste time taking steps that try to change the
# norm of current_vector
grad = grad - current_vector*tf.matmul(tf.transpose(current_vector), grad)
grad_norm = tf.norm(grad)
grad_norm_sq = tf.square(grad_norm)
# Computing normalized gradient of unit norm
norm_grad = grad / grad_norm
# Computing directional second derivative (dsd)
# dsd = 2*g^T M g, where g is normalized gradient
directional_second_derivative = (
tf.reshape(2*tf.matmul(tf.transpose(norm_grad),
vector_prod_fn(norm_grad)),
shape=()))
# Computing grad^\top M grad [useful to compute step size later]
# Just a rescaling of the directional_second_derivative (which uses
# normalized gradient)
grad_m_grad = directional_second_derivative*grad_norm_sq / 2
# Directional_second_derivative/2 = objective when vector is norm_grad
# If this is smaller than current objective, simply return that
if directional_second_derivative / 2. < current_objective:
return norm_grad
# If curvature is positive, jump to the bottom of the bowl
if directional_second_derivative > 0.:
step = -1. * grad_norm / directional_second_derivative
else:
# If the gradient is very small, do not move
if grad_norm_sq <= 1e-16:
step = 0.0
else:
# Make a heuristic guess of the step size
step = -2. * tf.reduce_sum(current_vector*grad) / grad_norm_sq
# Computing gain using the gradient and second derivative
gain = -(2 * tf.reduce_sum(current_vector*grad) +
(step*step) * grad_m_grad)
# Fall back to pre-determined learning rate if no gain
if gain < 0.:
step = -learning_rate * grad_norm
current_vector = current_vector + step * norm_grad
return tf.nn.l2_normalize(current_vector) | python | def eig_one_step(current_vector, learning_rate, vector_prod_fn):
"""Function that performs one step of gd (variant) for min eigen value.
Args:
current_vector: current estimate of the eigen vector with minimum eigen
value.
learning_rate: learning rate.
vector_prod_fn: function which returns product H*x, where H is a matrix for
which we are computing the eigenvector.
Returns:
updated vector after one step
"""
grad = 2*vector_prod_fn(current_vector)
# Current objective = (1/2)*v^T (2*M*v); v = current_vector
# grad = 2*M*v
current_objective = tf.reshape(tf.matmul(tf.transpose(current_vector),
grad) / 2., shape=())
# Project the gradient into the tangent space of the constraint region.
# This way we do not waste time taking steps that try to change the
# norm of current_vector
grad = grad - current_vector*tf.matmul(tf.transpose(current_vector), grad)
grad_norm = tf.norm(grad)
grad_norm_sq = tf.square(grad_norm)
# Computing normalized gradient of unit norm
norm_grad = grad / grad_norm
# Computing directional second derivative (dsd)
# dsd = 2*g^T M g, where g is normalized gradient
directional_second_derivative = (
tf.reshape(2*tf.matmul(tf.transpose(norm_grad),
vector_prod_fn(norm_grad)),
shape=()))
# Computing grad^\top M grad [useful to compute step size later]
# Just a rescaling of the directional_second_derivative (which uses
# normalized gradient)
grad_m_grad = directional_second_derivative*grad_norm_sq / 2
# Directional_second_derivative/2 = objective when vector is norm_grad
# If this is smaller than current objective, simply return that
if directional_second_derivative / 2. < current_objective:
return norm_grad
# If curvature is positive, jump to the bottom of the bowl
if directional_second_derivative > 0.:
step = -1. * grad_norm / directional_second_derivative
else:
# If the gradient is very small, do not move
if grad_norm_sq <= 1e-16:
step = 0.0
else:
# Make a heuristic guess of the step size
step = -2. * tf.reduce_sum(current_vector*grad) / grad_norm_sq
# Computing gain using the gradient and second derivative
gain = -(2 * tf.reduce_sum(current_vector*grad) +
(step*step) * grad_m_grad)
# Fall back to pre-determined learning rate if no gain
if gain < 0.:
step = -learning_rate * grad_norm
current_vector = current_vector + step * norm_grad
return tf.nn.l2_normalize(current_vector) | [
"def",
"eig_one_step",
"(",
"current_vector",
",",
"learning_rate",
",",
"vector_prod_fn",
")",
":",
"grad",
"=",
"2",
"*",
"vector_prod_fn",
"(",
"current_vector",
")",
"# Current objective = (1/2)*v^T (2*M*v); v = current_vector",
"# grad = 2*M*v",
"current_objective",
"=",
"tf",
".",
"reshape",
"(",
"tf",
".",
"matmul",
"(",
"tf",
".",
"transpose",
"(",
"current_vector",
")",
",",
"grad",
")",
"/",
"2.",
",",
"shape",
"=",
"(",
")",
")",
"# Project the gradient into the tangent space of the constraint region.",
"# This way we do not waste time taking steps that try to change the",
"# norm of current_vector",
"grad",
"=",
"grad",
"-",
"current_vector",
"*",
"tf",
".",
"matmul",
"(",
"tf",
".",
"transpose",
"(",
"current_vector",
")",
",",
"grad",
")",
"grad_norm",
"=",
"tf",
".",
"norm",
"(",
"grad",
")",
"grad_norm_sq",
"=",
"tf",
".",
"square",
"(",
"grad_norm",
")",
"# Computing normalized gradient of unit norm",
"norm_grad",
"=",
"grad",
"/",
"grad_norm",
"# Computing directional second derivative (dsd)",
"# dsd = 2*g^T M g, where g is normalized gradient",
"directional_second_derivative",
"=",
"(",
"tf",
".",
"reshape",
"(",
"2",
"*",
"tf",
".",
"matmul",
"(",
"tf",
".",
"transpose",
"(",
"norm_grad",
")",
",",
"vector_prod_fn",
"(",
"norm_grad",
")",
")",
",",
"shape",
"=",
"(",
")",
")",
")",
"# Computing grad^\\top M grad [useful to compute step size later]",
"# Just a rescaling of the directional_second_derivative (which uses",
"# normalized gradient",
"grad_m_grad",
"=",
"directional_second_derivative",
"*",
"grad_norm_sq",
"/",
"2",
"# Directional_second_derivative/2 = objective when vector is norm_grad",
"# If this is smaller than current objective, simply return that",
"if",
"directional_second_derivative",
"/",
"2.",
"<",
"current_objective",
":",
"return",
"norm_grad",
"# If curvature is positive, jump to the bottom of the bowl",
"if",
"directional_second_derivative",
">",
"0.",
":",
"step",
"=",
"-",
"1.",
"*",
"grad_norm",
"/",
"directional_second_derivative",
"else",
":",
"# If the gradient is very small, do not move",
"if",
"grad_norm_sq",
"<=",
"1e-16",
":",
"step",
"=",
"0.0",
"else",
":",
"# Make a heuristic guess of the step size",
"step",
"=",
"-",
"2.",
"*",
"tf",
".",
"reduce_sum",
"(",
"current_vector",
"*",
"grad",
")",
"/",
"grad_norm_sq",
"# Computing gain using the gradient and second derivative",
"gain",
"=",
"-",
"(",
"2",
"*",
"tf",
".",
"reduce_sum",
"(",
"current_vector",
"*",
"grad",
")",
"+",
"(",
"step",
"*",
"step",
")",
"*",
"grad_m_grad",
")",
"# Fall back to pre-determined learning rate if no gain",
"if",
"gain",
"<",
"0.",
":",
"step",
"=",
"-",
"learning_rate",
"*",
"grad_norm",
"current_vector",
"=",
"current_vector",
"+",
"step",
"*",
"norm_grad",
"return",
"tf",
".",
"nn",
".",
"l2_normalize",
"(",
"current_vector",
")"
] | Function that performs one step of gd (variant) for min eigen value.
Args:
current_vector: current estimate of the eigen vector with minimum eigen
value.
learning_rate: learning rate.
vector_prod_fn: function which returns product H*x, where H is a matrix for
which we are computing the eigenvector.
Returns:
updated vector after one step | [
"Function",
"that",
"performs",
"one",
"step",
"of",
"gd",
"(",
"variant",
")",
"for",
"min",
"eigen",
"value",
"."
] | 97488e215760547b81afc53f5e5de8ba7da5bd98 | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/experimental/certification/utils.py#L95-L159 | train |
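A dense-matrix NumPy analogue of one update, simplified to the fixed-learning-rate fallback branch (the TF code above additionally tries curvature-aware step sizes first). It shows the two essential moves: project the gradient of v^T M v onto the tangent space of the unit sphere, step, then renormalize:

```python
import numpy as np

def eig_one_step_np(v, lr, M):
    """One projected-gradient step toward the minimum-eigenvalue direction."""
    grad = 2.0 * M.dot(v)                 # gradient of v^T M v
    grad -= v * float(v.T.dot(grad))      # remove the radial component
    if float(grad.T.dot(grad)) <= 1e-16:  # gradient vanished: stay put
        return v
    v = v - lr * grad                     # equals step = -lr*grad_norm on unit grad
    return v / np.linalg.norm(v)
```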
tensorflow/cleverhans | cleverhans/experimental/certification/utils.py | minimum_eigen_vector | def minimum_eigen_vector(x, num_steps, learning_rate, vector_prod_fn):
"""Computes eigenvector which corresponds to minimum eigenvalue.
Args:
x: initial value of eigenvector.
num_steps: number of optimization steps.
learning_rate: learning rate.
vector_prod_fn: function which takes x and returns product H*x.
Returns:
approximate value of eigenvector.
This function finds approximate value of eigenvector of matrix H which
corresponds to the smallest (most negative) eigenvalue of H.
It works by solving optimization problem x^{T}*H*x -> min.
"""
x = tf.nn.l2_normalize(x)
for _ in range(num_steps):
x = eig_one_step(x, learning_rate, vector_prod_fn)
return x | python | def minimum_eigen_vector(x, num_steps, learning_rate, vector_prod_fn):
"""Computes eigenvector which corresponds to minimum eigenvalue.
Args:
x: initial value of eigenvector.
num_steps: number of optimization steps.
learning_rate: learning rate.
vector_prod_fn: function which takes x and returns product H*x.
Returns:
approximate value of eigenvector.
This function finds approximate value of eigenvector of matrix H which
corresponds to the smallest (most negative) eigenvalue of H.
It works by solving optimization problem x^{T}*H*x -> min.
"""
x = tf.nn.l2_normalize(x)
for _ in range(num_steps):
x = eig_one_step(x, learning_rate, vector_prod_fn)
return x | [
"def",
"minimum_eigen_vector",
"(",
"x",
",",
"num_steps",
",",
"learning_rate",
",",
"vector_prod_fn",
")",
":",
"x",
"=",
"tf",
".",
"nn",
".",
"l2_normalize",
"(",
"x",
")",
"for",
"_",
"in",
"range",
"(",
"num_steps",
")",
":",
"x",
"=",
"eig_one_step",
"(",
"x",
",",
"learning_rate",
",",
"vector_prod_fn",
")",
"return",
"x"
] | Computes eigenvector which corresponds to minimum eigenvalue.
Args:
x: initial value of eigenvector.
num_steps: number of optimization steps.
learning_rate: learning rate.
vector_prod_fn: function which takes x and returns product H*x.
Returns:
approximate value of eigenvector.
This function finds approximate value of eigenvector of matrix H which
corresponds to the smallest (most negative) eigenvalue of H.
It works by solving optimization problem x^{T}*H*x -> min. | [
"Computes",
"eigenvector",
"which",
"corresponds",
"to",
"minimum",
"eigenvalue",
"."
] | 97488e215760547b81afc53f5e5de8ba7da5bd98 | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/experimental/certification/utils.py#L162-L181 | train |
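Minimizing x^T H x over unit vectors converges to the eigenvector of the most negative eigenvalue, which is why the docstring above speaks of the minimum eigenvalue; a quick self-contained NumPy cross-check on a made-up symmetric matrix:

```python
import numpy as np

M = np.array([[2.0, 0.5],
              [0.5, -1.0]])
vals, vecs = np.linalg.eigh(M)    # eigh returns eigenvalues in ascending order
x = vecs[:, 0]                    # minimizer of the Rayleigh quotient
print(vals[0], float(x.dot(M).dot(x)))  # the two printed values coincide
```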
tensorflow/cleverhans | cleverhans/experimental/certification/utils.py | tf_lanczos_smallest_eigval | def tf_lanczos_smallest_eigval(vector_prod_fn,
matrix_dim,
initial_vector,
num_iter=1000,
max_iter=1000,
collapse_tol=1e-9,
dtype=tf.float32):
"""Computes smallest eigenvector and eigenvalue using Lanczos in pure TF.
This function computes smallest eigenvector and eigenvalue of the matrix
which is implicitly specified by `vector_prod_fn`.
`vector_prod_fn` is a function which takes `x` and returns a product of matrix
in consideration and `x`.
Computation is done using Lanczos algorithm, see
https://en.wikipedia.org/wiki/Lanczos_algorithm#The_algorithm
Args:
vector_prod_fn: function which takes a vector as an input and returns
matrix vector product.
matrix_dim: dimensionality of the matrix.
initial_vector: guess vector to start the algorithm with
num_iter: user-defined number of iterations for the algorithm
max_iter: maximum number of iterations.
collapse_tol: tolerance to determine collapse of the Krylov subspace
dtype: type of data
Returns:
tuple of (eigenvalue, eigenvector) of smallest eigenvalue and corresponding
eigenvector.
"""
# alpha will store diagonal elements
alpha = tf.TensorArray(dtype, size=1, dynamic_size=True, element_shape=())
# beta will store off diagonal elements
beta = tf.TensorArray(dtype, size=0, dynamic_size=True, element_shape=())
# q will store Krylov space basis
q_vectors = tf.TensorArray(
dtype, size=1, dynamic_size=True, element_shape=(matrix_dim, 1))
# If start vector is all zeros, make it a random normal vector and run for max_iter
if tf.norm(initial_vector) < collapse_tol:
initial_vector = tf.random_normal(shape=(matrix_dim, 1), dtype=dtype)
num_iter = max_iter
w = initial_vector / tf.norm(initial_vector)
# Iteration 0 of Lanczos
q_vectors = q_vectors.write(0, w)
w_ = vector_prod_fn(w)
cur_alpha = tf.reduce_sum(w_ * w)
alpha = alpha.write(0, cur_alpha)
w_ = w_ - tf.scalar_mul(cur_alpha, w)
w_prev = w
w = w_
# Subsequent iterations of Lanczos
for i in tf.range(1, num_iter):
cur_beta = tf.norm(w)
if cur_beta < collapse_tol:
# return early if Krylov subspace collapsed
break
# cur_beta is larger than collapse_tol,
# so division will return finite result.
w = w / cur_beta
w_ = vector_prod_fn(w)
cur_alpha = tf.reduce_sum(w_ * w)
q_vectors = q_vectors.write(i, w)
alpha = alpha.write(i, cur_alpha)
beta = beta.write(i-1, cur_beta)
w_ = w_ - tf.scalar_mul(cur_alpha, w) - tf.scalar_mul(cur_beta, w_prev)
w_prev = w
w = w_
alpha = alpha.stack()
beta = beta.stack()
q_vectors = tf.reshape(q_vectors.stack(), (-1, matrix_dim))
offdiag_submatrix = tf.linalg.diag(beta)
tridiag_matrix = (tf.linalg.diag(alpha)
+ tf.pad(offdiag_submatrix, [[0, 1], [1, 0]])
+ tf.pad(offdiag_submatrix, [[1, 0], [0, 1]]))
eigvals, eigvecs = tf.linalg.eigh(tridiag_matrix)
smallest_eigval = eigvals[0]
smallest_eigvec = tf.matmul(tf.reshape(eigvecs[:, 0], (1, -1)),
q_vectors)
smallest_eigvec = smallest_eigvec / tf.norm(smallest_eigvec)
smallest_eigvec = tf.reshape(smallest_eigvec, (matrix_dim, 1))
return smallest_eigval, smallest_eigvec | python | def tf_lanczos_smallest_eigval(vector_prod_fn,
matrix_dim,
initial_vector,
num_iter=1000,
max_iter=1000,
collapse_tol=1e-9,
dtype=tf.float32):
"""Computes smallest eigenvector and eigenvalue using Lanczos in pure TF.
This function computes smallest eigenvector and eigenvalue of the matrix
which is implicitly specified by `vector_prod_fn`.
`vector_prod_fn` is a function which takes `x` and returns a product of matrix
in consideration and `x`.
Computation is done using Lanczos algorithm, see
https://en.wikipedia.org/wiki/Lanczos_algorithm#The_algorithm
Args:
vector_prod_fn: function which takes a vector as an input and returns
matrix vector product.
matrix_dim: dimensionality of the matrix.
initial_vector: guess vector to start the algorithm with
num_iter: user-defined number of iterations for the algorithm
max_iter: maximum number of iterations.
collapse_tol: tolerance to determine collapse of the Krylov subspace
dtype: type of data
Returns:
tuple of (eigenvalue, eigenvector) of smallest eigenvalue and corresponding
eigenvector.
"""
# alpha will store diagonal elements
alpha = tf.TensorArray(dtype, size=1, dynamic_size=True, element_shape=())
# beta will store off diagonal elements
beta = tf.TensorArray(dtype, size=0, dynamic_size=True, element_shape=())
# q will store Krylov space basis
q_vectors = tf.TensorArray(
dtype, size=1, dynamic_size=True, element_shape=(matrix_dim, 1))
# If start vector is all zeros, make it a random normal vector and run for max_iter
if tf.norm(initial_vector) < collapse_tol:
initial_vector = tf.random_normal(shape=(matrix_dim, 1), dtype=dtype)
num_iter = max_iter
w = initial_vector / tf.norm(initial_vector)
# Iteration 0 of Lanczos
q_vectors = q_vectors.write(0, w)
w_ = vector_prod_fn(w)
cur_alpha = tf.reduce_sum(w_ * w)
alpha = alpha.write(0, cur_alpha)
w_ = w_ - tf.scalar_mul(cur_alpha, w)
w_prev = w
w = w_
# Subsequent iterations of Lanczos
for i in tf.range(1, num_iter):
cur_beta = tf.norm(w)
if cur_beta < collapse_tol:
# return early if Krylov subspace collapsed
break
# cur_beta is larger than collapse_tol,
# so division will return finite result.
w = w / cur_beta
w_ = vector_prod_fn(w)
cur_alpha = tf.reduce_sum(w_ * w)
q_vectors = q_vectors.write(i, w)
alpha = alpha.write(i, cur_alpha)
beta = beta.write(i-1, cur_beta)
w_ = w_ - tf.scalar_mul(cur_alpha, w) - tf.scalar_mul(cur_beta, w_prev)
w_prev = w
w = w_
alpha = alpha.stack()
beta = beta.stack()
q_vectors = tf.reshape(q_vectors.stack(), (-1, matrix_dim))
offdiag_submatrix = tf.linalg.diag(beta)
tridiag_matrix = (tf.linalg.diag(alpha)
+ tf.pad(offdiag_submatrix, [[0, 1], [1, 0]])
+ tf.pad(offdiag_submatrix, [[1, 0], [0, 1]]))
eigvals, eigvecs = tf.linalg.eigh(tridiag_matrix)
smallest_eigval = eigvals[0]
smallest_eigvec = tf.matmul(tf.reshape(eigvecs[:, 0], (1, -1)),
q_vectors)
smallest_eigvec = smallest_eigvec / tf.norm(smallest_eigvec)
smallest_eigvec = tf.reshape(smallest_eigvec, (matrix_dim, 1))
return smallest_eigval, smallest_eigvec | [
"def",
"tf_lanczos_smallest_eigval",
"(",
"vector_prod_fn",
",",
"matrix_dim",
",",
"initial_vector",
",",
"num_iter",
"=",
"1000",
",",
"max_iter",
"=",
"1000",
",",
"collapse_tol",
"=",
"1e-9",
",",
"dtype",
"=",
"tf",
".",
"float32",
")",
":",
"# alpha will store diagonal elements",
"alpha",
"=",
"tf",
".",
"TensorArray",
"(",
"dtype",
",",
"size",
"=",
"1",
",",
"dynamic_size",
"=",
"True",
",",
"element_shape",
"=",
"(",
")",
")",
"# beta will store off diagonal elements",
"beta",
"=",
"tf",
".",
"TensorArray",
"(",
"dtype",
",",
"size",
"=",
"0",
",",
"dynamic_size",
"=",
"True",
",",
"element_shape",
"=",
"(",
")",
")",
"# q will store Krylov space basis",
"q_vectors",
"=",
"tf",
".",
"TensorArray",
"(",
"dtype",
",",
"size",
"=",
"1",
",",
"dynamic_size",
"=",
"True",
",",
"element_shape",
"=",
"(",
"matrix_dim",
",",
"1",
")",
")",
"# If start vector is all zeros, make it a random normal vector and run for max_iter",
"if",
"tf",
".",
"norm",
"(",
"initial_vector",
")",
"<",
"collapse_tol",
":",
"initial_vector",
"=",
"tf",
".",
"random_normal",
"(",
"shape",
"=",
"(",
"matrix_dim",
",",
"1",
")",
",",
"dtype",
"=",
"dtype",
")",
"num_iter",
"=",
"max_iter",
"w",
"=",
"initial_vector",
"/",
"tf",
".",
"norm",
"(",
"initial_vector",
")",
"# Iteration 0 of Lanczos",
"q_vectors",
"=",
"q_vectors",
".",
"write",
"(",
"0",
",",
"w",
")",
"w_",
"=",
"vector_prod_fn",
"(",
"w",
")",
"cur_alpha",
"=",
"tf",
".",
"reduce_sum",
"(",
"w_",
"*",
"w",
")",
"alpha",
"=",
"alpha",
".",
"write",
"(",
"0",
",",
"cur_alpha",
")",
"w_",
"=",
"w_",
"-",
"tf",
".",
"scalar_mul",
"(",
"cur_alpha",
",",
"w",
")",
"w_prev",
"=",
"w",
"w",
"=",
"w_",
"# Subsequent iterations of Lanczos",
"for",
"i",
"in",
"tf",
".",
"range",
"(",
"1",
",",
"num_iter",
")",
":",
"cur_beta",
"=",
"tf",
".",
"norm",
"(",
"w",
")",
"if",
"cur_beta",
"<",
"collapse_tol",
":",
"# return early if Krylov subspace collapsed",
"break",
"# cur_beta is larger than collapse_tol,",
"# so division will return finite result.",
"w",
"=",
"w",
"/",
"cur_beta",
"w_",
"=",
"vector_prod_fn",
"(",
"w",
")",
"cur_alpha",
"=",
"tf",
".",
"reduce_sum",
"(",
"w_",
"*",
"w",
")",
"q_vectors",
"=",
"q_vectors",
".",
"write",
"(",
"i",
",",
"w",
")",
"alpha",
"=",
"alpha",
".",
"write",
"(",
"i",
",",
"cur_alpha",
")",
"beta",
"=",
"beta",
".",
"write",
"(",
"i",
"-",
"1",
",",
"cur_beta",
")",
"w_",
"=",
"w_",
"-",
"tf",
".",
"scalar_mul",
"(",
"cur_alpha",
",",
"w",
")",
"-",
"tf",
".",
"scalar_mul",
"(",
"cur_beta",
",",
"w_prev",
")",
"w_prev",
"=",
"w",
"w",
"=",
"w_",
"alpha",
"=",
"alpha",
".",
"stack",
"(",
")",
"beta",
"=",
"beta",
".",
"stack",
"(",
")",
"q_vectors",
"=",
"tf",
".",
"reshape",
"(",
"q_vectors",
".",
"stack",
"(",
")",
",",
"(",
"-",
"1",
",",
"matrix_dim",
")",
")",
"offdiag_submatrix",
"=",
"tf",
".",
"linalg",
".",
"diag",
"(",
"beta",
")",
"tridiag_matrix",
"=",
"(",
"tf",
".",
"linalg",
".",
"diag",
"(",
"alpha",
")",
"+",
"tf",
".",
"pad",
"(",
"offdiag_submatrix",
",",
"[",
"[",
"0",
",",
"1",
"]",
",",
"[",
"1",
",",
"0",
"]",
"]",
")",
"+",
"tf",
".",
"pad",
"(",
"offdiag_submatrix",
",",
"[",
"[",
"1",
",",
"0",
"]",
",",
"[",
"0",
",",
"1",
"]",
"]",
")",
")",
"eigvals",
",",
"eigvecs",
"=",
"tf",
".",
"linalg",
".",
"eigh",
"(",
"tridiag_matrix",
")",
"smallest_eigval",
"=",
"eigvals",
"[",
"0",
"]",
"smallest_eigvec",
"=",
"tf",
".",
"matmul",
"(",
"tf",
".",
"reshape",
"(",
"eigvecs",
"[",
":",
",",
"0",
"]",
",",
"(",
"1",
",",
"-",
"1",
")",
")",
",",
"q_vectors",
")",
"smallest_eigvec",
"=",
"smallest_eigvec",
"/",
"tf",
".",
"norm",
"(",
"smallest_eigvec",
")",
"smallest_eigvec",
"=",
"tf",
".",
"reshape",
"(",
"smallest_eigvec",
",",
"(",
"matrix_dim",
",",
"1",
")",
")",
"return",
"smallest_eigval",
",",
"smallest_eigvec"
] | Computes smallest eigenvector and eigenvalue using Lanczos in pure TF.
This function computes the smallest eigenvalue and a corresponding
eigenvector of the matrix which is implicitly specified by `vector_prod_fn`.
`vector_prod_fn` is a function which takes `x` and returns the product of the
matrix in consideration and `x`.
Computation is done using the Lanczos algorithm; see
https://en.wikipedia.org/wiki/Lanczos_algorithm#The_algorithm
Args:
vector_prod_fn: function which takes a vector as an input and returns
  the matrix-vector product.
matrix_dim: dimensionality of the matrix.
initial_vector: guess vector to start the algorithm with.
num_iter: user-defined number of iterations for the algorithm.
max_iter: maximum number of iterations.
collapse_tol: tolerance to determine collapse of the Krylov subspace.
dtype: type of data.
Returns:
tuple (eigenvalue, eigenvector) for the smallest eigenvalue and the
corresponding eigenvector. | [
"Computes",
"smallest",
"eigenvector",
"and",
"eigenvalue",
"using",
"Lanczos",
"in",
"pure",
"TF",
"."
] | 97488e215760547b81afc53f5e5de8ba7da5bd98 | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/experimental/certification/utils.py#L184-L278 | train |
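A minimal usage sketch for the function above. It is not part of the dataset row: it assumes TF 1.x with eager execution enabled (matching the `tf.random_normal` call and the Python-level control flow inside the function) and invents a small symmetric test matrix.

import numpy as np
import tensorflow as tf
tf.enable_eager_execution()

dim = 8
# Symmetric test matrix, so the smallest eigenpair is well defined.
a = np.random.randn(dim, dim).astype(np.float32)
sym = tf.constant((a + a.T) / 2.0)

def vector_prod_fn(x):
  # The matrix is exposed only through this matrix-vector product.
  return tf.matmul(sym, x)

v0 = tf.ones((dim, 1), dtype=tf.float32)
eigval, eigvec = tf_lanczos_smallest_eigval(vector_prod_fn, dim, v0, num_iter=50)
# Residual ||A v - lambda v|| should be close to zero.
print(float(tf.norm(vector_prod_fn(eigvec) - eigval * eigvec)))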
tensorflow/cleverhans | cleverhans/serial.py | NoRefModel.get_vars | def get_vars(self):
"""
Provides access to the model's Variables.
This may include Variables that are not parameters, such as batch
norm running moments.
:return: A list of all Variables defining the model.
"""
    # Catch eager execution and require get_vars to be overridden in that case.
try:
if tf.executing_eagerly():
raise NotImplementedError("For Eager execution - get_vars "
"must be overridden.")
except AttributeError:
pass
done = False
tried_to_make_params = False
while not done:
# Most models in cleverhans use only trainable variables and do not
# make sure the other collections are updated correctly.
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
self.scope + "/")
# When wrapping other code, such as the CIFAR 10 challenge models,
# we need to make sure we get the batch norm running averages as well
# as the trainable variables.
model_vars = tf.get_collection(tf.GraphKeys.MODEL_VARIABLES,
self.scope + "/")
scope_vars = ordered_union(trainable_vars, model_vars)
if len(scope_vars) > 0:
done = True
else:
assert not tried_to_make_params
tried_to_make_params = True
self.make_params()
# Make sure no variables have been added or removed
if hasattr(self, "num_vars"):
assert self.num_vars == len(scope_vars)
else:
self.num_vars = len(scope_vars)
return scope_vars | python | def get_vars(self):
"""
Provides access to the model's Variables.
This may include Variables that are not parameters, such as batch
norm running moments.
:return: A list of all Variables defining the model.
"""
    # Catch eager execution and require get_vars to be overridden in that case.
try:
if tf.executing_eagerly():
raise NotImplementedError("For Eager execution - get_vars "
"must be overridden.")
except AttributeError:
pass
done = False
tried_to_make_params = False
while not done:
# Most models in cleverhans use only trainable variables and do not
# make sure the other collections are updated correctly.
trainable_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
self.scope + "/")
# When wrapping other code, such as the CIFAR 10 challenge models,
# we need to make sure we get the batch norm running averages as well
# as the trainable variables.
model_vars = tf.get_collection(tf.GraphKeys.MODEL_VARIABLES,
self.scope + "/")
scope_vars = ordered_union(trainable_vars, model_vars)
if len(scope_vars) > 0:
done = True
else:
assert not tried_to_make_params
tried_to_make_params = True
self.make_params()
# Make sure no variables have been added or removed
if hasattr(self, "num_vars"):
assert self.num_vars == len(scope_vars)
else:
self.num_vars = len(scope_vars)
return scope_vars | [
"def",
"get_vars",
"(",
"self",
")",
":",
"# Catch eager execution and assert function overload.",
"try",
":",
"if",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"raise",
"NotImplementedError",
"(",
"\"For Eager execution - get_vars \"",
"\"must be overridden.\"",
")",
"except",
"AttributeError",
":",
"pass",
"done",
"=",
"False",
"tried_to_make_params",
"=",
"False",
"while",
"not",
"done",
":",
"# Most models in cleverhans use only trainable variables and do not",
"# make sure the other collections are updated correctly.",
"trainable_vars",
"=",
"tf",
".",
"get_collection",
"(",
"tf",
".",
"GraphKeys",
".",
"TRAINABLE_VARIABLES",
",",
"self",
".",
"scope",
"+",
"\"/\"",
")",
"# When wrapping other code, such as the CIFAR 10 challenge models,",
"# we need to make sure we get the batch norm running averages as well",
"# as the trainable variables.",
"model_vars",
"=",
"tf",
".",
"get_collection",
"(",
"tf",
".",
"GraphKeys",
".",
"MODEL_VARIABLES",
",",
"self",
".",
"scope",
"+",
"\"/\"",
")",
"scope_vars",
"=",
"ordered_union",
"(",
"trainable_vars",
",",
"model_vars",
")",
"if",
"len",
"(",
"scope_vars",
")",
">",
"0",
":",
"done",
"=",
"True",
"else",
":",
"assert",
"not",
"tried_to_make_params",
"tried_to_make_params",
"=",
"True",
"self",
".",
"make_params",
"(",
")",
"# Make sure no variables have been added or removed",
"if",
"hasattr",
"(",
"self",
",",
"\"num_vars\"",
")",
":",
"assert",
"self",
".",
"num_vars",
"==",
"len",
"(",
"scope_vars",
")",
"else",
":",
"self",
".",
"num_vars",
"=",
"len",
"(",
"scope_vars",
")",
"return",
"scope_vars"
] | Provides access to the model's Variables.
This may include Variables that are not parameters, such as batch
norm running moments.
:return: A list of all Variables defining the model. | [
"Provides",
"access",
"to",
"the",
"model",
"s",
"Variables",
".",
"This",
"may",
"include",
"Variables",
"that",
"are",
"not",
"parameters",
"such",
"as",
"batch",
"norm",
"running",
"moments",
".",
":",
"return",
":",
"A",
"list",
"of",
"all",
"Variables",
"defining",
"the",
"model",
"."
] | 97488e215760547b81afc53f5e5de8ba7da5bd98 | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/serial.py#L151-L194 | train |
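The method above relies on an `ordered_union` helper to merge the trainable and model variable collections. A plausible reimplementation is sketched below; it is an assumption about the helper, not the library's exact code, but it captures the contract the method needs: first-seen order preserved, duplicates dropped, so batch-norm moments follow the trainable variables.

def ordered_union(first, second):
  # Union of two sequences, keeping first-seen order and dropping duplicates.
  out = []
  for element in list(first) + list(second):
    if element not in out:
      out.append(element)
  return out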
tensorflow/cleverhans | cleverhans/picklable_model.py | Dropout.fprop | def fprop(self, x, dropout=False, dropout_dict=None, **kwargs):
"""
Forward propagation as either no-op or dropping random units.
:param x: The input to the layer
:param dropout: bool specifying whether to drop units
:param dropout_dict: dict
This dictionary is usually not needed.
In rare cases, generally for research purposes, this dictionary
makes it possible to run forward propagation with a different
dropout include probability.
This dictionary should be passed as a named argument to the MLP
class, which will then pass it to *all* layers' fprop methods.
Other layers will just receive this as an ignored kwargs entry.
Each dropout layer looks up its own name in this dictionary
to read out its include probability.
"""
include_prob = self.include_prob
if dropout_dict is not None:
assert dropout
if self.name in dropout_dict:
include_prob = dropout_dict[self.name]
if dropout:
return tf.nn.dropout(x, include_prob)
return x | python | def fprop(self, x, dropout=False, dropout_dict=None, **kwargs):
"""
Forward propagation as either no-op or dropping random units.
:param x: The input to the layer
:param dropout: bool specifying whether to drop units
:param dropout_dict: dict
This dictionary is usually not needed.
In rare cases, generally for research purposes, this dictionary
makes it possible to run forward propagation with a different
dropout include probability.
This dictionary should be passed as a named argument to the MLP
class, which will then pass it to *all* layers' fprop methods.
Other layers will just receive this as an ignored kwargs entry.
Each dropout layer looks up its own name in this dictionary
to read out its include probability.
"""
include_prob = self.include_prob
if dropout_dict is not None:
assert dropout
if self.name in dropout_dict:
include_prob = dropout_dict[self.name]
if dropout:
return tf.nn.dropout(x, include_prob)
return x | [
"def",
"fprop",
"(",
"self",
",",
"x",
",",
"dropout",
"=",
"False",
",",
"dropout_dict",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"include_prob",
"=",
"self",
".",
"include_prob",
"if",
"dropout_dict",
"is",
"not",
"None",
":",
"assert",
"dropout",
"if",
"self",
".",
"name",
"in",
"dropout_dict",
":",
"include_prob",
"=",
"dropout_dict",
"[",
"self",
".",
"name",
"]",
"if",
"dropout",
":",
"return",
"tf",
".",
"nn",
".",
"dropout",
"(",
"x",
",",
"include_prob",
")",
"return",
"x"
] | Forward propagation as either no-op or dropping random units.
:param x: The input to the layer
:param dropout: bool specifying whether to drop units
:param dropout_dict: dict
This dictionary is usually not needed.
In rare cases, generally for research purposes, this dictionary
makes it possible to run forward propagation with a different
dropout include probability.
This dictionary should be passed as a named argument to the MLP
class, which will then pass it to *all* layers' fprop methods.
Other layers will just receive this as an ignored kwargs entry.
Each dropout layer looks up its own name in this dictionary
to read out its include probability. | [
"Forward",
"propagation",
"as",
"either",
"no",
"-",
"op",
"or",
"dropping",
"random",
"units",
".",
":",
"param",
"x",
":",
"The",
"input",
"to",
"the",
"layer",
":",
"param",
"dropout",
":",
"bool",
"specifying",
"whether",
"to",
"drop",
"units",
":",
"param",
"dropout_dict",
":",
"dict",
"This",
"dictionary",
"is",
"usually",
"not",
"needed",
".",
"In",
"rare",
"cases",
"generally",
"for",
"research",
"purposes",
"this",
"dictionary",
"makes",
"it",
"possible",
"to",
"run",
"forward",
"propagation",
"with",
"a",
"different",
"dropout",
"include",
"probability",
".",
"This",
"dictionary",
"should",
"be",
"passed",
"as",
"a",
"named",
"argument",
"to",
"the",
"MLP",
"class",
"which",
"will",
"then",
"pass",
"it",
"to",
"*",
"all",
"*",
"layers",
"fprop",
"methods",
".",
"Other",
"layers",
"will",
"just",
"receive",
"this",
"as",
"an",
"ignored",
"kwargs",
"entry",
".",
"Each",
"dropout",
"layer",
"looks",
"up",
"its",
"own",
"name",
"in",
"this",
"dictionary",
"to",
"read",
"out",
"its",
"include",
"probability",
"."
] | 97488e215760547b81afc53f5e5de8ba7da5bd98 | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/picklable_model.py#L616-L639 | train |
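A short usage sketch of the override path described above. The model object `mlp`, the layer name 'dropout1', and the probability 0.8 are all assumptions for illustration; the key must match the `name` the Dropout layer was constructed with.

# Train-time forward pass with one layer's include probability overridden.
outputs = mlp.fprop(x, dropout=True, dropout_dict={'dropout1': 0.8})
# Ordinary train-time dropout, each layer using its own include_prob.
outputs = mlp.fprop(x, dropout=True)
# Test-time forward pass: every dropout layer acts as a no-op.
outputs = mlp.fprop(x)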
tensorflow/cleverhans | cleverhans/attacks/carlini_wagner_l2.py | CarliniWagnerL2.generate | def generate(self, x, **kwargs):
"""
Return a tensor that constructs adversarial examples for the given
input. Generate uses tf.py_func in order to operate over tensors.
:param x: A tensor with the inputs.
:param kwargs: See `parse_params`
"""
assert self.sess is not None, \
'Cannot use `generate` when no `sess` was provided'
self.parse_params(**kwargs)
labels, nb_classes = self.get_or_guess_labels(x, kwargs)
attack = CWL2(self.sess, self.model, self.batch_size, self.confidence,
'y_target' in kwargs, self.learning_rate,
self.binary_search_steps, self.max_iterations,
self.abort_early, self.initial_const, self.clip_min,
self.clip_max, nb_classes,
x.get_shape().as_list()[1:])
def cw_wrap(x_val, y_val):
return np.array(attack.attack(x_val, y_val), dtype=self.np_dtype)
wrap = tf.py_func(cw_wrap, [x, labels], self.tf_dtype)
wrap.set_shape(x.get_shape())
return wrap | python | def generate(self, x, **kwargs):
"""
Return a tensor that constructs adversarial examples for the given
input. Generate uses tf.py_func in order to operate over tensors.
:param x: A tensor with the inputs.
:param kwargs: See `parse_params`
"""
assert self.sess is not None, \
'Cannot use `generate` when no `sess` was provided'
self.parse_params(**kwargs)
labels, nb_classes = self.get_or_guess_labels(x, kwargs)
attack = CWL2(self.sess, self.model, self.batch_size, self.confidence,
'y_target' in kwargs, self.learning_rate,
self.binary_search_steps, self.max_iterations,
self.abort_early, self.initial_const, self.clip_min,
self.clip_max, nb_classes,
x.get_shape().as_list()[1:])
def cw_wrap(x_val, y_val):
return np.array(attack.attack(x_val, y_val), dtype=self.np_dtype)
wrap = tf.py_func(cw_wrap, [x, labels], self.tf_dtype)
wrap.set_shape(x.get_shape())
return wrap | [
"def",
"generate",
"(",
"self",
",",
"x",
",",
"*",
"*",
"kwargs",
")",
":",
"assert",
"self",
".",
"sess",
"is",
"not",
"None",
",",
"'Cannot use `generate` when no `sess` was provided'",
"self",
".",
"parse_params",
"(",
"*",
"*",
"kwargs",
")",
"labels",
",",
"nb_classes",
"=",
"self",
".",
"get_or_guess_labels",
"(",
"x",
",",
"kwargs",
")",
"attack",
"=",
"CWL2",
"(",
"self",
".",
"sess",
",",
"self",
".",
"model",
",",
"self",
".",
"batch_size",
",",
"self",
".",
"confidence",
",",
"'y_target'",
"in",
"kwargs",
",",
"self",
".",
"learning_rate",
",",
"self",
".",
"binary_search_steps",
",",
"self",
".",
"max_iterations",
",",
"self",
".",
"abort_early",
",",
"self",
".",
"initial_const",
",",
"self",
".",
"clip_min",
",",
"self",
".",
"clip_max",
",",
"nb_classes",
",",
"x",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"[",
"1",
":",
"]",
")",
"def",
"cw_wrap",
"(",
"x_val",
",",
"y_val",
")",
":",
"return",
"np",
".",
"array",
"(",
"attack",
".",
"attack",
"(",
"x_val",
",",
"y_val",
")",
",",
"dtype",
"=",
"self",
".",
"np_dtype",
")",
"wrap",
"=",
"tf",
".",
"py_func",
"(",
"cw_wrap",
",",
"[",
"x",
",",
"labels",
"]",
",",
"self",
".",
"tf_dtype",
")",
"wrap",
".",
"set_shape",
"(",
"x",
".",
"get_shape",
"(",
")",
")",
"return",
"wrap"
] | Return a tensor that constructs adversarial examples for the given
input. Generate uses tf.py_func in order to operate over tensors.
:param x: A tensor with the inputs.
:param kwargs: See `parse_params` | [
"Return",
"a",
"tensor",
"that",
"constructs",
"adversarial",
"examples",
"for",
"the",
"given",
"input",
".",
"Generate",
"uses",
"tf",
".",
"py_func",
"in",
"order",
"to",
"operate",
"over",
"tensors",
"."
] | 97488e215760547b81afc53f5e5de8ba7da5bd98 | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/carlini_wagner_l2.py#L58-L85 | train |
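An end-to-end usage sketch for `generate`. It assumes TF 1.x graph mode, a cleverhans `Model` instance `model`, a `tf.Session` `sess` passed at construction time (as the assert in `generate` requires), an input placeholder `x`, and a numpy batch `x_test`.

from cleverhans.attacks import CarliniWagnerL2

attack = CarliniWagnerL2(model, sess=sess)
adv_x = attack.generate(x, batch_size=128, binary_search_steps=3,
                        max_iterations=100, clip_min=0., clip_max=1.)
# adv_x is symbolic; the tf.py_func wrapper runs the attack when evaluated.
adv_np = sess.run(adv_x, feed_dict={x: x_test[:128]})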
tensorflow/cleverhans | cleverhans/attacks/carlini_wagner_l2.py | CarliniWagnerL2.parse_params | def parse_params(self,
y=None,
y_target=None,
batch_size=1,
confidence=0,
learning_rate=5e-3,
binary_search_steps=5,
max_iterations=1000,
abort_early=True,
initial_const=1e-2,
clip_min=0,
clip_max=1):
"""
:param y: (optional) A tensor with the true labels for an untargeted
attack. If None (and y_target is None) then use the
original labels the classifier assigns.
:param y_target: (optional) A tensor with the target labels for a
targeted attack.
:param confidence: Confidence of adversarial examples: higher produces
examples with larger l2 distortion, but more
strongly classified as adversarial.
:param batch_size: Number of attacks to run simultaneously.
:param learning_rate: The learning rate for the attack algorithm.
Smaller values produce better results but are
slower to converge.
:param binary_search_steps: The number of times we perform binary
search to find the optimal tradeoff-
                                constant between norm of the perturbation
and confidence of the classification.
:param max_iterations: The maximum number of iterations. Setting this
to a larger value will produce lower distortion
results. Using only a few iterations requires
a larger learning rate, and will produce larger
distortion results.
:param abort_early: If true, allows early aborts if gradient descent
is unable to make progress (i.e., gets stuck in
a local minimum).
:param initial_const: The initial tradeoff-constant to use to tune the
relative importance of size of the perturbation
and confidence of classification.
If binary_search_steps is large, the initial
constant is not important. A smaller value of
this constant gives lower distortion results.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
"""
# ignore the y and y_target argument
self.batch_size = batch_size
self.confidence = confidence
self.learning_rate = learning_rate
self.binary_search_steps = binary_search_steps
self.max_iterations = max_iterations
self.abort_early = abort_early
self.initial_const = initial_const
self.clip_min = clip_min
self.clip_max = clip_max | python | def parse_params(self,
y=None,
y_target=None,
batch_size=1,
confidence=0,
learning_rate=5e-3,
binary_search_steps=5,
max_iterations=1000,
abort_early=True,
initial_const=1e-2,
clip_min=0,
clip_max=1):
"""
:param y: (optional) A tensor with the true labels for an untargeted
attack. If None (and y_target is None) then use the
original labels the classifier assigns.
:param y_target: (optional) A tensor with the target labels for a
targeted attack.
:param confidence: Confidence of adversarial examples: higher produces
examples with larger l2 distortion, but more
strongly classified as adversarial.
:param batch_size: Number of attacks to run simultaneously.
:param learning_rate: The learning rate for the attack algorithm.
Smaller values produce better results but are
slower to converge.
:param binary_search_steps: The number of times we perform binary
search to find the optimal tradeoff-
                                constant between norm of the perturbation
and confidence of the classification.
:param max_iterations: The maximum number of iterations. Setting this
to a larger value will produce lower distortion
results. Using only a few iterations requires
a larger learning rate, and will produce larger
distortion results.
:param abort_early: If true, allows early aborts if gradient descent
is unable to make progress (i.e., gets stuck in
a local minimum).
:param initial_const: The initial tradeoff-constant to use to tune the
relative importance of size of the perturbation
and confidence of classification.
If binary_search_steps is large, the initial
constant is not important. A smaller value of
this constant gives lower distortion results.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value
"""
# ignore the y and y_target argument
self.batch_size = batch_size
self.confidence = confidence
self.learning_rate = learning_rate
self.binary_search_steps = binary_search_steps
self.max_iterations = max_iterations
self.abort_early = abort_early
self.initial_const = initial_const
self.clip_min = clip_min
self.clip_max = clip_max | [
"def",
"parse_params",
"(",
"self",
",",
"y",
"=",
"None",
",",
"y_target",
"=",
"None",
",",
"batch_size",
"=",
"1",
",",
"confidence",
"=",
"0",
",",
"learning_rate",
"=",
"5e-3",
",",
"binary_search_steps",
"=",
"5",
",",
"max_iterations",
"=",
"1000",
",",
"abort_early",
"=",
"True",
",",
"initial_const",
"=",
"1e-2",
",",
"clip_min",
"=",
"0",
",",
"clip_max",
"=",
"1",
")",
":",
"# ignore the y and y_target argument",
"self",
".",
"batch_size",
"=",
"batch_size",
"self",
".",
"confidence",
"=",
"confidence",
"self",
".",
"learning_rate",
"=",
"learning_rate",
"self",
".",
"binary_search_steps",
"=",
"binary_search_steps",
"self",
".",
"max_iterations",
"=",
"max_iterations",
"self",
".",
"abort_early",
"=",
"abort_early",
"self",
".",
"initial_const",
"=",
"initial_const",
"self",
".",
"clip_min",
"=",
"clip_min",
"self",
".",
"clip_max",
"=",
"clip_max"
] | :param y: (optional) A tensor with the true labels for an untargeted
attack. If None (and y_target is None) then use the
original labels the classifier assigns.
:param y_target: (optional) A tensor with the target labels for a
targeted attack.
:param confidence: Confidence of adversarial examples: higher produces
examples with larger l2 distortion, but more
strongly classified as adversarial.
:param batch_size: Number of attacks to run simultaneously.
:param learning_rate: The learning rate for the attack algorithm.
Smaller values produce better results but are
slower to converge.
:param binary_search_steps: The number of times we perform binary
search to find the optimal tradeoff-
                            constant between norm of the perturbation
and confidence of the classification.
:param max_iterations: The maximum number of iterations. Setting this
to a larger value will produce lower distortion
results. Using only a few iterations requires
a larger learning rate, and will produce larger
distortion results.
:param abort_early: If true, allows early aborts if gradient descent
is unable to make progress (i.e., gets stuck in
a local minimum).
:param initial_const: The initial tradeoff-constant to use to tune the
relative importance of size of the perturbation
and confidence of classification.
If binary_search_steps is large, the initial
constant is not important. A smaller value of
this constant gives lower distortion results.
:param clip_min: (optional float) Minimum input component value
:param clip_max: (optional float) Maximum input component value | [
":",
"param",
"y",
":",
"(",
"optional",
")",
"A",
"tensor",
"with",
"the",
"true",
"labels",
"for",
"an",
"untargeted",
"attack",
".",
"If",
"None",
"(",
"and",
"y_target",
"is",
"None",
")",
"then",
"use",
"the",
"original",
"labels",
"the",
"classifier",
"assigns",
".",
":",
"param",
"y_target",
":",
"(",
"optional",
")",
"A",
"tensor",
"with",
"the",
"target",
"labels",
"for",
"a",
"targeted",
"attack",
".",
":",
"param",
"confidence",
":",
"Confidence",
"of",
"adversarial",
"examples",
":",
"higher",
"produces",
"examples",
"with",
"larger",
"l2",
"distortion",
"but",
"more",
"strongly",
"classified",
"as",
"adversarial",
".",
":",
"param",
"batch_size",
":",
"Number",
"of",
"attacks",
"to",
"run",
"simultaneously",
".",
":",
"param",
"learning_rate",
":",
"The",
"learning",
"rate",
"for",
"the",
"attack",
"algorithm",
".",
"Smaller",
"values",
"produce",
"better",
"results",
"but",
"are",
"slower",
"to",
"converge",
".",
":",
"param",
"binary_search_steps",
":",
"The",
"number",
"of",
"times",
"we",
"perform",
"binary",
"search",
"to",
"find",
"the",
"optimal",
"tradeoff",
"-",
"constant",
"between",
"norm",
"of",
"the",
"purturbation",
"and",
"confidence",
"of",
"the",
"classification",
".",
":",
"param",
"max_iterations",
":",
"The",
"maximum",
"number",
"of",
"iterations",
".",
"Setting",
"this",
"to",
"a",
"larger",
"value",
"will",
"produce",
"lower",
"distortion",
"results",
".",
"Using",
"only",
"a",
"few",
"iterations",
"requires",
"a",
"larger",
"learning",
"rate",
"and",
"will",
"produce",
"larger",
"distortion",
"results",
".",
":",
"param",
"abort_early",
":",
"If",
"true",
"allows",
"early",
"aborts",
"if",
"gradient",
"descent",
"is",
"unable",
"to",
"make",
"progress",
"(",
"i",
".",
"e",
".",
"gets",
"stuck",
"in",
"a",
"local",
"minimum",
")",
".",
":",
"param",
"initial_const",
":",
"The",
"initial",
"tradeoff",
"-",
"constant",
"to",
"use",
"to",
"tune",
"the",
"relative",
"importance",
"of",
"size",
"of",
"the",
"perturbation",
"and",
"confidence",
"of",
"classification",
".",
"If",
"binary_search_steps",
"is",
"large",
"the",
"initial",
"constant",
"is",
"not",
"important",
".",
"A",
"smaller",
"value",
"of",
"this",
"constant",
"gives",
"lower",
"distortion",
"results",
".",
":",
"param",
"clip_min",
":",
"(",
"optional",
"float",
")",
"Minimum",
"input",
"component",
"value",
":",
"param",
"clip_max",
":",
"(",
"optional",
"float",
")",
"Maximum",
"input",
"component",
"value"
] | 97488e215760547b81afc53f5e5de8ba7da5bd98 | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/carlini_wagner_l2.py#L87-L143 | train |
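For a targeted run, note that `y_target` is consumed by `generate` (which checks `'y_target' in kwargs` to set the targeted flag) while `parse_params` itself ignores `y` and `y_target`. A hypothetical parameter dict; the values and the one-hot tensor `target_onehot` are illustrative only.

cw_params = {
    'y_target': target_onehot,  # presence of this key makes the attack targeted
    'confidence': 10,           # larger margin, at the cost of more distortion
    'binary_search_steps': 9,
    'max_iterations': 1000,
    'initial_const': 1e-2,
    'clip_min': 0.,
    'clip_max': 1.,
}
adv_x = attack.generate(x, **cw_params)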
tensorflow/cleverhans | cleverhans/attacks/carlini_wagner_l2.py | CWL2.attack | def attack(self, imgs, targets):
"""
Perform the L_2 attack on the given instance for the given targets.
    If self.targeted is true, then the targets represent the target labels.
    If self.targeted is false, then targets are the original class labels.
"""
r = []
for i in range(0, len(imgs), self.batch_size):
_logger.debug(
("Running CWL2 attack on instance %s of %s", i, len(imgs)))
r.extend(
self.attack_batch(imgs[i:i + self.batch_size],
targets[i:i + self.batch_size]))
return np.array(r) | python | def attack(self, imgs, targets):
"""
Perform the L_2 attack on the given instance for the given targets.
    If self.targeted is true, then the targets represent the target labels.
    If self.targeted is false, then targets are the original class labels.
"""
r = []
for i in range(0, len(imgs), self.batch_size):
_logger.debug(
("Running CWL2 attack on instance %s of %s", i, len(imgs)))
r.extend(
self.attack_batch(imgs[i:i + self.batch_size],
targets[i:i + self.batch_size]))
return np.array(r) | [
"def",
"attack",
"(",
"self",
",",
"imgs",
",",
"targets",
")",
":",
"r",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"imgs",
")",
",",
"self",
".",
"batch_size",
")",
":",
"_logger",
".",
"debug",
"(",
"(",
"\"Running CWL2 attack on instance %s of %s\"",
",",
"i",
",",
"len",
"(",
"imgs",
")",
")",
")",
"r",
".",
"extend",
"(",
"self",
".",
"attack_batch",
"(",
"imgs",
"[",
"i",
":",
"i",
"+",
"self",
".",
"batch_size",
"]",
",",
"targets",
"[",
"i",
":",
"i",
"+",
"self",
".",
"batch_size",
"]",
")",
")",
"return",
"np",
".",
"array",
"(",
"r",
")"
] | Perform the L_2 attack on the given instance for the given targets.
If self.targeted is true, then the targets represent the target labels.
If self.targeted is false, then targets are the original class labels. | [
"Perform",
"the",
"L_2",
"attack",
"on",
"the",
"given",
"instance",
"for",
"the",
"given",
"targets",
"."
] | 97488e215760547b81afc53f5e5de8ba7da5bd98 | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/carlini_wagner_l2.py#L276-L291 | train |
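The slicing pattern above, isolated as a generic helper for clarity. This is a sketch: `process` stands in for `attack_batch`, and `imgs` for the input array.

import numpy as np

def in_batches(arr, batch_size):
  # Yield consecutive slices; the last one may be shorter than batch_size.
  for i in range(0, len(arr), batch_size):
    yield arr[i:i + batch_size]

results = []
for batch in in_batches(imgs, 9):
  results.extend(process(batch))
adv = np.array(results)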
tensorflow/cleverhans | cleverhans/attacks/carlini_wagner_l2.py | CWL2.attack_batch | def attack_batch(self, imgs, labs):
"""
    Run the attack on a batch of instances and labels.
"""
def compare(x, y):
if not isinstance(x, (float, int, np.int64)):
x = np.copy(x)
if self.TARGETED:
x[y] -= self.CONFIDENCE
else:
x[y] += self.CONFIDENCE
x = np.argmax(x)
if self.TARGETED:
return x == y
else:
return x != y
batch_size = self.batch_size
oimgs = np.clip(imgs, self.clip_min, self.clip_max)
# re-scale instances to be within range [0, 1]
imgs = (imgs - self.clip_min) / (self.clip_max - self.clip_min)
imgs = np.clip(imgs, 0, 1)
# now convert to [-1, 1]
imgs = (imgs * 2) - 1
# convert to tanh-space
imgs = np.arctanh(imgs * .999999)
# set the lower and upper bounds accordingly
lower_bound = np.zeros(batch_size)
CONST = np.ones(batch_size) * self.initial_const
upper_bound = np.ones(batch_size) * 1e10
# placeholders for the best l2, score, and instance attack found so far
o_bestl2 = [1e10] * batch_size
o_bestscore = [-1] * batch_size
o_bestattack = np.copy(oimgs)
for outer_step in range(self.BINARY_SEARCH_STEPS):
# completely reset adam's internal state.
self.sess.run(self.init)
batch = imgs[:batch_size]
batchlab = labs[:batch_size]
bestl2 = [1e10] * batch_size
bestscore = [-1] * batch_size
_logger.debug(" Binary search step %s of %s",
outer_step, self.BINARY_SEARCH_STEPS)
      # The last iteration (if we run many steps) repeats the search once.
if self.repeat and outer_step == self.BINARY_SEARCH_STEPS - 1:
CONST = upper_bound
# set the variables so that we don't have to send them over again
self.sess.run(
self.setup, {
self.assign_timg: batch,
self.assign_tlab: batchlab,
self.assign_const: CONST
})
prev = 1e6
for iteration in range(self.MAX_ITERATIONS):
# perform the attack
_, l, l2s, scores, nimg = self.sess.run([
self.train, self.loss, self.l2dist, self.output,
self.newimg
])
if iteration % ((self.MAX_ITERATIONS // 10) or 1) == 0:
_logger.debug((" Iteration {} of {}: loss={:.3g} " +
"l2={:.3g} f={:.3g}").format(
iteration, self.MAX_ITERATIONS, l,
np.mean(l2s), np.mean(scores)))
# check if we should abort search if we're getting nowhere.
if self.ABORT_EARLY and \
iteration % ((self.MAX_ITERATIONS // 10) or 1) == 0:
if l > prev * .9999:
msg = " Failed to make progress; stop early"
_logger.debug(msg)
break
prev = l
# adjust the best result found so far
for e, (l2, sc, ii) in enumerate(zip(l2s, scores, nimg)):
lab = np.argmax(batchlab[e])
if l2 < bestl2[e] and compare(sc, lab):
bestl2[e] = l2
bestscore[e] = np.argmax(sc)
if l2 < o_bestl2[e] and compare(sc, lab):
o_bestl2[e] = l2
o_bestscore[e] = np.argmax(sc)
o_bestattack[e] = ii
# adjust the constant as needed
for e in range(batch_size):
if compare(bestscore[e], np.argmax(batchlab[e])) and \
bestscore[e] != -1:
# success, divide const by two
upper_bound[e] = min(upper_bound[e], CONST[e])
if upper_bound[e] < 1e9:
CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
else:
# failure, either multiply by 10 if no solution found yet
# or do binary search with the known upper bound
lower_bound[e] = max(lower_bound[e], CONST[e])
if upper_bound[e] < 1e9:
CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
else:
CONST[e] *= 10
_logger.debug(" Successfully generated adversarial examples " +
"on {} of {} instances.".format(
sum(upper_bound < 1e9), batch_size))
o_bestl2 = np.array(o_bestl2)
mean = np.mean(np.sqrt(o_bestl2[o_bestl2 < 1e9]))
_logger.debug(" Mean successful distortion: {:.4g}".format(mean))
# return the best solution found
o_bestl2 = np.array(o_bestl2)
return o_bestattack | python | def attack_batch(self, imgs, labs):
"""
    Run the attack on a batch of instances and labels.
"""
def compare(x, y):
if not isinstance(x, (float, int, np.int64)):
x = np.copy(x)
if self.TARGETED:
x[y] -= self.CONFIDENCE
else:
x[y] += self.CONFIDENCE
x = np.argmax(x)
if self.TARGETED:
return x == y
else:
return x != y
batch_size = self.batch_size
oimgs = np.clip(imgs, self.clip_min, self.clip_max)
# re-scale instances to be within range [0, 1]
imgs = (imgs - self.clip_min) / (self.clip_max - self.clip_min)
imgs = np.clip(imgs, 0, 1)
# now convert to [-1, 1]
imgs = (imgs * 2) - 1
# convert to tanh-space
imgs = np.arctanh(imgs * .999999)
# set the lower and upper bounds accordingly
lower_bound = np.zeros(batch_size)
CONST = np.ones(batch_size) * self.initial_const
upper_bound = np.ones(batch_size) * 1e10
# placeholders for the best l2, score, and instance attack found so far
o_bestl2 = [1e10] * batch_size
o_bestscore = [-1] * batch_size
o_bestattack = np.copy(oimgs)
for outer_step in range(self.BINARY_SEARCH_STEPS):
# completely reset adam's internal state.
self.sess.run(self.init)
batch = imgs[:batch_size]
batchlab = labs[:batch_size]
bestl2 = [1e10] * batch_size
bestscore = [-1] * batch_size
_logger.debug(" Binary search step %s of %s",
outer_step, self.BINARY_SEARCH_STEPS)
      # The last iteration (if we run many steps) repeats the search once.
if self.repeat and outer_step == self.BINARY_SEARCH_STEPS - 1:
CONST = upper_bound
# set the variables so that we don't have to send them over again
self.sess.run(
self.setup, {
self.assign_timg: batch,
self.assign_tlab: batchlab,
self.assign_const: CONST
})
prev = 1e6
for iteration in range(self.MAX_ITERATIONS):
# perform the attack
_, l, l2s, scores, nimg = self.sess.run([
self.train, self.loss, self.l2dist, self.output,
self.newimg
])
if iteration % ((self.MAX_ITERATIONS // 10) or 1) == 0:
_logger.debug((" Iteration {} of {}: loss={:.3g} " +
"l2={:.3g} f={:.3g}").format(
iteration, self.MAX_ITERATIONS, l,
np.mean(l2s), np.mean(scores)))
# check if we should abort search if we're getting nowhere.
if self.ABORT_EARLY and \
iteration % ((self.MAX_ITERATIONS // 10) or 1) == 0:
if l > prev * .9999:
msg = " Failed to make progress; stop early"
_logger.debug(msg)
break
prev = l
# adjust the best result found so far
for e, (l2, sc, ii) in enumerate(zip(l2s, scores, nimg)):
lab = np.argmax(batchlab[e])
if l2 < bestl2[e] and compare(sc, lab):
bestl2[e] = l2
bestscore[e] = np.argmax(sc)
if l2 < o_bestl2[e] and compare(sc, lab):
o_bestl2[e] = l2
o_bestscore[e] = np.argmax(sc)
o_bestattack[e] = ii
# adjust the constant as needed
for e in range(batch_size):
if compare(bestscore[e], np.argmax(batchlab[e])) and \
bestscore[e] != -1:
# success, divide const by two
upper_bound[e] = min(upper_bound[e], CONST[e])
if upper_bound[e] < 1e9:
CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
else:
# failure, either multiply by 10 if no solution found yet
# or do binary search with the known upper bound
lower_bound[e] = max(lower_bound[e], CONST[e])
if upper_bound[e] < 1e9:
CONST[e] = (lower_bound[e] + upper_bound[e]) / 2
else:
CONST[e] *= 10
_logger.debug(" Successfully generated adversarial examples " +
"on {} of {} instances.".format(
sum(upper_bound < 1e9), batch_size))
o_bestl2 = np.array(o_bestl2)
mean = np.mean(np.sqrt(o_bestl2[o_bestl2 < 1e9]))
_logger.debug(" Mean successful distortion: {:.4g}".format(mean))
# return the best solution found
o_bestl2 = np.array(o_bestl2)
return o_bestattack | [
"def",
"attack_batch",
"(",
"self",
",",
"imgs",
",",
"labs",
")",
":",
"def",
"compare",
"(",
"x",
",",
"y",
")",
":",
"if",
"not",
"isinstance",
"(",
"x",
",",
"(",
"float",
",",
"int",
",",
"np",
".",
"int64",
")",
")",
":",
"x",
"=",
"np",
".",
"copy",
"(",
"x",
")",
"if",
"self",
".",
"TARGETED",
":",
"x",
"[",
"y",
"]",
"-=",
"self",
".",
"CONFIDENCE",
"else",
":",
"x",
"[",
"y",
"]",
"+=",
"self",
".",
"CONFIDENCE",
"x",
"=",
"np",
".",
"argmax",
"(",
"x",
")",
"if",
"self",
".",
"TARGETED",
":",
"return",
"x",
"==",
"y",
"else",
":",
"return",
"x",
"!=",
"y",
"batch_size",
"=",
"self",
".",
"batch_size",
"oimgs",
"=",
"np",
".",
"clip",
"(",
"imgs",
",",
"self",
".",
"clip_min",
",",
"self",
".",
"clip_max",
")",
"# re-scale instances to be within range [0, 1]",
"imgs",
"=",
"(",
"imgs",
"-",
"self",
".",
"clip_min",
")",
"/",
"(",
"self",
".",
"clip_max",
"-",
"self",
".",
"clip_min",
")",
"imgs",
"=",
"np",
".",
"clip",
"(",
"imgs",
",",
"0",
",",
"1",
")",
"# now convert to [-1, 1]",
"imgs",
"=",
"(",
"imgs",
"*",
"2",
")",
"-",
"1",
"# convert to tanh-space",
"imgs",
"=",
"np",
".",
"arctanh",
"(",
"imgs",
"*",
".999999",
")",
"# set the lower and upper bounds accordingly",
"lower_bound",
"=",
"np",
".",
"zeros",
"(",
"batch_size",
")",
"CONST",
"=",
"np",
".",
"ones",
"(",
"batch_size",
")",
"*",
"self",
".",
"initial_const",
"upper_bound",
"=",
"np",
".",
"ones",
"(",
"batch_size",
")",
"*",
"1e10",
"# placeholders for the best l2, score, and instance attack found so far",
"o_bestl2",
"=",
"[",
"1e10",
"]",
"*",
"batch_size",
"o_bestscore",
"=",
"[",
"-",
"1",
"]",
"*",
"batch_size",
"o_bestattack",
"=",
"np",
".",
"copy",
"(",
"oimgs",
")",
"for",
"outer_step",
"in",
"range",
"(",
"self",
".",
"BINARY_SEARCH_STEPS",
")",
":",
"# completely reset adam's internal state.",
"self",
".",
"sess",
".",
"run",
"(",
"self",
".",
"init",
")",
"batch",
"=",
"imgs",
"[",
":",
"batch_size",
"]",
"batchlab",
"=",
"labs",
"[",
":",
"batch_size",
"]",
"bestl2",
"=",
"[",
"1e10",
"]",
"*",
"batch_size",
"bestscore",
"=",
"[",
"-",
"1",
"]",
"*",
"batch_size",
"_logger",
".",
"debug",
"(",
"\" Binary search step %s of %s\"",
",",
"outer_step",
",",
"self",
".",
"BINARY_SEARCH_STEPS",
")",
"# The last iteration (if we run many steps) repeat the search once.",
"if",
"self",
".",
"repeat",
"and",
"outer_step",
"==",
"self",
".",
"BINARY_SEARCH_STEPS",
"-",
"1",
":",
"CONST",
"=",
"upper_bound",
"# set the variables so that we don't have to send them over again",
"self",
".",
"sess",
".",
"run",
"(",
"self",
".",
"setup",
",",
"{",
"self",
".",
"assign_timg",
":",
"batch",
",",
"self",
".",
"assign_tlab",
":",
"batchlab",
",",
"self",
".",
"assign_const",
":",
"CONST",
"}",
")",
"prev",
"=",
"1e6",
"for",
"iteration",
"in",
"range",
"(",
"self",
".",
"MAX_ITERATIONS",
")",
":",
"# perform the attack",
"_",
",",
"l",
",",
"l2s",
",",
"scores",
",",
"nimg",
"=",
"self",
".",
"sess",
".",
"run",
"(",
"[",
"self",
".",
"train",
",",
"self",
".",
"loss",
",",
"self",
".",
"l2dist",
",",
"self",
".",
"output",
",",
"self",
".",
"newimg",
"]",
")",
"if",
"iteration",
"%",
"(",
"(",
"self",
".",
"MAX_ITERATIONS",
"//",
"10",
")",
"or",
"1",
")",
"==",
"0",
":",
"_logger",
".",
"debug",
"(",
"(",
"\" Iteration {} of {}: loss={:.3g} \"",
"+",
"\"l2={:.3g} f={:.3g}\"",
")",
".",
"format",
"(",
"iteration",
",",
"self",
".",
"MAX_ITERATIONS",
",",
"l",
",",
"np",
".",
"mean",
"(",
"l2s",
")",
",",
"np",
".",
"mean",
"(",
"scores",
")",
")",
")",
"# check if we should abort search if we're getting nowhere.",
"if",
"self",
".",
"ABORT_EARLY",
"and",
"iteration",
"%",
"(",
"(",
"self",
".",
"MAX_ITERATIONS",
"//",
"10",
")",
"or",
"1",
")",
"==",
"0",
":",
"if",
"l",
">",
"prev",
"*",
".9999",
":",
"msg",
"=",
"\" Failed to make progress; stop early\"",
"_logger",
".",
"debug",
"(",
"msg",
")",
"break",
"prev",
"=",
"l",
"# adjust the best result found so far",
"for",
"e",
",",
"(",
"l2",
",",
"sc",
",",
"ii",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"l2s",
",",
"scores",
",",
"nimg",
")",
")",
":",
"lab",
"=",
"np",
".",
"argmax",
"(",
"batchlab",
"[",
"e",
"]",
")",
"if",
"l2",
"<",
"bestl2",
"[",
"e",
"]",
"and",
"compare",
"(",
"sc",
",",
"lab",
")",
":",
"bestl2",
"[",
"e",
"]",
"=",
"l2",
"bestscore",
"[",
"e",
"]",
"=",
"np",
".",
"argmax",
"(",
"sc",
")",
"if",
"l2",
"<",
"o_bestl2",
"[",
"e",
"]",
"and",
"compare",
"(",
"sc",
",",
"lab",
")",
":",
"o_bestl2",
"[",
"e",
"]",
"=",
"l2",
"o_bestscore",
"[",
"e",
"]",
"=",
"np",
".",
"argmax",
"(",
"sc",
")",
"o_bestattack",
"[",
"e",
"]",
"=",
"ii",
"# adjust the constant as needed",
"for",
"e",
"in",
"range",
"(",
"batch_size",
")",
":",
"if",
"compare",
"(",
"bestscore",
"[",
"e",
"]",
",",
"np",
".",
"argmax",
"(",
"batchlab",
"[",
"e",
"]",
")",
")",
"and",
"bestscore",
"[",
"e",
"]",
"!=",
"-",
"1",
":",
"# success, divide const by two",
"upper_bound",
"[",
"e",
"]",
"=",
"min",
"(",
"upper_bound",
"[",
"e",
"]",
",",
"CONST",
"[",
"e",
"]",
")",
"if",
"upper_bound",
"[",
"e",
"]",
"<",
"1e9",
":",
"CONST",
"[",
"e",
"]",
"=",
"(",
"lower_bound",
"[",
"e",
"]",
"+",
"upper_bound",
"[",
"e",
"]",
")",
"/",
"2",
"else",
":",
"# failure, either multiply by 10 if no solution found yet",
"# or do binary search with the known upper bound",
"lower_bound",
"[",
"e",
"]",
"=",
"max",
"(",
"lower_bound",
"[",
"e",
"]",
",",
"CONST",
"[",
"e",
"]",
")",
"if",
"upper_bound",
"[",
"e",
"]",
"<",
"1e9",
":",
"CONST",
"[",
"e",
"]",
"=",
"(",
"lower_bound",
"[",
"e",
"]",
"+",
"upper_bound",
"[",
"e",
"]",
")",
"/",
"2",
"else",
":",
"CONST",
"[",
"e",
"]",
"*=",
"10",
"_logger",
".",
"debug",
"(",
"\" Successfully generated adversarial examples \"",
"+",
"\"on {} of {} instances.\"",
".",
"format",
"(",
"sum",
"(",
"upper_bound",
"<",
"1e9",
")",
",",
"batch_size",
")",
")",
"o_bestl2",
"=",
"np",
".",
"array",
"(",
"o_bestl2",
")",
"mean",
"=",
"np",
".",
"mean",
"(",
"np",
".",
"sqrt",
"(",
"o_bestl2",
"[",
"o_bestl2",
"<",
"1e9",
"]",
")",
")",
"_logger",
".",
"debug",
"(",
"\" Mean successful distortion: {:.4g}\"",
".",
"format",
"(",
"mean",
")",
")",
"# return the best solution found",
"o_bestl2",
"=",
"np",
".",
"array",
"(",
"o_bestl2",
")",
"return",
"o_bestattack"
] | Run the attack on a batch of instances and labels. | [
"Run",
"the",
"attack",
"on",
"a",
"batch",
"of",
"instance",
"and",
"labels",
"."
] | 97488e215760547b81afc53f5e5de8ba7da5bd98 | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/carlini_wagner_l2.py#L293-L415 | train |
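The per-example update of the tradeoff constant above, pulled out as a standalone sketch. Here `success` stands for `compare(bestscore[e], np.argmax(batchlab[e]))` holding with `bestscore[e] != -1`.

def update_const(success, lower, upper, const):
  if success:
    upper = min(upper, const)      # const worked, so it is an upper bound
    if upper < 1e9:
      const = (lower + upper) / 2  # bisect toward a smaller constant
  else:
    lower = max(lower, const)      # const failed, so it is a lower bound
    if upper < 1e9:
      const = (lower + upper) / 2  # bisect within the known bracket
    else:
      const *= 10                  # no success seen yet: grow geometrically
  return lower, upper, const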
tensorflow/cleverhans | examples/RL-attack/train.py | maybe_load_model | def maybe_load_model(savedir, container):
"""Load model if present at the specified path."""
if savedir is None:
return
state_path = os.path.join(os.path.join(savedir, 'training_state.pkl.zip'))
if container is not None:
logger.log("Attempting to download model from Azure")
found_model = container.get(savedir, 'training_state.pkl.zip')
else:
found_model = os.path.exists(state_path)
if found_model:
state = pickle_load(state_path, compression=True)
model_dir = "model-{}".format(state["num_iters"])
if container is not None:
container.get(savedir, model_dir)
U.load_state(os.path.join(savedir, model_dir, "saved"))
logger.log("Loaded models checkpoint at {} iterations".format(
state["num_iters"]))
return state | python | def maybe_load_model(savedir, container):
"""Load model if present at the specified path."""
if savedir is None:
return
state_path = os.path.join(os.path.join(savedir, 'training_state.pkl.zip'))
if container is not None:
logger.log("Attempting to download model from Azure")
found_model = container.get(savedir, 'training_state.pkl.zip')
else:
found_model = os.path.exists(state_path)
if found_model:
state = pickle_load(state_path, compression=True)
model_dir = "model-{}".format(state["num_iters"])
if container is not None:
container.get(savedir, model_dir)
U.load_state(os.path.join(savedir, model_dir, "saved"))
logger.log("Loaded models checkpoint at {} iterations".format(
state["num_iters"]))
return state | [
"def",
"maybe_load_model",
"(",
"savedir",
",",
"container",
")",
":",
"if",
"savedir",
"is",
"None",
":",
"return",
"state_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"join",
"(",
"savedir",
",",
"'training_state.pkl.zip'",
")",
")",
"if",
"container",
"is",
"not",
"None",
":",
"logger",
".",
"log",
"(",
"\"Attempting to download model from Azure\"",
")",
"found_model",
"=",
"container",
".",
"get",
"(",
"savedir",
",",
"'training_state.pkl.zip'",
")",
"else",
":",
"found_model",
"=",
"os",
".",
"path",
".",
"exists",
"(",
"state_path",
")",
"if",
"found_model",
":",
"state",
"=",
"pickle_load",
"(",
"state_path",
",",
"compression",
"=",
"True",
")",
"model_dir",
"=",
"\"model-{}\"",
".",
"format",
"(",
"state",
"[",
"\"num_iters\"",
"]",
")",
"if",
"container",
"is",
"not",
"None",
":",
"container",
".",
"get",
"(",
"savedir",
",",
"model_dir",
")",
"U",
".",
"load_state",
"(",
"os",
".",
"path",
".",
"join",
"(",
"savedir",
",",
"model_dir",
",",
"\"saved\"",
")",
")",
"logger",
".",
"log",
"(",
"\"Loaded models checkpoint at {} iterations\"",
".",
"format",
"(",
"state",
"[",
"\"num_iters\"",
"]",
")",
")",
"return",
"state"
] | Load model if present at the specified path. | [
"Load",
"model",
"if",
"present",
"at",
"the",
"specified",
"path",
"."
] | 97488e215760547b81afc53f5e5de8ba7da5bd98 | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/RL-attack/train.py#L130-L149 | train |
tensorflow/cleverhans | cleverhans_tutorials/__init__.py | check_installation | def check_installation(cur_file):
"""Warn user if running cleverhans from a different directory than tutorial."""
cur_dir = os.path.split(os.path.dirname(os.path.abspath(cur_file)))[0]
ch_dir = os.path.split(cleverhans.__path__[0])[0]
if cur_dir != ch_dir:
warnings.warn("It appears that you have at least two versions of "
"cleverhans installed, one at %s and one at"
" %s. You are running the tutorial script from the "
"former but python imported the library module from the "
"latter. This may cause errors, for example if the tutorial"
" version is newer than the library version and attempts to"
" call new features." % (cur_dir, ch_dir)) | python | def check_installation(cur_file):
"""Warn user if running cleverhans from a different directory than tutorial."""
cur_dir = os.path.split(os.path.dirname(os.path.abspath(cur_file)))[0]
ch_dir = os.path.split(cleverhans.__path__[0])[0]
if cur_dir != ch_dir:
warnings.warn("It appears that you have at least two versions of "
"cleverhans installed, one at %s and one at"
" %s. You are running the tutorial script from the "
"former but python imported the library module from the "
"latter. This may cause errors, for example if the tutorial"
" version is newer than the library version and attempts to"
" call new features." % (cur_dir, ch_dir)) | [
"def",
"check_installation",
"(",
"cur_file",
")",
":",
"cur_dir",
"=",
"os",
".",
"path",
".",
"split",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"cur_file",
")",
")",
")",
"[",
"0",
"]",
"ch_dir",
"=",
"os",
".",
"path",
".",
"split",
"(",
"cleverhans",
".",
"__path__",
"[",
"0",
"]",
")",
"[",
"0",
"]",
"if",
"cur_dir",
"!=",
"ch_dir",
":",
"warnings",
".",
"warn",
"(",
"\"It appears that you have at least two versions of \"",
"\"cleverhans installed, one at %s and one at\"",
"\" %s. You are running the tutorial script from the \"",
"\"former but python imported the library module from the \"",
"\"latter. This may cause errors, for example if the tutorial\"",
"\" version is newer than the library version and attempts to\"",
"\" call new features.\"",
"%",
"(",
"cur_dir",
",",
"ch_dir",
")",
")"
] | Warn user if running cleverhans from a different directory than tutorial. | [
"Warn",
"user",
"if",
"running",
"cleverhans",
"from",
"a",
"different",
"directory",
"than",
"tutorial",
"."
] | 97488e215760547b81afc53f5e5de8ba7da5bd98 | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans_tutorials/__init__.py#L13-L24 | train |
tensorflow/cleverhans | examples/nips17_adversarial_competition/dataset/download_images.py | parse_args | def parse_args():
"""Parses command line arguments."""
parser = argparse.ArgumentParser(
description='Tool to download dataset images.')
parser.add_argument('--input_file', required=True,
help='Location of dataset.csv')
parser.add_argument('--output_dir', required=True,
help='Output path to download images')
parser.add_argument('--threads', default=multiprocessing.cpu_count() + 1,
help='Number of threads to use')
args = parser.parse_args()
return args.input_file, args.output_dir, int(args.threads) | python | def parse_args():
"""Parses command line arguments."""
parser = argparse.ArgumentParser(
description='Tool to download dataset images.')
parser.add_argument('--input_file', required=True,
help='Location of dataset.csv')
parser.add_argument('--output_dir', required=True,
help='Output path to download images')
parser.add_argument('--threads', default=multiprocessing.cpu_count() + 1,
help='Number of threads to use')
args = parser.parse_args()
return args.input_file, args.output_dir, int(args.threads) | [
"def",
"parse_args",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Tool to download dataset images.'",
")",
"parser",
".",
"add_argument",
"(",
"'--input_file'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"'Location of dataset.csv'",
")",
"parser",
".",
"add_argument",
"(",
"'--output_dir'",
",",
"required",
"=",
"True",
",",
"help",
"=",
"'Output path to download images'",
")",
"parser",
".",
"add_argument",
"(",
"'--threads'",
",",
"default",
"=",
"multiprocessing",
".",
"cpu_count",
"(",
")",
"+",
"1",
",",
"help",
"=",
"'Number of threads to use'",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"return",
"args",
".",
"input_file",
",",
"args",
".",
"output_dir",
",",
"int",
"(",
"args",
".",
"threads",
")"
] | Parses command line arguments. | [
"Parses",
"command",
"line",
"arguments",
"."
] | 97488e215760547b81afc53f5e5de8ba7da5bd98 | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/dataset/download_images.py#L43-L54 | train |
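Given these flags, a typical invocation looks like `python download_images.py --input_file dataset.csv --output_dir images --threads 8`; the file and directory names are placeholders, and `--threads` defaults to the CPU count plus one.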
tensorflow/cleverhans | examples/nips17_adversarial_competition/dataset/download_images.py | get_image | def get_image(row, output_dir):
"""Downloads the image that corresponds to the given row.
Prints a notification if the download fails."""
if not download_image(image_id=row[0],
url=row[1],
x1=float(row[2]),
y1=float(row[3]),
x2=float(row[4]),
y2=float(row[5]),
output_dir=output_dir):
print("Download failed: " + str(row[0])) | python | def get_image(row, output_dir):
"""Downloads the image that corresponds to the given row.
Prints a notification if the download fails."""
if not download_image(image_id=row[0],
url=row[1],
x1=float(row[2]),
y1=float(row[3]),
x2=float(row[4]),
y2=float(row[5]),
output_dir=output_dir):
print("Download failed: " + str(row[0])) | [
"def",
"get_image",
"(",
"row",
",",
"output_dir",
")",
":",
"if",
"not",
"download_image",
"(",
"image_id",
"=",
"row",
"[",
"0",
"]",
",",
"url",
"=",
"row",
"[",
"1",
"]",
",",
"x1",
"=",
"float",
"(",
"row",
"[",
"2",
"]",
")",
",",
"y1",
"=",
"float",
"(",
"row",
"[",
"3",
"]",
")",
",",
"x2",
"=",
"float",
"(",
"row",
"[",
"4",
"]",
")",
",",
"y2",
"=",
"float",
"(",
"row",
"[",
"5",
"]",
")",
",",
"output_dir",
"=",
"output_dir",
")",
":",
"print",
"(",
"\"Download failed: \"",
"+",
"str",
"(",
"row",
"[",
"0",
"]",
")",
")"
] | Downloads the image that corresponds to the given row.
Prints a notification if the download fails. | [
"Downloads",
"the",
"image",
"that",
"corresponds",
"to",
"the",
"given",
"row",
".",
"Prints",
"a",
"notification",
"if",
"the",
"download",
"fails",
"."
] | 97488e215760547b81afc53f5e5de8ba7da5bd98 | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/dataset/download_images.py#L57-L67 | train |
tensorflow/cleverhans | examples/nips17_adversarial_competition/dataset/download_images.py | download_image | def download_image(image_id, url, x1, y1, x2, y2, output_dir):
"""Downloads one image, crops it, resizes it and saves it locally."""
output_filename = os.path.join(output_dir, image_id + '.png')
if os.path.exists(output_filename):
# Don't download image if it's already there
return True
try:
# Download image
url_file = urlopen(url)
if url_file.getcode() != 200:
return False
image_buffer = url_file.read()
# Crop, resize and save image
image = Image.open(BytesIO(image_buffer)).convert('RGB')
w = image.size[0]
h = image.size[1]
image = image.crop((int(x1 * w), int(y1 * h), int(x2 * w),
int(y2 * h)))
image = image.resize((299, 299), resample=Image.ANTIALIAS)
image.save(output_filename)
except IOError:
return False
return True | python | def download_image(image_id, url, x1, y1, x2, y2, output_dir):
"""Downloads one image, crops it, resizes it and saves it locally."""
output_filename = os.path.join(output_dir, image_id + '.png')
if os.path.exists(output_filename):
# Don't download image if it's already there
return True
try:
# Download image
url_file = urlopen(url)
if url_file.getcode() != 200:
return False
image_buffer = url_file.read()
# Crop, resize and save image
image = Image.open(BytesIO(image_buffer)).convert('RGB')
w = image.size[0]
h = image.size[1]
image = image.crop((int(x1 * w), int(y1 * h), int(x2 * w),
int(y2 * h)))
image = image.resize((299, 299), resample=Image.ANTIALIAS)
image.save(output_filename)
except IOError:
return False
return True | [
"def",
"download_image",
"(",
"image_id",
",",
"url",
",",
"x1",
",",
"y1",
",",
"x2",
",",
"y2",
",",
"output_dir",
")",
":",
"output_filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"output_dir",
",",
"image_id",
"+",
"'.png'",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"output_filename",
")",
":",
"# Don't download image if it's already there",
"return",
"True",
"try",
":",
"# Download image",
"url_file",
"=",
"urlopen",
"(",
"url",
")",
"if",
"url_file",
".",
"getcode",
"(",
")",
"!=",
"200",
":",
"return",
"False",
"image_buffer",
"=",
"url_file",
".",
"read",
"(",
")",
"# Crop, resize and save image",
"image",
"=",
"Image",
".",
"open",
"(",
"BytesIO",
"(",
"image_buffer",
")",
")",
".",
"convert",
"(",
"'RGB'",
")",
"w",
"=",
"image",
".",
"size",
"[",
"0",
"]",
"h",
"=",
"image",
".",
"size",
"[",
"1",
"]",
"image",
"=",
"image",
".",
"crop",
"(",
"(",
"int",
"(",
"x1",
"*",
"w",
")",
",",
"int",
"(",
"y1",
"*",
"h",
")",
",",
"int",
"(",
"x2",
"*",
"w",
")",
",",
"int",
"(",
"y2",
"*",
"h",
")",
")",
")",
"image",
"=",
"image",
".",
"resize",
"(",
"(",
"299",
",",
"299",
")",
",",
"resample",
"=",
"Image",
".",
"ANTIALIAS",
")",
"image",
".",
"save",
"(",
"output_filename",
")",
"except",
"IOError",
":",
"return",
"False",
"return",
"True"
] | Downloads one image, crops it, resizes it and saves it locally. | [
"Downloads",
"one",
"image",
"crops",
"it",
"resizes",
"it",
"and",
"saves",
"it",
"locally",
"."
] | 97488e215760547b81afc53f5e5de8ba7da5bd98 | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/dataset/download_images.py#L70-L92 | train |
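The coordinate handling above in isolation: the dataset stores the bounding box as fractions of the image width and height, while Pillow's `crop` expects a pixel `(left, upper, right, lower)` box. A standalone sketch of the same conversion:

from PIL import Image

def crop_normalized(image, x1, y1, x2, y2):
  # (x1, y1, x2, y2) are fractions in [0, 1]; scale to pixel coordinates.
  w, h = image.size
  return image.crop((int(x1 * w), int(y1 * h), int(x2 * w), int(y2 * h)))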
tensorflow/cleverhans | examples/robust_vision_benchmark/cleverhans_attack_example/utils.py | py_func_grad | def py_func_grad(func, inp, Tout, stateful=True, name=None, grad=None):
"""Custom py_func with gradient support
"""
# Need to generate a unique name to avoid duplicates:
rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
tf.RegisterGradient(rnd_name)(grad)
g = tf.get_default_graph()
with g.gradient_override_map({"PyFunc": rnd_name,
"PyFuncStateless": rnd_name}):
return tf.py_func(func, inp, Tout, stateful=stateful, name=name) | python | def py_func_grad(func, inp, Tout, stateful=True, name=None, grad=None):
"""Custom py_func with gradient support
"""
# Need to generate a unique name to avoid duplicates:
rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
tf.RegisterGradient(rnd_name)(grad)
g = tf.get_default_graph()
with g.gradient_override_map({"PyFunc": rnd_name,
"PyFuncStateless": rnd_name}):
return tf.py_func(func, inp, Tout, stateful=stateful, name=name) | [
"def",
"py_func_grad",
"(",
"func",
",",
"inp",
",",
"Tout",
",",
"stateful",
"=",
"True",
",",
"name",
"=",
"None",
",",
"grad",
"=",
"None",
")",
":",
"# Need to generate a unique name to avoid duplicates:",
"rnd_name",
"=",
"'PyFuncGrad'",
"+",
"str",
"(",
"np",
".",
"random",
".",
"randint",
"(",
"0",
",",
"1E+8",
")",
")",
"tf",
".",
"RegisterGradient",
"(",
"rnd_name",
")",
"(",
"grad",
")",
"g",
"=",
"tf",
".",
"get_default_graph",
"(",
")",
"with",
"g",
".",
"gradient_override_map",
"(",
"{",
"\"PyFunc\"",
":",
"rnd_name",
",",
"\"PyFuncStateless\"",
":",
"rnd_name",
"}",
")",
":",
"return",
"tf",
".",
"py_func",
"(",
"func",
",",
"inp",
",",
"Tout",
",",
"stateful",
"=",
"stateful",
",",
"name",
"=",
"name",
")"
] | Custom py_func with gradient support | [
"Custom",
"py_func",
"with",
"gradient",
"support"
] | 97488e215760547b81afc53f5e5de8ba7da5bd98 | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/robust_vision_benchmark/cleverhans_attack_example/utils.py#L25-L36 | train |
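A sketch of what py_func_grad enables under TensorFlow 1.x graph mode: a NumPy forward pass that still yields gradients because a handwritten gradient is registered. The squaring function is purely illustrative:
import numpy as np
import tensorflow as tf

def _forward(x):
    return np.square(x).astype(np.float32)  # NumPy-side forward pass: y = x^2

def _grad(op, grad):
    return grad * 2.0 * op.inputs[0]  # dy/dx = 2x, built from TF ops

x = tf.placeholder(tf.float32, shape=[None])
y = py_func_grad(_forward, [x], [tf.float32], name='square', grad=_grad)[0]
dy_dx = tf.gradients(y, x)[0]  # works because the gradient override points at _grad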
tensorflow/cleverhans | cleverhans_tutorials/tutorial_models_tfe.py | ModelBasicCNNTFE.fprop | def fprop(self, x):
"""
Forward propagation through the network
:return: dictionary with layer names mapping to activation values.
"""
# Feed forward through the network layers
for layer_name in self.layer_names:
if layer_name == 'input':
prev_layer_act = x
continue
else:
self.layer_acts[layer_name] = self.layers[layer_name](
prev_layer_act)
prev_layer_act = self.layer_acts[layer_name]
# Adding softmax values to list of activations.
self.layer_acts['probs'] = tf.nn.softmax(
logits=self.layer_acts['logits'])
return self.layer_acts | python | def fprop(self, x):
"""
Forward propagation through the network
:return: dictionary with layer names mapping to activation values.
"""
# Feed forward through the network layers
for layer_name in self.layer_names:
if layer_name == 'input':
prev_layer_act = x
continue
else:
self.layer_acts[layer_name] = self.layers[layer_name](
prev_layer_act)
prev_layer_act = self.layer_acts[layer_name]
# Adding softmax values to list of activations.
self.layer_acts['probs'] = tf.nn.softmax(
logits=self.layer_acts['logits'])
return self.layer_acts | [
"def",
"fprop",
"(",
"self",
",",
"x",
")",
":",
"# Feed forward through the network layers",
"for",
"layer_name",
"in",
"self",
".",
"layer_names",
":",
"if",
"layer_name",
"==",
"'input'",
":",
"prev_layer_act",
"=",
"x",
"continue",
"else",
":",
"self",
".",
"layer_acts",
"[",
"layer_name",
"]",
"=",
"self",
".",
"layers",
"[",
"layer_name",
"]",
"(",
"prev_layer_act",
")",
"prev_layer_act",
"=",
"self",
".",
"layer_acts",
"[",
"layer_name",
"]",
"# Adding softmax values to list of activations.",
"self",
".",
"layer_acts",
"[",
"'probs'",
"]",
"=",
"tf",
".",
"nn",
".",
"softmax",
"(",
"logits",
"=",
"self",
".",
"layer_acts",
"[",
"'logits'",
"]",
")",
"return",
"self",
".",
"layer_acts"
] | Forward propagation through the network
:return: dictionary with layer names mapping to activation values. | [
"Forward",
"propagation",
"throught",
"the",
"network",
":",
"return",
":",
"dictionary",
"with",
"layer",
"names",
"mapping",
"to",
"activation",
"values",
"."
] | 97488e215760547b81afc53f5e5de8ba7da5bd98 | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans_tutorials/tutorial_models_tfe.py#L54-L73 | train |
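A usage sketch for the fprop method above; eager execution and the MNIST-shaped constructor are assumptions for illustration:
import tensorflow as tf
tf.enable_eager_execution()  # TF 1.x-style eager setup assumed

model = ModelBasicCNNTFE(nb_classes=10)           # hypothetical constructor arguments
x = tf.random_uniform((1, 28, 28, 1))             # one MNIST-shaped input
acts = model.fprop(x)
print(acts['logits'].shape, acts['probs'].shape)  # per-layer activations are also in acts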
tensorflow/cleverhans | cleverhans_tutorials/tutorial_models_tfe.py | ModelBasicCNNTFE.get_layer_params | def get_layer_params(self, layer_name):
"""
Provides access to the parameters of the given layer.
Works around the non-availability of graph collections in
eager mode.
:layer_name: name of the layer for which parameters are
required, must be one of the strings in the
list layer_names
:return: list of parameters corresponding to the given
layer.
"""
assert layer_name in self.layer_names
out = []
layer = self.layers[layer_name]
layer_variables = layer.variables
# For each parameter in a layer.
for param in layer_variables:
if param not in out:
out.append(param)
return out | python | def get_layer_params(self, layer_name):
"""
Provides access to the parameters of the given layer.
Works around the non-availability of graph collections in
eager mode.
:layer_name: name of the layer for which parameters are
required, must be one of the strings in the
list layer_names
:return: list of parameters corresponding to the given
layer.
"""
assert layer_name in self.layer_names
out = []
layer = self.layers[layer_name]
layer_variables = layer.variables
# For each parameter in a layer.
for param in layer_variables:
if param not in out:
out.append(param)
return out | [
"def",
"get_layer_params",
"(",
"self",
",",
"layer_name",
")",
":",
"assert",
"layer_name",
"in",
"self",
".",
"layer_names",
"out",
"=",
"[",
"]",
"layer",
"=",
"self",
".",
"layers",
"[",
"layer_name",
"]",
"layer_variables",
"=",
"layer",
".",
"variables",
"# For each parameter in a layer.",
"for",
"param",
"in",
"layer_variables",
":",
"if",
"param",
"not",
"in",
"out",
":",
"out",
".",
"append",
"(",
"param",
")",
"return",
"out"
] | Provides access to the parameters of the given layer.
Works around the non-availability of graph collections in
eager mode.
:layer_name: name of the layer for which parameters are
required, must be one of the strings in the
list layer_names
:return: list of parameters corresponding to the given
layer. | [
"Provides",
"access",
"to",
"the",
"parameters",
"of",
"the",
"given",
"layer",
".",
"Works",
"arounds",
"the",
"non",
"-",
"availability",
"of",
"graph",
"collections",
"in",
"eager",
"mode",
".",
":",
"layer_name",
":",
"name",
"of",
"the",
"layer",
"for",
"which",
"parameters",
"are",
"required",
"must",
"be",
"one",
"of",
"the",
"string",
"in",
"the",
"list",
"layer_names",
":",
"return",
":",
"list",
"of",
"parameters",
"corresponding",
"to",
"the",
"given",
"layer",
"."
] | 97488e215760547b81afc53f5e5de8ba7da5bd98 | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans_tutorials/tutorial_models_tfe.py#L75-L96 | train |
tensorflow/cleverhans | cleverhans_tutorials/tutorial_models_tfe.py | ModelBasicCNNTFE.get_params | def get_params(self):
"""
Provides access to the model's parameters.
Works around the non-availability of graph collections in
eager mode.
:return: A list of all Variables defining the model parameters.
"""
assert tf.executing_eagerly()
out = []
# Collecting params from each layer.
for layer_name in self.layers:
out += self.get_layer_params(layer_name)
return out | python | def get_params(self):
"""
Provides access to the model's parameters.
Works around the non-availability of graph collections in
eager mode.
:return: A list of all Variables defining the model parameters.
"""
assert tf.executing_eagerly()
out = []
# Collecting params from each layer.
for layer_name in self.layers:
out += self.get_layer_params(layer_name)
return out | [
"def",
"get_params",
"(",
"self",
")",
":",
"assert",
"tf",
".",
"executing_eagerly",
"(",
")",
"out",
"=",
"[",
"]",
"# Collecting params from each layer.",
"for",
"layer_name",
"in",
"self",
".",
"layers",
":",
"out",
"+=",
"self",
".",
"get_layer_params",
"(",
"layer_name",
")",
"return",
"out"
] | Provides access to the model's parameters.
Works around the non-availability of graph collections in
eager mode.
:return: A list of all Variables defining the model parameters. | [
"Provides",
"access",
"to",
"the",
"model",
"s",
"parameters",
".",
"Works",
"arounds",
"the",
"non",
"-",
"availability",
"of",
"graph",
"collections",
"in",
"eager",
"mode",
".",
":",
"return",
":",
"A",
"list",
"of",
"all",
"Variables",
"defining",
"the",
"model",
"parameters",
"."
] | 97488e215760547b81afc53f5e5de8ba7da5bd98 | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans_tutorials/tutorial_models_tfe.py#L98-L111 | train |
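The usual consumer of get_params() in eager mode is an optimizer; a minimal sketch, with the model, batch tensors, and loss helper assumed:
import tensorflow as tf

optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
with tf.GradientTape() as tape:
    loss = compute_loss(model, x_batch, y_batch)  # assumed loss helper
params = model.get_params()                       # variables collected layer by layer
grads = tape.gradient(loss, params)
optimizer.apply_gradients(zip(grads, params))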
tensorflow/cleverhans | cleverhans/plot/pyplot_image.py | pair_visual | def pair_visual(original, adversarial, figure=None):
"""
This function displays two images: the original and the adversarial sample
:param original: the original input
:param adversarial: the input after perturbations have been applied
:param figure: if we've already displayed images, use the same plot
:return: the matplot figure to reuse for future samples
"""
import matplotlib.pyplot as plt
# Squeeze the image to remove single-dimensional entries from array shape
original = np.squeeze(original)
adversarial = np.squeeze(adversarial)
# Ensure our inputs are of proper shape
assert(len(original.shape) == 2 or len(original.shape) == 3)
# To avoid creating figures per input sample, reuse the sample plot
if figure is None:
plt.ion()
figure = plt.figure()
figure.canvas.set_window_title('Cleverhans: Pair Visualization')
# Add the images to the plot
perturbations = adversarial - original
for index, image in enumerate((original, perturbations, adversarial)):
figure.add_subplot(1, 3, index + 1)
plt.axis('off')
# If the image is 2D, then we have 1 color channel
if len(image.shape) == 2:
plt.imshow(image, cmap='gray')
else:
plt.imshow(image)
# Give the plot some time to update
plt.pause(0.01)
# Draw the plot and return
plt.show()
return figure | python | def pair_visual(original, adversarial, figure=None):
"""
This function displays two images: the original and the adversarial sample
:param original: the original input
:param adversarial: the input after perturbations have been applied
:param figure: if we've already displayed images, use the same plot
:return: the matplot figure to reuse for future samples
"""
import matplotlib.pyplot as plt
# Squeeze the image to remove single-dimensional entries from array shape
original = np.squeeze(original)
adversarial = np.squeeze(adversarial)
# Ensure our inputs are of proper shape
assert(len(original.shape) == 2 or len(original.shape) == 3)
# To avoid creating figures per input sample, reuse the sample plot
if figure is None:
plt.ion()
figure = plt.figure()
figure.canvas.set_window_title('Cleverhans: Pair Visualization')
# Add the images to the plot
perturbations = adversarial - original
for index, image in enumerate((original, perturbations, adversarial)):
figure.add_subplot(1, 3, index + 1)
plt.axis('off')
# If the image is 2D, then we have 1 color channel
if len(image.shape) == 2:
plt.imshow(image, cmap='gray')
else:
plt.imshow(image)
# Give the plot some time to update
plt.pause(0.01)
# Draw the plot and return
plt.show()
return figure | [
"def",
"pair_visual",
"(",
"original",
",",
"adversarial",
",",
"figure",
"=",
"None",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"# Squeeze the image to remove single-dimensional entries from array shape",
"original",
"=",
"np",
".",
"squeeze",
"(",
"original",
")",
"adversarial",
"=",
"np",
".",
"squeeze",
"(",
"adversarial",
")",
"# Ensure our inputs are of proper shape",
"assert",
"(",
"len",
"(",
"original",
".",
"shape",
")",
"==",
"2",
"or",
"len",
"(",
"original",
".",
"shape",
")",
"==",
"3",
")",
"# To avoid creating figures per input sample, reuse the sample plot",
"if",
"figure",
"is",
"None",
":",
"plt",
".",
"ion",
"(",
")",
"figure",
"=",
"plt",
".",
"figure",
"(",
")",
"figure",
".",
"canvas",
".",
"set_window_title",
"(",
"'Cleverhans: Pair Visualization'",
")",
"# Add the images to the plot",
"perturbations",
"=",
"adversarial",
"-",
"original",
"for",
"index",
",",
"image",
"in",
"enumerate",
"(",
"(",
"original",
",",
"perturbations",
",",
"adversarial",
")",
")",
":",
"figure",
".",
"add_subplot",
"(",
"1",
",",
"3",
",",
"index",
"+",
"1",
")",
"plt",
".",
"axis",
"(",
"'off'",
")",
"# If the image is 2D, then we have 1 color channel",
"if",
"len",
"(",
"image",
".",
"shape",
")",
"==",
"2",
":",
"plt",
".",
"imshow",
"(",
"image",
",",
"cmap",
"=",
"'gray'",
")",
"else",
":",
"plt",
".",
"imshow",
"(",
"image",
")",
"# Give the plot some time to update",
"plt",
".",
"pause",
"(",
"0.01",
")",
"# Draw the plot and return",
"plt",
".",
"show",
"(",
")",
"return",
"figure"
] | This function displays two images: the original and the adversarial sample
:param original: the original input
:param adversarial: the input after perturbations have been applied
:param figure: if we've already displayed images, use the same plot
:return: the matplot figure to reuse for future samples | [
"This",
"function",
"displays",
"two",
"images",
":",
"the",
"original",
"and",
"the",
"adversarial",
"sample",
":",
"param",
"original",
":",
"the",
"original",
"input",
":",
"param",
"adversarial",
":",
"the",
"input",
"after",
"perturbations",
"have",
"been",
"applied",
":",
"param",
"figure",
":",
"if",
"we",
"ve",
"already",
"displayed",
"images",
"use",
"the",
"same",
"plot",
":",
"return",
":",
"the",
"matplot",
"figure",
"to",
"reuse",
"for",
"future",
"samples"
] | 97488e215760547b81afc53f5e5de8ba7da5bd98 | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/plot/pyplot_image.py#L9-L49 | train |
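A sketch of the figure-reuse pattern the docstring describes; x_test and adv_x are assumed arrays of clean and adversarial inputs:
fig = None
for clean, adv in zip(x_test[:5], adv_x[:5]):
    # passing the previous figure back avoids opening a new window per sample
    fig = pair_visual(clean, adv, figure=fig)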
tensorflow/cleverhans | cleverhans/plot/pyplot_image.py | grid_visual | def grid_visual(data):
"""
This function displays a grid of images to show full misclassification
:param data: grid data of the form;
[nb_classes : nb_classes : img_rows : img_cols : nb_channels]
:return: if necessary, the matplot figure to reuse
"""
import matplotlib.pyplot as plt
# Ensure interactive mode is disabled and initialize our graph
plt.ioff()
figure = plt.figure()
figure.canvas.set_window_title('Cleverhans: Grid Visualization')
# Add the images to the plot
num_cols = data.shape[0]
num_rows = data.shape[1]
num_channels = data.shape[4]
for y in range(num_rows):
for x in range(num_cols):
figure.add_subplot(num_rows, num_cols, (x + 1) + (y * num_cols))
plt.axis('off')
if num_channels == 1:
plt.imshow(data[x, y, :, :, 0], cmap='gray')
else:
plt.imshow(data[x, y, :, :, :])
# Draw the plot and return
plt.show()
return figure | python | def grid_visual(data):
"""
This function displays a grid of images to show full misclassification
:param data: grid data of the form;
[nb_classes : nb_classes : img_rows : img_cols : nb_channels]
:return: if necessary, the matplot figure to reuse
"""
import matplotlib.pyplot as plt
# Ensure interactive mode is disabled and initialize our graph
plt.ioff()
figure = plt.figure()
figure.canvas.set_window_title('Cleverhans: Grid Visualization')
# Add the images to the plot
num_cols = data.shape[0]
num_rows = data.shape[1]
num_channels = data.shape[4]
for y in range(num_rows):
for x in range(num_cols):
figure.add_subplot(num_rows, num_cols, (x + 1) + (y * num_cols))
plt.axis('off')
if num_channels == 1:
plt.imshow(data[x, y, :, :, 0], cmap='gray')
else:
plt.imshow(data[x, y, :, :, :])
# Draw the plot and return
plt.show()
return figure | [
"def",
"grid_visual",
"(",
"data",
")",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"# Ensure interactive mode is disabled and initialize our graph",
"plt",
".",
"ioff",
"(",
")",
"figure",
"=",
"plt",
".",
"figure",
"(",
")",
"figure",
".",
"canvas",
".",
"set_window_title",
"(",
"'Cleverhans: Grid Visualization'",
")",
"# Add the images to the plot",
"num_cols",
"=",
"data",
".",
"shape",
"[",
"0",
"]",
"num_rows",
"=",
"data",
".",
"shape",
"[",
"1",
"]",
"num_channels",
"=",
"data",
".",
"shape",
"[",
"4",
"]",
"for",
"y",
"in",
"range",
"(",
"num_rows",
")",
":",
"for",
"x",
"in",
"range",
"(",
"num_cols",
")",
":",
"figure",
".",
"add_subplot",
"(",
"num_rows",
",",
"num_cols",
",",
"(",
"x",
"+",
"1",
")",
"+",
"(",
"y",
"*",
"num_cols",
")",
")",
"plt",
".",
"axis",
"(",
"'off'",
")",
"if",
"num_channels",
"==",
"1",
":",
"plt",
".",
"imshow",
"(",
"data",
"[",
"x",
",",
"y",
",",
":",
",",
":",
",",
"0",
"]",
",",
"cmap",
"=",
"'gray'",
")",
"else",
":",
"plt",
".",
"imshow",
"(",
"data",
"[",
"x",
",",
"y",
",",
":",
",",
":",
",",
":",
"]",
")",
"# Draw the plot and return",
"plt",
".",
"show",
"(",
")",
"return",
"figure"
] | This function displays a grid of images to show full misclassification
:param data: grid data of the form;
[nb_classes : nb_classes : img_rows : img_cols : nb_channels]
:return: if necessary, the matplot figure to reuse | [
"This",
"function",
"displays",
"a",
"grid",
"of",
"images",
"to",
"show",
"full",
"misclassification",
":",
"param",
"data",
":",
"grid",
"data",
"of",
"the",
"form",
";",
"[",
"nb_classes",
":",
"nb_classes",
":",
"img_rows",
":",
"img_cols",
":",
"nb_channels",
"]",
":",
"return",
":",
"if",
"necessary",
"the",
"matplot",
"figure",
"to",
"reuse"
] | 97488e215760547b81afc53f5e5de8ba7da5bd98 | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/plot/pyplot_image.py#L51-L81 | train |
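grid_visual expects a 5-D grid indexed [source class, target class, rows, cols, channels]; a shape-only sketch with assumed MNIST-like dimensions:
import numpy as np

grid = np.zeros((10, 10, 28, 28, 1))  # one image per (source, target) class pair
grid_visual(grid)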
tensorflow/cleverhans | cleverhans/plot/pyplot_image.py | get_logits_over_interval | def get_logits_over_interval(sess, model, x_data, fgsm_params,
min_epsilon=-10., max_epsilon=10.,
num_points=21):
"""Get logits when the input is perturbed in an interval in adv direction.
Args:
sess: Tf session
model: Model for which we wish to get logits.
x_data: Numpy array corresponding to single data.
point of shape [height, width, channels].
fgsm_params: Parameters for generating adversarial examples.
min_epsilon: Minimum value of epsilon over the interval.
max_epsilon: Maximum value of epsilon over the interval.
num_points: Number of points used to interpolate.
Returns:
Numpy array containing logits.
Raises:
ValueError if min_epsilon is larger than max_epsilon.
"""
# Get the height, width and number of channels
height = x_data.shape[0]
width = x_data.shape[1]
channels = x_data.shape[2]
x_data = np.expand_dims(x_data, axis=0)
import tensorflow as tf
from cleverhans.attacks import FastGradientMethod
# Define the data placeholder
x = tf.placeholder(dtype=tf.float32,
shape=[1, height,
width,
channels],
name='x')
# Define adv_x
fgsm = FastGradientMethod(model, sess=sess)
adv_x = fgsm.generate(x, **fgsm_params)
if min_epsilon > max_epsilon:
raise ValueError('Minimum epsilon is greater than maximum epsilon')
eta = tf.nn.l2_normalize(adv_x - x, dim=0)
epsilon = tf.reshape(tf.lin_space(float(min_epsilon),
float(max_epsilon),
num_points),
(num_points, 1, 1, 1))
lin_batch = x + epsilon * eta
logits = model.get_logits(lin_batch)
with sess.as_default():
log_prob_adv_array = sess.run(logits,
feed_dict={x: x_data})
return log_prob_adv_array | python | def get_logits_over_interval(sess, model, x_data, fgsm_params,
min_epsilon=-10., max_epsilon=10.,
num_points=21):
"""Get logits when the input is perturbed in an interval in adv direction.
Args:
sess: Tf session
model: Model for which we wish to get logits.
x_data: Numpy array corresponding to single data.
point of shape [height, width, channels].
fgsm_params: Parameters for generating adversarial examples.
min_epsilon: Minimum value of epsilon over the interval.
max_epsilon: Maximum value of epsilon over the interval.
num_points: Number of points used to interpolate.
Returns:
Numpy array containing logits.
Raises:
ValueError if min_epsilon is larger than max_epsilon.
"""
# Get the height, width and number of channels
height = x_data.shape[0]
width = x_data.shape[1]
channels = x_data.shape[2]
x_data = np.expand_dims(x_data, axis=0)
import tensorflow as tf
from cleverhans.attacks import FastGradientMethod
# Define the data placeholder
x = tf.placeholder(dtype=tf.float32,
shape=[1, height,
width,
channels],
name='x')
# Define adv_x
fgsm = FastGradientMethod(model, sess=sess)
adv_x = fgsm.generate(x, **fgsm_params)
if min_epsilon > max_epsilon:
raise ValueError('Minimum epsilon is greater than maximum epsilon')
eta = tf.nn.l2_normalize(adv_x - x, dim=0)
epsilon = tf.reshape(tf.lin_space(float(min_epsilon),
float(max_epsilon),
num_points),
(num_points, 1, 1, 1))
lin_batch = x + epsilon * eta
logits = model.get_logits(lin_batch)
with sess.as_default():
log_prob_adv_array = sess.run(logits,
feed_dict={x: x_data})
return log_prob_adv_array | [
"def",
"get_logits_over_interval",
"(",
"sess",
",",
"model",
",",
"x_data",
",",
"fgsm_params",
",",
"min_epsilon",
"=",
"-",
"10.",
",",
"max_epsilon",
"=",
"10.",
",",
"num_points",
"=",
"21",
")",
":",
"# Get the height, width and number of channels",
"height",
"=",
"x_data",
".",
"shape",
"[",
"0",
"]",
"width",
"=",
"x_data",
".",
"shape",
"[",
"1",
"]",
"channels",
"=",
"x_data",
".",
"shape",
"[",
"2",
"]",
"x_data",
"=",
"np",
".",
"expand_dims",
"(",
"x_data",
",",
"axis",
"=",
"0",
")",
"import",
"tensorflow",
"as",
"tf",
"from",
"cleverhans",
".",
"attacks",
"import",
"FastGradientMethod",
"# Define the data placeholder",
"x",
"=",
"tf",
".",
"placeholder",
"(",
"dtype",
"=",
"tf",
".",
"float32",
",",
"shape",
"=",
"[",
"1",
",",
"height",
",",
"width",
",",
"channels",
"]",
",",
"name",
"=",
"'x'",
")",
"# Define adv_x",
"fgsm",
"=",
"FastGradientMethod",
"(",
"model",
",",
"sess",
"=",
"sess",
")",
"adv_x",
"=",
"fgsm",
".",
"generate",
"(",
"x",
",",
"*",
"*",
"fgsm_params",
")",
"if",
"min_epsilon",
">",
"max_epsilon",
":",
"raise",
"ValueError",
"(",
"'Minimum epsilon is less than maximum epsilon'",
")",
"eta",
"=",
"tf",
".",
"nn",
".",
"l2_normalize",
"(",
"adv_x",
"-",
"x",
",",
"dim",
"=",
"0",
")",
"epsilon",
"=",
"tf",
".",
"reshape",
"(",
"tf",
".",
"lin_space",
"(",
"float",
"(",
"min_epsilon",
")",
",",
"float",
"(",
"max_epsilon",
")",
",",
"num_points",
")",
",",
"(",
"num_points",
",",
"1",
",",
"1",
",",
"1",
")",
")",
"lin_batch",
"=",
"x",
"+",
"epsilon",
"*",
"eta",
"logits",
"=",
"model",
".",
"get_logits",
"(",
"lin_batch",
")",
"with",
"sess",
".",
"as_default",
"(",
")",
":",
"log_prob_adv_array",
"=",
"sess",
".",
"run",
"(",
"logits",
",",
"feed_dict",
"=",
"{",
"x",
":",
"x_data",
"}",
")",
"return",
"log_prob_adv_array"
] | Get logits when the input is perturbed in an interval in adv direction.
Args:
sess: Tf session
model: Model for which we wish to get logits.
x_data: Numpy array corresponding to single data.
point of shape [height, width, channels].
fgsm_params: Parameters for generating adversarial examples.
min_epsilon: Minimum value of epsilon over the interval.
max_epsilon: Maximum value of epsilon over the interval.
num_points: Number of points used to interpolate.
Returns:
Numpy array containing logits.
Raises:
ValueError if min_epsilon is larger than max_epsilon. | [
"Get",
"logits",
"when",
"the",
"input",
"is",
"perturbed",
"in",
"an",
"interval",
"in",
"adv",
"direction",
"."
] | 97488e215760547b81afc53f5e5de8ba7da5bd98 | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/plot/pyplot_image.py#L84-L137 | train |
tensorflow/cleverhans | cleverhans/plot/pyplot_image.py | linear_extrapolation_plot | def linear_extrapolation_plot(log_prob_adv_array, y, file_name,
min_epsilon=-10, max_epsilon=10,
num_points=21):
"""Generate linear extrapolation plot.
Args:
log_prob_adv_array: Numpy array containing log probabilities
y: Tf placeholder for the labels
file_name: Plot filename
min_epsilon: Minimum value of epsilon over the interval
max_epsilon: Maximum value of epsilon over the interval
num_points: Number of points used to interpolate
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
figure = plt.figure()
figure.canvas.set_window_title('Cleverhans: Linear Extrapolation Plot')
correct_idx = np.argmax(y, axis=0)
fig = plt.figure()
plt.xlabel('Epsilon')
plt.ylabel('Logits')
x_axis = np.linspace(min_epsilon, max_epsilon, num_points)
plt.xlim(min_epsilon - 1, max_epsilon + 1)
for i in range(y.shape[0]):
if i == correct_idx:
ls = '-'
linewidth = 5
else:
ls = '--'
linewidth = 2
plt.plot(
x_axis,
log_prob_adv_array[:, i],
ls=ls,
linewidth=linewidth,
label='{}'.format(i))
plt.legend(loc='best', fontsize=14)
plt.show()
fig.savefig(file_name)
plt.clf()
return figure | python | def linear_extrapolation_plot(log_prob_adv_array, y, file_name,
min_epsilon=-10, max_epsilon=10,
num_points=21):
"""Generate linear extrapolation plot.
Args:
log_prob_adv_array: Numpy array containing log probabilities
y: Tf placeholder for the labels
file_name: Plot filename
min_epsilon: Minimum value of epsilon over the interval
max_epsilon: Maximum value of epsilon over the interval
num_points: Number of points used to interpolate
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
figure = plt.figure()
figure.canvas.set_window_title('Cleverhans: Linear Extrapolation Plot')
correct_idx = np.argmax(y, axis=0)
fig = plt.figure()
plt.xlabel('Epsilon')
plt.ylabel('Logits')
x_axis = np.linspace(min_epsilon, max_epsilon, num_points)
plt.xlim(min_epsilon - 1, max_epsilon + 1)
for i in range(y.shape[0]):
if i == correct_idx:
ls = '-'
linewidth = 5
else:
ls = '--'
linewidth = 2
plt.plot(
x_axis,
log_prob_adv_array[:, i],
ls=ls,
linewidth=linewidth,
label='{}'.format(i))
plt.legend(loc='best', fontsize=14)
plt.show()
fig.savefig(file_name)
plt.clf()
return figure | [
"def",
"linear_extrapolation_plot",
"(",
"log_prob_adv_array",
",",
"y",
",",
"file_name",
",",
"min_epsilon",
"=",
"-",
"10",
",",
"max_epsilon",
"=",
"10",
",",
"num_points",
"=",
"21",
")",
":",
"import",
"matplotlib",
"matplotlib",
".",
"use",
"(",
"'Agg'",
")",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"figure",
"=",
"plt",
".",
"figure",
"(",
")",
"figure",
".",
"canvas",
".",
"set_window_title",
"(",
"'Cleverhans: Linear Extrapolation Plot'",
")",
"correct_idx",
"=",
"np",
".",
"argmax",
"(",
"y",
",",
"axis",
"=",
"0",
")",
"fig",
"=",
"plt",
".",
"figure",
"(",
")",
"plt",
".",
"xlabel",
"(",
"'Epsilon'",
")",
"plt",
".",
"ylabel",
"(",
"'Logits'",
")",
"x_axis",
"=",
"np",
".",
"linspace",
"(",
"min_epsilon",
",",
"max_epsilon",
",",
"num_points",
")",
"plt",
".",
"xlim",
"(",
"min_epsilon",
"-",
"1",
",",
"max_epsilon",
"+",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"y",
".",
"shape",
"[",
"0",
"]",
")",
":",
"if",
"i",
"==",
"correct_idx",
":",
"ls",
"=",
"'-'",
"linewidth",
"=",
"5",
"else",
":",
"ls",
"=",
"'--'",
"linewidth",
"=",
"2",
"plt",
".",
"plot",
"(",
"x_axis",
",",
"log_prob_adv_array",
"[",
":",
",",
"i",
"]",
",",
"ls",
"=",
"ls",
",",
"linewidth",
"=",
"linewidth",
",",
"label",
"=",
"'{}'",
".",
"format",
"(",
"i",
")",
")",
"plt",
".",
"legend",
"(",
"loc",
"=",
"'best'",
",",
"fontsize",
"=",
"14",
")",
"plt",
".",
"show",
"(",
")",
"fig",
".",
"savefig",
"(",
"file_name",
")",
"plt",
".",
"clf",
"(",
")",
"return",
"figure"
] | Generate linear extrapolation plot.
Args:
log_prob_adv_array: Numpy array containing log probabilities
y: Tf placeholder for the labels
file_name: Plot filename
min_epsilon: Minimum value of epsilon over the interval
max_epsilon: Maximum value of epsilon over the interval
num_points: Number of points used to interpolate | [
"Generate",
"linear",
"extrapolation",
"plot",
"."
] | 97488e215760547b81afc53f5e5de8ba7da5bd98 | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/plot/pyplot_image.py#L139-L182 | train |
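The two plotting helpers above chain naturally; a sketch assuming an existing session, model, a single input x_data, a one-hot label y_onehot, and FGSM parameters:
logits = get_logits_over_interval(sess, model, x_data, fgsm_params,
                                  min_epsilon=-10., max_epsilon=10.,
                                  num_points=21)
linear_extrapolation_plot(logits, y_onehot, 'lin_ext_plot.png',
                          min_epsilon=-10, max_epsilon=10, num_points=21)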
backtrader/backtrader | contrib/utils/iqfeed-to-influxdb.py | IQFeedTool._send_cmd | def _send_cmd(self, cmd: str):
"""Encode IQFeed API messages."""
self._sock.sendall(cmd.encode(encoding='latin-1', errors='strict')) | python | def _send_cmd(self, cmd: str):
"""Encode IQFeed API messages."""
self._sock.sendall(cmd.encode(encoding='latin-1', errors='strict')) | [
"def",
"_send_cmd",
"(",
"self",
",",
"cmd",
":",
"str",
")",
":",
"self",
".",
"_sock",
".",
"sendall",
"(",
"cmd",
".",
"encode",
"(",
"encoding",
"=",
"'latin-1'",
",",
"errors",
"=",
"'strict'",
")",
")"
] | Encode IQFeed API messages. | [
"Encode",
"IQFeed",
"API",
"messages",
"."
] | 59ee9521f9887c2a1030c6f1db8c918a5816fd64 | https://github.com/backtrader/backtrader/blob/59ee9521f9887c2a1030c6f1db8c918a5816fd64/contrib/utils/iqfeed-to-influxdb.py#L59-L61 | train |
backtrader/backtrader | contrib/utils/iqfeed-to-influxdb.py | IQFeedTool.iq_query | def iq_query(self, message: str):
"""Send data query to IQFeed API."""
end_msg = '!ENDMSG!'
recv_buffer = 4096
# Send the historical data request message and buffer the data
self._send_cmd(message)
chunk = ""
data = ""
while True:
chunk = self._sock.recv(recv_buffer).decode('latin-1')
data += chunk
if chunk.startswith('E,'): # error condition
if chunk.startswith('E,!NO_DATA!'):
log.warn('No data available for the given symbol or dates')
return
else:
raise Exception(chunk)
elif end_msg in chunk:
break
# Clean up the data.
data = data[:-1 * (len(end_msg) + 3)]
data = "".join(data.split("\r"))
data = data.replace(",\n", ",")[:-1]
data = data.split(",")
return data | python | def iq_query(self, message: str):
"""Send data query to IQFeed API."""
end_msg = '!ENDMSG!'
recv_buffer = 4096
# Send the historical data request message and buffer the data
self._send_cmd(message)
chunk = ""
data = ""
while True:
chunk = self._sock.recv(recv_buffer).decode('latin-1')
data += chunk
if chunk.startswith('E,'): # error condition
if chunk.startswith('E,!NO_DATA!'):
log.warn('No data available for the given symbol or dates')
return
else:
raise Exception(chunk)
elif end_msg in chunk:
break
# Clean up the data.
data = data[:-1 * (len(end_msg) + 3)]
data = "".join(data.split("\r"))
data = data.replace(",\n", ",")[:-1]
data = data.split(",")
return data | [
"def",
"iq_query",
"(",
"self",
",",
"message",
":",
"str",
")",
":",
"end_msg",
"=",
"'!ENDMSG!'",
"recv_buffer",
"=",
"4096",
"# Send the historical data request message and buffer the data",
"self",
".",
"_send_cmd",
"(",
"message",
")",
"chunk",
"=",
"\"\"",
"data",
"=",
"\"\"",
"while",
"True",
":",
"chunk",
"=",
"self",
".",
"_sock",
".",
"recv",
"(",
"recv_buffer",
")",
".",
"decode",
"(",
"'latin-1'",
")",
"data",
"+=",
"chunk",
"if",
"chunk",
".",
"startswith",
"(",
"'E,'",
")",
":",
"# error condition",
"if",
"chunk",
".",
"startswith",
"(",
"'E,!NO_DATA!'",
")",
":",
"log",
".",
"warn",
"(",
"'No data available for the given symbol or dates'",
")",
"return",
"else",
":",
"raise",
"Exception",
"(",
"chunk",
")",
"elif",
"end_msg",
"in",
"chunk",
":",
"break",
"# Clean up the data.",
"data",
"=",
"data",
"[",
":",
"-",
"1",
"*",
"(",
"len",
"(",
"end_msg",
")",
"+",
"3",
")",
"]",
"data",
"=",
"\"\"",
".",
"join",
"(",
"data",
".",
"split",
"(",
"\"\\r\"",
")",
")",
"data",
"=",
"data",
".",
"replace",
"(",
"\",\\n\"",
",",
"\",\"",
")",
"[",
":",
"-",
"1",
"]",
"data",
"=",
"data",
".",
"split",
"(",
"\",\"",
")",
"return",
"data"
] | Send data query to IQFeed API. | [
"Send",
"data",
"query",
"to",
"IQFeed",
"API",
"."
] | 59ee9521f9887c2a1030c6f1db8c918a5816fd64 | https://github.com/backtrader/backtrader/blob/59ee9521f9887c2a1030c6f1db8c918a5816fd64/contrib/utils/iqfeed-to-influxdb.py#L63-L90 | train |
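A raw call through iq_query, reusing the HIT request format that get_historical_minute_data builds below; the constructor wiring and the ticker are assumptions:
iq = IQFeedTool()  # assumed to open the socket to a running IQFeed client
bars = iq.iq_query("HIT,AAPL,60,20190102000000,20190104235959,,,,1,,,s\r\n")
# bars is a flat comma-split list: timestamp, high, low, open, close, volume, oi per bar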
backtrader/backtrader | contrib/utils/iqfeed-to-influxdb.py | IQFeedTool.get_historical_minute_data | def get_historical_minute_data(self, ticker: str):
"""Request historical 5 minute data from DTN."""
start = self._start
stop = self._stop
if len(stop) > 4:
stop = stop[:4]
if len(start) > 4:
start = start[:4]
for year in range(int(start), int(stop) + 1):
beg_time = ('%s0101000000' % year)
end_time = ('%s1231235959' % year)
msg = "HIT,%s,60,%s,%s,,,,1,,,s\r\n" % (ticker,
beg_time,
end_time)
try:
data = self.iq_query(message=msg)
self.add_data_to_df(data=data)
except Exception as err:
log.error('No data returned because %s', err)
try:
self.dfdb.write_points(self._ndf, ticker)
except InfluxDBClientError as err:
log.error('Write to database failed: %s' % err) | python | def get_historical_minute_data(self, ticker: str):
"""Request historical 5 minute data from DTN."""
start = self._start
stop = self._stop
if len(stop) > 4:
stop = stop[:4]
if len(start) > 4:
start = start[:4]
for year in range(int(start), int(stop) + 1):
beg_time = ('%s0101000000' % year)
end_time = ('%s1231235959' % year)
msg = "HIT,%s,60,%s,%s,,,,1,,,s\r\n" % (ticker,
beg_time,
end_time)
try:
data = self.iq_query(message=msg)
self.add_data_to_df(data=data)
except Exception as err:
log.error('No data returned because %s', err)
try:
self.dfdb.write_points(self._ndf, ticker)
except InfluxDBClientError as err:
log.error('Write to database failed: %s' % err) | [
"def",
"get_historical_minute_data",
"(",
"self",
",",
"ticker",
":",
"str",
")",
":",
"start",
"=",
"self",
".",
"_start",
"stop",
"=",
"self",
".",
"_stop",
"if",
"len",
"(",
"stop",
")",
">",
"4",
":",
"stop",
"=",
"stop",
"[",
":",
"4",
"]",
"if",
"len",
"(",
"start",
")",
">",
"4",
":",
"start",
"=",
"start",
"[",
":",
"4",
"]",
"for",
"year",
"in",
"range",
"(",
"int",
"(",
"start",
")",
",",
"int",
"(",
"stop",
")",
"+",
"1",
")",
":",
"beg_time",
"=",
"(",
"'%s0101000000'",
"%",
"year",
")",
"end_time",
"=",
"(",
"'%s1231235959'",
"%",
"year",
")",
"msg",
"=",
"\"HIT,%s,60,%s,%s,,,,1,,,s\\r\\n\"",
"%",
"(",
"ticker",
",",
"beg_time",
",",
"end_time",
")",
"try",
":",
"data",
"=",
"iq",
".",
"iq_query",
"(",
"message",
"=",
"msg",
")",
"iq",
".",
"add_data_to_df",
"(",
"data",
"=",
"data",
")",
"except",
"Exception",
"as",
"err",
":",
"log",
".",
"error",
"(",
"'No data returned because %s'",
",",
"err",
")",
"try",
":",
"self",
".",
"dfdb",
".",
"write_points",
"(",
"self",
".",
"_ndf",
",",
"ticker",
")",
"except",
"InfluxDBClientError",
"as",
"err",
":",
"log",
".",
"error",
"(",
"'Write to database failed: %s'",
"%",
"err",
")"
] | Request historical 1 minute data from DTN. | [
"Request",
"historical",
"5",
"minute",
"data",
"from",
"DTN",
"."
] | 59ee9521f9887c2a1030c6f1db8c918a5816fd64 | https://github.com/backtrader/backtrader/blob/59ee9521f9887c2a1030c6f1db8c918a5816fd64/contrib/utils/iqfeed-to-influxdb.py#L92-L118 | train |
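For reference, the request string this method builds for one year (ticker and year are examples); the interval 60 with the trailing 's' marks 60-second, i.e. 1-minute, bars:
msg = "HIT,%s,60,%s,%s,,,,1,,,s\r\n" % ("AAPL", "20180101000000", "20181231235959")
# -> 'HIT,AAPL,60,20180101000000,20181231235959,,,,1,,,s\r\n'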
backtrader/backtrader | contrib/utils/iqfeed-to-influxdb.py | IQFeedTool.add_data_to_df | def add_data_to_df(self, data: np.array):
"""Build Pandas Dataframe in memory"""
col_names = ['high_p', 'low_p', 'open_p', 'close_p', 'volume', 'oi']
data = np.array(data).reshape(-1, len(col_names) + 1)
df = pd.DataFrame(data=data[:, 1:], index=data[:, 0],
columns=col_names)
df.index = pd.to_datetime(df.index)
# Sort the dataframe based on ascending dates.
df.sort_index(ascending=True, inplace=True)
# Convert dataframe columns to float and ints.
df[['high_p', 'low_p', 'open_p', 'close_p']] = df[
['high_p', 'low_p', 'open_p', 'close_p']].astype(float)
df[['volume', 'oi']] = df[['volume', 'oi']].astype(int)
if self._ndf.empty:
self._ndf = df
else:
self._ndf = self._ndf.append(df) | python | def add_data_to_df(self, data: np.array):
"""Build Pandas Dataframe in memory"""
col_names = ['high_p', 'low_p', 'open_p', 'close_p', 'volume', 'oi']
data = np.array(data).reshape(-1, len(col_names) + 1)
df = pd.DataFrame(data=data[:, 1:], index=data[:, 0],
columns=col_names)
df.index = pd.to_datetime(df.index)
# Sort the dataframe based on ascending dates.
df.sort_index(ascending=True, inplace=True)
# Convert dataframe columns to float and ints.
df[['high_p', 'low_p', 'open_p', 'close_p']] = df[
['high_p', 'low_p', 'open_p', 'close_p']].astype(float)
df[['volume', 'oi']] = df[['volume', 'oi']].astype(int)
if self._ndf.empty:
self._ndf = df
else:
self._ndf = self._ndf.append(df) | [
"def",
"add_data_to_df",
"(",
"self",
",",
"data",
":",
"np",
".",
"array",
")",
":",
"col_names",
"=",
"[",
"'high_p'",
",",
"'low_p'",
",",
"'open_p'",
",",
"'close_p'",
",",
"'volume'",
",",
"'oi'",
"]",
"data",
"=",
"np",
".",
"array",
"(",
"data",
")",
".",
"reshape",
"(",
"-",
"1",
",",
"len",
"(",
"col_names",
")",
"+",
"1",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
"=",
"data",
"[",
":",
",",
"1",
":",
"]",
",",
"index",
"=",
"data",
"[",
":",
",",
"0",
"]",
",",
"columns",
"=",
"col_names",
")",
"df",
".",
"index",
"=",
"pd",
".",
"to_datetime",
"(",
"df",
".",
"index",
")",
"# Sort the dataframe based on ascending dates.",
"df",
".",
"sort_index",
"(",
"ascending",
"=",
"True",
",",
"inplace",
"=",
"True",
")",
"# Convert dataframe columns to float and ints.",
"df",
"[",
"[",
"'high_p'",
",",
"'low_p'",
",",
"'open_p'",
",",
"'close_p'",
"]",
"]",
"=",
"df",
"[",
"[",
"'high_p'",
",",
"'low_p'",
",",
"'open_p'",
",",
"'close_p'",
"]",
"]",
".",
"astype",
"(",
"float",
")",
"df",
"[",
"[",
"'volume'",
",",
"'oi'",
"]",
"]",
"=",
"df",
"[",
"[",
"'volume'",
",",
"'oi'",
"]",
"]",
".",
"astype",
"(",
"int",
")",
"if",
"self",
".",
"_ndf",
".",
"empty",
":",
"self",
".",
"_ndf",
"=",
"df",
"else",
":",
"self",
".",
"_ndf",
"=",
"self",
".",
"_ndf",
".",
"append",
"(",
"df",
")"
] | Build Pandas Dataframe in memory | [
"Build",
"Pandas",
"Dataframe",
"in",
"memory"
] | 59ee9521f9887c2a1030c6f1db8c918a5816fd64 | https://github.com/backtrader/backtrader/blob/59ee9521f9887c2a1030c6f1db8c918a5816fd64/contrib/utils/iqfeed-to-influxdb.py#L120-L142 | train |
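The reshape in add_data_to_df recovers one row per bar from the flat list iq_query returns; a tiny sketch with made-up values:
import numpy as np

flat = ['2019-01-02 09:31:00', '10.5', '10.1', '10.2', '10.4', '1200', '0']
rows = np.array(flat).reshape(-1, 7)  # 1 timestamp column + 6 data columns per bar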
backtrader/backtrader | contrib/utils/iqfeed-to-influxdb.py | IQFeedTool.get_tickers_from_file | def get_tickers_from_file(self, filename):
"""Load ticker list from txt file"""
if not os.path.exists(filename):
log.error("Ticker List file does not exist: %s", filename)
tickers = []
with io.open(filename, 'r') as fd:
for ticker in fd:
tickers.append(ticker.rstrip())
return tickers | python | def get_tickers_from_file(self, filename):
"""Load ticker list from txt file"""
if not os.path.exists(filename):
log.error("Ticker List file does not exist: %s", filename)
tickers = []
with io.open(filename, 'r') as fd:
for ticker in fd:
tickers.append(ticker.rstrip())
return tickers | [
"def",
"get_tickers_from_file",
"(",
"self",
",",
"filename",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"filename",
")",
":",
"log",
".",
"error",
"(",
"\"Ticker List file does not exist: %s\"",
",",
"filename",
")",
"tickers",
"=",
"[",
"]",
"with",
"io",
".",
"open",
"(",
"filename",
",",
"'r'",
")",
"as",
"fd",
":",
"for",
"ticker",
"in",
"fd",
":",
"tickers",
".",
"append",
"(",
"ticker",
".",
"rstrip",
"(",
")",
")",
"return",
"tickers"
] | Load ticker list from txt file | [
"Load",
"ticker",
"list",
"from",
"txt",
"file"
] | 59ee9521f9887c2a1030c6f1db8c918a5816fd64 | https://github.com/backtrader/backtrader/blob/59ee9521f9887c2a1030c6f1db8c918a5816fd64/contrib/utils/iqfeed-to-influxdb.py#L144-L153 | train |
backtrader/backtrader | contrib/utils/influxdb-import.py | InfluxDBTool.write_dataframe_to_idb | def write_dataframe_to_idb(self, ticker):
"""Write Pandas Dataframe to InfluxDB database"""
cachepath = self._cache
cachefile = ('%s/%s-1M.csv.gz' % (cachepath, ticker))
if not os.path.exists(cachefile):
log.warn('Import file does not exist: %s' %
(cachefile))
return
df = pd.read_csv(cachefile, compression='infer', header=0,
infer_datetime_format=True)
df['Datetime'] = pd.to_datetime(df['Date'] + ' ' + df['Time'])
df = df.set_index('Datetime')
df = df.drop(['Date', 'Time'], axis=1)
try:
self.dfdb.write_points(df, ticker)
except InfluxDBClientError as err:
log.error('Write to database failed: %s' % err) | python | def write_dataframe_to_idb(self, ticker):
"""Write Pandas Dataframe to InfluxDB database"""
cachepath = self._cache
cachefile = ('%s/%s-1M.csv.gz' % (cachepath, ticker))
if not os.path.exists(cachefile):
log.warn('Import file does not exist: %s' %
(cachefile))
return
df = pd.read_csv(cachefile, compression='infer', header=0,
infer_datetime_format=True)
df['Datetime'] = pd.to_datetime(df['Date'] + ' ' + df['Time'])
df = df.set_index('Datetime')
df = df.drop(['Date', 'Time'], axis=1)
try:
self.dfdb.write_points(df, ticker)
except InfluxDBClientError as err:
log.error('Write to database failed: %s' % err) | [
"def",
"write_dataframe_to_idb",
"(",
"self",
",",
"ticker",
")",
":",
"cachepath",
"=",
"self",
".",
"_cache",
"cachefile",
"=",
"(",
"'%s/%s-1M.csv.gz'",
"%",
"(",
"cachepath",
",",
"ticker",
")",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"cachefile",
")",
":",
"log",
".",
"warn",
"(",
"'Import file does not exist: %s'",
"%",
"(",
"cachefile",
")",
")",
"return",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"cachefile",
",",
"compression",
"=",
"'infer'",
",",
"header",
"=",
"0",
",",
"infer_datetime_format",
"=",
"True",
")",
"df",
"[",
"'Datetime'",
"]",
"=",
"pd",
".",
"to_datetime",
"(",
"df",
"[",
"'Date'",
"]",
"+",
"' '",
"+",
"df",
"[",
"'Time'",
"]",
")",
"df",
"=",
"df",
".",
"set_index",
"(",
"'Datetime'",
")",
"df",
"=",
"df",
".",
"drop",
"(",
"[",
"'Date'",
",",
"'Time'",
"]",
",",
"axis",
"=",
"1",
")",
"try",
":",
"self",
".",
"dfdb",
".",
"write_points",
"(",
"df",
",",
"ticker",
")",
"except",
"InfluxDBClientError",
"as",
"err",
":",
"log",
".",
"error",
"(",
"'Write to database failed: %s'",
"%",
"err",
")"
] | Write Pandas Dataframe to InfluxDB database | [
"Write",
"Pandas",
"Dataframe",
"to",
"InfluxDB",
"database"
] | 59ee9521f9887c2a1030c6f1db8c918a5816fd64 | https://github.com/backtrader/backtrader/blob/59ee9521f9887c2a1030c6f1db8c918a5816fd64/contrib/utils/influxdb-import.py#L29-L49 | train |
backtrader/backtrader | backtrader/plot/multicursor.py | MultiCursor.connect | def connect(self):
"""connect events"""
self._cidmotion = self.canvas.mpl_connect('motion_notify_event',
self.onmove)
self._ciddraw = self.canvas.mpl_connect('draw_event', self.clear) | python | def connect(self):
"""connect events"""
self._cidmotion = self.canvas.mpl_connect('motion_notify_event',
self.onmove)
self._ciddraw = self.canvas.mpl_connect('draw_event', self.clear) | [
"def",
"connect",
"(",
"self",
")",
":",
"self",
".",
"_cidmotion",
"=",
"self",
".",
"canvas",
".",
"mpl_connect",
"(",
"'motion_notify_event'",
",",
"self",
".",
"onmove",
")",
"self",
".",
"_ciddraw",
"=",
"self",
".",
"canvas",
".",
"mpl_connect",
"(",
"'draw_event'",
",",
"self",
".",
"clear",
")"
] | connect events | [
"connect",
"events"
] | 59ee9521f9887c2a1030c6f1db8c918a5816fd64 | https://github.com/backtrader/backtrader/blob/59ee9521f9887c2a1030c6f1db8c918a5816fd64/backtrader/plot/multicursor.py#L173-L177 | train |
backtrader/backtrader | backtrader/plot/multicursor.py | MultiCursor.disconnect | def disconnect(self):
"""disconnect events"""
self.canvas.mpl_disconnect(self._cidmotion)
self.canvas.mpl_disconnect(self._ciddraw) | python | def disconnect(self):
"""disconnect events"""
self.canvas.mpl_disconnect(self._cidmotion)
self.canvas.mpl_disconnect(self._ciddraw) | [
"def",
"disconnect",
"(",
"self",
")",
":",
"self",
".",
"canvas",
".",
"mpl_disconnect",
"(",
"self",
".",
"_cidmotion",
")",
"self",
".",
"canvas",
".",
"mpl_disconnect",
"(",
"self",
".",
"_ciddraw",
")"
] | disconnect events | [
"disconnect",
"events"
] | 59ee9521f9887c2a1030c6f1db8c918a5816fd64 | https://github.com/backtrader/backtrader/blob/59ee9521f9887c2a1030c6f1db8c918a5816fd64/backtrader/plot/multicursor.py#L179-L182 | train |
AirtestProject/Airtest | playground/win_ide.py | WindowsInIDE.connect | def connect(self, **kwargs):
"""
Connect to window and set it foreground
Args:
**kwargs: optional arguments
Returns:
None
"""
self.app = self._app.connect(**kwargs)
try:
self._top_window = self.app.top_window().wrapper_object()
self.set_foreground()
except RuntimeError:
self._top_window = None | python | def connect(self, **kwargs):
"""
Connect to window and set it foreground
Args:
**kwargs: optional arguments
Returns:
None
"""
self.app = self._app.connect(**kwargs)
try:
self._top_window = self.app.top_window().wrapper_object()
self.set_foreground()
except RuntimeError:
self._top_window = None | [
"def",
"connect",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"app",
"=",
"self",
".",
"_app",
".",
"connect",
"(",
"*",
"*",
"kwargs",
")",
"try",
":",
"self",
".",
"_top_window",
"=",
"self",
".",
"app",
".",
"top_window",
"(",
")",
".",
"wrapper_object",
"(",
")",
"self",
".",
"set_foreground",
"(",
")",
"except",
"RuntimeError",
":",
"self",
".",
"_top_window",
"=",
"None"
] | Connect to window and set it foreground
Args:
**kwargs: optional arguments
Returns:
None | [
"Connect",
"to",
"window",
"and",
"set",
"it",
"foreground"
] | 21583da2698a601cd632228228fc16d41f60a517 | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/playground/win_ide.py#L19-L35 | train |
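A connect() sketch using pywinauto-style keyword filters; the constructor wiring and the title pattern are assumptions:
win = WindowsInIDE()                 # assumed to set self._app to a pywinauto Application
win.connect(title_re=".*Notepad.*")  # any kwarg accepted by pywinauto's connect works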
AirtestProject/Airtest | playground/win_ide.py | WindowsInIDE.get_rect | def get_rect(self):
"""
Get rectangle of app or desktop resolution
Returns:
RECT(left, top, right, bottom)
"""
if self.handle:
left, top, right, bottom = win32gui.GetWindowRect(self.handle)
return RECT(left, top, right, bottom)
else:
desktop = win32gui.GetDesktopWindow()
left, top, right, bottom = win32gui.GetWindowRect(desktop)
return RECT(left, top, right, bottom) | python | def get_rect(self):
"""
Get rectangle of app or desktop resolution
Returns:
RECT(left, top, right, bottom)
"""
if self.handle:
left, top, right, bottom = win32gui.GetWindowRect(self.handle)
return RECT(left, top, right, bottom)
else:
desktop = win32gui.GetDesktopWindow()
left, top, right, bottom = win32gui.GetWindowRect(desktop)
return RECT(left, top, right, bottom) | [
"def",
"get_rect",
"(",
"self",
")",
":",
"if",
"self",
".",
"handle",
":",
"left",
",",
"top",
",",
"right",
",",
"bottom",
"=",
"win32gui",
".",
"GetWindowRect",
"(",
"self",
".",
"handle",
")",
"return",
"RECT",
"(",
"left",
",",
"top",
",",
"right",
",",
"bottom",
")",
"else",
":",
"desktop",
"=",
"win32gui",
".",
"GetDesktopWindow",
"(",
")",
"left",
",",
"top",
",",
"right",
",",
"bottom",
"=",
"win32gui",
".",
"GetWindowRect",
"(",
"desktop",
")",
"return",
"RECT",
"(",
"left",
",",
"top",
",",
"right",
",",
"bottom",
")"
] | Get rectangle of app or desktop resolution
Returns:
RECT(left, top, right, bottom) | [
"Get",
"rectangle",
"of",
"app",
"or",
"desktop",
"resolution"
] | 21583da2698a601cd632228228fc16d41f60a517 | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/playground/win_ide.py#L37-L51 | train |
AirtestProject/Airtest | playground/win_ide.py | WindowsInIDE.snapshot | def snapshot(self, filename="tmp.png"):
"""
Take a screenshot and save it to `tmp.png` filename by default
Args:
filename: name of file where to store the screenshot
Returns:
the screenshot as an image array
"""
if not filename:
filename = "tmp.png"
if self.handle:
try:
screenshot(filename, self.handle)
except win32gui.error:
self.handle = None
screenshot(filename)
else:
screenshot(filename)
img = aircv.imread(filename)
os.remove(filename)
return img | python | def snapshot(self, filename="tmp.png"):
"""
Take a screenshot and save it to `tmp.png` filename by default
Args:
filename: name of file where to store the screenshot
Returns:
the screenshot as an image array
"""
if not filename:
filename = "tmp.png"
if self.handle:
try:
screenshot(filename, self.handle)
except win32gui.error:
self.handle = None
screenshot(filename)
else:
screenshot(filename)
img = aircv.imread(filename)
os.remove(filename)
return img | [
"def",
"snapshot",
"(",
"self",
",",
"filename",
"=",
"\"tmp.png\"",
")",
":",
"if",
"not",
"filename",
":",
"filename",
"=",
"\"tmp.png\"",
"if",
"self",
".",
"handle",
":",
"try",
":",
"screenshot",
"(",
"filename",
",",
"self",
".",
"handle",
")",
"except",
"win32gui",
".",
"error",
":",
"self",
".",
"handle",
"=",
"None",
"screenshot",
"(",
"filename",
")",
"else",
":",
"screenshot",
"(",
"filename",
")",
"img",
"=",
"aircv",
".",
"imread",
"(",
"filename",
")",
"os",
".",
"remove",
"(",
"filename",
")",
"return",
"img"
] | Take a screenshot and save it to `tmp.png` filename by default
Args:
filename: name of file where to store the screenshot
Returns:
the screenshot as an image array | [
"Take",
"a",
"screenshot",
"and",
"save",
"it",
"to",
"tmp",
".",
"png",
"filename",
"by",
"default"
] | 21583da2698a601cd632228228fc16d41f60a517 | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/playground/win_ide.py#L53-L78 | train |
AirtestProject/Airtest | benchmark/plot.py | PlotResult.extract_data | def extract_data(self):
"""从数据中获取到绘图相关的有用信息."""
self.time_axis = []
self.cpu_axis = []
self.mem_axis = []
self.timestamp_list = []
plot_data = self.data.get("plot_data", [])
# Split the data into segments along the time dividers and take the extrema of each
for i in plot_data:
timestamp = i["timestamp"]
self.timestamp_list.append(timestamp)
timestamp = round(timestamp, 1)
cpu_percent = i["cpu_percent"]
mem_gb_num = i["mem_gb_num"]
date = datetime.fromtimestamp(timestamp)
# Append to the plot axes
self.time_axis.append(date)
self.cpu_axis.append(cpu_percent)
self.mem_axis.append(mem_gb_num)
# Get the CPU and memory extrema during each method's execution:
self.get_each_method_maximun_cpu_mem() | python | def extract_data(self):
"""从数据中获取到绘图相关的有用信息."""
self.time_axis = []
self.cpu_axis = []
self.mem_axis = []
self.timestamp_list = []
plot_data = self.data.get("plot_data", [])
# Split the data into segments along the time dividers and take the extrema of each
for i in plot_data:
timestamp = i["timestamp"]
self.timestamp_list.append(timestamp)
timestamp = round(timestamp, 1)
cpu_percent = i["cpu_percent"]
mem_gb_num = i["mem_gb_num"]
date = datetime.fromtimestamp(timestamp)
# Append to the plot axes
self.time_axis.append(date)
self.cpu_axis.append(cpu_percent)
self.mem_axis.append(mem_gb_num)
# Get the CPU and memory extrema during each method's execution:
self.get_each_method_maximun_cpu_mem() | [
"def",
"extract_data",
"(",
"self",
")",
":",
"self",
".",
"time_axis",
"=",
"[",
"]",
"self",
".",
"cpu_axis",
"=",
"[",
"]",
"self",
".",
"mem_axis",
"=",
"[",
"]",
"self",
".",
"timestamp_list",
"=",
"[",
"]",
"plot_data",
"=",
"self",
".",
"data",
".",
"get",
"(",
"\"plot_data\"",
",",
"[",
"]",
")",
"# 按照时间分割线,划分成几段数据,取其中的最值",
"for",
"i",
"in",
"plot_data",
":",
"timestamp",
"=",
"i",
"[",
"\"timestamp\"",
"]",
"self",
".",
"timestamp_list",
".",
"append",
"(",
"timestamp",
")",
"timestamp",
"=",
"round",
"(",
"timestamp",
",",
"1",
")",
"cpu_percent",
"=",
"i",
"[",
"\"cpu_percent\"",
"]",
"mem_gb_num",
"=",
"i",
"[",
"\"mem_gb_num\"",
"]",
"date",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"timestamp",
")",
"# 添加坐标轴",
"self",
".",
"time_axis",
".",
"append",
"(",
"date",
")",
"self",
".",
"cpu_axis",
".",
"append",
"(",
"cpu_percent",
")",
"self",
".",
"mem_axis",
".",
"append",
"(",
"mem_gb_num",
")",
"# 获取各种方法执行过程中的cpu和内存极值:",
"self",
".",
"get_each_method_maximun_cpu_mem",
"(",
")"
] | Extract the plotting-related information from the data. | [
"从数据中获取到绘图相关的有用信息",
"."
] | 21583da2698a601cd632228228fc16d41f60a517 | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/benchmark/plot.py#L38-L59 | train |
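The plot_data records extract_data consumes carry three fields, matching the keys the method reads; the sample values below are made up:
sample = {"timestamp": 1546416660.0,  # seconds since the epoch
          "cpu_percent": 37.5,
          "mem_gb_num": 512.0}        # appended to mem_axis as-is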
AirtestProject/Airtest | benchmark/plot.py | PlotResult.get_each_method_maximun_cpu_mem | def get_each_method_maximun_cpu_mem(self):
"""获取每个方法中的cpu和内存耗费最值点."""
# 本函数用于丰富self.method_exec_info的信息:存入cpu、mem最值点
self.method_exec_info = deepcopy(self.data.get("method_exec_info", []))
method_exec_info = deepcopy(self.method_exec_info) # 用来辅助循环
method_index, cpu_max, cpu_max_time, mem_max, mem_max_time = 0, 0, 0, 0, 0 # 临时变量
self.max_mem = 0
for index, timestamp in enumerate(self.timestamp_list):
# method_exec_info是按顺序的,逐个遍历找出每个method_exec_info中的cpu和mem的最值点和timestamp:
start, end = method_exec_info[0]["start_time"], method_exec_info[0]["end_time"]
if timestamp < start:
# 方法正式start之前的数据,不能参与方法内的cpu、mem计算,直接忽略此条数据
continue
elif timestamp <= end:
# 方法执行期间的数据,纳入最值比较:
if self.cpu_axis[index] > cpu_max:
cpu_max, cpu_max_time = self.cpu_axis[index], timestamp
if self.mem_axis[index] > mem_max:
mem_max, mem_max_time = self.mem_axis[index], timestamp
continue
else:
# 本次方法筛选完毕,保存本方法的最值cpu和mem
if cpu_max_time != 0 and mem_max_time != 0:
self.method_exec_info[method_index].update({"cpu_max": cpu_max, "mem_max": mem_max, "cpu_max_time": cpu_max_time, "mem_max_time": mem_max_time})
# 保存最大的内存,后面绘图时用
if mem_max > self.max_mem:
self.max_mem = mem_max
cpu_max, mem_max = 0, 0 # 临时变量
# 准备进行下一个方法的检查,发现已经检查完则正式结束
del method_exec_info[0]
if method_exec_info:
method_index += 1 # 进行下一个方法时:当前方法的序号+1
continue
else:
break | python | def get_each_method_maximun_cpu_mem(self):
"""获取每个方法中的cpu和内存耗费最值点."""
# 本函数用于丰富self.method_exec_info的信息:存入cpu、mem最值点
self.method_exec_info = deepcopy(self.data.get("method_exec_info", []))
method_exec_info = deepcopy(self.method_exec_info) # 用来辅助循环
method_index, cpu_max, cpu_max_time, mem_max, mem_max_time = 0, 0, 0, 0, 0 # 临时变量
self.max_mem = 0
for index, timestamp in enumerate(self.timestamp_list):
# method_exec_info是按顺序的,逐个遍历找出每个method_exec_info中的cpu和mem的最值点和timestamp:
start, end = method_exec_info[0]["start_time"], method_exec_info[0]["end_time"]
if timestamp < start:
# 方法正式start之前的数据,不能参与方法内的cpu、mem计算,直接忽略此条数据
continue
elif timestamp <= end:
# 方法执行期间的数据,纳入最值比较:
if self.cpu_axis[index] > cpu_max:
cpu_max, cpu_max_time = self.cpu_axis[index], timestamp
if self.mem_axis[index] > mem_max:
mem_max, mem_max_time = self.mem_axis[index], timestamp
continue
else:
# 本次方法筛选完毕,保存本方法的最值cpu和mem
if cpu_max_time != 0 and mem_max_time != 0:
self.method_exec_info[method_index].update({"cpu_max": cpu_max, "mem_max": mem_max, "cpu_max_time": cpu_max_time, "mem_max_time": mem_max_time})
# 保存最大的内存,后面绘图时用
if mem_max > self.max_mem:
self.max_mem = mem_max
cpu_max, mem_max = 0, 0 # 临时变量
# 准备进行下一个方法的检查,发现已经检查完则正式结束
del method_exec_info[0]
if method_exec_info:
method_index += 1 # 进行下一个方法时:当前方法的序号+1
continue
else:
break | [
"def",
"get_each_method_maximun_cpu_mem",
"(",
"self",
")",
":",
"# 本函数用于丰富self.method_exec_info的信息:存入cpu、mem最值点",
"self",
".",
"method_exec_info",
"=",
"deepcopy",
"(",
"self",
".",
"data",
".",
"get",
"(",
"\"method_exec_info\"",
",",
"[",
"]",
")",
")",
"method_exec_info",
"=",
"deepcopy",
"(",
"self",
".",
"method_exec_info",
")",
"# 用来辅助循环",
"method_index",
",",
"cpu_max",
",",
"cpu_max_time",
",",
"mem_max",
",",
"mem_max_time",
"=",
"0",
",",
"0",
",",
"0",
",",
"0",
",",
"0",
"# 临时变量",
"self",
".",
"max_mem",
"=",
"0",
"for",
"index",
",",
"timestamp",
"in",
"enumerate",
"(",
"self",
".",
"timestamp_list",
")",
":",
"# method_exec_info是按顺序的,逐个遍历找出每个method_exec_info中的cpu和mem的最值点和timestamp:",
"start",
",",
"end",
"=",
"method_exec_info",
"[",
"0",
"]",
"[",
"\"start_time\"",
"]",
",",
"method_exec_info",
"[",
"0",
"]",
"[",
"\"end_time\"",
"]",
"if",
"timestamp",
"<",
"start",
":",
"# 方法正式start之前的数据,不能参与方法内的cpu、mem计算,直接忽略此条数据",
"continue",
"elif",
"timestamp",
"<=",
"end",
":",
"# 方法执行期间的数据,纳入最值比较:",
"if",
"self",
".",
"cpu_axis",
"[",
"index",
"]",
">",
"cpu_max",
":",
"cpu_max",
",",
"cpu_max_time",
"=",
"self",
".",
"cpu_axis",
"[",
"index",
"]",
",",
"timestamp",
"if",
"self",
".",
"mem_axis",
"[",
"index",
"]",
">",
"mem_max",
":",
"mem_max",
",",
"mem_max_time",
"=",
"self",
".",
"mem_axis",
"[",
"index",
"]",
",",
"timestamp",
"continue",
"else",
":",
"# 本次方法筛选完毕,保存本方法的最值cpu和mem",
"if",
"cpu_max_time",
"!=",
"0",
"and",
"mem_max_time",
"!=",
"0",
":",
"self",
".",
"method_exec_info",
"[",
"method_index",
"]",
".",
"update",
"(",
"{",
"\"cpu_max\"",
":",
"cpu_max",
",",
"\"mem_max\"",
":",
"mem_max",
",",
"\"cpu_max_time\"",
":",
"cpu_max_time",
",",
"\"mem_max_time\"",
":",
"mem_max_time",
"}",
")",
"# 保存最大的内存,后面绘图时用",
"if",
"mem_max",
">",
"self",
".",
"max_mem",
":",
"self",
".",
"max_mem",
"=",
"mem_max",
"cpu_max",
",",
"mem_max",
"=",
"0",
",",
"0",
"# 临时变量",
"# 准备进行下一个方法的检查,发现已经检查完则正式结束",
"del",
"method_exec_info",
"[",
"0",
"]",
"if",
"method_exec_info",
":",
"method_index",
"+=",
"1",
"# 进行下一个方法时:当前方法的序号+1",
"continue",
"else",
":",
"break"
] | Get the peak CPU and memory points for each method. | [
"获取每个方法中的cpu和内存耗费最值点",
"."
] | 21583da2698a601cd632228228fc16d41f60a517 | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/benchmark/plot.py#L61-L95 | train |
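Shape of one method_exec_info entry, inferred from the keys this class reads and writes; the values are made up:
entry = {"name": "find_template", "start_time": 1546416660.0, "end_time": 1546416663.2,
         "result": True, "kp_sch": 64, "kp_src": 512, "good": 30}
# after get_each_method_maximun_cpu_mem() the entry also holds
# "cpu_max", "cpu_max_time", "mem_max", "mem_max_time"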
AirtestProject/Airtest | benchmark/plot.py | PlotResult._get_graph_title | def _get_graph_title(self):
"""获取图像的title."""
start_time = datetime.fromtimestamp(int(self.timestamp_list[0]))
end_time = datetime.fromtimestamp(int(self.timestamp_list[-1]))
end_time = end_time.strftime('%H:%M:%S')
title = "Timespan: %s —— %s" % (start_time, end_time)
return title | python | def _get_graph_title(self):
"""获取图像的title."""
start_time = datetime.fromtimestamp(int(self.timestamp_list[0]))
end_time = datetime.fromtimestamp(int(self.timestamp_list[-1]))
end_time = end_time.strftime('%H:%M:%S')
title = "Timespan: %s —— %s" % (start_time, end_time)
return title | [
"def",
"_get_graph_title",
"(",
"self",
")",
":",
"start_time",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"int",
"(",
"self",
".",
"timestamp_list",
"[",
"0",
"]",
")",
")",
"end_time",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"int",
"(",
"self",
".",
"timestamp_list",
"[",
"-",
"1",
"]",
")",
")",
"end_time",
"=",
"end_time",
".",
"strftime",
"(",
"'%H:%M:%S'",
")",
"title",
"=",
"\"Timespan: %s —— %s\" % (",
"t",
"r",
"t_time, en",
"d",
"time)",
"",
"return",
"title"
] | Get the title of the graph. | [
"获取图像的title",
"."
] | 21583da2698a601cd632228228fc16d41f60a517 | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/benchmark/plot.py#L97-L104 | train |
AirtestProject/Airtest | benchmark/plot.py | PlotResult.plot_cpu_mem_keypoints | def plot_cpu_mem_keypoints(self):
"""绘制CPU/Mem/特征点数量."""
plt.figure(1)
# 开始绘制子图:
plt.subplot(311)
title = self._get_graph_title()
plt.title(title, loc="center") # 设置绘图的标题
mem_ins = plt.plot(self.time_axis, self.mem_axis, "-", label="Mem(MB)", color='deepskyblue', linestyle='-', marker=',')
# 设置数字标签
plt.legend(mem_ins, ["Mem(MB)"], loc='upper right') # 说明标签的位置
plt.grid() # 加网格
plt.ylabel("Mem(MB)")
plt.ylim(bottom=0)
for method_exec in self.method_exec_info:
start_date = datetime.fromtimestamp(method_exec["start_time"])
end_date = datetime.fromtimestamp(method_exec["end_time"])
plt.vlines(start_date, 0, self.max_mem, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
plt.vlines(end_date, 0, self.max_mem, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
# 绘制mem文字:
x = datetime.fromtimestamp(method_exec["mem_max_time"])
text = "%s: %d MB" % (method_exec["name"], method_exec["mem_max"])
plt.text(x, method_exec["mem_max"], text, ha="center", va="bottom", fontsize=10)
plt.plot(x, method_exec["mem_max"], 'bo', label="point") # 绘制点
# 绘制子图2
plt.subplot(312)
cpu_ins = plt.plot(self.time_axis, self.cpu_axis, "-", label="CPU(%)", color='red', linestyle='-', marker=',')
plt.legend(cpu_ins, ["CPU(%)"], loc='upper right') # 说明标签的位置
plt.grid() # 加网格
plt.xlabel("Time(s)")
plt.ylabel("CPU(%)")
plt.ylim(0, 120)
for method_exec in self.method_exec_info:
start_date = datetime.fromtimestamp(method_exec["start_time"])
end_date = datetime.fromtimestamp(method_exec["end_time"])
plt.vlines(start_date, 0, 100, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
plt.vlines(end_date, 0, 100, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
# 绘制mem文字:
x = datetime.fromtimestamp(method_exec["cpu_max_time"])
text = "%s: %d%%" % (method_exec["name"], method_exec["cpu_max"])
plt.text(x, method_exec["cpu_max"], text, ha="center", va="bottom", fontsize=10)
plt.plot(x, method_exec["cpu_max"], 'ro', label="point") # 绘制点
# 绘制子图3
plt.subplot(313) # 绘制一下柱状图(关键点)
# 设置轴向标签
plt.xlabel('methods')
plt.ylabel('keypoints number')
method_list, method_pts_length_list, color_list = [], [], []
for method_exec in self.method_exec_info:
for item in ["kp_sch", "kp_src", "good"]:
method_list.append("%s-%s" % (method_exec["name"], item))
method_pts_length_list.append(method_exec[item])
if method_exec["result"]:
color_list.append(["palegreen", "limegreen", "deepskyblue"][["kp_sch", "kp_src", "good"].index(item)])
else:
color_list.append("tomato")
method_x = np.arange(len(method_list)) + 1
plt.bar(method_x, method_pts_length_list, width=0.35, align='center', color=color_list, alpha=0.8)
plt.xticks(method_x, method_list, size='small', rotation=30)
# 设置数字标签
for x, y in zip(method_x, method_pts_length_list):
plt.text(x, y + 10, "%d" % y, ha="center", va="bottom", fontsize=7)
plt.ylim(0, max(method_pts_length_list) * 1.2)
# 显示图像
plt.show() | python | def plot_cpu_mem_keypoints(self):
"""绘制CPU/Mem/特征点数量."""
plt.figure(1)
# 开始绘制子图:
plt.subplot(311)
title = self._get_graph_title()
plt.title(title, loc="center") # 设置绘图的标题
mem_ins = plt.plot(self.time_axis, self.mem_axis, "-", label="Mem(MB)", color='deepskyblue', linestyle='-', marker=',')
# 设置数字标签
plt.legend(mem_ins, ["Mem(MB)"], loc='upper right') # 说明标签的位置
plt.grid() # 加网格
plt.ylabel("Mem(MB)")
plt.ylim(bottom=0)
for method_exec in self.method_exec_info:
start_date = datetime.fromtimestamp(method_exec["start_time"])
end_date = datetime.fromtimestamp(method_exec["end_time"])
plt.vlines(start_date, 0, self.max_mem, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
plt.vlines(end_date, 0, self.max_mem, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
# 绘制mem文字:
x = datetime.fromtimestamp(method_exec["mem_max_time"])
text = "%s: %d MB" % (method_exec["name"], method_exec["mem_max"])
plt.text(x, method_exec["mem_max"], text, ha="center", va="bottom", fontsize=10)
plt.plot(x, method_exec["mem_max"], 'bo', label="point") # 绘制点
# 绘制子图2
plt.subplot(312)
cpu_ins = plt.plot(self.time_axis, self.cpu_axis, "-", label="CPU(%)", color='red', linestyle='-', marker=',')
plt.legend(cpu_ins, ["CPU(%)"], loc='upper right') # 说明标签的位置
plt.grid() # 加网格
plt.xlabel("Time(s)")
plt.ylabel("CPU(%)")
plt.ylim(0, 120)
for method_exec in self.method_exec_info:
start_date = datetime.fromtimestamp(method_exec["start_time"])
end_date = datetime.fromtimestamp(method_exec["end_time"])
plt.vlines(start_date, 0, 100, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
plt.vlines(end_date, 0, 100, colors="c", linestyles="dashed") # vlines(x, ymin, ymax)
# 绘制mem文字:
x = datetime.fromtimestamp(method_exec["cpu_max_time"])
text = "%s: %d%%" % (method_exec["name"], method_exec["cpu_max"])
plt.text(x, method_exec["cpu_max"], text, ha="center", va="bottom", fontsize=10)
plt.plot(x, method_exec["cpu_max"], 'ro', label="point") # 绘制点
# 绘制子图3
plt.subplot(313) # 绘制一下柱状图(关键点)
# 设置轴向标签
plt.xlabel('methods')
plt.ylabel('keypoints number')
method_list, method_pts_length_list, color_list = [], [], []
for method_exec in self.method_exec_info:
for item in ["kp_sch", "kp_src", "good"]:
method_list.append("%s-%s" % (method_exec["name"], item))
method_pts_length_list.append(method_exec[item])
if method_exec["result"]:
color_list.append(["palegreen", "limegreen", "deepskyblue"][["kp_sch", "kp_src", "good"].index(item)])
else:
color_list.append("tomato")
method_x = np.arange(len(method_list)) + 1
plt.bar(method_x, method_pts_length_list, width=0.35, align='center', color=color_list, alpha=0.8)
plt.xticks(method_x, method_list, size='small', rotation=30)
# 设置数字标签
for x, y in zip(method_x, method_pts_length_list):
plt.text(x, y + 10, "%d" % y, ha="center", va="bottom", fontsize=7)
plt.ylim(0, max(method_pts_length_list) * 1.2)
# 显示图像
plt.show() | [
"def",
"plot_cpu_mem_keypoints",
"(",
"self",
")",
":",
"plt",
".",
"figure",
"(",
"1",
")",
"# 开始绘制子图:",
"plt",
".",
"subplot",
"(",
"311",
")",
"title",
"=",
"self",
".",
"_get_graph_title",
"(",
")",
"plt",
".",
"title",
"(",
"title",
",",
"loc",
"=",
"\"center\"",
")",
"# 设置绘图的标题",
"mem_ins",
"=",
"plt",
".",
"plot",
"(",
"self",
".",
"time_axis",
",",
"self",
".",
"mem_axis",
",",
"\"-\"",
",",
"label",
"=",
"\"Mem(MB)\"",
",",
"color",
"=",
"'deepskyblue'",
",",
"linestyle",
"=",
"'-'",
",",
"marker",
"=",
"','",
")",
"# 设置数字标签",
"plt",
".",
"legend",
"(",
"mem_ins",
",",
"[",
"\"Mem(MB)\"",
"]",
",",
"loc",
"=",
"'upper right'",
")",
"# 说明标签的位置",
"plt",
".",
"grid",
"(",
")",
"# 加网格",
"plt",
".",
"ylabel",
"(",
"\"Mem(MB)\"",
")",
"plt",
".",
"ylim",
"(",
"bottom",
"=",
"0",
")",
"for",
"method_exec",
"in",
"self",
".",
"method_exec_info",
":",
"start_date",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"method_exec",
"[",
"\"start_time\"",
"]",
")",
"end_date",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"method_exec",
"[",
"\"end_time\"",
"]",
")",
"plt",
".",
"vlines",
"(",
"start_date",
",",
"0",
",",
"self",
".",
"max_mem",
",",
"colors",
"=",
"\"c\"",
",",
"linestyles",
"=",
"\"dashed\"",
")",
"# vlines(x, ymin, ymax)",
"plt",
".",
"vlines",
"(",
"end_date",
",",
"0",
",",
"self",
".",
"max_mem",
",",
"colors",
"=",
"\"c\"",
",",
"linestyles",
"=",
"\"dashed\"",
")",
"# vlines(x, ymin, ymax)",
"# 绘制mem文字:",
"x",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"method_exec",
"[",
"\"mem_max_time\"",
"]",
")",
"text",
"=",
"\"%s: %d MB\"",
"%",
"(",
"method_exec",
"[",
"\"name\"",
"]",
",",
"method_exec",
"[",
"\"mem_max\"",
"]",
")",
"plt",
".",
"text",
"(",
"x",
",",
"method_exec",
"[",
"\"mem_max\"",
"]",
",",
"text",
",",
"ha",
"=",
"\"center\"",
",",
"va",
"=",
"\"bottom\"",
",",
"fontsize",
"=",
"10",
")",
"plt",
".",
"plot",
"(",
"x",
",",
"method_exec",
"[",
"\"mem_max\"",
"]",
",",
"'bo'",
",",
"label",
"=",
"\"point\"",
")",
"# 绘制点",
"# 绘制子图2",
"plt",
".",
"subplot",
"(",
"312",
")",
"cpu_ins",
"=",
"plt",
".",
"plot",
"(",
"self",
".",
"time_axis",
",",
"self",
".",
"cpu_axis",
",",
"\"-\"",
",",
"label",
"=",
"\"CPU(%)\"",
",",
"color",
"=",
"'red'",
",",
"linestyle",
"=",
"'-'",
",",
"marker",
"=",
"','",
")",
"plt",
".",
"legend",
"(",
"cpu_ins",
",",
"[",
"\"CPU(%)\"",
"]",
",",
"loc",
"=",
"'upper right'",
")",
"# 说明标签的位置",
"plt",
".",
"grid",
"(",
")",
"# 加网格",
"plt",
".",
"xlabel",
"(",
"\"Time(s)\"",
")",
"plt",
".",
"ylabel",
"(",
"\"CPU(%)\"",
")",
"plt",
".",
"ylim",
"(",
"0",
",",
"120",
")",
"for",
"method_exec",
"in",
"self",
".",
"method_exec_info",
":",
"start_date",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"method_exec",
"[",
"\"start_time\"",
"]",
")",
"end_date",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"method_exec",
"[",
"\"end_time\"",
"]",
")",
"plt",
".",
"vlines",
"(",
"start_date",
",",
"0",
",",
"100",
",",
"colors",
"=",
"\"c\"",
",",
"linestyles",
"=",
"\"dashed\"",
")",
"# vlines(x, ymin, ymax)",
"plt",
".",
"vlines",
"(",
"end_date",
",",
"0",
",",
"100",
",",
"colors",
"=",
"\"c\"",
",",
"linestyles",
"=",
"\"dashed\"",
")",
"# vlines(x, ymin, ymax)",
"# 绘制mem文字:",
"x",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"method_exec",
"[",
"\"cpu_max_time\"",
"]",
")",
"text",
"=",
"\"%s: %d%%\"",
"%",
"(",
"method_exec",
"[",
"\"name\"",
"]",
",",
"method_exec",
"[",
"\"cpu_max\"",
"]",
")",
"plt",
".",
"text",
"(",
"x",
",",
"method_exec",
"[",
"\"cpu_max\"",
"]",
",",
"text",
",",
"ha",
"=",
"\"center\"",
",",
"va",
"=",
"\"bottom\"",
",",
"fontsize",
"=",
"10",
")",
"plt",
".",
"plot",
"(",
"x",
",",
"method_exec",
"[",
"\"cpu_max\"",
"]",
",",
"'ro'",
",",
"label",
"=",
"\"point\"",
")",
"# 绘制点",
"# 绘制子图3",
"plt",
".",
"subplot",
"(",
"313",
")",
"# 绘制一下柱状图(关键点)",
"# 设置轴向标签",
"plt",
".",
"xlabel",
"(",
"'methods'",
")",
"plt",
".",
"ylabel",
"(",
"'keypoints number'",
")",
"method_list",
",",
"method_pts_length_list",
",",
"color_list",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"for",
"method_exec",
"in",
"self",
".",
"method_exec_info",
":",
"for",
"item",
"in",
"[",
"\"kp_sch\"",
",",
"\"kp_src\"",
",",
"\"good\"",
"]",
":",
"method_list",
".",
"append",
"(",
"\"%s-%s\"",
"%",
"(",
"method_exec",
"[",
"\"name\"",
"]",
",",
"item",
")",
")",
"method_pts_length_list",
".",
"append",
"(",
"method_exec",
"[",
"item",
"]",
")",
"if",
"method_exec",
"[",
"\"result\"",
"]",
":",
"color_list",
".",
"append",
"(",
"[",
"\"palegreen\"",
",",
"\"limegreen\"",
",",
"\"deepskyblue\"",
"]",
"[",
"[",
"\"kp_sch\"",
",",
"\"kp_src\"",
",",
"\"good\"",
"]",
".",
"index",
"(",
"item",
")",
"]",
")",
"else",
":",
"color_list",
".",
"append",
"(",
"\"tomato\"",
")",
"method_x",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"method_list",
")",
")",
"+",
"1",
"plt",
".",
"bar",
"(",
"method_x",
",",
"method_pts_length_list",
",",
"width",
"=",
"0.35",
",",
"align",
"=",
"'center'",
",",
"color",
"=",
"color_list",
",",
"alpha",
"=",
"0.8",
")",
"plt",
".",
"xticks",
"(",
"method_x",
",",
"method_list",
",",
"size",
"=",
"'small'",
",",
"rotation",
"=",
"30",
")",
"# 设置数字标签",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"method_x",
",",
"method_pts_length_list",
")",
":",
"plt",
".",
"text",
"(",
"x",
",",
"y",
"+",
"10",
",",
"\"%d\"",
"%",
"y",
",",
"ha",
"=",
"\"center\"",
",",
"va",
"=",
"\"bottom\"",
",",
"fontsize",
"=",
"7",
")",
"plt",
".",
"ylim",
"(",
"0",
",",
"max",
"(",
"method_pts_length_list",
")",
"*",
"1.2",
")",
"# 显示图像",
"plt",
".",
"show",
"(",
")"
] | 绘制CPU/Mem/特征点数量. | [
"绘制CPU",
"/",
"Mem",
"/",
"特征点数量",
"."
] | 21583da2698a601cd632228228fc16d41f60a517 | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/benchmark/plot.py#L106-L172 | train |
AirtestProject/Airtest | benchmark/profile_recorder.py | CheckKeypointResult.refresh_method_objects | def refresh_method_objects(self):
"""初始化方法对象."""
self.method_object_dict = {}
for key, method in self.MATCHING_METHODS.items():
method_object = method(self.im_search, self.im_source, self.threshold, self.rgb)
self.method_object_dict.update({key: method_object}) | python | def refresh_method_objects(self):
"""初始化方法对象."""
self.method_object_dict = {}
for key, method in self.MATCHING_METHODS.items():
method_object = method(self.im_search, self.im_source, self.threshold, self.rgb)
self.method_object_dict.update({key: method_object}) | [
"def",
"refresh_method_objects",
"(",
"self",
")",
":",
"self",
".",
"method_object_dict",
"=",
"{",
"}",
"for",
"key",
",",
"method",
"in",
"self",
".",
"MATCHING_METHODS",
".",
"items",
"(",
")",
":",
"method_object",
"=",
"method",
"(",
"self",
".",
"im_search",
",",
"self",
".",
"im_source",
",",
"self",
".",
"threshold",
",",
"self",
".",
"rgb",
")",
"self",
".",
"method_object_dict",
".",
"update",
"(",
"{",
"key",
":",
"method_object",
"}",
")"
] | 初始化方法对象. | [
"初始化方法对象",
"."
] | 21583da2698a601cd632228228fc16d41f60a517 | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/benchmark/profile_recorder.py#L44-L49 | train |
AirtestProject/Airtest | benchmark/profile_recorder.py | CheckKeypointResult._get_result | def _get_result(self, method_name="kaze"):
"""获取特征点."""
method_object = self.method_object_dict.get(method_name)
# 提取结果和特征点:
try:
result = method_object.find_best_result()
except Exception:
import traceback
traceback.print_exc()
return [], [], [], None
return method_object.kp_sch, method_object.kp_src, method_object.good, result | python | def _get_result(self, method_name="kaze"):
"""获取特征点."""
method_object = self.method_object_dict.get(method_name)
# 提取结果和特征点:
try:
result = method_object.find_best_result()
except Exception:
import traceback
traceback.print_exc()
return [], [], [], None
return method_object.kp_sch, method_object.kp_src, method_object.good, result | [
"def",
"_get_result",
"(",
"self",
",",
"method_name",
"=",
"\"kaze\"",
")",
":",
"method_object",
"=",
"self",
".",
"method_object_dict",
".",
"get",
"(",
"method_name",
")",
"# 提取结果和特征点:",
"try",
":",
"result",
"=",
"method_object",
".",
"find_best_result",
"(",
")",
"except",
"Exception",
":",
"import",
"traceback",
"traceback",
".",
"print_exc",
"(",
")",
"return",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
",",
"None",
"return",
"method_object",
".",
"kp_sch",
",",
"method_object",
".",
"kp_src",
",",
"method_object",
".",
"good",
",",
"result"
] | 获取特征点. | [
"获取特征点",
"."
] | 21583da2698a601cd632228228fc16d41f60a517 | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/benchmark/profile_recorder.py#L51-L62 | train |
AirtestProject/Airtest | benchmark/profile_recorder.py | CheckKeypointResult.get_and_plot_keypoints | def get_and_plot_keypoints(self, method_name, plot=False):
"""获取并且绘制出特征点匹配结果."""
if method_name not in self.method_object_dict.keys():
print("'%s' is not in MATCHING_METHODS" % method_name)
return None
kp_sch, kp_src, good, result = self._get_result(method_name)
if not plot or result is None:
return kp_sch, kp_src, good, result
else:
im_search, im_source = deepcopy(self.im_search), deepcopy(self.im_source)
# 绘制特征点识别情况、基于特征的图像匹配结果:
h_sch, w_sch = im_search.shape[:2]
h_src, w_src = im_source.shape[:2]
# init the plot image:
plot_img = np.zeros([max(h_sch, h_src), w_sch + w_src, 3], np.uint8)
plot_img[:h_sch, :w_sch, :] = im_search
plot_img[:h_src, w_sch:, :] = im_source
# plot good matche points:
for m in good:
color = tuple([int(random() * 255) for _ in range(3)]) # 随机颜色画线
cv2.line(plot_img, (int(kp_sch[m.queryIdx].pt[0]), int(kp_sch[m.queryIdx].pt[1])), (int(kp_src[m.trainIdx].pt[0] + w_sch), int(kp_src[m.trainIdx].pt[1])), color)
# plot search_image
for kp in kp_sch:
color = tuple([int(random() * 255) for _ in range(3)]) # 随机颜色画点
pos = (int(kp.pt[0]), int(kp.pt[1]))
mark_point(im_search, pos, circle=False, color=color, radius=5)
# plot source_image
for kp in kp_src:
color = tuple([int(random() * 255) for _ in range(3)]) # 随机颜色画点
pos = (int(kp.pt[0]), int(kp.pt[1]))
mark_point(im_source, pos, circle=False, color=color, radius=10)
from airtest.aircv import show
show(plot_img)
show(im_search)
show(im_source) | python | def get_and_plot_keypoints(self, method_name, plot=False):
"""获取并且绘制出特征点匹配结果."""
if method_name not in self.method_object_dict.keys():
print("'%s' is not in MATCHING_METHODS" % method_name)
return None
kp_sch, kp_src, good, result = self._get_result(method_name)
if not plot or result is None:
return kp_sch, kp_src, good, result
else:
im_search, im_source = deepcopy(self.im_search), deepcopy(self.im_source)
# 绘制特征点识别情况、基于特征的图像匹配结果:
h_sch, w_sch = im_search.shape[:2]
h_src, w_src = im_source.shape[:2]
# init the plot image:
plot_img = np.zeros([max(h_sch, h_src), w_sch + w_src, 3], np.uint8)
plot_img[:h_sch, :w_sch, :] = im_search
plot_img[:h_src, w_sch:, :] = im_source
# plot good matche points:
for m in good:
color = tuple([int(random() * 255) for _ in range(3)]) # 随机颜色画线
cv2.line(plot_img, (int(kp_sch[m.queryIdx].pt[0]), int(kp_sch[m.queryIdx].pt[1])), (int(kp_src[m.trainIdx].pt[0] + w_sch), int(kp_src[m.trainIdx].pt[1])), color)
# plot search_image
for kp in kp_sch:
color = tuple([int(random() * 255) for _ in range(3)]) # 随机颜色画点
pos = (int(kp.pt[0]), int(kp.pt[1]))
mark_point(im_search, pos, circle=False, color=color, radius=5)
# plot source_image
for kp in kp_src:
color = tuple([int(random() * 255) for _ in range(3)]) # 随机颜色画点
pos = (int(kp.pt[0]), int(kp.pt[1]))
mark_point(im_source, pos, circle=False, color=color, radius=10)
from airtest.aircv import show
show(plot_img)
show(im_search)
show(im_source) | [
"def",
"get_and_plot_keypoints",
"(",
"self",
",",
"method_name",
",",
"plot",
"=",
"False",
")",
":",
"if",
"method_name",
"not",
"in",
"self",
".",
"method_object_dict",
".",
"keys",
"(",
")",
":",
"print",
"(",
"\"'%s' is not in MATCHING_METHODS\"",
"%",
"method_name",
")",
"return",
"None",
"kp_sch",
",",
"kp_src",
",",
"good",
",",
"result",
"=",
"self",
".",
"_get_result",
"(",
"method_name",
")",
"if",
"not",
"plot",
"or",
"result",
"is",
"None",
":",
"return",
"kp_sch",
",",
"kp_src",
",",
"good",
",",
"result",
"else",
":",
"im_search",
",",
"im_source",
"=",
"deepcopy",
"(",
"self",
".",
"im_search",
")",
",",
"deepcopy",
"(",
"self",
".",
"im_source",
")",
"# 绘制特征点识别情况、基于特征的图像匹配结果:",
"h_sch",
",",
"w_sch",
"=",
"im_search",
".",
"shape",
"[",
":",
"2",
"]",
"h_src",
",",
"w_src",
"=",
"im_source",
".",
"shape",
"[",
":",
"2",
"]",
"# init the plot image:",
"plot_img",
"=",
"np",
".",
"zeros",
"(",
"[",
"max",
"(",
"h_sch",
",",
"h_src",
")",
",",
"w_sch",
"+",
"w_src",
",",
"3",
"]",
",",
"np",
".",
"uint8",
")",
"plot_img",
"[",
":",
"h_sch",
",",
":",
"w_sch",
",",
":",
"]",
"=",
"im_search",
"plot_img",
"[",
":",
"h_src",
",",
"w_sch",
":",
",",
":",
"]",
"=",
"im_source",
"# plot good matche points:",
"for",
"m",
"in",
"good",
":",
"color",
"=",
"tuple",
"(",
"[",
"int",
"(",
"random",
"(",
")",
"*",
"255",
")",
"for",
"_",
"in",
"range",
"(",
"3",
")",
"]",
")",
"# 随机颜色画线",
"cv2",
".",
"line",
"(",
"plot_img",
",",
"(",
"int",
"(",
"kp_sch",
"[",
"m",
".",
"queryIdx",
"]",
".",
"pt",
"[",
"0",
"]",
")",
",",
"int",
"(",
"kp_sch",
"[",
"m",
".",
"queryIdx",
"]",
".",
"pt",
"[",
"1",
"]",
")",
")",
",",
"(",
"int",
"(",
"kp_src",
"[",
"m",
".",
"trainIdx",
"]",
".",
"pt",
"[",
"0",
"]",
"+",
"w_sch",
")",
",",
"int",
"(",
"kp_src",
"[",
"m",
".",
"trainIdx",
"]",
".",
"pt",
"[",
"1",
"]",
")",
")",
",",
"color",
")",
"# plot search_image",
"for",
"kp",
"in",
"kp_sch",
":",
"color",
"=",
"tuple",
"(",
"[",
"int",
"(",
"random",
"(",
")",
"*",
"255",
")",
"for",
"_",
"in",
"range",
"(",
"3",
")",
"]",
")",
"# 随机颜色画点",
"pos",
"=",
"(",
"int",
"(",
"kp",
".",
"pt",
"[",
"0",
"]",
")",
",",
"int",
"(",
"kp",
".",
"pt",
"[",
"1",
"]",
")",
")",
"mark_point",
"(",
"im_search",
",",
"pos",
",",
"circle",
"=",
"False",
",",
"color",
"=",
"color",
",",
"radius",
"=",
"5",
")",
"# plot source_image",
"for",
"kp",
"in",
"kp_src",
":",
"color",
"=",
"tuple",
"(",
"[",
"int",
"(",
"random",
"(",
")",
"*",
"255",
")",
"for",
"_",
"in",
"range",
"(",
"3",
")",
"]",
")",
"# 随机颜色画点",
"pos",
"=",
"(",
"int",
"(",
"kp",
".",
"pt",
"[",
"0",
"]",
")",
",",
"int",
"(",
"kp",
".",
"pt",
"[",
"1",
"]",
")",
")",
"mark_point",
"(",
"im_source",
",",
"pos",
",",
"circle",
"=",
"False",
",",
"color",
"=",
"color",
",",
"radius",
"=",
"10",
")",
"from",
"airtest",
".",
"aircv",
"import",
"show",
"show",
"(",
"plot_img",
")",
"show",
"(",
"im_search",
")",
"show",
"(",
"im_source",
")"
] | 获取并且绘制出特征点匹配结果. | [
"获取并且绘制出特征点匹配结果",
"."
] | 21583da2698a601cd632228228fc16d41f60a517 | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/benchmark/profile_recorder.py#L64-L100 | train |
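The entry above composes the side-by-side match image by hand (an `np.zeros` canvas plus one `cv2.line` per match). For reference, OpenCV ships a helper that performs the same composition in one call. A minimal sketch, assuming `kp_sch`/`kp_src` are `cv2.KeyPoint` lists and `good` is a list of `cv2.DMatch` as returned by the matcher:

```python
import cv2

def draw_matches_quick(im_search, kp_sch, im_source, kp_src, good):
    # Stacks both images side by side and draws each match line,
    # replacing the manual canvas + cv2.line loop above.
    return cv2.drawMatches(im_search, kp_sch,   # query image and its keypoints
                           im_source, kp_src,   # train image and its keypoints
                           good, None)          # matches; OpenCV allocates the output
```

The manual version is still useful when you want per-keypoint markers drawn inside the original images themselves, which `drawMatches` does not provide.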
AirtestProject/Airtest | benchmark/profile_recorder.py | RecordThread.run | def run(self):
"""开始线程."""
while not self.stop_flag:
timestamp = time.time()
cpu_percent = self.process.cpu_percent() / self.cpu_num
# mem_percent = mem = self.process.memory_percent()
mem_info = dict(self.process.memory_info()._asdict())
mem_gb_num = mem_info.get('rss', 0) / 1024 / 1024
# 记录类变量
self.profile_data.append({"mem_gb_num": mem_gb_num, "cpu_percent": cpu_percent, "timestamp": timestamp})
# 记录cpu和mem_gb_num
time.sleep(self.interval) | python | def run(self):
"""开始线程."""
while not self.stop_flag:
timestamp = time.time()
cpu_percent = self.process.cpu_percent() / self.cpu_num
# mem_percent = mem = self.process.memory_percent()
mem_info = dict(self.process.memory_info()._asdict())
mem_gb_num = mem_info.get('rss', 0) / 1024 / 1024
# 记录类变量
self.profile_data.append({"mem_gb_num": mem_gb_num, "cpu_percent": cpu_percent, "timestamp": timestamp})
# 记录cpu和mem_gb_num
time.sleep(self.interval) | [
"def",
"run",
"(",
"self",
")",
":",
"while",
"not",
"self",
".",
"stop_flag",
":",
"timestamp",
"=",
"time",
".",
"time",
"(",
")",
"cpu_percent",
"=",
"self",
".",
"process",
".",
"cpu_percent",
"(",
")",
"/",
"self",
".",
"cpu_num",
"# mem_percent = mem = self.process.memory_percent()",
"mem_info",
"=",
"dict",
"(",
"self",
".",
"process",
".",
"memory_info",
"(",
")",
".",
"_asdict",
"(",
")",
")",
"mem_gb_num",
"=",
"mem_info",
".",
"get",
"(",
"'rss'",
",",
"0",
")",
"/",
"1024",
"/",
"1024",
"# 记录类变量",
"self",
".",
"profile_data",
".",
"append",
"(",
"{",
"\"mem_gb_num\"",
":",
"mem_gb_num",
",",
"\"cpu_percent\"",
":",
"cpu_percent",
",",
"\"timestamp\"",
":",
"timestamp",
"}",
")",
"# 记录cpu和mem_gb_num",
"time",
".",
"sleep",
"(",
"self",
".",
"interval",
")"
] | 开始线程. | [
"开始线程",
"."
] | 21583da2698a601cd632228228fc16d41f60a517 | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/benchmark/profile_recorder.py#L121-L132 | train |
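Note that `RecordThread.run` divides per-process CPU by the core count to normalize to a 0-100 scale, and that `mem_gb_num` actually holds megabytes (`rss / 1024 / 1024`). A self-contained sketch of the same sampling loop, assuming `psutil` is installed:

```python
import threading
import time

import psutil

class Sampler(threading.Thread):
    """Poll the current process's CPU% and RSS at a fixed interval."""

    def __init__(self, interval=0.1):
        super(Sampler, self).__init__()
        self.interval = interval
        self.samples = []
        self.stop_flag = False
        self.process = psutil.Process()        # current process by default
        self.cpu_num = psutil.cpu_count()

    def run(self):
        self.process.cpu_percent()             # first call always returns 0.0; prime it
        while not self.stop_flag:
            self.samples.append({
                "timestamp": time.time(),
                # normalize so 100% means all cores busy
                "cpu_percent": self.process.cpu_percent() / self.cpu_num,
                # rss is reported in bytes; convert to MB
                "mem_mb": self.process.memory_info().rss / 1024.0 / 1024.0,
            })
            time.sleep(self.interval)
```

Stop it by setting `stop_flag = True` and calling `join()`, mirroring how `profile_methods` below toggles the flag.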
AirtestProject/Airtest | benchmark/profile_recorder.py | ProfileRecorder.load_images | def load_images(self, search_file, source_file):
"""加载待匹配图片."""
self.search_file, self.source_file = search_file, source_file
self.im_search, self.im_source = imread(self.search_file), imread(self.source_file)
# 初始化对象
self.check_macthing_object = CheckKeypointResult(self.im_search, self.im_source) | python | def load_images(self, search_file, source_file):
"""加载待匹配图片."""
self.search_file, self.source_file = search_file, source_file
self.im_search, self.im_source = imread(self.search_file), imread(self.source_file)
# 初始化对象
self.check_macthing_object = CheckKeypointResult(self.im_search, self.im_source) | [
"def",
"load_images",
"(",
"self",
",",
"search_file",
",",
"source_file",
")",
":",
"self",
".",
"search_file",
",",
"self",
".",
"source_file",
"=",
"search_file",
",",
"source_file",
"self",
".",
"im_search",
",",
"self",
".",
"im_source",
"=",
"imread",
"(",
"self",
".",
"search_file",
")",
",",
"imread",
"(",
"self",
".",
"source_file",
")",
"# 初始化对象",
"self",
".",
"check_macthing_object",
"=",
"CheckKeypointResult",
"(",
"self",
".",
"im_search",
",",
"self",
".",
"im_source",
")"
] | 加载待匹配图片. | [
"加载待匹配图片",
"."
] | 21583da2698a601cd632228228fc16d41f60a517 | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/benchmark/profile_recorder.py#L145-L150 | train |
AirtestProject/Airtest | benchmark/profile_recorder.py | ProfileRecorder.profile_methods | def profile_methods(self, method_list):
"""帮助函数执行时记录数据."""
self.method_exec_info = []
# 开始数据记录进程
self.record_thread.stop_flag = False
self.record_thread.start()
for name in method_list:
if name not in self.check_macthing_object.MATCHING_METHODS.keys():
continue
time.sleep(3) # 留出绘图空白区
start_time = time.time() # 记录开始时间
print("--->>> start '%s' matching:\n" % name)
kp_sch, kp_src, good, result = self.check_macthing_object.get_and_plot_keypoints(name) # 根据方法名绘制对应的识别结果
print("\n\n\n")
end_time = time.time() # 记录结束时间
time.sleep(3) # 留出绘图空白区
# 记录本次匹配的相关数据
ret_info = {
"name": name,
"start_time": start_time,
"end_time": end_time,
"result": result,
"kp_sch": len(kp_sch),
"kp_src": len(kp_src),
"good": len(good)}
self.method_exec_info.append(ret_info)
self.record_thread.stop_flag = True | python | def profile_methods(self, method_list):
"""帮助函数执行时记录数据."""
self.method_exec_info = []
# 开始数据记录进程
self.record_thread.stop_flag = False
self.record_thread.start()
for name in method_list:
if name not in self.check_macthing_object.MATCHING_METHODS.keys():
continue
time.sleep(3) # 留出绘图空白区
start_time = time.time() # 记录开始时间
print("--->>> start '%s' matching:\n" % name)
kp_sch, kp_src, good, result = self.check_macthing_object.get_and_plot_keypoints(name) # 根据方法名绘制对应的识别结果
print("\n\n\n")
end_time = time.time() # 记录结束时间
time.sleep(3) # 留出绘图空白区
# 记录本次匹配的相关数据
ret_info = {
"name": name,
"start_time": start_time,
"end_time": end_time,
"result": result,
"kp_sch": len(kp_sch),
"kp_src": len(kp_src),
"good": len(good)}
self.method_exec_info.append(ret_info)
self.record_thread.stop_flag = True | [
"def",
"profile_methods",
"(",
"self",
",",
"method_list",
")",
":",
"self",
".",
"method_exec_info",
"=",
"[",
"]",
"# 开始数据记录进程",
"self",
".",
"record_thread",
".",
"stop_flag",
"=",
"False",
"self",
".",
"record_thread",
".",
"start",
"(",
")",
"for",
"name",
"in",
"method_list",
":",
"if",
"name",
"not",
"in",
"self",
".",
"check_macthing_object",
".",
"MATCHING_METHODS",
".",
"keys",
"(",
")",
":",
"continue",
"time",
".",
"sleep",
"(",
"3",
")",
"# 留出绘图空白区",
"start_time",
"=",
"time",
".",
"time",
"(",
")",
"# 记录开始时间",
"print",
"(",
"\"--->>> start '%s' matching:\\n\"",
"%",
"name",
")",
"kp_sch",
",",
"kp_src",
",",
"good",
",",
"result",
"=",
"self",
".",
"check_macthing_object",
".",
"get_and_plot_keypoints",
"(",
"name",
")",
"# 根据方法名绘制对应的识别结果",
"print",
"(",
"\"\\n\\n\\n\"",
")",
"end_time",
"=",
"time",
".",
"time",
"(",
")",
"# 记录结束时间",
"time",
".",
"sleep",
"(",
"3",
")",
"# 留出绘图空白区",
"# 记录本次匹配的相关数据",
"ret_info",
"=",
"{",
"\"name\"",
":",
"name",
",",
"\"start_time\"",
":",
"start_time",
",",
"\"end_time\"",
":",
"end_time",
",",
"\"result\"",
":",
"result",
",",
"\"kp_sch\"",
":",
"len",
"(",
"kp_sch",
")",
",",
"\"kp_src\"",
":",
"len",
"(",
"kp_src",
")",
",",
"\"good\"",
":",
"len",
"(",
"good",
")",
"}",
"self",
".",
"method_exec_info",
".",
"append",
"(",
"ret_info",
")",
"self",
".",
"record_thread",
".",
"stop_flag",
"=",
"True"
] | 帮助函数执行时记录数据. | [
"帮助函数执行时记录数据",
"."
] | 21583da2698a601cd632228228fc16d41f60a517 | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/benchmark/profile_recorder.py#L152-L180 | train |
AirtestProject/Airtest | benchmark/profile_recorder.py | ProfileRecorder.wite_to_json | def wite_to_json(self, dir_path="", file_name=""):
"""将性能数据写入文件."""
# 提取数据
data = {
"plot_data": self.record_thread.profile_data,
"method_exec_info": self.method_exec_info,
"search_file": self.search_file,
"source_file": self.source_file}
# 写入文件
file_path = os.path.join(dir_path, file_name)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
json.dump(data, open(file_path, "w+"), indent=4) | python | def wite_to_json(self, dir_path="", file_name=""):
"""将性能数据写入文件."""
# 提取数据
data = {
"plot_data": self.record_thread.profile_data,
"method_exec_info": self.method_exec_info,
"search_file": self.search_file,
"source_file": self.source_file}
# 写入文件
file_path = os.path.join(dir_path, file_name)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
json.dump(data, open(file_path, "w+"), indent=4) | [
"def",
"wite_to_json",
"(",
"self",
",",
"dir_path",
"=",
"\"\"",
",",
"file_name",
"=",
"\"\"",
")",
":",
"# 提取数据",
"data",
"=",
"{",
"\"plot_data\"",
":",
"self",
".",
"record_thread",
".",
"profile_data",
",",
"\"method_exec_info\"",
":",
"self",
".",
"method_exec_info",
",",
"\"search_file\"",
":",
"self",
".",
"search_file",
",",
"\"source_file\"",
":",
"self",
".",
"source_file",
"}",
"# 写入文件",
"file_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"dir_path",
",",
"file_name",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dir_path",
")",
":",
"os",
".",
"makedirs",
"(",
"dir_path",
")",
"json",
".",
"dump",
"(",
"data",
",",
"open",
"(",
"file_path",
",",
"\"w+\"",
")",
",",
"indent",
"=",
"4",
")"
] | 将性能数据写入文件. | [
"将性能数据写入文件",
"."
] | 21583da2698a601cd632228228fc16d41f60a517 | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/benchmark/profile_recorder.py#L182-L194 | train |
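One caveat in `wite_to_json` (the typo in the name is the function's actual identifier): `json.dump(data, open(file_path, "w+"), indent=4)` never closes the file handle explicitly. A sketch of the idiomatic form with a context manager:

```python
import json
import os

def write_json(data, dir_path, file_name):
    # Create the directory tree first, then write through an auto-closed handle.
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    with open(os.path.join(dir_path, file_name), "w") as f:
        json.dump(data, f, indent=4)
```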
AirtestProject/Airtest | playground/poco.py | PocoReport.translate_poco_step | def translate_poco_step(self, step):
"""
处理poco的相关操作,参数与airtest的不同,由一个截图和一个操作构成,需要合成一个步骤
Parameters
----------
step 一个完整的操作,如click
prev_step 前一个步骤,应该是截图
Returns
-------
"""
ret = {}
prev_step = self._steps[-1]
if prev_step:
ret.update(prev_step)
ret['type'] = step[1].get("name", "")
if step.get('trace'):
ret['trace'] = step['trace']
ret['traceback'] = step.get('traceback')
if ret['type'] == 'touch':
# 取出点击位置
if step[1]['args'] and len(step[1]['args'][0]) == 2:
pos = step[1]['args'][0]
ret['target_pos'] = [int(pos[0]), int(pos[1])]
ret['top'] = ret['target_pos'][1]
ret['left'] = ret['target_pos'][0]
elif ret['type'] == 'swipe':
if step[1]['args'] and len(step[1]['args'][0]) == 2:
pos = step[1]['args'][0]
ret['target_pos'] = [int(pos[0]), int(pos[1])]
ret['top'] = ret['target_pos'][1]
ret['left'] = ret['target_pos'][0]
# swipe 需要显示一个方向
vector = step[1]["kwargs"].get("vector")
if vector:
ret['swipe'] = self.dis_vector(vector)
ret['vector'] = vector
ret['desc'] = self.func_desc_poco(ret)
ret['title'] = self._translate_title(ret)
return ret | python | def translate_poco_step(self, step):
"""
处理poco的相关操作,参数与airtest的不同,由一个截图和一个操作构成,需要合成一个步骤
Parameters
----------
step 一个完整的操作,如click
prev_step 前一个步骤,应该是截图
Returns
-------
"""
ret = {}
prev_step = self._steps[-1]
if prev_step:
ret.update(prev_step)
ret['type'] = step[1].get("name", "")
if step.get('trace'):
ret['trace'] = step['trace']
ret['traceback'] = step.get('traceback')
if ret['type'] == 'touch':
# 取出点击位置
if step[1]['args'] and len(step[1]['args'][0]) == 2:
pos = step[1]['args'][0]
ret['target_pos'] = [int(pos[0]), int(pos[1])]
ret['top'] = ret['target_pos'][1]
ret['left'] = ret['target_pos'][0]
elif ret['type'] == 'swipe':
if step[1]['args'] and len(step[1]['args'][0]) == 2:
pos = step[1]['args'][0]
ret['target_pos'] = [int(pos[0]), int(pos[1])]
ret['top'] = ret['target_pos'][1]
ret['left'] = ret['target_pos'][0]
# swipe 需要显示一个方向
vector = step[1]["kwargs"].get("vector")
if vector:
ret['swipe'] = self.dis_vector(vector)
ret['vector'] = vector
ret['desc'] = self.func_desc_poco(ret)
ret['title'] = self._translate_title(ret)
return ret | [
"def",
"translate_poco_step",
"(",
"self",
",",
"step",
")",
":",
"ret",
"=",
"{",
"}",
"prev_step",
"=",
"self",
".",
"_steps",
"[",
"-",
"1",
"]",
"if",
"prev_step",
":",
"ret",
".",
"update",
"(",
"prev_step",
")",
"ret",
"[",
"'type'",
"]",
"=",
"step",
"[",
"1",
"]",
".",
"get",
"(",
"\"name\"",
",",
"\"\"",
")",
"if",
"step",
".",
"get",
"(",
"'trace'",
")",
":",
"ret",
"[",
"'trace'",
"]",
"=",
"step",
"[",
"'trace'",
"]",
"ret",
"[",
"'traceback'",
"]",
"=",
"step",
".",
"get",
"(",
"'traceback'",
")",
"if",
"ret",
"[",
"'type'",
"]",
"==",
"'touch'",
":",
"# 取出点击位置",
"if",
"step",
"[",
"1",
"]",
"[",
"'args'",
"]",
"and",
"len",
"(",
"step",
"[",
"1",
"]",
"[",
"'args'",
"]",
"[",
"0",
"]",
")",
"==",
"2",
":",
"pos",
"=",
"step",
"[",
"1",
"]",
"[",
"'args'",
"]",
"[",
"0",
"]",
"ret",
"[",
"'target_pos'",
"]",
"=",
"[",
"int",
"(",
"pos",
"[",
"0",
"]",
")",
",",
"int",
"(",
"pos",
"[",
"1",
"]",
")",
"]",
"ret",
"[",
"'top'",
"]",
"=",
"ret",
"[",
"'target_pos'",
"]",
"[",
"1",
"]",
"ret",
"[",
"'left'",
"]",
"=",
"ret",
"[",
"'target_pos'",
"]",
"[",
"0",
"]",
"elif",
"ret",
"[",
"'type'",
"]",
"==",
"'swipe'",
":",
"if",
"step",
"[",
"1",
"]",
"[",
"'args'",
"]",
"and",
"len",
"(",
"step",
"[",
"1",
"]",
"[",
"'args'",
"]",
"[",
"0",
"]",
")",
"==",
"2",
":",
"pos",
"=",
"step",
"[",
"1",
"]",
"[",
"'args'",
"]",
"[",
"0",
"]",
"ret",
"[",
"'target_pos'",
"]",
"=",
"[",
"int",
"(",
"pos",
"[",
"0",
"]",
")",
",",
"int",
"(",
"pos",
"[",
"1",
"]",
")",
"]",
"ret",
"[",
"'top'",
"]",
"=",
"ret",
"[",
"'target_pos'",
"]",
"[",
"1",
"]",
"ret",
"[",
"'left'",
"]",
"=",
"ret",
"[",
"'target_pos'",
"]",
"[",
"0",
"]",
"# swipe 需要显示一个方向",
"vector",
"=",
"step",
"[",
"1",
"]",
"[",
"\"kwargs\"",
"]",
".",
"get",
"(",
"\"vector\"",
")",
"if",
"vector",
":",
"ret",
"[",
"'swipe'",
"]",
"=",
"self",
".",
"dis_vector",
"(",
"vector",
")",
"ret",
"[",
"'vector'",
"]",
"=",
"vector",
"ret",
"[",
"'desc'",
"]",
"=",
"self",
".",
"func_desc_poco",
"(",
"ret",
")",
"ret",
"[",
"'title'",
"]",
"=",
"self",
".",
"_translate_title",
"(",
"ret",
")",
"return",
"ret"
] | 处理poco的相关操作,参数与airtest的不同,由一个截图和一个操作构成,需要合成一个步骤
Parameters
----------
step 一个完整的操作,如click
prev_step 前一个步骤,应该是截图
Returns
------- | [
"处理poco的相关操作,参数与airtest的不同,由一个截图和一个操作构成,需要合成一个步骤",
"Parameters",
"----------",
"step",
"一个完整的操作,如click",
"prev_step",
"前一个步骤,应该是截图"
] | 21583da2698a601cd632228228fc16d41f60a517 | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/playground/poco.py#L12-L53 | train |
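`translate_poco_step` calls `self.dis_vector(vector)` to turn a swipe vector into a display direction, but that helper is not part of this entry. A plausible implementation, purely an assumption for illustration:

```python
def dis_vector(vector):
    """Map a (dx, dy) swipe vector to a coarse screen direction.

    Hypothetical helper -- the real dis_vector is not shown in this entry.
    """
    dx, dy = vector
    if abs(dx) >= abs(dy):
        return "right" if dx > 0 else "left"
    # Screen y grows downward, so a positive dy means a downward swipe.
    return "down" if dy > 0 else "up"
```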
AirtestProject/Airtest | playground/poco.py | PocoReport.func_desc_poco | def func_desc_poco(self, step):
""" 把对应的poco操作显示成中文"""
desc = {
"touch": u"点击UI组件 {name}".format(name=step.get("text", "")),
}
if step['type'] in desc:
return desc.get(step['type'])
else:
return self._translate_desc(step) | python | def func_desc_poco(self, step):
""" 把对应的poco操作显示成中文"""
desc = {
"touch": u"点击UI组件 {name}".format(name=step.get("text", "")),
}
if step['type'] in desc:
return desc.get(step['type'])
else:
return self._translate_desc(step) | [
"def",
"func_desc_poco",
"(",
"self",
",",
"step",
")",
":",
"desc",
"=",
"{",
"\"touch\"",
":",
"u\"点击UI组件 {name}\".format(",
"n",
"ame=st",
"e",
"p.ge",
"t",
"(\"te",
"x",
"t\",",
" ",
"\"\")),",
"",
"",
"",
"",
"",
"}",
"if",
"step",
"[",
"'type'",
"]",
"in",
"desc",
":",
"return",
"desc",
".",
"get",
"(",
"step",
"[",
"'type'",
"]",
")",
"else",
":",
"return",
"self",
".",
"_translate_desc",
"(",
"step",
")"
] | 把对应的poco操作显示成中文 | [
"把对应的poco操作显示成中文"
] | 21583da2698a601cd632228228fc16d41f60a517 | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/playground/poco.py#L55-L63 | train |
AirtestProject/Airtest | benchmark/benchmark.py | profile_different_methods | def profile_different_methods(search_file, screen_file, method_list, dir_path, file_name):
"""对指定的图片进行性能测试."""
profiler = ProfileRecorder(0.05)
# 加载图片
profiler.load_images(search_file, screen_file)
# 传入待测试的方法列表
profiler.profile_methods(method_list)
# 将性能数据写入文件
profiler.wite_to_json(dir_path, file_name) | python | def profile_different_methods(search_file, screen_file, method_list, dir_path, file_name):
"""对指定的图片进行性能测试."""
profiler = ProfileRecorder(0.05)
# 加载图片
profiler.load_images(search_file, screen_file)
# 传入待测试的方法列表
profiler.profile_methods(method_list)
# 将性能数据写入文件
profiler.wite_to_json(dir_path, file_name) | [
"def",
"profile_different_methods",
"(",
"search_file",
",",
"screen_file",
",",
"method_list",
",",
"dir_path",
",",
"file_name",
")",
":",
"profiler",
"=",
"ProfileRecorder",
"(",
"0.05",
")",
"# 加载图片",
"profiler",
".",
"load_images",
"(",
"search_file",
",",
"screen_file",
")",
"# 传入待测试的方法列表",
"profiler",
".",
"profile_methods",
"(",
"method_list",
")",
"# 将性能数据写入文件",
"profiler",
".",
"wite_to_json",
"(",
"dir_path",
",",
"file_name",
")"
] | 对指定的图片进行性能测试. | [
"对指定的图片进行性能测试",
"."
] | 21583da2698a601cd632228228fc16d41f60a517 | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/benchmark/benchmark.py#L12-L20 | train |
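A typical invocation of the driver above; the image paths are assumptions, and the method keys mirror the `MATCHING_METHODS` table that `CheckKeypointResult` checks against:

```python
# Hypothetical call -- sample paths and the exact method keys are assumptions.
method_list = ["kaze", "brisk", "akaze", "orb", "sift", "surf", "brief"]
profile_different_methods(
    "sample/high_dpi/search.png",    # template image
    "sample/high_dpi/screen.png",    # screenshot to search in
    method_list,
    "result", "high_dpi.json")       # matches the path PlotResult reads below
```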
AirtestProject/Airtest | benchmark/benchmark.py | plot_profiled_all_images_table | def plot_profiled_all_images_table(method_list):
"""绘制多个图片的结果."""
high_dpi_dir_path, high_dpi_file_name = "result", "high_dpi.json"
rich_texture_dir_path, rich_texture_file_name = "result", "rich_texture.json"
text_dir_path, text_file_name = "result", "text.json"
image_list = ['high_dpi', 'rich_texture', 'text']
# high_dpi_method_exec_info
high_dpi_plot_object = PlotResult(high_dpi_dir_path, high_dpi_file_name)
high_dpi_method_exec_info = high_dpi_plot_object.method_exec_info
# rich_texture_method_exec_info
rich_texture_plot_object = PlotResult(rich_texture_dir_path, rich_texture_file_name)
rich_texture_method_exec_info = rich_texture_plot_object.method_exec_info
# text_method_exec_info
text_plot_object = PlotResult(text_dir_path, text_file_name)
text_method_exec_info = text_plot_object.method_exec_info
exec_info_list = [high_dpi_method_exec_info, rich_texture_method_exec_info, text_method_exec_info]
# 提取对应结果:
mem_compare_dict, cpu_compare_dict, succeed_compare_dict = {}, {}, {}
for index, method in enumerate(method_list):
mem_list, cpu_list, succeed_list = [], [], []
for exec_info in exec_info_list:
current_method_exec_info = exec_info[index]
mem_list.append(round(current_method_exec_info["mem_max"], 2)) # MB
# mem_list.append(round(current_method_exec_info["mem_max"] / 1024, 2)) # GB
cpu_list.append(round(current_method_exec_info["cpu_max"], 2))
succeed_ret = True if current_method_exec_info["result"] else False
succeed_list.append(succeed_ret)
mem_compare_dict.update({method: mem_list})
cpu_compare_dict.update({method: cpu_list})
succeed_compare_dict.update({method: succeed_list})
color_list = get_color_list(method_list)
# # 绘制三张表格
# plot_compare_table(image_list, method_list, color_list, mem_compare_dict, "memory (GB)", 311)
# plot_compare_table(image_list, method_list, color_list, cpu_compare_dict, "CPU (%)", 312)
# plot_compare_table(image_list, method_list, color_list, succeed_compare_dict, "Result", 313)
# plt.show()
# 绘制两个曲线图、一个表格图:
plot_compare_curves(image_list, method_list, color_list, mem_compare_dict, "Title: Memory (GB)", 311)
plot_compare_curves(image_list, method_list, color_list, cpu_compare_dict, "Title: CPU (%)", 312)
plot_compare_table(image_list, method_list, color_list, succeed_compare_dict, "Title: Result", 313)
plt.show() | python | def plot_profiled_all_images_table(method_list):
"""绘制多个图片的结果."""
high_dpi_dir_path, high_dpi_file_name = "result", "high_dpi.json"
rich_texture_dir_path, rich_texture_file_name = "result", "rich_texture.json"
text_dir_path, text_file_name = "result", "text.json"
image_list = ['high_dpi', 'rich_texture', 'text']
# high_dpi_method_exec_info
high_dpi_plot_object = PlotResult(high_dpi_dir_path, high_dpi_file_name)
high_dpi_method_exec_info = high_dpi_plot_object.method_exec_info
# rich_texture_method_exec_info
rich_texture_plot_object = PlotResult(rich_texture_dir_path, rich_texture_file_name)
rich_texture_method_exec_info = rich_texture_plot_object.method_exec_info
# text_method_exec_info
text_plot_object = PlotResult(text_dir_path, text_file_name)
text_method_exec_info = text_plot_object.method_exec_info
exec_info_list = [high_dpi_method_exec_info, rich_texture_method_exec_info, text_method_exec_info]
# 提取对应结果:
mem_compare_dict, cpu_compare_dict, succeed_compare_dict = {}, {}, {}
for index, method in enumerate(method_list):
mem_list, cpu_list, succeed_list = [], [], []
for exec_info in exec_info_list:
current_method_exec_info = exec_info[index]
mem_list.append(round(current_method_exec_info["mem_max"], 2)) # MB
# mem_list.append(round(current_method_exec_info["mem_max"] / 1024, 2)) # GB
cpu_list.append(round(current_method_exec_info["cpu_max"], 2))
succeed_ret = True if current_method_exec_info["result"] else False
succeed_list.append(succeed_ret)
mem_compare_dict.update({method: mem_list})
cpu_compare_dict.update({method: cpu_list})
succeed_compare_dict.update({method: succeed_list})
color_list = get_color_list(method_list)
# # 绘制三张表格
# plot_compare_table(image_list, method_list, color_list, mem_compare_dict, "memory (GB)", 311)
# plot_compare_table(image_list, method_list, color_list, cpu_compare_dict, "CPU (%)", 312)
# plot_compare_table(image_list, method_list, color_list, succeed_compare_dict, "Result", 313)
# plt.show()
# 绘制两个曲线图、一个表格图:
plot_compare_curves(image_list, method_list, color_list, mem_compare_dict, "Title: Memory (GB)", 311)
plot_compare_curves(image_list, method_list, color_list, cpu_compare_dict, "Title: CPU (%)", 312)
plot_compare_table(image_list, method_list, color_list, succeed_compare_dict, "Title: Result", 313)
plt.show() | [
"def",
"plot_profiled_all_images_table",
"(",
"method_list",
")",
":",
"high_dpi_dir_path",
",",
"high_dpi_file_name",
"=",
"\"result\"",
",",
"\"high_dpi.json\"",
"rich_texture_dir_path",
",",
"rich_texture_file_name",
"=",
"\"result\"",
",",
"\"rich_texture.json\"",
"text_dir_path",
",",
"text_file_name",
"=",
"\"result\"",
",",
"\"text.json\"",
"image_list",
"=",
"[",
"'high_dpi'",
",",
"'rich_texture'",
",",
"'text'",
"]",
"# high_dpi_method_exec_info",
"high_dpi_plot_object",
"=",
"PlotResult",
"(",
"high_dpi_dir_path",
",",
"high_dpi_file_name",
")",
"high_dpi_method_exec_info",
"=",
"high_dpi_plot_object",
".",
"method_exec_info",
"# rich_texture_method_exec_info",
"rich_texture_plot_object",
"=",
"PlotResult",
"(",
"rich_texture_dir_path",
",",
"rich_texture_file_name",
")",
"rich_texture_method_exec_info",
"=",
"rich_texture_plot_object",
".",
"method_exec_info",
"# text_method_exec_info",
"text_plot_object",
"=",
"PlotResult",
"(",
"text_dir_path",
",",
"text_file_name",
")",
"text_method_exec_info",
"=",
"text_plot_object",
".",
"method_exec_info",
"exec_info_list",
"=",
"[",
"high_dpi_method_exec_info",
",",
"rich_texture_method_exec_info",
",",
"text_method_exec_info",
"]",
"# 提取对应结果:",
"mem_compare_dict",
",",
"cpu_compare_dict",
",",
"succeed_compare_dict",
"=",
"{",
"}",
",",
"{",
"}",
",",
"{",
"}",
"for",
"index",
",",
"method",
"in",
"enumerate",
"(",
"method_list",
")",
":",
"mem_list",
",",
"cpu_list",
",",
"succeed_list",
"=",
"[",
"]",
",",
"[",
"]",
",",
"[",
"]",
"for",
"exec_info",
"in",
"exec_info_list",
":",
"current_method_exec_info",
"=",
"exec_info",
"[",
"index",
"]",
"mem_list",
".",
"append",
"(",
"round",
"(",
"current_method_exec_info",
"[",
"\"mem_max\"",
"]",
",",
"2",
")",
")",
"# MB",
"# mem_list.append(round(current_method_exec_info[\"mem_max\"] / 1024, 2)) # GB",
"cpu_list",
".",
"append",
"(",
"round",
"(",
"current_method_exec_info",
"[",
"\"cpu_max\"",
"]",
",",
"2",
")",
")",
"succeed_ret",
"=",
"True",
"if",
"current_method_exec_info",
"[",
"\"result\"",
"]",
"else",
"False",
"succeed_list",
".",
"append",
"(",
"succeed_ret",
")",
"mem_compare_dict",
".",
"update",
"(",
"{",
"method",
":",
"mem_list",
"}",
")",
"cpu_compare_dict",
".",
"update",
"(",
"{",
"method",
":",
"cpu_list",
"}",
")",
"succeed_compare_dict",
".",
"update",
"(",
"{",
"method",
":",
"succeed_list",
"}",
")",
"color_list",
"=",
"get_color_list",
"(",
"method_list",
")",
"# # 绘制三张表格",
"# plot_compare_table(image_list, method_list, color_list, mem_compare_dict, \"memory (GB)\", 311)",
"# plot_compare_table(image_list, method_list, color_list, cpu_compare_dict, \"CPU (%)\", 312)",
"# plot_compare_table(image_list, method_list, color_list, succeed_compare_dict, \"Result\", 313)",
"# plt.show()",
"# 绘制两个曲线图、一个表格图:",
"plot_compare_curves",
"(",
"image_list",
",",
"method_list",
",",
"color_list",
",",
"mem_compare_dict",
",",
"\"Title: Memory (GB)\"",
",",
"311",
")",
"plot_compare_curves",
"(",
"image_list",
",",
"method_list",
",",
"color_list",
",",
"cpu_compare_dict",
",",
"\"Title: CPU (%)\"",
",",
"312",
")",
"plot_compare_table",
"(",
"image_list",
",",
"method_list",
",",
"color_list",
",",
"succeed_compare_dict",
",",
"\"Title: Result\"",
",",
"313",
")",
"plt",
".",
"show",
"(",
")"
] | 绘制多个图片的结果. | [
"绘制多个图片的结果",
"."
] | 21583da2698a601cd632228228fc16d41f60a517 | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/benchmark/benchmark.py#L53-L99 | train |
AirtestProject/Airtest | benchmark/benchmark.py | get_color_list | def get_color_list(method_list):
"""获取method对应的color列表."""
color_list = []
for method in method_list:
color = tuple([random() for _ in range(3)]) # 随机颜色画线
color_list.append(color)
return color_list | python | def get_color_list(method_list):
"""获取method对应的color列表."""
color_list = []
for method in method_list:
color = tuple([random() for _ in range(3)]) # 随机颜色画线
color_list.append(color)
return color_list | [
"def",
"get_color_list",
"(",
"method_list",
")",
":",
"color_list",
"=",
"[",
"]",
"for",
"method",
"in",
"method_list",
":",
"color",
"=",
"tuple",
"(",
"[",
"random",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"3",
")",
"]",
")",
"# 随机颜色画线",
"color_list",
".",
"append",
"(",
"color",
")",
"return",
"color_list"
] | 获取method对应的color列表. | [
"获取method对应的color列表",
"."
] | 21583da2698a601cd632228228fc16d41f60a517 | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/benchmark/benchmark.py#L102-L108 | train |
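Because `get_color_list` draws each channel from `random()`, two methods can end up with nearly identical or very pale colors. A deterministic alternative using a qualitative colormap, assuming matplotlib is available:

```python
import matplotlib.pyplot as plt

def get_color_list_stable(method_list):
    # tab10 provides 10 well-separated colors; indexing a ListedColormap
    # with an integer returns the entry at that position.
    return [plt.cm.tab10(i % 10) for i in range(len(method_list))]
```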
AirtestProject/Airtest | benchmark/benchmark.py | plot_compare_table | def plot_compare_table(image_list, method_list, color_list, compare_dict, fig_name="", fig_num=111):
"""绘制了对比表格."""
row_labels = image_list
# 写入值:
table_vals = []
for i in range(len(row_labels)):
row_vals = []
for method in method_list:
row_vals.append(compare_dict[method][i])
table_vals.append(row_vals)
# 绘制表格图
colors = [[(0.95, 0.95, 0.95) for c in range(len(method_list))] for r in range(len(row_labels))] # cell的颜色
# plt.figure(figsize=(8, 4), dpi=120)
plt.subplot(fig_num)
plt.title(fig_name) # 绘制标题
lightgrn = (0.5, 0.8, 0.5) # 这个是label的背景色
plt.table(cellText=table_vals,
rowLabels=row_labels,
colLabels=method_list,
rowColours=[lightgrn] * len(row_labels),
colColours=color_list,
cellColours=colors,
cellLoc='center',
loc='upper left')
plt.axis('off') | python | def plot_compare_table(image_list, method_list, color_list, compare_dict, fig_name="", fig_num=111):
"""绘制了对比表格."""
row_labels = image_list
# 写入值:
table_vals = []
for i in range(len(row_labels)):
row_vals = []
for method in method_list:
row_vals.append(compare_dict[method][i])
table_vals.append(row_vals)
# 绘制表格图
colors = [[(0.95, 0.95, 0.95) for c in range(len(method_list))] for r in range(len(row_labels))] # cell的颜色
# plt.figure(figsize=(8, 4), dpi=120)
plt.subplot(fig_num)
plt.title(fig_name) # 绘制标题
lightgrn = (0.5, 0.8, 0.5) # 这个是label的背景色
plt.table(cellText=table_vals,
rowLabels=row_labels,
colLabels=method_list,
rowColours=[lightgrn] * len(row_labels),
colColours=color_list,
cellColours=colors,
cellLoc='center',
loc='upper left')
plt.axis('off') | [
"def",
"plot_compare_table",
"(",
"image_list",
",",
"method_list",
",",
"color_list",
",",
"compare_dict",
",",
"fig_name",
"=",
"\"\"",
",",
"fig_num",
"=",
"111",
")",
":",
"row_labels",
"=",
"image_list",
"# 写入值:",
"table_vals",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"row_labels",
")",
")",
":",
"row_vals",
"=",
"[",
"]",
"for",
"method",
"in",
"method_list",
":",
"row_vals",
".",
"append",
"(",
"compare_dict",
"[",
"method",
"]",
"[",
"i",
"]",
")",
"table_vals",
".",
"append",
"(",
"row_vals",
")",
"# 绘制表格图",
"colors",
"=",
"[",
"[",
"(",
"0.95",
",",
"0.95",
",",
"0.95",
")",
"for",
"c",
"in",
"range",
"(",
"len",
"(",
"method_list",
")",
")",
"]",
"for",
"r",
"in",
"range",
"(",
"len",
"(",
"row_labels",
")",
")",
"]",
"# cell的颜色",
"# plt.figure(figsize=(8, 4), dpi=120)",
"plt",
".",
"subplot",
"(",
"fig_num",
")",
"plt",
".",
"title",
"(",
"fig_name",
")",
"# 绘制标题",
"lightgrn",
"=",
"(",
"0.5",
",",
"0.8",
",",
"0.5",
")",
"# 这个是label的背景色",
"plt",
".",
"table",
"(",
"cellText",
"=",
"table_vals",
",",
"rowLabels",
"=",
"row_labels",
",",
"colLabels",
"=",
"method_list",
",",
"rowColours",
"=",
"[",
"lightgrn",
"]",
"*",
"len",
"(",
"row_labels",
")",
",",
"colColours",
"=",
"color_list",
",",
"cellColours",
"=",
"colors",
",",
"cellLoc",
"=",
"'center'",
",",
"loc",
"=",
"'upper left'",
")",
"plt",
".",
"axis",
"(",
"'off'",
")"
] | 绘制了对比表格. | [
"绘制了对比表格",
"."
] | 21583da2698a601cd632228228fc16d41f60a517 | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/benchmark/benchmark.py#L111-L136 | train |
AirtestProject/Airtest | benchmark/benchmark.py | plot_compare_curves | def plot_compare_curves(image_list, method_list, color_list, compare_dict, fig_name="", fig_num=111):
"""绘制对比曲线."""
plt.subplot(fig_num)
plt.title(fig_name, loc="center") # 设置绘图的标题
mix_ins = []
for index, method in enumerate(method_list):
mem_ins = plt.plot(image_list, compare_dict[method], "-", label=method, color=color_list[index], linestyle='-', marker='.')
# mem_ins = plt.plot(image_list, compare_dict[method], "-", label=method, color='deepskyblue', linestyle='-', marker='.')
mix_ins.append(mem_ins)
plt.legend(loc='upper right') # 说明标签的位置
plt.grid() # 加网格
# plt.xlabel("Image")
plt.ylabel("Mem(MB)")
plt.ylim(bottom=0) | python | def plot_compare_curves(image_list, method_list, color_list, compare_dict, fig_name="", fig_num=111):
"""绘制对比曲线."""
plt.subplot(fig_num)
plt.title(fig_name, loc="center") # 设置绘图的标题
mix_ins = []
for index, method in enumerate(method_list):
mem_ins = plt.plot(image_list, compare_dict[method], "-", label=method, color=color_list[index], linestyle='-', marker='.')
# mem_ins = plt.plot(image_list, compare_dict[method], "-", label=method, color='deepskyblue', linestyle='-', marker='.')
mix_ins.append(mem_ins)
plt.legend(loc='upper right') # 说明标签的位置
plt.grid() # 加网格
# plt.xlabel("Image")
plt.ylabel("Mem(MB)")
plt.ylim(bottom=0) | [
"def",
"plot_compare_curves",
"(",
"image_list",
",",
"method_list",
",",
"color_list",
",",
"compare_dict",
",",
"fig_name",
"=",
"\"\"",
",",
"fig_num",
"=",
"111",
")",
":",
"plt",
".",
"subplot",
"(",
"fig_num",
")",
"plt",
".",
"title",
"(",
"fig_name",
",",
"loc",
"=",
"\"center\"",
")",
"# 设置绘图的标题",
"mix_ins",
"=",
"[",
"]",
"for",
"index",
",",
"method",
"in",
"enumerate",
"(",
"method_list",
")",
":",
"mem_ins",
"=",
"plt",
".",
"plot",
"(",
"image_list",
",",
"compare_dict",
"[",
"method",
"]",
",",
"\"-\"",
",",
"label",
"=",
"method",
",",
"color",
"=",
"color_list",
"[",
"index",
"]",
",",
"linestyle",
"=",
"'-'",
",",
"marker",
"=",
"'.'",
")",
"# mem_ins = plt.plot(image_list, compare_dict[method], \"-\", label=method, color='deepskyblue', linestyle='-', marker='.')",
"mix_ins",
".",
"append",
"(",
"mem_ins",
")",
"plt",
".",
"legend",
"(",
"loc",
"=",
"'upper right'",
")",
"# 说明标签的位置",
"plt",
".",
"grid",
"(",
")",
"# 加网格",
"# plt.xlabel(\"Image\")",
"plt",
".",
"ylabel",
"(",
"\"Mem(MB)\"",
")",
"plt",
".",
"ylim",
"(",
"bottom",
"=",
"0",
")"
] | 绘制对比曲线. | [
"绘制对比曲线",
"."
] | 21583da2698a601cd632228228fc16d41f60a517 | https://github.com/AirtestProject/Airtest/blob/21583da2698a601cd632228228fc16d41f60a517/benchmark/benchmark.py#L139-L153 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py | ReadTag | def ReadTag(buffer, pos):
"""Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple.
We return the raw bytes of the tag rather than decoding them. The raw
bytes can then be used to look up the proper decoder. This effectively allows
us to trade some work that would be done in pure-python (decoding a varint)
for work that is done in C (searching for a byte string in a hash table).
In a low-level language it would be much cheaper to decode the varint and
use that, but not in Python.
"""
start = pos
while six.indexbytes(buffer, pos) & 0x80:
pos += 1
pos += 1
return (buffer[start:pos], pos) | python | def ReadTag(buffer, pos):
"""Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple.
We return the raw bytes of the tag rather than decoding them. The raw
bytes can then be used to look up the proper decoder. This effectively allows
us to trade some work that would be done in pure-python (decoding a varint)
for work that is done in C (searching for a byte string in a hash table).
In a low-level language it would be much cheaper to decode the varint and
use that, but not in Python.
"""
start = pos
while six.indexbytes(buffer, pos) & 0x80:
pos += 1
pos += 1
return (buffer[start:pos], pos) | [
"def",
"ReadTag",
"(",
"buffer",
",",
"pos",
")",
":",
"start",
"=",
"pos",
"while",
"six",
".",
"indexbytes",
"(",
"buffer",
",",
"pos",
")",
"&",
"0x80",
":",
"pos",
"+=",
"1",
"pos",
"+=",
"1",
"return",
"(",
"buffer",
"[",
"start",
":",
"pos",
"]",
",",
"pos",
")"
] | Read a tag from the buffer, and return a (tag_bytes, new_pos) tuple.
We return the raw bytes of the tag rather than decoding them. The raw
bytes can then be used to look up the proper decoder. This effectively allows
us to trade some work that would be done in pure-python (decoding a varint)
for work that is done in C (searching for a byte string in a hash table).
In a low-level language it would be much cheaper to decode the varint and
use that, but not in Python. | [
"Read",
"a",
"tag",
"from",
"the",
"buffer",
"and",
"return",
"a",
"(",
"tag_bytes",
"new_pos",
")",
"tuple",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py#L169-L184 | train |
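`ReadTag` deliberately skips varint decoding, but the tag it scans past is a base-128 varint packing `(field_number << 3) | wire_type`. A minimal decoder sketch for reference:

```python
def decode_varint(buf, pos):
    """Decode a base-128 varint starting at pos; return (value, new_pos)."""
    result, shift = 0, 0
    while True:
        b = buf[pos]               # assumes a Python 3 bytes object
        pos += 1
        result |= (b & 0x7F) << shift
        if not (b & 0x80):         # a clear high bit marks the last byte
            return result, pos
        shift += 7

def decode_tag(buf, pos):
    """Split a decoded tag into (field_number, wire_type, new_pos)."""
    tag, pos = decode_varint(buf, pos)
    return tag >> 3, tag & 0x07, pos

# Field 1 with wire type 0 (varint) encodes as the single byte 0x08:
assert decode_tag(b"\x08", 0) == (1, 0, 1)
```

The module above uses `six.indexbytes` instead of direct indexing so the same loop also runs on Python 2 strings.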
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py | _SimpleDecoder | def _SimpleDecoder(wire_type, decode_value):
"""Return a constructor for a decoder for fields of a particular type.
Args:
wire_type: The field's wire type.
decode_value: A function which decodes an individual value, e.g.
_DecodeVarint()
"""
def SpecificDecoder(field_number, is_repeated, is_packed, key, new_default):
if is_packed:
local_DecodeVarint = _DecodeVarint
def DecodePackedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
(endpoint, pos) = local_DecodeVarint(buffer, pos)
endpoint += pos
if endpoint > end:
raise _DecodeError('Truncated message.')
while pos < endpoint:
(element, pos) = decode_value(buffer, pos)
value.append(element)
if pos > endpoint:
del value[-1] # Discard corrupt value.
raise _DecodeError('Packed element was truncated.')
return pos
return DecodePackedField
elif is_repeated:
tag_bytes = encoder.TagBytes(field_number, wire_type)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(element, new_pos) = decode_value(buffer, pos)
value.append(element)
# Predict that the next tag is another copy of the same repeated
# field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos >= end:
# Prediction failed. Return.
if new_pos > end:
raise _DecodeError('Truncated message.')
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
(field_dict[key], pos) = decode_value(buffer, pos)
if pos > end:
del field_dict[key] # Discard corrupt value.
raise _DecodeError('Truncated message.')
return pos
return DecodeField
return SpecificDecoder | python | def _SimpleDecoder(wire_type, decode_value):
"""Return a constructor for a decoder for fields of a particular type.
Args:
wire_type: The field's wire type.
decode_value: A function which decodes an individual value, e.g.
_DecodeVarint()
"""
def SpecificDecoder(field_number, is_repeated, is_packed, key, new_default):
if is_packed:
local_DecodeVarint = _DecodeVarint
def DecodePackedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
(endpoint, pos) = local_DecodeVarint(buffer, pos)
endpoint += pos
if endpoint > end:
raise _DecodeError('Truncated message.')
while pos < endpoint:
(element, pos) = decode_value(buffer, pos)
value.append(element)
if pos > endpoint:
del value[-1] # Discard corrupt value.
raise _DecodeError('Packed element was truncated.')
return pos
return DecodePackedField
elif is_repeated:
tag_bytes = encoder.TagBytes(field_number, wire_type)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(element, new_pos) = decode_value(buffer, pos)
value.append(element)
# Predict that the next tag is another copy of the same repeated
# field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos >= end:
# Prediction failed. Return.
if new_pos > end:
raise _DecodeError('Truncated message.')
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
(field_dict[key], pos) = decode_value(buffer, pos)
if pos > end:
del field_dict[key] # Discard corrupt value.
raise _DecodeError('Truncated message.')
return pos
return DecodeField
return SpecificDecoder | [
"def",
"_SimpleDecoder",
"(",
"wire_type",
",",
"decode_value",
")",
":",
"def",
"SpecificDecoder",
"(",
"field_number",
",",
"is_repeated",
",",
"is_packed",
",",
"key",
",",
"new_default",
")",
":",
"if",
"is_packed",
":",
"local_DecodeVarint",
"=",
"_DecodeVarint",
"def",
"DecodePackedField",
"(",
"buffer",
",",
"pos",
",",
"end",
",",
"message",
",",
"field_dict",
")",
":",
"value",
"=",
"field_dict",
".",
"get",
"(",
"key",
")",
"if",
"value",
"is",
"None",
":",
"value",
"=",
"field_dict",
".",
"setdefault",
"(",
"key",
",",
"new_default",
"(",
"message",
")",
")",
"(",
"endpoint",
",",
"pos",
")",
"=",
"local_DecodeVarint",
"(",
"buffer",
",",
"pos",
")",
"endpoint",
"+=",
"pos",
"if",
"endpoint",
">",
"end",
":",
"raise",
"_DecodeError",
"(",
"'Truncated message.'",
")",
"while",
"pos",
"<",
"endpoint",
":",
"(",
"element",
",",
"pos",
")",
"=",
"decode_value",
"(",
"buffer",
",",
"pos",
")",
"value",
".",
"append",
"(",
"element",
")",
"if",
"pos",
">",
"endpoint",
":",
"del",
"value",
"[",
"-",
"1",
"]",
"# Discard corrupt value.",
"raise",
"_DecodeError",
"(",
"'Packed element was truncated.'",
")",
"return",
"pos",
"return",
"DecodePackedField",
"elif",
"is_repeated",
":",
"tag_bytes",
"=",
"encoder",
".",
"TagBytes",
"(",
"field_number",
",",
"wire_type",
")",
"tag_len",
"=",
"len",
"(",
"tag_bytes",
")",
"def",
"DecodeRepeatedField",
"(",
"buffer",
",",
"pos",
",",
"end",
",",
"message",
",",
"field_dict",
")",
":",
"value",
"=",
"field_dict",
".",
"get",
"(",
"key",
")",
"if",
"value",
"is",
"None",
":",
"value",
"=",
"field_dict",
".",
"setdefault",
"(",
"key",
",",
"new_default",
"(",
"message",
")",
")",
"while",
"1",
":",
"(",
"element",
",",
"new_pos",
")",
"=",
"decode_value",
"(",
"buffer",
",",
"pos",
")",
"value",
".",
"append",
"(",
"element",
")",
"# Predict that the next tag is another copy of the same repeated",
"# field.",
"pos",
"=",
"new_pos",
"+",
"tag_len",
"if",
"buffer",
"[",
"new_pos",
":",
"pos",
"]",
"!=",
"tag_bytes",
"or",
"new_pos",
">=",
"end",
":",
"# Prediction failed. Return.",
"if",
"new_pos",
">",
"end",
":",
"raise",
"_DecodeError",
"(",
"'Truncated message.'",
")",
"return",
"new_pos",
"return",
"DecodeRepeatedField",
"else",
":",
"def",
"DecodeField",
"(",
"buffer",
",",
"pos",
",",
"end",
",",
"message",
",",
"field_dict",
")",
":",
"(",
"field_dict",
"[",
"key",
"]",
",",
"pos",
")",
"=",
"decode_value",
"(",
"buffer",
",",
"pos",
")",
"if",
"pos",
">",
"end",
":",
"del",
"field_dict",
"[",
"key",
"]",
"# Discard corrupt value.",
"raise",
"_DecodeError",
"(",
"'Truncated message.'",
")",
"return",
"pos",
"return",
"DecodeField",
"return",
"SpecificDecoder"
] | Return a constructor for a decoder for fields of a particular type.
Args:
wire_type: The field's wire type.
decode_value: A function which decodes an individual value, e.g.
_DecodeVarint() | [
"Return",
"a",
"constructor",
"for",
"a",
"decoder",
"for",
"fields",
"of",
"a",
"particular",
"type",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py#L190-L246 | train |
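The docstring above describes a constructor that returns per-field decoder closures. A minimal, self-contained sketch of that pattern, with hypothetical helper names (`decode_varint`, `simple_decoder`) standing in for protobuf's internals:

```python
# Illustrative stand-ins, not protobuf's internal API.
def decode_varint(buf, pos):
    result = shift = 0
    while True:
        b = buf[pos]
        result |= (b & 0x7F) << shift
        pos += 1
        if not b & 0x80:          # continuation bit clear: varint complete
            return result, pos
        shift += 7

def simple_decoder(decode_value):
    # Returns a closure that decodes one field value into field_dict[key].
    def decode_field(buf, pos, end, field_dict, key):
        field_dict[key], pos = decode_value(buf, pos)
        if pos > end:
            raise ValueError('Truncated message.')
        return pos
    return decode_field

int_field = simple_decoder(decode_varint)
fields = {}
assert int_field(b'\x96\x01', 0, 2, fields, 'x') == 2 and fields['x'] == 150
```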
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py | _ModifiedDecoder | def _ModifiedDecoder(wire_type, decode_value, modify_value):
"""Like SimpleDecoder but additionally invokes modify_value on every value
before storing it. Usually modify_value is ZigZagDecode.
"""
# Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but
# not enough to make a significant difference.
def InnerDecode(buffer, pos):
(result, new_pos) = decode_value(buffer, pos)
return (modify_value(result), new_pos)
return _SimpleDecoder(wire_type, InnerDecode) | python | def _ModifiedDecoder(wire_type, decode_value, modify_value):
"""Like SimpleDecoder but additionally invokes modify_value on every value
before storing it. Usually modify_value is ZigZagDecode.
"""
# Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but
# not enough to make a significant difference.
def InnerDecode(buffer, pos):
(result, new_pos) = decode_value(buffer, pos)
return (modify_value(result), new_pos)
return _SimpleDecoder(wire_type, InnerDecode) | [
"def",
"_ModifiedDecoder",
"(",
"wire_type",
",",
"decode_value",
",",
"modify_value",
")",
":",
"# Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but",
"# not enough to make a significant difference.",
"def",
"InnerDecode",
"(",
"buffer",
",",
"pos",
")",
":",
"(",
"result",
",",
"new_pos",
")",
"=",
"decode_value",
"(",
"buffer",
",",
"pos",
")",
"return",
"(",
"modify_value",
"(",
"result",
")",
",",
"new_pos",
")",
"return",
"_SimpleDecoder",
"(",
"wire_type",
",",
"InnerDecode",
")"
] | Like SimpleDecoder but additionally invokes modify_value on every value
before storing it. Usually modify_value is ZigZagDecode. | [
"Like",
"SimpleDecoder",
"but",
"additionally",
"invokes",
"modify_value",
"on",
"every",
"value",
"before",
"storing",
"it",
".",
"Usually",
"modify_value",
"is",
"ZigZagDecode",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py#L249-L260 | train |
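ZigZag decoding is the typical `modify_value` mentioned above; it maps unsigned varints back to signed integers. A quick sketch:

```python
def zigzag_decode(n):
    # 0, 1, 2, 3, 4, ... -> 0, -1, 1, -2, 2, ...
    return (n >> 1) ^ -(n & 1)

assert [zigzag_decode(n) for n in range(5)] == [0, -1, 1, -2, 2]
```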
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py | _StructPackDecoder | def _StructPackDecoder(wire_type, format):
"""Return a constructor for a decoder for a fixed-width field.
Args:
wire_type: The field's wire type.
format: The format string to pass to struct.unpack().
"""
value_size = struct.calcsize(format)
local_unpack = struct.unpack
# Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but
# not enough to make a significant difference.
# Note that we expect someone up-stack to catch struct.error and convert
# it to _DecodeError -- this way we don't have to set up exception-
# handling blocks every time we parse one value.
def InnerDecode(buffer, pos):
new_pos = pos + value_size
result = local_unpack(format, buffer[pos:new_pos])[0]
return (result, new_pos)
return _SimpleDecoder(wire_type, InnerDecode) | python | def _StructPackDecoder(wire_type, format):
"""Return a constructor for a decoder for a fixed-width field.
Args:
wire_type: The field's wire type.
format: The format string to pass to struct.unpack().
"""
value_size = struct.calcsize(format)
local_unpack = struct.unpack
# Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but
# not enough to make a significant difference.
# Note that we expect someone up-stack to catch struct.error and convert
# it to _DecodeError -- this way we don't have to set up exception-
# handling blocks every time we parse one value.
def InnerDecode(buffer, pos):
new_pos = pos + value_size
result = local_unpack(format, buffer[pos:new_pos])[0]
return (result, new_pos)
return _SimpleDecoder(wire_type, InnerDecode) | [
"def",
"_StructPackDecoder",
"(",
"wire_type",
",",
"format",
")",
":",
"value_size",
"=",
"struct",
".",
"calcsize",
"(",
"format",
")",
"local_unpack",
"=",
"struct",
".",
"unpack",
"# Reusing _SimpleDecoder is slightly slower than copying a bunch of code, but",
"# not enough to make a significant difference.",
"# Note that we expect someone up-stack to catch struct.error and convert",
"# it to _DecodeError -- this way we don't have to set up exception-",
"# handling blocks every time we parse one value.",
"def",
"InnerDecode",
"(",
"buffer",
",",
"pos",
")",
":",
"new_pos",
"=",
"pos",
"+",
"value_size",
"result",
"=",
"local_unpack",
"(",
"format",
",",
"buffer",
"[",
"pos",
":",
"new_pos",
"]",
")",
"[",
"0",
"]",
"return",
"(",
"result",
",",
"new_pos",
")",
"return",
"_SimpleDecoder",
"(",
"wire_type",
",",
"InnerDecode",
")"
] | Return a constructor for a decoder for a fixed-width field.
Args:
wire_type: The field's wire type.
format: The format string to pass to struct.unpack(). | [
"Return",
"a",
"constructor",
"for",
"a",
"decoder",
"for",
"a",
"fixed",
"-",
"width",
"field",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py#L263-L285 | train |
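For intuition, `struct.calcsize` and `struct.unpack` behave exactly as the decoder assumes; `'<I'` here is an illustrative little-endian fixed32 format, not necessarily the one the library picks:

```python
import struct

assert struct.calcsize('<I') == 4          # width consumed per value
value, = struct.unpack('<I', b'\x2a\x00\x00\x00')
assert value == 42
```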
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py | _FloatDecoder | def _FloatDecoder():
"""Returns a decoder for a float field.
This code works around a bug in struct.unpack for non-finite 32-bit
floating-point values.
"""
local_unpack = struct.unpack
def InnerDecode(buffer, pos):
# We expect a 32-bit value in little-endian byte order. Bit 1 is the sign
# bit, bits 2-9 represent the exponent, and bits 10-32 are the significand.
new_pos = pos + 4
float_bytes = buffer[pos:new_pos]
# If this value has all its exponent bits set, then it's non-finite.
# In Python 2.4, struct.unpack will convert it to a finite 64-bit value.
# To avoid that, we parse it specially.
if (float_bytes[3:4] in b'\x7F\xFF' and float_bytes[2:3] >= b'\x80'):
# If at least one significand bit is set...
if float_bytes[0:3] != b'\x00\x00\x80':
return (_NAN, new_pos)
# If sign bit is set...
if float_bytes[3:4] == b'\xFF':
return (_NEG_INF, new_pos)
return (_POS_INF, new_pos)
# Note that we expect someone up-stack to catch struct.error and convert
# it to _DecodeError -- this way we don't have to set up exception-
# handling blocks every time we parse one value.
result = local_unpack('<f', float_bytes)[0]
return (result, new_pos)
return _SimpleDecoder(wire_format.WIRETYPE_FIXED32, InnerDecode) | python | def _FloatDecoder():
"""Returns a decoder for a float field.
This code works around a bug in struct.unpack for non-finite 32-bit
floating-point values.
"""
local_unpack = struct.unpack
def InnerDecode(buffer, pos):
# We expect a 32-bit value in little-endian byte order. Bit 1 is the sign
# bit, bits 2-9 represent the exponent, and bits 10-32 are the significand.
new_pos = pos + 4
float_bytes = buffer[pos:new_pos]
# If this value has all its exponent bits set, then it's non-finite.
# In Python 2.4, struct.unpack will convert it to a finite 64-bit value.
# To avoid that, we parse it specially.
if (float_bytes[3:4] in b'\x7F\xFF' and float_bytes[2:3] >= b'\x80'):
# If at least one significand bit is set...
if float_bytes[0:3] != b'\x00\x00\x80':
return (_NAN, new_pos)
# If sign bit is set...
if float_bytes[3:4] == b'\xFF':
return (_NEG_INF, new_pos)
return (_POS_INF, new_pos)
# Note that we expect someone up-stack to catch struct.error and convert
# it to _DecodeError -- this way we don't have to set up exception-
# handling blocks every time we parse one value.
result = local_unpack('<f', float_bytes)[0]
return (result, new_pos)
return _SimpleDecoder(wire_format.WIRETYPE_FIXED32, InnerDecode) | [
"def",
"_FloatDecoder",
"(",
")",
":",
"local_unpack",
"=",
"struct",
".",
"unpack",
"def",
"InnerDecode",
"(",
"buffer",
",",
"pos",
")",
":",
"# We expect a 32-bit value in little-endian byte order. Bit 1 is the sign",
"# bit, bits 2-9 represent the exponent, and bits 10-32 are the significand.",
"new_pos",
"=",
"pos",
"+",
"4",
"float_bytes",
"=",
"buffer",
"[",
"pos",
":",
"new_pos",
"]",
"# If this value has all its exponent bits set, then it's non-finite.",
"# In Python 2.4, struct.unpack will convert it to a finite 64-bit value.",
"# To avoid that, we parse it specially.",
"if",
"(",
"float_bytes",
"[",
"3",
":",
"4",
"]",
"in",
"b'\\x7F\\xFF'",
"and",
"float_bytes",
"[",
"2",
":",
"3",
"]",
">=",
"b'\\x80'",
")",
":",
"# If at least one significand bit is set...",
"if",
"float_bytes",
"[",
"0",
":",
"3",
"]",
"!=",
"b'\\x00\\x00\\x80'",
":",
"return",
"(",
"_NAN",
",",
"new_pos",
")",
"# If sign bit is set...",
"if",
"float_bytes",
"[",
"3",
":",
"4",
"]",
"==",
"b'\\xFF'",
":",
"return",
"(",
"_NEG_INF",
",",
"new_pos",
")",
"return",
"(",
"_POS_INF",
",",
"new_pos",
")",
"# Note that we expect someone up-stack to catch struct.error and convert",
"# it to _DecodeError -- this way we don't have to set up exception-",
"# handling blocks every time we parse one value.",
"result",
"=",
"local_unpack",
"(",
"'<f'",
",",
"float_bytes",
")",
"[",
"0",
"]",
"return",
"(",
"result",
",",
"new_pos",
")",
"return",
"_SimpleDecoder",
"(",
"wire_format",
".",
"WIRETYPE_FIXED32",
",",
"InnerDecode",
")"
] | Returns a decoder for a float field.
This code works around a bug in struct.unpack for non-finite 32-bit
floating-point values. | [
"Returns",
"a",
"decoder",
"for",
"a",
"float",
"field",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py#L288-L320 | train |
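The special-casing above targets a Python 2.4 bug; on modern Python, `struct` round-trips non-finite float32 patterns directly, which this hedged check illustrates (0x7F800000 is +inf, and any set significand bit yields NaN):

```python
import math
import struct

assert math.isinf(struct.unpack('<f', b'\x00\x00\x80\x7f')[0])  # +inf
assert math.isnan(struct.unpack('<f', b'\x01\x00\x80\x7f')[0])  # quiet/signaling NaN
```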
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py | _DoubleDecoder | def _DoubleDecoder():
"""Returns a decoder for a double field.
This code works around a bug in struct.unpack for not-a-number.
"""
local_unpack = struct.unpack
def InnerDecode(buffer, pos):
# We expect a 64-bit value in little-endian byte order. Bit 1 is the sign
# bit, bits 2-12 represent the exponent, and bits 13-64 are the significand.
new_pos = pos + 8
double_bytes = buffer[pos:new_pos]
# If this value has all its exponent bits set and at least one significand
# bit set, it's not a number. In Python 2.4, struct.unpack will treat it
# as inf or -inf. To avoid that, we treat it specially.
if ((double_bytes[7:8] in b'\x7F\xFF')
and (double_bytes[6:7] >= b'\xF0')
and (double_bytes[0:7] != b'\x00\x00\x00\x00\x00\x00\xF0')):
return (_NAN, new_pos)
# Note that we expect someone up-stack to catch struct.error and convert
# it to _DecodeError -- this way we don't have to set up exception-
# handling blocks every time we parse one value.
result = local_unpack('<d', double_bytes)[0]
return (result, new_pos)
return _SimpleDecoder(wire_format.WIRETYPE_FIXED64, InnerDecode) | python | def _DoubleDecoder():
"""Returns a decoder for a double field.
This code works around a bug in struct.unpack for not-a-number.
"""
local_unpack = struct.unpack
def InnerDecode(buffer, pos):
# We expect a 64-bit value in little-endian byte order. Bit 1 is the sign
# bit, bits 2-12 represent the exponent, and bits 13-64 are the significand.
new_pos = pos + 8
double_bytes = buffer[pos:new_pos]
# If this value has all its exponent bits set and at least one significand
# bit set, it's not a number. In Python 2.4, struct.unpack will treat it
# as inf or -inf. To avoid that, we treat it specially.
if ((double_bytes[7:8] in b'\x7F\xFF')
and (double_bytes[6:7] >= b'\xF0')
and (double_bytes[0:7] != b'\x00\x00\x00\x00\x00\x00\xF0')):
return (_NAN, new_pos)
# Note that we expect someone up-stack to catch struct.error and convert
# it to _DecodeError -- this way we don't have to set up exception-
# handling blocks every time we parse one value.
result = local_unpack('<d', double_bytes)[0]
return (result, new_pos)
return _SimpleDecoder(wire_format.WIRETYPE_FIXED64, InnerDecode) | [
"def",
"_DoubleDecoder",
"(",
")",
":",
"local_unpack",
"=",
"struct",
".",
"unpack",
"def",
"InnerDecode",
"(",
"buffer",
",",
"pos",
")",
":",
"# We expect a 64-bit value in little-endian byte order. Bit 1 is the sign",
"# bit, bits 2-12 represent the exponent, and bits 13-64 are the significand.",
"new_pos",
"=",
"pos",
"+",
"8",
"double_bytes",
"=",
"buffer",
"[",
"pos",
":",
"new_pos",
"]",
"# If this value has all its exponent bits set and at least one significand",
"# bit set, it's not a number. In Python 2.4, struct.unpack will treat it",
"# as inf or -inf. To avoid that, we treat it specially.",
"if",
"(",
"(",
"double_bytes",
"[",
"7",
":",
"8",
"]",
"in",
"b'\\x7F\\xFF'",
")",
"and",
"(",
"double_bytes",
"[",
"6",
":",
"7",
"]",
">=",
"b'\\xF0'",
")",
"and",
"(",
"double_bytes",
"[",
"0",
":",
"7",
"]",
"!=",
"b'\\x00\\x00\\x00\\x00\\x00\\x00\\xF0'",
")",
")",
":",
"return",
"(",
"_NAN",
",",
"new_pos",
")",
"# Note that we expect someone up-stack to catch struct.error and convert",
"# it to _DecodeError -- this way we don't have to set up exception-",
"# handling blocks every time we parse one value.",
"result",
"=",
"local_unpack",
"(",
"'<d'",
",",
"double_bytes",
")",
"[",
"0",
"]",
"return",
"(",
"result",
",",
"new_pos",
")",
"return",
"_SimpleDecoder",
"(",
"wire_format",
".",
"WIRETYPE_FIXED64",
",",
"InnerDecode",
")"
] | Returns a decoder for a double field.
This code works around a bug in struct.unpack for not-a-number. | [
"Returns",
"a",
"decoder",
"for",
"a",
"double",
"field",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py#L323-L350 | train |
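The same layout check for doubles, as a sketch: 0x7FF0000000000000 has all eleven exponent bits set with a zero significand (infinity), and flipping any significand bit turns it into NaN:

```python
import math
import struct

inf_bits = 0x7FF0000000000000
assert math.isinf(struct.unpack('<d', inf_bits.to_bytes(8, 'little'))[0])
assert math.isnan(struct.unpack('<d', (inf_bits | 1).to_bytes(8, 'little'))[0])
```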
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py | StringDecoder | def StringDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a string field."""
local_DecodeVarint = _DecodeVarint
local_unicode = six.text_type
def _ConvertToUnicode(byte_str):
try:
return local_unicode(byte_str, 'utf-8')
except UnicodeDecodeError as e:
# add more information to the error message and re-raise it.
e.reason = '%s in field: %s' % (e, key.full_name)
raise
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
value.append(_ConvertToUnicode(buffer[pos:new_pos]))
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
field_dict[key] = _ConvertToUnicode(buffer[pos:new_pos])
return new_pos
return DecodeField | python | def StringDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a string field."""
local_DecodeVarint = _DecodeVarint
local_unicode = six.text_type
def _ConvertToUnicode(byte_str):
try:
return local_unicode(byte_str, 'utf-8')
except UnicodeDecodeError as e:
# add more information to the error message and re-raise it.
e.reason = '%s in field: %s' % (e, key.full_name)
raise
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
value.append(_ConvertToUnicode(buffer[pos:new_pos]))
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
field_dict[key] = _ConvertToUnicode(buffer[pos:new_pos])
return new_pos
return DecodeField | [
"def",
"StringDecoder",
"(",
"field_number",
",",
"is_repeated",
",",
"is_packed",
",",
"key",
",",
"new_default",
")",
":",
"local_DecodeVarint",
"=",
"_DecodeVarint",
"local_unicode",
"=",
"six",
".",
"text_type",
"def",
"_ConvertToUnicode",
"(",
"byte_str",
")",
":",
"try",
":",
"return",
"local_unicode",
"(",
"byte_str",
",",
"'utf-8'",
")",
"except",
"UnicodeDecodeError",
"as",
"e",
":",
"# add more information to the error message and re-raise it.",
"e",
".",
"reason",
"=",
"'%s in field: %s'",
"%",
"(",
"e",
",",
"key",
".",
"full_name",
")",
"raise",
"assert",
"not",
"is_packed",
"if",
"is_repeated",
":",
"tag_bytes",
"=",
"encoder",
".",
"TagBytes",
"(",
"field_number",
",",
"wire_format",
".",
"WIRETYPE_LENGTH_DELIMITED",
")",
"tag_len",
"=",
"len",
"(",
"tag_bytes",
")",
"def",
"DecodeRepeatedField",
"(",
"buffer",
",",
"pos",
",",
"end",
",",
"message",
",",
"field_dict",
")",
":",
"value",
"=",
"field_dict",
".",
"get",
"(",
"key",
")",
"if",
"value",
"is",
"None",
":",
"value",
"=",
"field_dict",
".",
"setdefault",
"(",
"key",
",",
"new_default",
"(",
"message",
")",
")",
"while",
"1",
":",
"(",
"size",
",",
"pos",
")",
"=",
"local_DecodeVarint",
"(",
"buffer",
",",
"pos",
")",
"new_pos",
"=",
"pos",
"+",
"size",
"if",
"new_pos",
">",
"end",
":",
"raise",
"_DecodeError",
"(",
"'Truncated string.'",
")",
"value",
".",
"append",
"(",
"_ConvertToUnicode",
"(",
"buffer",
"[",
"pos",
":",
"new_pos",
"]",
")",
")",
"# Predict that the next tag is another copy of the same repeated field.",
"pos",
"=",
"new_pos",
"+",
"tag_len",
"if",
"buffer",
"[",
"new_pos",
":",
"pos",
"]",
"!=",
"tag_bytes",
"or",
"new_pos",
"==",
"end",
":",
"# Prediction failed. Return.",
"return",
"new_pos",
"return",
"DecodeRepeatedField",
"else",
":",
"def",
"DecodeField",
"(",
"buffer",
",",
"pos",
",",
"end",
",",
"message",
",",
"field_dict",
")",
":",
"(",
"size",
",",
"pos",
")",
"=",
"local_DecodeVarint",
"(",
"buffer",
",",
"pos",
")",
"new_pos",
"=",
"pos",
"+",
"size",
"if",
"new_pos",
">",
"end",
":",
"raise",
"_DecodeError",
"(",
"'Truncated string.'",
")",
"field_dict",
"[",
"key",
"]",
"=",
"_ConvertToUnicode",
"(",
"buffer",
"[",
"pos",
":",
"new_pos",
"]",
")",
"return",
"new_pos",
"return",
"DecodeField"
] | Returns a decoder for a string field. | [
"Returns",
"a",
"decoder",
"for",
"a",
"string",
"field",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py#L461-L504 | train |
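The wire layout the decoder assumes is a varint length followed by that many UTF-8 bytes. With a one-byte length this reduces to:

```python
payload = b'\x05hello'
size, pos = payload[0], 1                     # one-byte varint length
assert payload[pos:pos + size].decode('utf-8') == 'hello'
```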
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py | BytesDecoder | def BytesDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a bytes field."""
local_DecodeVarint = _DecodeVarint
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
value.append(buffer[pos:new_pos])
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
field_dict[key] = buffer[pos:new_pos]
return new_pos
return DecodeField | python | def BytesDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a bytes field."""
local_DecodeVarint = _DecodeVarint
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
value.append(buffer[pos:new_pos])
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated string.')
field_dict[key] = buffer[pos:new_pos]
return new_pos
return DecodeField | [
"def",
"BytesDecoder",
"(",
"field_number",
",",
"is_repeated",
",",
"is_packed",
",",
"key",
",",
"new_default",
")",
":",
"local_DecodeVarint",
"=",
"_DecodeVarint",
"assert",
"not",
"is_packed",
"if",
"is_repeated",
":",
"tag_bytes",
"=",
"encoder",
".",
"TagBytes",
"(",
"field_number",
",",
"wire_format",
".",
"WIRETYPE_LENGTH_DELIMITED",
")",
"tag_len",
"=",
"len",
"(",
"tag_bytes",
")",
"def",
"DecodeRepeatedField",
"(",
"buffer",
",",
"pos",
",",
"end",
",",
"message",
",",
"field_dict",
")",
":",
"value",
"=",
"field_dict",
".",
"get",
"(",
"key",
")",
"if",
"value",
"is",
"None",
":",
"value",
"=",
"field_dict",
".",
"setdefault",
"(",
"key",
",",
"new_default",
"(",
"message",
")",
")",
"while",
"1",
":",
"(",
"size",
",",
"pos",
")",
"=",
"local_DecodeVarint",
"(",
"buffer",
",",
"pos",
")",
"new_pos",
"=",
"pos",
"+",
"size",
"if",
"new_pos",
">",
"end",
":",
"raise",
"_DecodeError",
"(",
"'Truncated string.'",
")",
"value",
".",
"append",
"(",
"buffer",
"[",
"pos",
":",
"new_pos",
"]",
")",
"# Predict that the next tag is another copy of the same repeated field.",
"pos",
"=",
"new_pos",
"+",
"tag_len",
"if",
"buffer",
"[",
"new_pos",
":",
"pos",
"]",
"!=",
"tag_bytes",
"or",
"new_pos",
"==",
"end",
":",
"# Prediction failed. Return.",
"return",
"new_pos",
"return",
"DecodeRepeatedField",
"else",
":",
"def",
"DecodeField",
"(",
"buffer",
",",
"pos",
",",
"end",
",",
"message",
",",
"field_dict",
")",
":",
"(",
"size",
",",
"pos",
")",
"=",
"local_DecodeVarint",
"(",
"buffer",
",",
"pos",
")",
"new_pos",
"=",
"pos",
"+",
"size",
"if",
"new_pos",
">",
"end",
":",
"raise",
"_DecodeError",
"(",
"'Truncated string.'",
")",
"field_dict",
"[",
"key",
"]",
"=",
"buffer",
"[",
"pos",
":",
"new_pos",
"]",
"return",
"new_pos",
"return",
"DecodeField"
] | Returns a decoder for a bytes field. | [
"Returns",
"a",
"decoder",
"for",
"a",
"bytes",
"field",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py#L507-L541 | train |
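The repeated-field fast path compares raw tag bytes. A tag is the varint encoding of `(field_number << 3) | wire_type`, so for field numbers below 16 it fits in one byte; a sketch under that assumption:

```python
WIRETYPE_LENGTH_DELIMITED = 2

def tag_byte(field_number, wire_type):
    return bytes([(field_number << 3) | wire_type])  # single-byte case only

assert tag_byte(1, WIRETYPE_LENGTH_DELIMITED) == b'\x0a'
```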
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py | GroupDecoder | def GroupDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a group field."""
end_tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_END_GROUP)
end_tag_len = len(end_tag_bytes)
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_START_GROUP)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
# Read sub-message.
pos = value.add()._InternalParse(buffer, pos, end)
# Read end tag.
new_pos = pos+end_tag_len
if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
raise _DecodeError('Missing group end tag.')
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
# Read sub-message.
pos = value._InternalParse(buffer, pos, end)
# Read end tag.
new_pos = pos+end_tag_len
if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
raise _DecodeError('Missing group end tag.')
return new_pos
return DecodeField | python | def GroupDecoder(field_number, is_repeated, is_packed, key, new_default):
"""Returns a decoder for a group field."""
end_tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_END_GROUP)
end_tag_len = len(end_tag_bytes)
assert not is_packed
if is_repeated:
tag_bytes = encoder.TagBytes(field_number,
wire_format.WIRETYPE_START_GROUP)
tag_len = len(tag_bytes)
def DecodeRepeatedField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
# Read sub-message.
pos = value.add()._InternalParse(buffer, pos, end)
# Read end tag.
new_pos = pos+end_tag_len
if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
raise _DecodeError('Missing group end tag.')
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeRepeatedField
else:
def DecodeField(buffer, pos, end, message, field_dict):
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
# Read sub-message.
pos = value._InternalParse(buffer, pos, end)
# Read end tag.
new_pos = pos+end_tag_len
if buffer[pos:new_pos] != end_tag_bytes or new_pos > end:
raise _DecodeError('Missing group end tag.')
return new_pos
return DecodeField | [
"def",
"GroupDecoder",
"(",
"field_number",
",",
"is_repeated",
",",
"is_packed",
",",
"key",
",",
"new_default",
")",
":",
"end_tag_bytes",
"=",
"encoder",
".",
"TagBytes",
"(",
"field_number",
",",
"wire_format",
".",
"WIRETYPE_END_GROUP",
")",
"end_tag_len",
"=",
"len",
"(",
"end_tag_bytes",
")",
"assert",
"not",
"is_packed",
"if",
"is_repeated",
":",
"tag_bytes",
"=",
"encoder",
".",
"TagBytes",
"(",
"field_number",
",",
"wire_format",
".",
"WIRETYPE_START_GROUP",
")",
"tag_len",
"=",
"len",
"(",
"tag_bytes",
")",
"def",
"DecodeRepeatedField",
"(",
"buffer",
",",
"pos",
",",
"end",
",",
"message",
",",
"field_dict",
")",
":",
"value",
"=",
"field_dict",
".",
"get",
"(",
"key",
")",
"if",
"value",
"is",
"None",
":",
"value",
"=",
"field_dict",
".",
"setdefault",
"(",
"key",
",",
"new_default",
"(",
"message",
")",
")",
"while",
"1",
":",
"value",
"=",
"field_dict",
".",
"get",
"(",
"key",
")",
"if",
"value",
"is",
"None",
":",
"value",
"=",
"field_dict",
".",
"setdefault",
"(",
"key",
",",
"new_default",
"(",
"message",
")",
")",
"# Read sub-message.",
"pos",
"=",
"value",
".",
"add",
"(",
")",
".",
"_InternalParse",
"(",
"buffer",
",",
"pos",
",",
"end",
")",
"# Read end tag.",
"new_pos",
"=",
"pos",
"+",
"end_tag_len",
"if",
"buffer",
"[",
"pos",
":",
"new_pos",
"]",
"!=",
"end_tag_bytes",
"or",
"new_pos",
">",
"end",
":",
"raise",
"_DecodeError",
"(",
"'Missing group end tag.'",
")",
"# Predict that the next tag is another copy of the same repeated field.",
"pos",
"=",
"new_pos",
"+",
"tag_len",
"if",
"buffer",
"[",
"new_pos",
":",
"pos",
"]",
"!=",
"tag_bytes",
"or",
"new_pos",
"==",
"end",
":",
"# Prediction failed. Return.",
"return",
"new_pos",
"return",
"DecodeRepeatedField",
"else",
":",
"def",
"DecodeField",
"(",
"buffer",
",",
"pos",
",",
"end",
",",
"message",
",",
"field_dict",
")",
":",
"value",
"=",
"field_dict",
".",
"get",
"(",
"key",
")",
"if",
"value",
"is",
"None",
":",
"value",
"=",
"field_dict",
".",
"setdefault",
"(",
"key",
",",
"new_default",
"(",
"message",
")",
")",
"# Read sub-message.",
"pos",
"=",
"value",
".",
"_InternalParse",
"(",
"buffer",
",",
"pos",
",",
"end",
")",
"# Read end tag.",
"new_pos",
"=",
"pos",
"+",
"end_tag_len",
"if",
"buffer",
"[",
"pos",
":",
"new_pos",
"]",
"!=",
"end_tag_bytes",
"or",
"new_pos",
">",
"end",
":",
"raise",
"_DecodeError",
"(",
"'Missing group end tag.'",
")",
"return",
"new_pos",
"return",
"DecodeField"
] | Returns a decoder for a group field. | [
"Returns",
"a",
"decoder",
"for",
"a",
"group",
"field",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py#L544-L588 | train |
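Groups are bracketed by start and end tags (wire types 3 and 4) rather than a length prefix, which is why the decoder scans for `end_tag_bytes`; for a small field number the two tags are single bytes:

```python
WIRETYPE_START_GROUP, WIRETYPE_END_GROUP = 3, 4
field_number = 1
start = bytes([(field_number << 3) | WIRETYPE_START_GROUP])
end = bytes([(field_number << 3) | WIRETYPE_END_GROUP])
assert (start, end) == (b'\x0b', b'\x0c')
```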
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py | MapDecoder | def MapDecoder(field_descriptor, new_default, is_message_map):
"""Returns a decoder for a map field."""
key = field_descriptor
tag_bytes = encoder.TagBytes(field_descriptor.number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
local_DecodeVarint = _DecodeVarint
# Can't read _concrete_class yet; might not be initialized.
message_type = field_descriptor.message_type
def DecodeMap(buffer, pos, end, message, field_dict):
submsg = message_type._concrete_class()
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
# Read length.
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated message.')
# Read sub-message.
submsg.Clear()
if submsg._InternalParse(buffer, pos, new_pos) != new_pos:
# The only reason _InternalParse would return early is if it
# encountered an end-group tag.
raise _DecodeError('Unexpected end-group tag.')
if is_message_map:
value[submsg.key].MergeFrom(submsg.value)
else:
value[submsg.key] = submsg.value
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeMap | python | def MapDecoder(field_descriptor, new_default, is_message_map):
"""Returns a decoder for a map field."""
key = field_descriptor
tag_bytes = encoder.TagBytes(field_descriptor.number,
wire_format.WIRETYPE_LENGTH_DELIMITED)
tag_len = len(tag_bytes)
local_DecodeVarint = _DecodeVarint
# Can't read _concrete_class yet; might not be initialized.
message_type = field_descriptor.message_type
def DecodeMap(buffer, pos, end, message, field_dict):
submsg = message_type._concrete_class()
value = field_dict.get(key)
if value is None:
value = field_dict.setdefault(key, new_default(message))
while 1:
# Read length.
(size, pos) = local_DecodeVarint(buffer, pos)
new_pos = pos + size
if new_pos > end:
raise _DecodeError('Truncated message.')
# Read sub-message.
submsg.Clear()
if submsg._InternalParse(buffer, pos, new_pos) != new_pos:
# The only reason _InternalParse would return early is if it
# encountered an end-group tag.
raise _DecodeError('Unexpected end-group tag.')
if is_message_map:
value[submsg.key].MergeFrom(submsg.value)
else:
value[submsg.key] = submsg.value
# Predict that the next tag is another copy of the same repeated field.
pos = new_pos + tag_len
if buffer[new_pos:pos] != tag_bytes or new_pos == end:
# Prediction failed. Return.
return new_pos
return DecodeMap | [
"def",
"MapDecoder",
"(",
"field_descriptor",
",",
"new_default",
",",
"is_message_map",
")",
":",
"key",
"=",
"field_descriptor",
"tag_bytes",
"=",
"encoder",
".",
"TagBytes",
"(",
"field_descriptor",
".",
"number",
",",
"wire_format",
".",
"WIRETYPE_LENGTH_DELIMITED",
")",
"tag_len",
"=",
"len",
"(",
"tag_bytes",
")",
"local_DecodeVarint",
"=",
"_DecodeVarint",
"# Can't read _concrete_class yet; might not be initialized.",
"message_type",
"=",
"field_descriptor",
".",
"message_type",
"def",
"DecodeMap",
"(",
"buffer",
",",
"pos",
",",
"end",
",",
"message",
",",
"field_dict",
")",
":",
"submsg",
"=",
"message_type",
".",
"_concrete_class",
"(",
")",
"value",
"=",
"field_dict",
".",
"get",
"(",
"key",
")",
"if",
"value",
"is",
"None",
":",
"value",
"=",
"field_dict",
".",
"setdefault",
"(",
"key",
",",
"new_default",
"(",
"message",
")",
")",
"while",
"1",
":",
"# Read length.",
"(",
"size",
",",
"pos",
")",
"=",
"local_DecodeVarint",
"(",
"buffer",
",",
"pos",
")",
"new_pos",
"=",
"pos",
"+",
"size",
"if",
"new_pos",
">",
"end",
":",
"raise",
"_DecodeError",
"(",
"'Truncated message.'",
")",
"# Read sub-message.",
"submsg",
".",
"Clear",
"(",
")",
"if",
"submsg",
".",
"_InternalParse",
"(",
"buffer",
",",
"pos",
",",
"new_pos",
")",
"!=",
"new_pos",
":",
"# The only reason _InternalParse would return early is if it",
"# encountered an end-group tag.",
"raise",
"_DecodeError",
"(",
"'Unexpected end-group tag.'",
")",
"if",
"is_message_map",
":",
"value",
"[",
"submsg",
".",
"key",
"]",
".",
"MergeFrom",
"(",
"submsg",
".",
"value",
")",
"else",
":",
"value",
"[",
"submsg",
".",
"key",
"]",
"=",
"submsg",
".",
"value",
"# Predict that the next tag is another copy of the same repeated field.",
"pos",
"=",
"new_pos",
"+",
"tag_len",
"if",
"buffer",
"[",
"new_pos",
":",
"pos",
"]",
"!=",
"tag_bytes",
"or",
"new_pos",
"==",
"end",
":",
"# Prediction failed. Return.",
"return",
"new_pos",
"return",
"DecodeMap"
] | Returns a decoder for a map field. | [
"Returns",
"a",
"decoder",
"for",
"a",
"map",
"field",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py#L719-L759 | train |
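A sketch of the merge semantics in the loop above: each wire entry decodes to a (key, value) pair, and for scalar maps a repeated key simply overwrites the earlier value (message maps use `MergeFrom` instead):

```python
entries = [('a', 1), ('b', 2), ('a', 3)]   # decoded (submsg.key, submsg.value) pairs
result = {}
for k, v in entries:
    result[k] = v                          # scalar-map branch: plain overwrite
assert result == {'a': 3, 'b': 2}
```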
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py | _SkipVarint | def _SkipVarint(buffer, pos, end):
"""Skip a varint value. Returns the new position."""
# Previously ord(buffer[pos]) raised IndexError when pos is out of range.
# With this code, ord(b'') raises TypeError. Both are handled in
# python_message.py to generate a 'Truncated message' error.
while ord(buffer[pos:pos+1]) & 0x80:
pos += 1
pos += 1
if pos > end:
raise _DecodeError('Truncated message.')
return pos | python | def _SkipVarint(buffer, pos, end):
"""Skip a varint value. Returns the new position."""
# Previously ord(buffer[pos]) raised IndexError when pos is out of range.
# With this code, ord(b'') raises TypeError. Both are handled in
# python_message.py to generate a 'Truncated message' error.
while ord(buffer[pos:pos+1]) & 0x80:
pos += 1
pos += 1
if pos > end:
raise _DecodeError('Truncated message.')
return pos | [
"def",
"_SkipVarint",
"(",
"buffer",
",",
"pos",
",",
"end",
")",
":",
"# Previously ord(buffer[pos]) raised IndexError when pos is out of range.",
"# With this code, ord(b'') raises TypeError. Both are handled in",
"# python_message.py to generate a 'Truncated message' error.",
"while",
"ord",
"(",
"buffer",
"[",
"pos",
":",
"pos",
"+",
"1",
"]",
")",
"&",
"0x80",
":",
"pos",
"+=",
"1",
"pos",
"+=",
"1",
"if",
"pos",
">",
"end",
":",
"raise",
"_DecodeError",
"(",
"'Truncated message.'",
")",
"return",
"pos"
] | Skip a varint value. Returns the new position. | [
"Skip",
"a",
"varint",
"value",
".",
"Returns",
"the",
"new",
"position",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py#L765-L775 | train |
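The continuation bit (0x80) drives the skip; the same loop over a concrete buffer:

```python
buf = b'\x96\x01\xff'       # varint 150 followed by one unrelated byte
pos = 0
while buf[pos] & 0x80:      # continuation bit set: more varint bytes follow
    pos += 1
pos += 1                    # step past the final (high-bit-clear) byte
assert pos == 2
```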
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py | _SkipLengthDelimited | def _SkipLengthDelimited(buffer, pos, end):
"""Skip a length-delimited value. Returns the new position."""
(size, pos) = _DecodeVarint(buffer, pos)
pos += size
if pos > end:
raise _DecodeError('Truncated message.')
return pos | python | def _SkipLengthDelimited(buffer, pos, end):
"""Skip a length-delimited value. Returns the new position."""
(size, pos) = _DecodeVarint(buffer, pos)
pos += size
if pos > end:
raise _DecodeError('Truncated message.')
return pos | [
"def",
"_SkipLengthDelimited",
"(",
"buffer",
",",
"pos",
",",
"end",
")",
":",
"(",
"size",
",",
"pos",
")",
"=",
"_DecodeVarint",
"(",
"buffer",
",",
"pos",
")",
"pos",
"+=",
"size",
"if",
"pos",
">",
"end",
":",
"raise",
"_DecodeError",
"(",
"'Truncated message.'",
")",
"return",
"pos"
] | Skip a length-delimited value. Returns the new position. | [
"Skip",
"a",
"length",
"-",
"delimited",
"value",
".",
"Returns",
"the",
"new",
"position",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py#L785-L792 | train |
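Skipping a length-delimited field never decodes the payload; it reads the size varint and jumps over that many bytes. With a one-byte length:

```python
buf = b'\x03abcX'
size, pos = buf[0], 1   # one-byte varint length
pos += size             # jump over the payload without decoding it
assert pos == 4 and buf[pos:] == b'X'
```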
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py | _SkipGroup | def _SkipGroup(buffer, pos, end):
"""Skip sub-group. Returns the new position."""
while 1:
(tag_bytes, pos) = ReadTag(buffer, pos)
new_pos = SkipField(buffer, pos, end, tag_bytes)
if new_pos == -1:
return pos
pos = new_pos | python | def _SkipGroup(buffer, pos, end):
"""Skip sub-group. Returns the new position."""
while 1:
(tag_bytes, pos) = ReadTag(buffer, pos)
new_pos = SkipField(buffer, pos, end, tag_bytes)
if new_pos == -1:
return pos
pos = new_pos | [
"def",
"_SkipGroup",
"(",
"buffer",
",",
"pos",
",",
"end",
")",
":",
"while",
"1",
":",
"(",
"tag_bytes",
",",
"pos",
")",
"=",
"ReadTag",
"(",
"buffer",
",",
"pos",
")",
"new_pos",
"=",
"SkipField",
"(",
"buffer",
",",
"pos",
",",
"end",
",",
"tag_bytes",
")",
"if",
"new_pos",
"==",
"-",
"1",
":",
"return",
"pos",
"pos",
"=",
"new_pos"
] | Skip sub-group. Returns the new position. | [
"Skip",
"sub",
"-",
"group",
".",
"Returns",
"the",
"new",
"position",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py#L794-L802 | train |
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py | _FieldSkipper | def _FieldSkipper():
"""Constructs the SkipField function."""
WIRETYPE_TO_SKIPPER = [
_SkipVarint,
_SkipFixed64,
_SkipLengthDelimited,
_SkipGroup,
_EndGroup,
_SkipFixed32,
_RaiseInvalidWireType,
_RaiseInvalidWireType,
]
wiretype_mask = wire_format.TAG_TYPE_MASK
def SkipField(buffer, pos, end, tag_bytes):
"""Skips a field with the specified tag.
|pos| should point to the byte immediately after the tag.
Returns:
The new position (after the tag value), or -1 if the tag is an end-group
tag (in which case the calling loop should break).
"""
# The wire type is always in the first byte since varints are little-endian.
wire_type = ord(tag_bytes[0:1]) & wiretype_mask
return WIRETYPE_TO_SKIPPER[wire_type](buffer, pos, end)
return SkipField | python | def _FieldSkipper():
"""Constructs the SkipField function."""
WIRETYPE_TO_SKIPPER = [
_SkipVarint,
_SkipFixed64,
_SkipLengthDelimited,
_SkipGroup,
_EndGroup,
_SkipFixed32,
_RaiseInvalidWireType,
_RaiseInvalidWireType,
]
wiretype_mask = wire_format.TAG_TYPE_MASK
def SkipField(buffer, pos, end, tag_bytes):
"""Skips a field with the specified tag.
|pos| should point to the byte immediately after the tag.
Returns:
The new position (after the tag value), or -1 if the tag is an end-group
tag (in which case the calling loop should break).
"""
# The wire type is always in the first byte since varints are little-endian.
wire_type = ord(tag_bytes[0:1]) & wiretype_mask
return WIRETYPE_TO_SKIPPER[wire_type](buffer, pos, end)
return SkipField | [
"def",
"_FieldSkipper",
"(",
")",
":",
"WIRETYPE_TO_SKIPPER",
"=",
"[",
"_SkipVarint",
",",
"_SkipFixed64",
",",
"_SkipLengthDelimited",
",",
"_SkipGroup",
",",
"_EndGroup",
",",
"_SkipFixed32",
",",
"_RaiseInvalidWireType",
",",
"_RaiseInvalidWireType",
",",
"]",
"wiretype_mask",
"=",
"wire_format",
".",
"TAG_TYPE_MASK",
"def",
"SkipField",
"(",
"buffer",
",",
"pos",
",",
"end",
",",
"tag_bytes",
")",
":",
"\"\"\"Skips a field with the specified tag.\n\n |pos| should point to the byte immediately after the tag.\n\n Returns:\n The new position (after the tag value), or -1 if the tag is an end-group\n tag (in which case the calling loop should break).\n \"\"\"",
"# The wire type is always in the first byte since varints are little-endian.",
"wire_type",
"=",
"ord",
"(",
"tag_bytes",
"[",
"0",
":",
"1",
"]",
")",
"&",
"wiretype_mask",
"return",
"WIRETYPE_TO_SKIPPER",
"[",
"wire_type",
"]",
"(",
"buffer",
",",
"pos",
",",
"end",
")",
"return",
"SkipField"
] | Constructs the SkipField function. | [
"Constructs",
"the",
"SkipField",
"function",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/decoder.py#L822-L852 | train |
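Because varints are little-endian, the low three bits of the first tag byte always carry the wire type, so the dispatch table can be indexed without decoding the full tag:

```python
TAG_TYPE_MASK = 0x07                 # low three bits of the first tag byte
tag = bytes([(12 << 3) | 5])         # field 12, wire type fixed32 (5)
assert tag[0] & TAG_TYPE_MASK == 5   # index into WIRETYPE_TO_SKIPPER
```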
apple/turicreate | src/external/xgboost/python-package/xgboost/plotting.py | _parse_node | def _parse_node(graph, text):
"""parse dumped node"""
match = _NODEPAT.match(text)
if match is not None:
node = match.group(1)
graph.node(node, label=match.group(2), shape='circle')
return node
match = _LEAFPAT.match(text)
if match is not None:
node = match.group(1)
graph.node(node, label=match.group(2), shape='box')
return node
raise ValueError('Unable to parse node: {0}'.format(text)) | python | def _parse_node(graph, text):
"""parse dumped node"""
match = _NODEPAT.match(text)
if match is not None:
node = match.group(1)
graph.node(node, label=match.group(2), shape='circle')
return node
match = _LEAFPAT.match(text)
if match is not None:
node = match.group(1)
graph.node(node, label=match.group(2), shape='box')
return node
raise ValueError('Unable to parse node: {0}'.format(text)) | [
"def",
"_parse_node",
"(",
"graph",
",",
"text",
")",
":",
"match",
"=",
"_NODEPAT",
".",
"match",
"(",
"text",
")",
"if",
"match",
"is",
"not",
"None",
":",
"node",
"=",
"match",
".",
"group",
"(",
"1",
")",
"graph",
".",
"node",
"(",
"node",
",",
"label",
"=",
"match",
".",
"group",
"(",
"2",
")",
",",
"shape",
"=",
"'circle'",
")",
"return",
"node",
"match",
"=",
"_LEAFPAT",
".",
"match",
"(",
"text",
")",
"if",
"match",
"is",
"not",
"None",
":",
"node",
"=",
"match",
".",
"group",
"(",
"1",
")",
"graph",
".",
"node",
"(",
"node",
",",
"label",
"=",
"match",
".",
"group",
"(",
"2",
")",
",",
"shape",
"=",
"'box'",
")",
"return",
"node",
"raise",
"ValueError",
"(",
"'Unable to parse node: {0}'",
".",
"format",
"(",
"text",
")",
")"
] | parse dumped node | [
"parse",
"dumped",
"node"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/plotting.py#L109-L121 | train |
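`_NODEPAT` and `_LEAFPAT` are not shown in this row; the patterns below are illustrative stand-ins that match the shape of xgboost dump lines such as `0:[f0<5] yes=1,no=2` and `1:leaf=0.5`:

```python
import re

NODEPAT = re.compile(r'(\d+):\[(.+)\]')   # split node: id and condition
LEAFPAT = re.compile(r'(\d+):(leaf=.+)')  # leaf node: id and value

m = NODEPAT.match('0:[f0<5] yes=1,no=2')
assert m.group(1) == '0' and m.group(2) == 'f0<5'
assert LEAFPAT.match('1:leaf=0.5').group(2) == 'leaf=0.5'
```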
apple/turicreate | src/external/xgboost/python-package/xgboost/plotting.py | plot_tree | def plot_tree(booster, num_trees=0, rankdir='UT', ax=None, **kwargs):
"""Plot specified tree.
Parameters
----------
booster : Booster, XGBModel
Booster or XGBModel instance
num_trees : int, default 0
Specify the ordinal number of target tree
rankdir : str, default "UT"
Passed to graphviz via graph_attr
ax : matplotlib Axes, default None
Target axes instance. If None, new figure and axes will be created.
kwargs :
Other keywords passed to to_graphviz
Returns
-------
ax : matplotlib Axes
"""
try:
import matplotlib.pyplot as plt
import matplotlib.image as image
except ImportError:
raise ImportError('You must install matplotlib to plot tree')
if ax is None:
_, ax = plt.subplots(1, 1)
g = to_graphviz(booster, num_trees=num_trees, rankdir=rankdir, **kwargs)
s = BytesIO()
s.write(g.pipe(format='png'))
s.seek(0)
img = image.imread(s)
ax.imshow(img)
ax.axis('off')
return ax | python | def plot_tree(booster, num_trees=0, rankdir='UT', ax=None, **kwargs):
"""Plot specified tree.
Parameters
----------
booster : Booster, XGBModel
Booster or XGBModel instance
num_trees : int, default 0
Specify the ordinal number of target tree
rankdir : str, default "UT"
Passed to graphviz via graph_attr
ax : matplotlib Axes, default None
Target axes instance. If None, new figure and axes will be created.
kwargs :
Other keywords passed to to_graphviz
Returns
-------
ax : matplotlib Axes
"""
try:
import matplotlib.pyplot as plt
import matplotlib.image as image
except ImportError:
raise ImportError('You must install matplotlib to plot tree')
if ax is None:
_, ax = plt.subplots(1, 1)
g = to_graphviz(booster, num_trees=num_trees, rankdir=rankdir, **kwargs)
s = BytesIO()
s.write(g.pipe(format='png'))
s.seek(0)
img = image.imread(s)
ax.imshow(img)
ax.axis('off')
return ax | [
"def",
"plot_tree",
"(",
"booster",
",",
"num_trees",
"=",
"0",
",",
"rankdir",
"=",
"'UT'",
",",
"ax",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"import",
"matplotlib",
".",
"pyplot",
"as",
"plt",
"import",
"matplotlib",
".",
"image",
"as",
"image",
"except",
"ImportError",
":",
"raise",
"ImportError",
"(",
"'You must install matplotlib to plot tree'",
")",
"if",
"ax",
"is",
"None",
":",
"_",
",",
"ax",
"=",
"plt",
".",
"subplots",
"(",
"1",
",",
"1",
")",
"g",
"=",
"to_graphviz",
"(",
"booster",
",",
"num_trees",
"=",
"num_trees",
",",
"rankdir",
"=",
"rankdir",
",",
"*",
"*",
"kwargs",
")",
"s",
"=",
"BytesIO",
"(",
")",
"s",
".",
"write",
"(",
"g",
".",
"pipe",
"(",
"format",
"=",
"'png'",
")",
")",
"s",
".",
"seek",
"(",
"0",
")",
"img",
"=",
"image",
".",
"imread",
"(",
"s",
")",
"ax",
".",
"imshow",
"(",
"img",
")",
"ax",
".",
"axis",
"(",
"'off'",
")",
"return",
"ax"
] | Plot specified tree.
Parameters
----------
booster : Booster, XGBModel
Booster or XGBModel instance
num_trees : int, default 0
Specify the ordinal number of target tree
rankdir : str, default "UT"
Passed to graphiz via graph_attr
ax : matplotlib Axes, default None
Target axes instance. If None, new figure and axes will be created.
kwargs :
Other keywords passed to to_graphviz
Returns
-------
ax : matplotlib Axes | [
"Plot",
"specified",
"tree",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/python-package/xgboost/plotting.py#L206-L246 | train |
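A hedged end-to-end usage sketch (it assumes xgboost, matplotlib, and a graphviz installation; the tiny dataset is made up for illustration):

```python
import numpy as np
import xgboost as xgb
import matplotlib.pyplot as plt

# Train a one-round, depth-2 model on a toy dataset, then plot its first tree.
X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, 0, 1, 1])
bst = xgb.train({'max_depth': 2, 'objective': 'binary:logistic'},
                xgb.DMatrix(X, label=y), num_boost_round=1)
ax = xgb.plot_tree(bst, num_trees=0)
plt.show()
```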
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/manager.py | Manager.construct | def construct (self, properties = [], targets = []):
""" Constructs the dependency graph.
properties: the build properties.
targets: the targets to consider. If none is specified, uses all.
"""
if not targets:
for name, project in self.projects ().projects ():
targets.append (project.target ())
property_groups = build_request.expand_no_defaults (properties)
virtual_targets = []
build_prop_sets = []
for p in property_groups:
build_prop_sets.append (property_set.create (feature.split (p)))
if not build_prop_sets:
build_prop_sets = [property_set.empty ()]
for build_properties in build_prop_sets:
for target in targets:
result = target.generate (build_properties)
virtual_targets.extend (result.targets ())
actual_targets = []
for virtual_target in virtual_targets:
actual_targets.extend (virtual_target.actualize ()) | python | def construct (self, properties = [], targets = []):
""" Constructs the dependency graph.
properties: the build properties.
targets: the targets to consider. If none is specified, uses all.
"""
if not targets:
for name, project in self.projects ().projects ():
targets.append (project.target ())
property_groups = build_request.expand_no_defaults (properties)
virtual_targets = []
build_prop_sets = []
for p in property_groups:
build_prop_sets.append (property_set.create (feature.split (p)))
if not build_prop_sets:
build_prop_sets = [property_set.empty ()]
for build_properties in build_prop_sets:
for target in targets:
result = target.generate (build_properties)
virtual_targets.extend (result.targets ())
actual_targets = []
for virtual_target in virtual_targets:
actual_targets.extend (virtual_target.actualize ()) | [
"def",
"construct",
"(",
"self",
",",
"properties",
"=",
"[",
"]",
",",
"targets",
"=",
"[",
"]",
")",
":",
"if",
"not",
"targets",
":",
"for",
"name",
",",
"project",
"in",
"self",
".",
"projects",
"(",
")",
".",
"projects",
"(",
")",
":",
"targets",
".",
"append",
"(",
"project",
".",
"target",
"(",
")",
")",
"property_groups",
"=",
"build_request",
".",
"expand_no_defaults",
"(",
"properties",
")",
"virtual_targets",
"=",
"[",
"]",
"build_prop_sets",
"=",
"[",
"]",
"for",
"p",
"in",
"property_groups",
":",
"build_prop_sets",
".",
"append",
"(",
"property_set",
".",
"create",
"(",
"feature",
".",
"split",
"(",
"p",
")",
")",
")",
"if",
"not",
"build_prop_sets",
":",
"build_prop_sets",
"=",
"[",
"property_set",
".",
"empty",
"(",
")",
"]",
"for",
"build_properties",
"in",
"build_prop_sets",
":",
"for",
"target",
"in",
"targets",
":",
"result",
"=",
"target",
".",
"generate",
"(",
"build_properties",
")",
"virtual_targets",
".",
"extend",
"(",
"result",
".",
"targets",
"(",
")",
")",
"actual_targets",
"=",
"[",
"]",
"for",
"virtual_target",
"in",
"virtual_targets",
":",
"actual_targets",
".",
"extend",
"(",
"virtual_target",
".",
"actualize",
"(",
")",
")"
] | Constructs the dependency graph.
properties: the build properties.
targets: the targets to consider. If none is specified, uses all. | [
"Constructs",
"the",
"dependency",
"graph",
".",
"properties",
":",
"the",
"build",
"properties",
".",
"targets",
":",
"the",
"targets",
"to",
"consider",
".",
"If",
"none",
"is",
"specified",
"uses",
"all",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/manager.py#L83-L109 | train |
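The generation shape, in plain Python with stand-ins for b2's property sets: every target is generated once per requested property combination, defaulting to a single empty set when none were given:

```python
# Hypothetical stand-ins for build_prop_sets and targets above.
property_sets = [{'variant': 'debug'}, {'variant': 'release'}] or [{}]
targets = ['app', 'lib']
virtual = [(t, ps) for ps in property_sets for t in targets]
assert len(virtual) == len(targets) * len(property_sets)
```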
apple/turicreate | src/unity/python/turicreate/toolkits/classifier/decision_tree_classifier.py | DecisionTreeClassifier.evaluate | def evaluate(self, dataset, metric='auto', missing_value_action='auto'):
"""
Evaluate the model by making predictions of target values and comparing
these to actual values.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the target and features used for model training. Additional
columns are ignored.
metric : str, optional
Name of the evaluation metric. Possible values are:
- 'auto' : Returns all available metrics.
- 'accuracy' : Classification accuracy (micro average).
- 'auc' : Area under the ROC curve (macro average)
- 'precision' : Precision score (macro average)
- 'recall' : Recall score (macro average)
- 'f1_score' : F1 score (macro average)
- 'log_loss' : Log loss
- 'confusion_matrix' : An SFrame with counts of possible prediction/true label combinations.
- 'roc_curve' : An SFrame containing information needed for an ROC curve
For more flexibility in calculating evaluation metrics, use the
:class:`~turicreate.evaluation` module.
missing_value_action : str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Default to 'impute'
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : dict
Dictionary of evaluation results where the key is the name of the
evaluation metric (e.g. `accuracy`) and the value is the evaluation
score.
See Also
----------
create, predict, classify
Examples
--------
.. sourcecode:: python
>>> results = model.evaluate(test_data)
>>> results = model.evaluate(test_data, metric='accuracy')
>>> results = model.evaluate(test_data, metric='confusion_matrix')
"""
_raise_error_evaluation_metric_is_valid(metric,
['auto', 'accuracy', 'confusion_matrix', 'roc_curve', 'auc',
'log_loss', 'precision', 'recall', 'f1_score'])
return super(_Classifier, self).evaluate(dataset,
missing_value_action=missing_value_action,
metric=metric) | python | def evaluate(self, dataset, metric='auto', missing_value_action='auto'):
"""
Evaluate the model by making predictions of target values and comparing
these to actual values.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the target and features used for model training. Additional
columns are ignored.
metric : str, optional
Name of the evaluation metric. Possible values are:
- 'auto' : Returns all available metrics.
- 'accuracy' : Classification accuracy (micro average).
- 'auc' : Area under the ROC curve (macro average)
- 'precision' : Precision score (macro average)
- 'recall' : Recall score (macro average)
- 'f1_score' : F1 score (macro average)
- 'log_loss' : Log loss
- 'confusion_matrix' : An SFrame with counts of possible prediction/true label combinations.
- 'roc_curve' : An SFrame containing information needed for an ROC curve
For more flexibility in calculating evaluation metrics, use the
:class:`~turicreate.evaluation` module.
missing_value_action : str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Default to 'impute'
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : dict
Dictionary of evaluation results where the key is the name of the
evaluation metric (e.g. `accuracy`) and the value is the evaluation
score.
See Also
----------
create, predict, classify
Examples
--------
.. sourcecode:: python
>>> results = model.evaluate(test_data)
>>> results = model.evaluate(test_data, metric='accuracy')
>>> results = model.evaluate(test_data, metric='confusion_matrix')
"""
_raise_error_evaluation_metric_is_valid(metric,
['auto', 'accuracy', 'confusion_matrix', 'roc_curve', 'auc',
'log_loss', 'precision', 'recall', 'f1_score'])
return super(_Classifier, self).evaluate(dataset,
missing_value_action=missing_value_action,
metric=metric) | ["def", "evaluate", "(", "self", ",", "dataset", ",", "metric", "=", "'auto'", ",", "missing_value_action", "=", "'auto'", ")", ":", "_raise_error_evaluation_metric_is_valid", "(", "metric", ",", "[", "'auto'", ",", "'accuracy'", ",", "'confusion_matrix'", ",", "'roc_curve'", ",", "'auc'", ",", "'log_loss'", ",", "'precision'", ",", "'recall'", ",", "'f1_score'", "]", ")", "return", "super", "(", "_Classifier", ",", "self", ")", ".", "evaluate", "(", "dataset", ",", "missing_value_action", "=", "missing_value_action", ",", "metric", "=", "metric", ")"] | Evaluate the model by making predictions of target values and comparing
these to actual values.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the target and features used for model training. Additional
columns are ignored.
metric : str, optional
Name of the evaluation metric. Possible values are:
- 'auto' : Returns all available metrics.
- 'accuracy' : Classification accuracy (micro average).
- 'auc' : Area under the ROC curve (macro average)
- 'precision' : Precision score (macro average)
- 'recall' : Recall score (macro average)
- 'f1_score' : F1 score (macro average)
- 'log_loss' : Log loss
- 'confusion_matrix' : An SFrame with counts of possible prediction/true label combinations.
- 'roc_curve' : An SFrame containing information needed for an ROC curve
For more flexibility in calculating evaluation metrics, use the
:class:`~turicreate.evaluation` module.
missing_value_action : str, optional
Action to perform when missing values are encountered. This can be
one of:
- 'auto': Default to 'impute'
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : dict
Dictionary of evaluation results where the key is the name of the
evaluation metric (e.g. `accuracy`) and the value is the evaluation
score.
See Also
----------
create, predict, classify
Examples
--------
.. sourcecode:: python
>>> results = model.evaluate(test_data)
>>> results = model.evaluate(test_data, metric='accuracy')
>>> results = model.evaluate(test_data, metric='confusion_matrix') | ["Evaluate", "the", "model", "by", "making", "predictions", "of", "target", "values", "and", "comparing", "these", "to", "actual", "values", "."] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/classifier/decision_tree_classifier.py#L143-L208 | train |
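The only logic evaluate() adds over its base class is the metric-name whitelist check. Below is a minimal sketch of that validation pattern; _raise_error_evaluation_metric_is_valid is internal to turicreate, so this stand-in only mimics its contract.

```python
def check_metric_is_valid(metric, allowed):
    # Fail fast with an actionable message before any expensive evaluation runs.
    if metric not in allowed:
        raise ValueError("Evaluation metric '%s' is not supported; expected one of: %s"
                         % (metric, ', '.join(sorted(allowed))))

check_metric_is_valid('accuracy', ['auto', 'accuracy', 'auc'])   # passes silently
# check_metric_is_valid('rmse', ['auto', 'accuracy', 'auc'])     # would raise ValueError
```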
apple/turicreate | src/unity/python/turicreate/toolkits/classifier/decision_tree_classifier.py | DecisionTreeClassifier.predict | def predict(self, dataset, output_type='class', missing_value_action='auto'):
"""
A flexible and advanced prediction API.
The target column is provided during
:func:`~turicreate.decision_tree.create`. If the target column is in the
`dataset` it will be ignored.
Parameters
----------
dataset : SFrame
A dataset that has the same columns that were used during training.
If the target column exists in ``dataset`` it will be ignored
while making predictions.
output_type : {'probability', 'margin', 'class', 'probability_vector'}, optional.
Form of the predictions which are one of:
- 'probability': Prediction probability associated with the True
class (not applicable for multi-class classification)
- 'margin': Margin associated with the prediction (not applicable
for multi-class classification)
- 'probability_vector': Prediction probability associated with each
class as a vector. The probability of the first class (sorted
alphanumerically by name of the class in the training set) is in
position 0 of the vector, the second in position 1 and so on.
- 'class': Class prediction. For multi-class classification, this
returns the class with maximum probability.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SArray
Predicted target value for each example (i.e. row) in the dataset.
See Also
----------
create, evaluate, classify
Examples
--------
>>> m.predict(testdata)
>>> m.predict(testdata, output_type='probability')
>>> m.predict(testdata, output_type='margin')
"""
_check_categorical_option_type('output_type', output_type,
['class', 'margin', 'probability', 'probability_vector'])
return super(_Classifier, self).predict(dataset,
output_type=output_type,
missing_value_action=missing_value_action) | python | def predict(self, dataset, output_type='class', missing_value_action='auto'):
"""
A flexible and advanced prediction API.
The target column is provided during
:func:`~turicreate.decision_tree.create`. If the target column is in the
`dataset` it will be ignored.
Parameters
----------
dataset : SFrame
A dataset that has the same columns that were used during training.
If the target column exists in ``dataset`` it will be ignored
while making predictions.
output_type : {'probability', 'margin', 'class', 'probability_vector'}, optional.
Form of the predictions which are one of:
- 'probability': Prediction probability associated with the True
class (not applicable for multi-class classification)
- 'margin': Margin associated with the prediction (not applicable
for multi-class classification)
- 'probability_vector': Prediction probability associated with each
class as a vector. The probability of the first class (sorted
alphanumerically by name of the class in the training set) is in
position 0 of the vector, the second in position 1 and so on.
- 'class': Class prediction. For multi-class classification, this
returns the class with maximum probability.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SArray
Predicted target value for each example (i.e. row) in the dataset.
See Also
----------
create, evaluate, classify
Examples
--------
>>> m.predict(testdata)
>>> m.predict(testdata, output_type='probability')
>>> m.predict(testdata, output_type='margin')
"""
_check_categorical_option_type('output_type', output_type,
['class', 'margin', 'probability', 'probability_vector'])
return super(_Classifier, self).predict(dataset,
output_type=output_type,
missing_value_action=missing_value_action) | ["def", "predict", "(", "self", ",", "dataset", ",", "output_type", "=", "'class'", ",", "missing_value_action", "=", "'auto'", ")", ":", "_check_categorical_option_type", "(", "'output_type'", ",", "output_type", ",", "[", "'class'", ",", "'margin'", ",", "'probability'", ",", "'probability_vector'", "]", ")", "return", "super", "(", "_Classifier", ",", "self", ")", ".", "predict", "(", "dataset", ",", "output_type", "=", "output_type", ",", "missing_value_action", "=", "missing_value_action", ")"] | A flexible and advanced prediction API.
The target column is provided during
:func:`~turicreate.decision_tree.create`. If the target column is in the
`dataset` it will be ignored.
Parameters
----------
dataset : SFrame
A dataset that has the same columns that were used during training.
If the target column exists in ``dataset`` it will be ignored
while making predictions.
output_type : {'probability', 'margin', 'class', 'probability_vector'}, optional.
Form of the predictions which are one of:
- 'probability': Prediction probability associated with the True
class (not applicable for multi-class classification)
- 'margin': Margin associated with the prediction (not applicable
for multi-class classification)
- 'probability_vector': Prediction probability associated with each
class as a vector. The probability of the first class (sorted
alphanumerically by name of the class in the training set) is in
position 0 of the vector, the second in position 1 and so on.
- 'class': Class prediction. For multi-class classification, this
returns the class with maximum probability.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SArray
Predicted target value for each example (i.e. row) in the dataset.
See Also
----------
create, evaluate, classify
Examples
--------
>>> m.predict(testdata)
>>> m.predict(testdata, output_type='probability')
>>> m.predict(testdata, output_type='margin') | ["A", "flexible", "and", "advanced", "prediction", "API", "."] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/classifier/decision_tree_classifier.py#L210-L271 | train |
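Per the docstring, a 'class' prediction is the class of maximum probability, with classes sorted alphanumerically at training time. A small numpy sketch of that mapping (illustrative only, not turicreate's internals):

```python
import numpy as np

classes = sorted(['cat', 'dog', 'bird'])           # -> ['bird', 'cat', 'dog']
proba = np.array([[0.1, 0.7, 0.2],                 # one probability vector per example
                  [0.5, 0.2, 0.3]])
predicted = [classes[i] for i in proba.argmax(axis=1)]
print(predicted)                                    # ['cat', 'bird']
```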
apple/turicreate | src/unity/python/turicreate/toolkits/classifier/decision_tree_classifier.py | DecisionTreeClassifier.predict_topk | def predict_topk(self, dataset, output_type="probability", k=3, missing_value_action='auto'):
"""
Return top-k predictions for the ``dataset``, using the trained model.
Predictions are returned as an SFrame with three columns: `id`,
`class`, and `probability`, `margin`, or `rank`, depending on the ``output_type``
parameter. Input dataset size must be the same as for training of the model.
Parameters
----------
dataset : SFrame
A dataset that has the same columns that were used during training.
If the target column exists in ``dataset`` it will be ignored
while making predictions.
output_type : {'probability', 'rank', 'margin'}, optional
Choose the return type of the prediction:
- `probability`: Probability associated with each label in the prediction.
- `rank` : Rank associated with each label in the prediction.
- `margin` : Margin associated with each label in the prediction.
k : int, optional
Number of classes to return for each input example.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SFrame
An SFrame with model predictions.
See Also
--------
predict, classify, evaluate
Examples
--------
>>> pred = m.predict_topk(validation_data, k=3)
>>> pred
+--------+-------+-------------------+
| id | class | probability |
+--------+-------+-------------------+
| 0 | 4 | 0.995623886585 |
| 0 | 9 | 0.0038311756216 |
| 0 | 7 | 0.000301006948575 |
| 1 | 1 | 0.928708016872 |
| 1 | 3 | 0.0440889261663 |
| 1 | 2 | 0.0176190119237 |
| 2 | 3 | 0.996967732906 |
| 2 | 2 | 0.00151345680933 |
| 2 | 7 | 0.000637513934635 |
| 3 | 1 | 0.998070061207 |
| ... | ... | ... |
+--------+-------+-------------------+
[35688 rows x 3 columns]
"""
_check_categorical_option_type('output_type', output_type, ['rank', 'margin', 'probability'])
if missing_value_action == 'auto':
missing_value_action = _sl.select_default_missing_value_policy(self, 'predict')
# Low latency path
if isinstance(dataset, list):
return self.__proxy__.fast_predict_topk(
dataset, missing_value_action, output_type, k)
if isinstance(dataset, dict):
return self.__proxy__.fast_predict_topk(
[dataset], missing_value_action, output_type, k)
# Fast path
_raise_error_if_not_sframe(dataset, "dataset")
return self.__proxy__.predict_topk(
dataset, missing_value_action, output_type, k) | python | def predict_topk(self, dataset, output_type="probability", k=3, missing_value_action='auto'):
"""
Return top-k predictions for the ``dataset``, using the trained model.
Predictions are returned as an SFrame with three columns: `id`,
`class`, and `probability`, `margin`, or `rank`, depending on the ``output_type``
parameter. Input dataset size must be the same as for training of the model.
Parameters
----------
dataset : SFrame
A dataset that has the same columns that were used during training.
If the target column exists in ``dataset`` it will be ignored
while making predictions.
output_type : {'probability', 'rank', 'margin'}, optional
Choose the return type of the prediction:
- `probability`: Probability associated with each label in the prediction.
- `rank` : Rank associated with each label in the prediction.
- `margin` : Margin associated with each label in the prediction.
k : int, optional
Number of classes to return for each input example.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SFrame
An SFrame with model predictions.
See Also
--------
predict, classify, evaluate
Examples
--------
>>> pred = m.predict_topk(validation_data, k=3)
>>> pred
+--------+-------+-------------------+
| id | class | probability |
+--------+-------+-------------------+
| 0 | 4 | 0.995623886585 |
| 0 | 9 | 0.0038311756216 |
| 0 | 7 | 0.000301006948575 |
| 1 | 1 | 0.928708016872 |
| 1 | 3 | 0.0440889261663 |
| 1 | 2 | 0.0176190119237 |
| 2 | 3 | 0.996967732906 |
| 2 | 2 | 0.00151345680933 |
| 2 | 7 | 0.000637513934635 |
| 3 | 1 | 0.998070061207 |
| ... | ... | ... |
+--------+-------+-------------------+
[35688 rows x 3 columns]
"""
_check_categorical_option_type('output_type', output_type, ['rank', 'margin', 'probability'])
if missing_value_action == 'auto':
missing_value_action = _sl.select_default_missing_value_policy(self, 'predict')
# Low latency path
if isinstance(dataset, list):
return self.__proxy__.fast_predict_topk(
dataset, missing_value_action, output_type, k)
if isinstance(dataset, dict):
return self.__proxy__.fast_predict_topk(
[dataset], missing_value_action, output_type, k)
# Fast path
_raise_error_if_not_sframe(dataset, "dataset")
return self.__proxy__.predict_topk(
dataset, missing_value_action, output_type, k) | ["def", "predict_topk", "(", "self", ",", "dataset", ",", "output_type", "=", "\"probability\"", ",", "k", "=", "3", ",", "missing_value_action", "=", "'auto'", ")", ":", "_check_categorical_option_type", "(", "'output_type'", ",", "output_type", ",", "[", "'rank'", ",", "'margin'", ",", "'probability'", "]", ")", "if", "missing_value_action", "==", "'auto'", ":", "missing_value_action", "=", "_sl", ".", "select_default_missing_value_policy", "(", "self", ",", "'predict'", ")", "# Low latency path", "if", "isinstance", "(", "dataset", ",", "list", ")", ":", "return", "self", ".", "__proxy__", ".", "fast_predict_topk", "(", "dataset", ",", "missing_value_action", ",", "output_type", ",", "k", ")", "if", "isinstance", "(", "dataset", ",", "dict", ")", ":", "return", "self", ".", "__proxy__", ".", "fast_predict_topk", "(", "[", "dataset", "]", ",", "missing_value_action", ",", "output_type", ",", "k", ")", "# Fast path", "_raise_error_if_not_sframe", "(", "dataset", ",", "\"dataset\"", ")", "return", "self", ".", "__proxy__", ".", "predict_topk", "(", "dataset", ",", "missing_value_action", ",", "output_type", ",", "k", ")"] | Return top-k predictions for the ``dataset``, using the trained model.
Predictions are returned as an SFrame with three columns: `id`,
`class`, and `probability`, `margin`, or `rank`, depending on the ``output_type``
parameter. Input dataset size must be the same as for training of the model.
Parameters
----------
dataset : SFrame
A dataset that has the same columns that were used during training.
If the target column exists in ``dataset`` it will be ignored
while making predictions.
output_type : {'probability', 'rank', 'margin'}, optional
Choose the return type of the prediction:
- `probability`: Probability associated with each label in the prediction.
- `rank` : Rank associated with each label in the prediction.
- `margin` : Margin associated with each label in the prediction.
k : int, optional
Number of classes to return for each input example.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SFrame
An SFrame with model predictions.
See Also
--------
predict, classify, evaluate
Examples
--------
>>> pred = m.predict_topk(validation_data, k=3)
>>> pred
+--------+-------+-------------------+
| id | class | probability |
+--------+-------+-------------------+
| 0 | 4 | 0.995623886585 |
| 0 | 9 | 0.0038311756216 |
| 0 | 7 | 0.000301006948575 |
| 1 | 1 | 0.928708016872 |
| 1 | 3 | 0.0440889261663 |
| 1 | 2 | 0.0176190119237 |
| 2 | 3 | 0.996967732906 |
| 2 | 2 | 0.00151345680933 |
| 2 | 7 | 0.000637513934635 |
| 3 | 1 | 0.998070061207 |
| ... | ... | ... |
+--------+-------+-------------------+
[35688 rows x 3 columns] | ["Return", "top", "-", "k", "predictions", "for", "the", "dataset", "using", "the", "trained", "model", ".", "Predictions", "are", "returned", "as", "an", "SFrame", "with", "three", "columns", ":", "id", "class", "and", "probability", "margin", "or", "rank", "depending", "on", "the", "output_type", "parameter", ".", "Input", "dataset", "size", "must", "be", "the", "same", "as", "for", "training", "of", "the", "model", "."] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/classifier/decision_tree_classifier.py#L273-L353 | train |
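The output format above, one (id, class, probability) row per example per retained class, can be reproduced with a plain argsort. A hedged sketch, not the library's implementation:

```python
import numpy as np

def topk_rows(proba, classes, k=3):
    rows = []
    for i, p in enumerate(proba):
        for j in np.argsort(p)[::-1][:k]:          # indices of the k largest probabilities
            rows.append({'id': i, 'class': classes[j], 'probability': float(p[j])})
    return rows

proba = np.array([[0.05, 0.90, 0.05],
                  [0.60, 0.10, 0.30]])
for row in topk_rows(proba, classes=[0, 1, 2], k=2):
    print(row)
```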
apple/turicreate | src/unity/python/turicreate/toolkits/classifier/decision_tree_classifier.py | DecisionTreeClassifier.classify | def classify(self, dataset, missing_value_action='auto'):
"""
Return a classification, for each example in the ``dataset``, using the
trained model. The output SFrame contains predictions as class labels
(0 or 1) and probabilities associated with the example.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SFrame
An SFrame with model predictions, i.e. class labels and probabilities
associated with each of the class labels.
See Also
----------
create, evaluate, predict
Examples
----------
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> data['is_expensive'] = data['price'] > 30000
>>> model = turicreate.decision_tree_classifier.create(data,
>>> target='is_expensive',
>>> features=['bath', 'bedroom', 'size'])
>>> classes = model.classify(data)
"""
return super(DecisionTreeClassifier, self).classify(dataset,
missing_value_action=missing_value_action) | python | def classify(self, dataset, missing_value_action='auto'):
"""
Return a classification, for each example in the ``dataset``, using the
trained model. The output SFrame contains predictions as class labels
(0 or 1) and probabilities associated with the example.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SFrame
An SFrame with model predictions, i.e. class labels and probabilities
associated with each of the class labels.
See Also
----------
create, evaluate, predict
Examples
----------
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> data['is_expensive'] = data['price'] > 30000
>>> model = turicreate.decision_tree_classifier.create(data,
>>> target='is_expensive',
>>> features=['bath', 'bedroom', 'size'])
>>> classes = model.classify(data)
"""
return super(DecisionTreeClassifier, self).classify(dataset,
missing_value_action=missing_value_action) | ["def", "classify", "(", "self", ",", "dataset", ",", "missing_value_action", "=", "'auto'", ")", ":", "return", "super", "(", "DecisionTreeClassifier", ",", "self", ")", ".", "classify", "(", "dataset", ",", "missing_value_action", "=", "missing_value_action", ")"] | Return a classification, for each example in the ``dataset``, using the
trained model. The output SFrame contains predictions as class labels
(0 or 1) and probabilities associated with the example.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
missing_value_action : str, optional
Action to perform when missing values are encountered. Can be
one of:
- 'auto': By default the model will treat missing value as is.
- 'impute': Proceed with evaluation by filling in the missing
values with the mean of the training data. Missing
values are also imputed if an entire column of data is
missing during evaluation.
- 'error': Do not proceed with evaluation and terminate with
an error message.
Returns
-------
out : SFrame
An SFrame with model predictions, i.e. class labels and probabilities
associated with each of the class labels.
See Also
----------
create, evaluate, predict
Examples
----------
>>> data = turicreate.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> data['is_expensive'] = data['price'] > 30000
>>> model = turicreate.decision_tree_classifier.create(data,
>>> target='is_expensive',
>>> features=['bath', 'bedroom', 'size'])
>>> classes = model.classify(data) | ["Return", "a", "classification", "for", "each", "example", "in", "the", "dataset", "using", "the", "trained", "model", ".", "The", "output", "SFrame", "contains", "predictions", "as", "class", "labels", "(", "0", "or", "1", ")", "and", "probabilities", "associated", "with", "the", "example", "."] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/classifier/decision_tree_classifier.py#L355-L403 | train |
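For a binary classifier like the one above, classify() pairs each predicted label with the probability assigned to that label. A toy sketch of that pairing (not the library's code):

```python
def classify_one(p_positive, threshold=0.5):
    # Report the winning label together with the probability of that label.
    label = int(p_positive > threshold)
    probability = p_positive if label == 1 else 1.0 - p_positive
    return {'class': label, 'probability': probability}

print(classify_one(0.83))   # {'class': 1, 'probability': 0.83}
print(classify_one(0.20))   # {'class': 0, 'probability': 0.8}
```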
apple/turicreate | src/external/xgboost/subtree/rabit/tracker/rabit_tracker.py | Tracker.slave_envs | def slave_envs(self):
"""
get environment variables for slaves
can be passed in as args or envs
"""
if self.hostIP == 'dns':
host = socket.gethostname()
elif self.hostIP == 'ip':
host = socket.gethostbyname(socket.getfqdn())
else:
host = self.hostIP
return {'rabit_tracker_uri': host,
'rabit_tracker_port': self.port} | python | def slave_envs(self):
"""
get environment variables for slaves
can be passed in as args or envs
"""
if self.hostIP == 'dns':
host = socket.gethostname()
elif self.hostIP == 'ip':
host = socket.gethostbyname(socket.getfqdn())
else:
host = self.hostIP
return {'rabit_tracker_uri': host,
'rabit_tracker_port': self.port} | ["def", "slave_envs", "(", "self", ")", ":", "if", "self", ".", "hostIP", "==", "'dns'", ":", "host", "=", "socket", ".", "gethostname", "(", ")", "elif", "self", ".", "hostIP", "==", "'ip'", ":", "host", "=", "socket", ".", "gethostbyname", "(", "socket", ".", "getfqdn", "(", ")", ")", "else", ":", "host", "=", "self", ".", "hostIP", "return", "{", "'rabit_tracker_uri'", ":", "host", ",", "'rabit_tracker_port'", ":", "self", ".", "port", "}"] | get environment variables for slaves
can be passed in as args or envs | ["get", "environment", "variables", "for", "slaves", "can", "be", "passed", "in", "as", "args", "or", "envs"] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/subtree/rabit/tracker/rabit_tracker.py#L144-L156 | train |
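The host-resolution switch in slave_envs() is self-contained; restated below as a standalone function (same logic, minus self) so the three modes can be tried directly:

```python
import socket

def resolve_tracker_host(host_ip):
    if host_ip == 'dns':
        return socket.gethostname()                      # symbolic host name
    elif host_ip == 'ip':
        return socket.gethostbyname(socket.getfqdn())    # numeric address
    return host_ip                                       # caller supplied an explicit address

print(resolve_tracker_host('dns'))
print(resolve_tracker_host('10.0.0.5'))
```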
apple/turicreate | src/external/xgboost/subtree/rabit/tracker/rabit_tracker.py | Tracker.find_share_ring | def find_share_ring(self, tree_map, parent_map, r):
"""
get a ring structure that tends to share nodes with the tree
return a list starting from r
"""
nset = set(tree_map[r])
cset = nset - set([parent_map[r]])
if len(cset) == 0:
return [r]
rlst = [r]
cnt = 0
for v in cset:
vlst = self.find_share_ring(tree_map, parent_map, v)
cnt += 1
if cnt == len(cset):
vlst.reverse()
rlst += vlst
return rlst | python | def find_share_ring(self, tree_map, parent_map, r):
"""
get a ring structure that tends to share nodes with the tree
return a list starting from r
"""
nset = set(tree_map[r])
cset = nset - set([parent_map[r]])
if len(cset) == 0:
return [r]
rlst = [r]
cnt = 0
for v in cset:
vlst = self.find_share_ring(tree_map, parent_map, v)
cnt += 1
if cnt == len(cset):
vlst.reverse()
rlst += vlst
return rlst | ["def", "find_share_ring", "(", "self", ",", "tree_map", ",", "parent_map", ",", "r", ")", ":", "nset", "=", "set", "(", "tree_map", "[", "r", "]", ")", "cset", "=", "nset", "-", "set", "(", "[", "parent_map", "[", "r", "]", "]", ")", "if", "len", "(", "cset", ")", "==", "0", ":", "return", "[", "r", "]", "rlst", "=", "[", "r", "]", "cnt", "=", "0", "for", "v", "in", "cset", ":", "vlst", "=", "self", ".", "find_share_ring", "(", "tree_map", ",", "parent_map", ",", "v", ")", "cnt", "+=", "1", "if", "cnt", "==", "len", "(", "cset", ")", ":", "vlst", ".", "reverse", "(", ")", "rlst", "+=", "vlst", "return", "rlst"] | get a ring structure that tends to share nodes with the tree
return a list starting from r | ["get", "a", "ring", "structure", "that", "tends", "to", "share", "nodes", "with", "the", "tree", "return", "a", "list", "starting", "from", "r"] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/subtree/rabit/tracker/rabit_tracker.py#L174-L191 | train |
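The recursion can be exercised on a small tree to see the ring it produces; the snippet restates the method as a free function (dropping self) so it runs standalone:

```python
def find_share_ring(tree_map, parent_map, r):
    cset = set(tree_map[r]) - set([parent_map[r]])   # children only: drop the parent edge
    if len(cset) == 0:
        return [r]
    rlst = [r]
    cnt = 0
    for v in cset:
        vlst = find_share_ring(tree_map, parent_map, v)
        cnt += 1
        if cnt == len(cset):
            vlst.reverse()        # reversing the last subtree lets the ring close near r
        rlst += vlst
    return rlst

# Node 0 has children 1 and 2; node 2 has child 3 (tree_map lists neighbours).
tree_map = {0: [1, 2], 1: [0], 2: [0, 3], 3: [2]}
parent_map = {0: -1, 1: 0, 2: 0, 3: 2}
print(find_share_ring(tree_map, parent_map, 0))   # [0, 1, 3, 2]
```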
apple/turicreate | src/external/xgboost/subtree/rabit/tracker/rabit_tracker.py | Tracker.get_ring | def get_ring(self, tree_map, parent_map):
"""
get a ring connection used to recover local data
"""
assert parent_map[0] == -1
rlst = self.find_share_ring(tree_map, parent_map, 0)
assert len(rlst) == len(tree_map)
ring_map = {}
nslave = len(tree_map)
for r in range(nslave):
rprev = (r + nslave - 1) % nslave
rnext = (r + 1) % nslave
ring_map[rlst[r]] = (rlst[rprev], rlst[rnext])
return ring_map | python | def get_ring(self, tree_map, parent_map):
"""
get a ring connection used to recover local data
"""
assert parent_map[0] == -1
rlst = self.find_share_ring(tree_map, parent_map, 0)
assert len(rlst) == len(tree_map)
ring_map = {}
nslave = len(tree_map)
for r in range(nslave):
rprev = (r + nslave - 1) % nslave
rnext = (r + 1) % nslave
ring_map[rlst[r]] = (rlst[rprev], rlst[rnext])
return ring_map | ["def", "get_ring", "(", "self", ",", "tree_map", ",", "parent_map", ")", ":", "assert", "parent_map", "[", "0", "]", "==", "-", "1", "rlst", "=", "self", ".", "find_share_ring", "(", "tree_map", ",", "parent_map", ",", "0", ")", "assert", "len", "(", "rlst", ")", "==", "len", "(", "tree_map", ")", "ring_map", "=", "{", "}", "nslave", "=", "len", "(", "tree_map", ")", "for", "r", "in", "range", "(", "nslave", ")", ":", "rprev", "=", "(", "r", "+", "nslave", "-", "1", ")", "%", "nslave", "rnext", "=", "(", "r", "+", "1", ")", "%", "nslave", "ring_map", "[", "rlst", "[", "r", "]", "]", "=", "(", "rlst", "[", "rprev", "]", ",", "rlst", "[", "rnext", "]", ")", "return", "ring_map"] | get a ring connection used to recover local data | ["get", "a", "ring", "connection", "used", "to", "recover", "local", "data"] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/subtree/rabit/tracker/rabit_tracker.py#L193-L206 | train |
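Given the ring order from find_share_ring, each rank just links to its neighbours modulo the ring length. A compact standalone sketch of that step:

```python
def ring_links(rlst):
    n = len(rlst)
    # Map each node to its (previous, next) neighbour along the ring.
    return {rlst[r]: (rlst[(r - 1) % n], rlst[(r + 1) % n]) for r in range(n)}

print(ring_links([0, 1, 3, 2]))
# {0: (2, 1), 1: (0, 3), 3: (1, 2), 2: (3, 0)}
```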
apple/turicreate | src/external/xgboost/subtree/rabit/tracker/rabit_tracker.py | Tracker.get_link_map | def get_link_map(self, nslave):
"""
get the link map, this is a bit hacky, call for better algorithm
to place similar nodes together
"""
tree_map, parent_map = self.get_tree(nslave)
ring_map = self.get_ring(tree_map, parent_map)
rmap = {0 : 0}
k = 0
for i in range(nslave - 1):
k = ring_map[k][1]
rmap[k] = i + 1
ring_map_ = {}
tree_map_ = {}
parent_map_ = {}
for k, v in ring_map.items():
ring_map_[rmap[k]] = (rmap[v[0]], rmap[v[1]])
for k, v in tree_map.items():
tree_map_[rmap[k]] = [rmap[x] for x in v]
for k, v in parent_map.items():
if k != 0:
parent_map_[rmap[k]] = rmap[v]
else:
parent_map_[rmap[k]] = -1
return tree_map_, parent_map_, ring_map_ | python | def get_link_map(self, nslave):
"""
get the link map, this is a bit hacky, call for better algorithm
to place similar nodes together
"""
tree_map, parent_map = self.get_tree(nslave)
ring_map = self.get_ring(tree_map, parent_map)
rmap = {0 : 0}
k = 0
for i in range(nslave - 1):
k = ring_map[k][1]
rmap[k] = i + 1
ring_map_ = {}
tree_map_ = {}
parent_map_ = {}
for k, v in ring_map.items():
ring_map_[rmap[k]] = (rmap[v[0]], rmap[v[1]])
for k, v in tree_map.items():
tree_map_[rmap[k]] = [rmap[x] for x in v]
for k, v in parent_map.items():
if k != 0:
parent_map_[rmap[k]] = rmap[v]
else:
parent_map_[rmap[k]] = -1
return tree_map_, parent_map_, ring_map_ | ["def", "get_link_map", "(", "self", ",", "nslave", ")", ":", "tree_map", ",", "parent_map", "=", "self", ".", "get_tree", "(", "nslave", ")", "ring_map", "=", "self", ".", "get_ring", "(", "tree_map", ",", "parent_map", ")", "rmap", "=", "{", "0", ":", "0", "}", "k", "=", "0", "for", "i", "in", "range", "(", "nslave", "-", "1", ")", ":", "k", "=", "ring_map", "[", "k", "]", "[", "1", "]", "rmap", "[", "k", "]", "=", "i", "+", "1", "ring_map_", "=", "{", "}", "tree_map_", "=", "{", "}", "parent_map_", "=", "{", "}", "for", "k", ",", "v", "in", "ring_map", ".", "items", "(", ")", ":", "ring_map_", "[", "rmap", "[", "k", "]", "]", "=", "(", "rmap", "[", "v", "[", "0", "]", "]", ",", "rmap", "[", "v", "[", "1", "]", "]", ")", "for", "k", ",", "v", "in", "tree_map", ".", "items", "(", ")", ":", "tree_map_", "[", "rmap", "[", "k", "]", "]", "=", "[", "rmap", "[", "x", "]", "for", "x", "in", "v", "]", "for", "k", ",", "v", "in", "parent_map", ".", "items", "(", ")", ":", "if", "k", "!=", "0", ":", "parent_map_", "[", "rmap", "[", "k", "]", "]", "=", "rmap", "[", "v", "]", "else", ":", "parent_map_", "[", "rmap", "[", "k", "]", "]", "=", "-", "1", "return", "tree_map_", ",", "parent_map_", ",", "ring_map_"] | get the link map, this is a bit hacky, call for better algorithm
to place similar nodes together | ["get", "the", "link", "map", "this", "is", "a", "bit", "hacky", "call", "for", "better", "algorithm", "to", "place", "similar", "nodes", "together"] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/xgboost/subtree/rabit/tracker/rabit_tracker.py#L208-L233 | train |
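The relabelling trick in get_link_map walks the ring from rank 0 and hands out consecutive new ids, so ring neighbours end up with adjacent ranks. Isolated as a standalone sketch:

```python
def relabel_along_ring(ring_map):
    rmap, k = {0: 0}, 0
    for i in range(len(ring_map) - 1):
        k = ring_map[k][1]        # follow the 'next' pointer around the ring
        rmap[k] = i + 1
    return rmap

ring_map = {0: (2, 1), 1: (0, 3), 3: (1, 2), 2: (3, 0)}
print(relabel_along_ring(ring_map))   # {0: 0, 1: 1, 3: 2, 2: 3}
```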
apple/turicreate | deps/src/boost_1_68_0/tools/build/src/tools/msvc.py | maybe_rewrite_setup | def maybe_rewrite_setup(toolset, setup_script, setup_options, version, rewrite_setup='off'):
"""
Helper rule to generate a faster alternative to MSVC setup scripts.
We used to call MSVC setup scripts directly in every action; however, in
newer MSVC versions (10.0+) they make long-lasting registry queries
which have a significant impact on build time.
"""
result = '"{}" {}'.format(setup_script, setup_options)
# At the moment we only know how to rewrite scripts with cmd shell.
if os.name == 'nt' and rewrite_setup != 'off':
basename = os.path.basename(setup_script)
filename, _ = os.path.splitext(basename)
setup_script_id = 'b2_{}_{}_{}'.format(toolset, version, filename)
if setup_options:
setup_script_id = '{}_{}'.format(setup_script_id, setup_options)
tempdir = os.environ.get('TEMP')
replacement = os.path.join(tempdir, setup_script_id + '.cmd')
if rewrite_setup == 'always' or not os.path.exists(replacement):
import subprocess
# call the setup script and print the environment after doing so
p = subprocess.Popen([
setup_script, setup_options, '>', 'nul', '&&', 'set',
], stdout=subprocess.PIPE, shell=True
)
stdout, _ = p.communicate()
diff_vars = []
for var in stdout.splitlines():
# returns a tuple of ('var-name', '=', 'value').
# partition is being used here (over something like .split())
# for two reasons:
# 1) an environment variable may have a value that contains an '=';
# .partition() will still return the correct key and value pair.
# 2) if the line doesn't contain an '=' at all, then the returned
# tuple will contain only empty strings rather than raising
# an exception.
key, _, value = var.partition('=')
# os.environ handles casing differences here. Usually the
# call to "set" above will produce pascal-cased environment
# variable names, so a normal python dict can't be used here.
# check for the existence of key in case the partitioning() above
# returned an empty key value pair.
if key and os.environ.get(key) != value:
diff_vars.append('SET {}={}'.format(key, value))
if diff_vars:
with open(replacement, 'wb') as f:
f.write(os.linesep.join(diff_vars))
result = '"{}"'.format(replacement)
else:
result = '"{}"'.format(replacement)
return result | python | def maybe_rewrite_setup(toolset, setup_script, setup_options, version, rewrite_setup='off'):
"""
Helper rule to generate a faster alternative to MSVC setup scripts.
We used to call MSVC setup scripts directly in every action, however in
newer MSVC versions (10.0+) they make long-lasting registry queries
which have a significant impact on build time.
"""
result = '"{}" {}'.format(setup_script, setup_options)
# At the moment we only know how to rewrite scripts with cmd shell.
if os.name == 'nt' and rewrite_setup != 'off':
basename = os.path.basename(setup_script)
filename, _ = os.path.splitext(basename)
setup_script_id = 'b2_{}_{}_{}'.format(toolset, version, filename)
if setup_options:
setup_script_id = '{}_{}'.format(setup_script_id, setup_options)
tempdir = os.environ.get('TEMP')
replacement = os.path.join(tempdir, setup_script_id + '.cmd')
if rewrite_setup == 'always' or not os.path.exists(replacement):
import subprocess
# call the setup script and print the environment after doing so
p = subprocess.Popen([
setup_script, setup_options, '>', 'nul', '&&', 'set',
], stdout=subprocess.PIPE, shell=True
)
stdout, _ = p.communicate()
diff_vars = []
for var in stdout.splitlines():
# returns a tuple of ('var-name', '=', 'value').
# partition is being used here (over something like .split())
# for two reasons:
# 1) an environment variable may have a value that contains an '=';
# .partition() will still return the correct key and value pair.
# 2) if the line doesn't contain an '=' at all, then the returned
# tuple will contain only empty strings rather than raising
# an exception.
key, _, value = var.partition('=')
# os.environ handles casing differences here. Usually the
# call to "set" above will produce pascal-cased environment
# variable names, so a normal python dict can't be used here.
# check for the existence of key in case the partitioning() above
# returned an empty key value pair.
if key and os.environ.get(key) != value:
diff_vars.append('SET {}={}'.format(key, value))
if diff_vars:
with open(replacement, 'wb') as f:
f.write(os.linesep.join(diff_vars))
result = '"{}"'.format(replacement)
else:
result = '"{}"'.format(replacement)
return result | ["def", "maybe_rewrite_setup", "(", "toolset", ",", "setup_script", ",", "setup_options", ",", "version", ",", "rewrite_setup", "=", "'off'", ")", ":", "result", "=", "'\"{}\" {}'", ".", "format", "(", "setup_script", ",", "setup_options", ")", "# At the moment we only know how to rewrite scripts with cmd shell.", "if", "os", ".", "name", "==", "'nt'", "and", "rewrite_setup", "!=", "'off'", ":", "basename", "=", "os", ".", "path", ".", "basename", "(", "setup_script", ")", "filename", ",", "_", "=", "os", ".", "path", ".", "splitext", "(", "basename", ")", "setup_script_id", "=", "'b2_{}_{}_{}'", ".", "format", "(", "toolset", ",", "version", ",", "filename", ")", "if", "setup_options", ":", "setup_script_id", "=", "'{}_{}'", ".", "format", "(", "setup_script_id", ",", "setup_options", ")", "tempdir", "=", "os", ".", "environ", ".", "get", "(", "'TEMP'", ")", "replacement", "=", "os", ".", "path", ".", "join", "(", "tempdir", ",", "setup_script_id", "+", "'.cmd'", ")", "if", "rewrite_setup", "==", "'always'", "or", "not", "os", ".", "path", ".", "exists", "(", "replacement", ")", ":", "import", "subprocess", "# call the setup script and print the environment after doing so", "p", "=", "subprocess", ".", "Popen", "(", "[", "setup_script", ",", "setup_options", ",", "'>'", ",", "'nul'", ",", "'&&'", ",", "'set'", ",", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "shell", "=", "True", ")", "stdout", ",", "_", "=", "p", ".", "communicate", "(", ")", "diff_vars", "=", "[", "]", "for", "var", "in", "stdout", ".", "splitlines", "(", ")", ":", "# returns a tuple of ('var-name', '=', 'value').", "# partition is being used here (over something like .split())", "# for two reasons:", "# 1) an environment variable may have a value that contains an '=';", "# .partition() will still return the correct key and value pair.", "# 2) if the line doesn't contain an '=' at all, then the returned", "# tuple will contain only empty strings rather than raising", "# an exception.", "key", ",", "_", ",", "value", "=", "var", ".", "partition", "(", "'='", ")", "# os.environ handles casing differences here. Usually the", "# call to \"set\" above will produce pascal-cased environment", "# variable names, so a normal python dict can't be used here.", "# check for the existence of key in case the partitioning() above", "# returned an empty key value pair.", "if", "key", "and", "os", ".", "environ", ".", "get", "(", "key", ")", "!=", "value", ":", "diff_vars", ".", "append", "(", "'SET {}={}'", ".", "format", "(", "key", ",", "value", ")", ")", "if", "diff_vars", ":", "with", "open", "(", "replacement", ",", "'wb'", ")", "as", "f", ":", "f", ".", "write", "(", "os", ".", "linesep", ".", "join", "(", "diff_vars", ")", ")", "result", "=", "'\"{}\"'", ".", "format", "(", "replacement", ")", "else", ":", "result", "=", "'\"{}\"'", ".", "format", "(", "replacement", ")", "return", "result"] | Helper rule to generate a faster alternative to MSVC setup scripts.
We used to call MSVC setup scripts directly in every action; however, in
newer MSVC versions (10.0+) they make long-lasting registry queries
which have a significant impact on build time. | ["Helper", "rule", "to", "generate", "a", "faster", "alternative", "to", "MSVC", "setup", "scripts", "."] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/tools/msvc.py#L626-L682 | train |
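The core trick in maybe_rewrite_setup is caching only the environment variables the setup script changes. Here is the diffing step in isolation, fed canned lines instead of subprocess output; this is a sketch, not the original helper:

```python
import os

def env_diff(captured_lines):
    changed = []
    for line in captured_lines:
        key, _, value = line.partition('=')   # tolerant of '=' characters inside values
        if key and os.environ.get(key) != value:
            changed.append('SET {}={}'.format(key, value))
    return changed

print(env_diff(['PATH=C:\\tools;C:\\old', 'B2_NEW_VAR=1']))
```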
apple/turicreate | src/unity/python/turicreate/toolkits/classifier/_classifier.py | create | def create(dataset, target, features=None, validation_set = 'auto',
verbose=True):
"""
Automatically create a suitable classifier model based on the provided
training data.
To use specific options of a desired model, use the ``create`` function
of the corresponding model.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : string
Name of the column containing the target variable. The values in this
column must be of string or integer type. String target variables are
automatically mapped to integers in the order in which they are
provided. For example, a target variable with 'cat' and 'dog' as
possible values is mapped to 0 and 1 respectively with 0 being the base
class and 1 being the reference class. Use `model.classes` to
retrieve the order in which the classes are mapped.
features : list[string], optional
Names of the columns containing features. 'None' (the default) indicates
that all columns except the target variable should be used as features.
The features are columns in the input SFrame that can be of the
following types:
- *Numeric*: values of numeric type integer or float.
- *Categorical*: values of type string.
- *Array*: list of numeric (integer or float) values. Each list element
is treated as a separate feature in the model.
- *Dictionary*: key-value pairs with numeric (integer or float) values
Each key of a dictionary is treated as a separate feature and the
value in the dictionary corresponds to the value of the feature.
Dictionaries are ideal for representing sparse data.
Columns of type *list* are not supported. Convert such feature
columns to type array if all entries in the list are of numeric
types. If the lists contain data of mixed types, separate
them out into different columns.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance. For
each row of the progress table, the chosen metrics are computed for
both the provided training dataset and the validation_set. The format
of this SFrame must be the same as the training set. By default this
argument is set to 'auto' and a validation set is automatically sampled
and used for progress printing. If validation_set is set to None, then
no additional metrics are computed. The default value is 'auto'.
verbose : boolean, optional
If True, print progress information during training.
Returns
-------
out : A trained classifier model.
See Also
--------
turicreate.boosted_trees_classifier.BoostedTreesClassifier,
turicreate.logistic_classifier.LogisticClassifier,
turicreate.svm_classifier.SVMClassifier,
turicreate.nearest_neighbor_classifier.NearestNeighborClassifier
Examples
--------
.. sourcecode:: python
# Setup the data
>>> import turicreate as tc
>>> data = tc.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> data['is_expensive'] = data['price'] > 30000
# Selects the best model based on your data.
>>> model = tc.classifier.create(data, target='is_expensive',
... features=['bath', 'bedroom', 'size'])
# Make predictions and evaluate results.
>>> predictions = model.classify(data)
>>> results = model.evaluate(data)
"""
return _sl.create_classification_with_model_selector(
dataset,
target,
model_selector = _turicreate.extensions._supervised_learning._classifier_available_models,
features = features,
validation_set = validation_set,
verbose = verbose) | python | def create(dataset, target, features=None, validation_set = 'auto',
verbose=True):
"""
Automatically create a suitable classifier model based on the provided
training data.
To use specific options of a desired model, use the ``create`` function
of the corresponding model.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : string
Name of the column containing the target variable. The values in this
column must be of string or integer type. String target variables are
automatically mapped to integers in the order in which they are
provided. For example, a target variable with 'cat' and 'dog' as
possible values is mapped to 0 and 1 respectively with 0 being the base
class and 1 being the reference class. Use `model.classes` to
retrieve the order in which the classes are mapped.
features : list[string], optional
Names of the columns containing features. 'None' (the default) indicates
that all columns except the target variable should be used as features.
The features are columns in the input SFrame that can be of the
following types:
- *Numeric*: values of numeric type integer or float.
- *Categorical*: values of type string.
- *Array*: list of numeric (integer or float) values. Each list element
is treated as a separate feature in the model.
- *Dictionary*: key-value pairs with numeric (integer or float) values
Each key of a dictionary is treated as a separate feature and the
value in the dictionary corresponds to the value of the feature.
Dictionaries are ideal for representing sparse data.
Columns of type *list* are not supported. Convert such feature
columns to type array if all entries in the list are of numeric
types. If the lists contain data of mixed types, separate
them out into different columns.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance. For
each row of the progress table, the chosen metrics are computed for
both the provided training dataset and the validation_set. The format
of this SFrame must be the same as the training set. By default this
argument is set to 'auto' and a validation set is automatically sampled
and used for progress printing. If validation_set is set to None, then
no additional metrics are computed. The default value is 'auto'.
verbose : boolean, optional
If True, print progress information during training.
Returns
-------
out : A trained classifier model.
See Also
--------
turicreate.boosted_trees_classifier.BoostedTreesClassifier,
turicreate.logistic_classifier.LogisticClassifier,
turicreate.svm_classifier.SVMClassifier,
turicreate.nearest_neighbor_classifier.NearestNeighborClassifier
Examples
--------
.. sourcecode:: python
# Setup the data
>>> import turicreate as tc
>>> data = tc.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> data['is_expensive'] = data['price'] > 30000
# Selects the best model based on your data.
>>> model = tc.classifier.create(data, target='is_expensive',
... features=['bath', 'bedroom', 'size'])
# Make predictions and evaluate results.
>>> predictions = model.classify(data)
>>> results = model.evaluate(data)
"""
return _sl.create_classification_with_model_selector(
dataset,
target,
model_selector = _turicreate.extensions._supervised_learning._classifier_available_models,
features = features,
validation_set = validation_set,
verbose = verbose) | ["def", "create", "(", "dataset", ",", "target", ",", "features", "=", "None", ",", "validation_set", "=", "'auto'", ",", "verbose", "=", "True", ")", ":", "return", "_sl", ".", "create_classification_with_model_selector", "(", "dataset", ",", "target", ",", "model_selector", "=", "_turicreate", ".", "extensions", ".", "_supervised_learning", ".", "_classifier_available_models", ",", "features", "=", "features", ",", "validation_set", "=", "validation_set", ",", "verbose", "=", "verbose", ")"] | Automatically create a suitable classifier model based on the provided
training data.
To use specific options of a desired model, use the ``create`` function
of the corresponding model.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : string
Name of the column containing the target variable. The values in this
column must be of string or integer type. String target variables are
automatically mapped to integers in the order in which they are
provided. For example, a target variable with 'cat' and 'dog' as
possible values is mapped to 0 and 1 respectively with 0 being the base
class and 1 being the reference class. Use `model.classes` to
retrieve the order in which the classes are mapped.
features : list[string], optional
Names of the columns containing features. 'None' (the default) indicates
that all columns except the target variable should be used as features.
The features are columns in the input SFrame that can be of the
following types:
- *Numeric*: values of numeric type integer or float.
- *Categorical*: values of type string.
- *Array*: list of numeric (integer or float) values. Each list element
is treated as a separate feature in the model.
- *Dictionary*: key-value pairs with numeric (integer or float) values
Each key of a dictionary is treated as a separate feature and the
value in the dictionary corresponds to the value of the feature.
Dictionaries are ideal for representing sparse data.
Columns of type *list* are not supported. Convert such feature
columns to type array if all entries in the list are of numeric
types. If the lists contain data of mixed types, separate
them out into different columns.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance. For
each row of the progress table, the chosen metrics are computed for
both the provided training dataset and the validation_set. The format
of this SFrame must be the same as the training set. By default this
argument is set to 'auto' and a validation set is automatically sampled
and used for progress printing. If validation_set is set to None, then
no additional metrics are computed. The default value is 'auto'.
verbose : boolean, optional
If True, print progress information during training.
Returns
-------
out : A trained classifier model.
See Also
--------
turicreate.boosted_trees_classifier.BoostedTreesClassifier,
turicreate.logistic_classifier.LogisticClassifier,
turicreate.svm_classifier.SVMClassifier,
turicreate.nearest_neighbor_classifier.NearestNeighborClassifier
Examples
--------
.. sourcecode:: python
# Setup the data
>>> import turicreate as tc
>>> data = tc.SFrame('https://static.turi.com/datasets/regression/houses.csv')
>>> data['is_expensive'] = data['price'] > 30000
# Selects the best model based on your data.
>>> model = tc.classifier.create(data, target='is_expensive',
... features=['bath', 'bedroom', 'size'])
# Make predictions and evaluate results.
>>> predictions = model.classify(data)
>>> results = model.evaluate(data) | ["Automatically", "create", "a", "suitable", "classifier", "model", "based", "on", "the", "provided", "training", "data", "."] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/classifier/_classifier.py#L12-L106 | train |
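create() delegates to a model selector that tries each candidate model and keeps the best by validation score. A toy sketch of that selection loop; the candidate factories and the scorer below are placeholders, not turicreate internals:

```python
def select_best_model(candidates, train, valid, score):
    best, best_score = None, float('-inf')
    for make_model in candidates:
        model = make_model(train)             # train one candidate
        s = score(model, valid)               # score it on held-out data
        if s > best_score:
            best, best_score = model, s
    return best

candidates = [lambda d: 'majority', lambda d: 'stump']
score = lambda model, valid: {'majority': 0.6, 'stump': 0.8}[model]
print(select_best_model(candidates, train=[], valid=[], score=score))   # 'stump'
```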
apple/turicreate | src/unity/python/turicreate/data_structures/gframe.py | GFrame.add_column | def add_column(self, data, column_name="", inplace=False):
"""
Adds the specified column to this SFrame. The number of elements in
the data given must match every other column of the SFrame.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
data : SArray
The 'column' of data.
column_name : string
The name of the column. If no name is given, a default name is chosen.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
"""
# Check type for pandas dataframe or SArray?
if not isinstance(data, SArray):
raise TypeError("Must give column as SArray")
if not isinstance(column_name, str):
raise TypeError("Invalid column name: must be str")
if inplace:
self.__is_dirty__ = True
with cython_context():
if self._is_vertex_frame():
graph_proxy = self.__graph__.__proxy__.add_vertex_field(data.__proxy__, column_name)
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
graph_proxy = self.__graph__.__proxy__.add_edge_field(data.__proxy__, column_name)
self.__graph__.__proxy__ = graph_proxy
return self
else:
return super(GFrame, self).add_column(data, column_name, inplace=inplace) | python | def add_column(self, data, column_name="", inplace=False):
"""
Adds the specified column to this SFrame. The number of elements in
the data given must match every other column of the SFrame.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
data : SArray
The 'column' of data.
column_name : string
The name of the column. If no name is given, a default name is chosen.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
"""
# Check type for pandas dataframe or SArray?
if not isinstance(data, SArray):
raise TypeError("Must give column as SArray")
if not isinstance(column_name, str):
raise TypeError("Invalid column name: must be str")
if inplace:
self.__is_dirty__ = True
with cython_context():
if self._is_vertex_frame():
graph_proxy = self.__graph__.__proxy__.add_vertex_field(data.__proxy__, column_name)
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
graph_proxy = self.__graph__.__proxy__.add_edge_field(data.__proxy__, column_name)
self.__graph__.__proxy__ = graph_proxy
return self
else:
return super(GFrame, self).add_column(data, column_name, inplace=inplace) | ["def", "add_column", "(", "self", ",", "data", ",", "column_name", "=", "\"\"", ",", "inplace", "=", "False", ")", ":", "# Check type for pandas dataframe or SArray?", "if", "not", "isinstance", "(", "data", ",", "SArray", ")", ":", "raise", "TypeError", "(", "\"Must give column as SArray\"", ")", "if", "not", "isinstance", "(", "column_name", ",", "str", ")", ":", "raise", "TypeError", "(", "\"Invalid column name: must be str\"", ")", "if", "inplace", ":", "self", ".", "__is_dirty__", "=", "True", "with", "cython_context", "(", ")", ":", "if", "self", ".", "_is_vertex_frame", "(", ")", ":", "graph_proxy", "=", "self", ".", "__graph__", ".", "__proxy__", ".", "add_vertex_field", "(", "data", ".", "__proxy__", ",", "column_name", ")", "self", ".", "__graph__", ".", "__proxy__", "=", "graph_proxy", "elif", "self", ".", "_is_edge_frame", "(", ")", ":", "graph_proxy", "=", "self", ".", "__graph__", ".", "__proxy__", ".", "add_edge_field", "(", "data", ".", "__proxy__", ",", "column_name", ")", "self", ".", "__graph__", ".", "__proxy__", "=", "graph_proxy", "return", "self", "else", ":", "return", "super", "(", "GFrame", ",", "self", ")", ".", "add_column", "(", "data", ",", "column_name", ",", "inplace", "=", "inplace", ")"] | Adds the specified column to this SFrame. The number of elements in
the data given must match every other column of the SFrame.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
data : SArray
The 'column' of data.
column_name : string
The name of the column. If no name is given, a default name is chosen.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place. | [
"Adds",
"the",
"specified",
"column",
"to",
"this",
"SFrame",
".",
"The",
"number",
"of",
"elements",
"in",
"the",
"data",
"given",
"must",
"match",
"every",
"other",
"column",
"of",
"the",
"SFrame",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/gframe.py#L62-L101 | train |
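For quick reference, a minimal usage sketch of `GFrame.add_column` (not part of the record above; the toy graph and values are assumptions for illustration):

```python
import turicreate as tc

# Build a small graph; g.vertices is a GFrame backed by the graph proxy.
g = tc.SGraph().add_vertices(tc.SFrame({'__id': [1, 2, 3]}))
degree = tc.SArray([0, 0, 0])

# inplace=True mutates the underlying graph; inplace=False returns a new SFrame.
g.vertices.add_column(degree, 'degree', inplace=True)
print(g.vertices.column_names())  # ['__id', 'degree']
```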
apple/turicreate | src/unity/python/turicreate/data_structures/gframe.py | GFrame.add_columns | def add_columns(self, data, column_names=None, inplace=False):
"""
Adds columns to the SFrame. The number of elements in all columns must
match every other column of the SFrame.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
data : list[SArray] or SFrame
The columns to add.
column_names: list of string, optional
A list of column names. All names must be specified. ``column_names`` is
ignored if data is an SFrame.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
"""
datalist = data
if isinstance(data, SFrame):
other = data
datalist = [other.select_column(name) for name in other.column_names()]
column_names = other.column_names()
my_columns = set(self.column_names())
for name in column_names:
if name in my_columns:
raise ValueError("Column '" + name + "' already exists in current SFrame")
else:
if not _is_non_string_iterable(datalist):
raise TypeError("datalist must be an iterable")
if not _is_non_string_iterable(column_names):
raise TypeError("column_names must be an iterable")
if not all([isinstance(x, SArray) for x in datalist]):
raise TypeError("Must give column as SArray")
if not all([isinstance(x, str) for x in column_names]):
raise TypeError("Invalid column name in list : must all be str")
if inplace:
for (data, name) in zip(datalist, column_names):
self.add_column(data, name, inplace=True)
return self
else:
return super(GFrame, self).add_columns(datalist, column_names, inplace=inplace) | python | def add_columns(self, data, column_names=None, inplace=False):
"""
Adds columns to the SFrame. The number of elements in all columns must
match every other column of the SFrame.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
data : list[SArray] or SFrame
The columns to add.
column_names: list of string, optional
A list of column names. All names must be specified. ``column_names`` is
ignored if data is an SFrame.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
"""
datalist = data
if isinstance(data, SFrame):
other = data
datalist = [other.select_column(name) for name in other.column_names()]
column_names = other.column_names()
my_columns = set(self.column_names())
for name in column_names:
if name in my_columns:
raise ValueError("Column '" + name + "' already exists in current SFrame")
else:
if not _is_non_string_iterable(datalist):
raise TypeError("datalist must be an iterable")
if not _is_non_string_iterable(column_names):
raise TypeError("column_names must be an iterable")
if not all([isinstance(x, SArray) for x in datalist]):
raise TypeError("Must give column as SArray")
if not all([isinstance(x, str) for x in column_names]):
raise TypeError("Invalid column name in list : must all be str")
if inplace:
for (data, name) in zip(datalist, column_names):
self.add_column(data, name, inplace=True)
return self
else:
return super(GFrame, self).add_columns(datalist, column_names, inplace=inplace) | [
"def",
"add_columns",
"(",
"self",
",",
"data",
",",
"column_names",
"=",
"None",
",",
"inplace",
"=",
"False",
")",
":",
"datalist",
"=",
"data",
"if",
"isinstance",
"(",
"data",
",",
"SFrame",
")",
":",
"other",
"=",
"data",
"datalist",
"=",
"[",
"other",
".",
"select_column",
"(",
"name",
")",
"for",
"name",
"in",
"other",
".",
"column_names",
"(",
")",
"]",
"column_names",
"=",
"other",
".",
"column_names",
"(",
")",
"my_columns",
"=",
"set",
"(",
"self",
".",
"column_names",
"(",
")",
")",
"for",
"name",
"in",
"column_names",
":",
"if",
"name",
"in",
"my_columns",
":",
"raise",
"ValueError",
"(",
"\"Column '\"",
"+",
"name",
"+",
"\"' already exists in current SFrame\"",
")",
"else",
":",
"if",
"not",
"_is_non_string_iterable",
"(",
"datalist",
")",
":",
"raise",
"TypeError",
"(",
"\"datalist must be an iterable\"",
")",
"if",
"not",
"_is_non_string_iterable",
"(",
"column_names",
")",
":",
"raise",
"TypeError",
"(",
"\"column_names must be an iterable\"",
")",
"if",
"not",
"all",
"(",
"[",
"isinstance",
"(",
"x",
",",
"SArray",
")",
"for",
"x",
"in",
"datalist",
"]",
")",
":",
"raise",
"TypeError",
"(",
"\"Must give column as SArray\"",
")",
"if",
"not",
"all",
"(",
"[",
"isinstance",
"(",
"x",
",",
"str",
")",
"for",
"x",
"in",
"column_names",
"]",
")",
":",
"raise",
"TypeError",
"(",
"\"Invalid column name in list : must all be str\"",
")",
"if",
"inplace",
":",
"for",
"(",
"data",
",",
"name",
")",
"in",
"zip",
"(",
"datalist",
",",
"column_names",
")",
":",
"self",
".",
"add_column",
"(",
"data",
",",
"name",
")",
"return",
"self",
"else",
":",
"return",
"super",
"(",
"GFrame",
",",
"self",
")",
".",
"add_column",
"(",
"datalist",
",",
"column_names",
",",
"inplace",
"=",
"inplace",
")"
] | Adds columns to the SFrame. The number of elements in all columns must
match every other column of the SFrame.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
data : list[SArray] or SFrame
The columns to add.
column_names: list of string, optional
A list of column names. All names must be specified. ``column_names`` is
ignored if data is an SFrame.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place. | [
"Adds",
"columns",
"to",
"the",
"SFrame",
".",
"The",
"number",
"of",
"elements",
"in",
"all",
"columns",
"must",
"match",
"every",
"other",
"column",
"of",
"the",
"SFrame",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/gframe.py#L104-L154 | train |
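A hedged sketch of `GFrame.add_columns`, showing the SFrame path where the column names come from the passed frame (toy data assumed, not part of the record):

```python
import turicreate as tc

g = tc.SGraph().add_vertices(tc.SFrame({'__id': [1, 2, 3]}))
extra = tc.SFrame({'rank': [0.1, 0.2, 0.3], 'label': ['a', 'b', 'c']})

# When data is an SFrame, its column names are used and column_names is ignored.
g.vertices.add_columns(extra, inplace=True)
print(g.vertices.column_names())  # ['__id', 'rank', 'label']
```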
apple/turicreate | src/unity/python/turicreate/data_structures/gframe.py | GFrame.remove_column | def remove_column(self, column_name, inplace=False):
"""
Removes the column with the given name from the SFrame.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
column_name : string
The name of the column to remove.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
"""
if column_name not in self.column_names():
raise KeyError('Cannot find column %s' % column_name)
if inplace:
self.__is_dirty__ = True
try:
with cython_context():
if self._is_vertex_frame():
assert column_name != '__id', 'Cannot remove \"__id\" column'
graph_proxy = self.__graph__.__proxy__.delete_vertex_field(column_name)
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
assert column_name != '__src_id', 'Cannot remove \"__src_id\" column'
assert column_name != '__dst_id', 'Cannot remove \"__dst_id\" column'
graph_proxy = self.__graph__.__proxy__.delete_edge_field(column_name)
self.__graph__.__proxy__ = graph_proxy
return self
except:
self.__is_dirty__ = False
raise
else:
return super(GFrame, self).remove_column(column_name, inplace=inplace) | python | def remove_column(self, column_name, inplace=False):
"""
Removes the column with the given name from the SFrame.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
column_name : string
The name of the column to remove.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
"""
if column_name not in self.column_names():
raise KeyError('Cannot find column %s' % column_name)
if inplace:
self.__is_dirty__ = True
try:
with cython_context():
if self._is_vertex_frame():
assert column_name != '__id', 'Cannot remove \"__id\" column'
graph_proxy = self.__graph__.__proxy__.delete_vertex_field(column_name)
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
assert column_name != '__src_id', 'Cannot remove \"__src_id\" column'
assert column_name != '__dst_id', 'Cannot remove \"__dst_id\" column'
graph_proxy = self.__graph__.__proxy__.delete_edge_field(column_name)
self.__graph__.__proxy__ = graph_proxy
return self
except:
self.__is_dirty__ = False
raise
else:
return super(GFrame, self).remove_column(column_name, inplace=inplace) | [
"def",
"remove_column",
"(",
"self",
",",
"column_name",
",",
"inplace",
"=",
"False",
")",
":",
"if",
"column_name",
"not",
"in",
"self",
".",
"column_names",
"(",
")",
":",
"raise",
"KeyError",
"(",
"'Cannot find column %s'",
"%",
"column_name",
")",
"if",
"inplace",
":",
"self",
".",
"__is_dirty__",
"=",
"True",
"try",
":",
"with",
"cython_context",
"(",
")",
":",
"if",
"self",
".",
"_is_vertex_frame",
"(",
")",
":",
"assert",
"column_name",
"!=",
"'__id'",
",",
"'Cannot remove \\\"__id\\\" column'",
"graph_proxy",
"=",
"self",
".",
"__graph__",
".",
"__proxy__",
".",
"delete_vertex_field",
"(",
"column_name",
")",
"self",
".",
"__graph__",
".",
"__proxy__",
"=",
"graph_proxy",
"elif",
"self",
".",
"_is_edge_frame",
"(",
")",
":",
"assert",
"column_name",
"!=",
"'__src_id'",
",",
"'Cannot remove \\\"__src_id\\\" column'",
"assert",
"column_name",
"!=",
"'__dst_id'",
",",
"'Cannot remove \\\"__dst_id\\\" column'",
"graph_proxy",
"=",
"self",
".",
"__graph__",
".",
"__proxy__",
".",
"delete_edge_field",
"(",
"column_name",
")",
"self",
".",
"__graph__",
".",
"__proxy__",
"=",
"graph_proxy",
"return",
"self",
"except",
":",
"self",
".",
"__is_dirty__",
"=",
"False",
"raise",
"else",
":",
"return",
"super",
"(",
"GFrame",
",",
"self",
")",
".",
"remove_column",
"(",
"column_name",
",",
"inplace",
"=",
"inplace",
")"
] | Removes the column with the given name from the SFrame.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
column_name : string
The name of the column to remove.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place. | [
"Removes",
"the",
"column",
"with",
"the",
"given",
"name",
"from",
"the",
"SFrame",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/gframe.py#L157-L195 | train |
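An illustrative sketch of `GFrame.remove_column`; the tiny graph is assumed. Note the protected `__id` / `__src_id` / `__dst_id` fields guarded by the asserts above:

```python
import turicreate as tc

g = tc.SGraph().add_vertices(tc.SFrame({'__id': [1, 2], 'tmp': [0, 0]}))

g.vertices.remove_column('tmp', inplace=True)   # deletes the vertex field
print(g.vertices.column_names())                # ['__id']
# g.vertices.remove_column('__id', inplace=True) would raise an AssertionError.
```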
apple/turicreate | src/unity/python/turicreate/data_structures/gframe.py | GFrame.swap_columns | def swap_columns(self, column_name_1, column_name_2, inplace=False):
"""
Swaps the columns with the given names.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
column_name_1 : string
Name of column to swap
column_name_2 : string
Name of other column to swap
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
"""
if inplace:
self.__is_dirty__ = True
with cython_context():
if self._is_vertex_frame():
graph_proxy = self.__graph__.__proxy__.swap_vertex_fields(column_name_1, column_name_2)
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
graph_proxy = self.__graph__.__proxy__.swap_edge_fields(column_name_1, column_name_2)
self.__graph__.__proxy__ = graph_proxy
return self
else:
return super(GFrame, self).swap_columns(column_name_1, column_name_2, inplace=inplace) | python | def swap_columns(self, column_name_1, column_name_2, inplace=False):
"""
Swaps the columns with the given names.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
column_name_1 : string
Name of column to swap
column_name_2 : string
Name of other column to swap
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
"""
if inplace:
self.__is_dirty__ = True
with cython_context():
if self._is_vertex_frame():
graph_proxy = self.__graph__.__proxy__.swap_vertex_fields(column_name_1, column_name_2)
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
graph_proxy = self.__graph__.__proxy__.swap_edge_fields(column_name_1, column_name_2)
self.__graph__.__proxy__ = graph_proxy
return self
else:
return super(GFrame, self).swap_columns(column_name_1, column_name_2, inplace=inplace) | [
"def",
"swap_columns",
"(",
"self",
",",
"column_name_1",
",",
"column_name_2",
",",
"inplace",
"=",
"False",
")",
":",
"if",
"inplace",
":",
"self",
".",
"__is_dirty__",
"=",
"True",
"with",
"cython_context",
"(",
")",
":",
"if",
"self",
".",
"_is_vertex_frame",
"(",
")",
":",
"graph_proxy",
"=",
"self",
".",
"__graph__",
".",
"__proxy__",
".",
"swap_vertex_fields",
"(",
"column_name_1",
",",
"column_name_2",
")",
"self",
".",
"__graph__",
".",
"__proxy__",
"=",
"graph_proxy",
"elif",
"self",
".",
"_is_edge_frame",
"(",
")",
":",
"graph_proxy",
"=",
"self",
".",
"__graph__",
".",
"__proxy__",
".",
"swap_edge_fields",
"(",
"column_name_1",
",",
"column_name_2",
")",
"self",
".",
"__graph__",
".",
"__proxy__",
"=",
"graph_proxy",
"return",
"self",
"else",
":",
"return",
"super",
"(",
"GFrame",
",",
"self",
")",
".",
"swap_columns",
"(",
"column_name_1",
",",
"column_name_2",
",",
"inplace",
"=",
"inplace",
")"
] | Swaps the columns with the given names.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
column_name_1 : string
Name of column to swap
column_name_2 : string
Name of other column to swap
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place. | [
"Swaps",
"the",
"columns",
"with",
"the",
"given",
"names",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/gframe.py#L211-L243 | train |
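A minimal sketch of `GFrame.swap_columns` on an assumed toy graph:

```python
import turicreate as tc

g = tc.SGraph().add_vertices(
    tc.SFrame({'__id': [1, 2], 'a': [1, 2], 'b': [3.0, 4.0]}))

g.vertices.swap_columns('a', 'b', inplace=True)  # exchanges the two fields' positions
print(g.vertices.column_names())
```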
apple/turicreate | src/unity/python/turicreate/data_structures/gframe.py | GFrame.rename | def rename(self, names, inplace=False):
"""
Rename the columns using the 'names' dict. This changes the names of
the columns given as the keys and replaces them with the names given as
the values.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
names : dict[string, string]
Dictionary of [old_name, new_name]
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
"""
if (type(names) is not dict):
raise TypeError('names must be a dictionary: oldname -> newname')
if inplace:
self.__is_dirty__ = True
with cython_context():
if self._is_vertex_frame():
graph_proxy = self.__graph__.__proxy__.rename_vertex_fields(names.keys(), names.values())
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
graph_proxy = self.__graph__.__proxy__.rename_edge_fields(names.keys(), names.values())
self.__graph__.__proxy__ = graph_proxy
return self
else:
return super(GFrame, self).rename(names, inplace=inplace) | python | def rename(self, names, inplace=False):
"""
Rename the columns using the 'names' dict. This changes the names of
the columns given as the keys and replaces them with the names given as
the values.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
names : dict[string, string]
Dictionary of [old_name, new_name]
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
"""
if (type(names) is not dict):
raise TypeError('names must be a dictionary: oldname -> newname')
if inplace:
self.__is_dirty__ = True
with cython_context():
if self._is_vertex_frame():
graph_proxy = self.__graph__.__proxy__.rename_vertex_fields(names.keys(), names.values())
self.__graph__.__proxy__ = graph_proxy
elif self._is_edge_frame():
graph_proxy = self.__graph__.__proxy__.rename_edge_fields(names.keys(), names.values())
self.__graph__.__proxy__ = graph_proxy
return self
else:
return super(GFrame, self).rename(names, inplace=inplace) | [
"def",
"rename",
"(",
"self",
",",
"names",
",",
"inplace",
"=",
"False",
")",
":",
"if",
"(",
"type",
"(",
"names",
")",
"is",
"not",
"dict",
")",
":",
"raise",
"TypeError",
"(",
"'names must be a dictionary: oldname -> newname'",
")",
"if",
"inplace",
":",
"self",
".",
"__is_dirty__",
"=",
"True",
"with",
"cython_context",
"(",
")",
":",
"if",
"self",
".",
"_is_vertex_frame",
"(",
")",
":",
"graph_proxy",
"=",
"self",
".",
"__graph__",
".",
"__proxy__",
".",
"rename_vertex_fields",
"(",
"names",
".",
"keys",
"(",
")",
",",
"names",
".",
"values",
"(",
")",
")",
"self",
".",
"__graph__",
".",
"__proxy__",
"=",
"graph_proxy",
"elif",
"self",
".",
"_is_edge_frame",
"(",
")",
":",
"graph_proxy",
"=",
"self",
".",
"__graph__",
".",
"__proxy__",
".",
"rename_edge_fields",
"(",
"names",
".",
"keys",
"(",
")",
",",
"names",
".",
"values",
"(",
")",
")",
"self",
".",
"__graph__",
".",
"__proxy__",
"=",
"graph_proxy",
"return",
"self",
"else",
":",
"return",
"super",
"(",
"GFrame",
",",
"self",
")",
".",
"rename",
"(",
"names",
",",
"inplace",
"=",
"inplace",
")"
] | Rename the columns using the 'names' dict. This changes the names of
the columns given as the keys and replaces them with the names given as
the values.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
names : dict[string, string]
Dictionary of [old_name, new_name]
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place. | [
"Rename",
"the",
"columns",
"using",
"the",
"names",
"dict",
".",
"This",
"changes",
"the",
"names",
"of",
"the",
"columns",
"given",
"as",
"the",
"keys",
"and",
"replaces",
"them",
"with",
"the",
"names",
"given",
"as",
"the",
"values",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/gframe.py#L245-L279 | train |
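A minimal sketch of `GFrame.rename`; the graph and the `pr` field are assumptions for illustration:

```python
import turicreate as tc

g = tc.SGraph().add_vertices(tc.SFrame({'__id': [1, 2], 'pr': [0.5, 0.5]}))

g.vertices.rename({'pr': 'pagerank'}, inplace=True)  # {old_name: new_name}
print(g.vertices.column_names())  # ['__id', 'pagerank']
```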
apple/turicreate | src/unity/python/turicreate/data_structures/gframe.py | GFrame.num_rows | def num_rows(self):
"""
Returns the number of rows.
Returns
-------
out : int
Number of rows in the SFrame.
"""
if self._is_vertex_frame():
return self.__graph__.summary()['num_vertices']
elif self._is_edge_frame():
return self.__graph__.summary()['num_edges'] | python | def num_rows(self):
"""
Returns the number of rows.
Returns
-------
out : int
Number of rows in the SFrame.
"""
if self._is_vertex_frame():
return self.__graph__.summary()['num_vertices']
elif self._is_edge_frame():
return self.__graph__.summary()['num_edges'] | [
"def",
"num_rows",
"(",
"self",
")",
":",
"if",
"self",
".",
"_is_vertex_frame",
"(",
")",
":",
"return",
"self",
".",
"__graph__",
".",
"summary",
"(",
")",
"[",
"'num_vertices'",
"]",
"elif",
"self",
".",
"_is_edge_frame",
"(",
")",
":",
"return",
"self",
".",
"__graph__",
".",
"summary",
"(",
")",
"[",
"'num_edges'",
"]"
] | Returns the number of rows.
Returns
-------
out : int
Number of rows in the SFrame. | [
"Returns",
"the",
"number",
"of",
"rows",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/gframe.py#L321-L333 | train |
apple/turicreate | src/unity/python/turicreate/data_structures/gframe.py | GFrame.column_names | def column_names(self):
"""
Returns the column names.
Returns
-------
out : list[string]
Column names of the SFrame.
"""
if self._is_vertex_frame():
return self.__graph__.__proxy__.get_vertex_fields()
elif self._is_edge_frame():
return self.__graph__.__proxy__.get_edge_fields() | python | def column_names(self):
"""
Returns the column names.
Returns
-------
out : list[string]
Column names of the SFrame.
"""
if self._is_vertex_frame():
return self.__graph__.__proxy__.get_vertex_fields()
elif self._is_edge_frame():
return self.__graph__.__proxy__.get_edge_fields() | [
"def",
"column_names",
"(",
"self",
")",
":",
"if",
"self",
".",
"_is_vertex_frame",
"(",
")",
":",
"return",
"self",
".",
"__graph__",
".",
"__proxy__",
".",
"get_vertex_fields",
"(",
")",
"elif",
"self",
".",
"_is_edge_frame",
"(",
")",
":",
"return",
"self",
".",
"__graph__",
".",
"__proxy__",
".",
"get_edge_fields",
"(",
")"
] | Returns the column names.
Returns
-------
out : list[string]
Column names of the SFrame. | [
"Returns",
"the",
"column",
"names",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/gframe.py#L346-L358 | train |
apple/turicreate | src/unity/python/turicreate/data_structures/gframe.py | GFrame.column_types | def column_types(self):
"""
Returns the column types.
Returns
-------
out : list[type]
Column types of the SFrame.
"""
if self.__type__ == VERTEX_GFRAME:
return self.__graph__.__proxy__.get_vertex_field_types()
elif self.__type__ == EDGE_GFRAME:
return self.__graph__.__proxy__.get_edge_field_types() | python | def column_types(self):
"""
Returns the column types.
Returns
-------
out : list[type]
Column types of the SFrame.
"""
if self.__type__ == VERTEX_GFRAME:
return self.__graph__.__proxy__.get_vertex_field_types()
elif self.__type__ == EDGE_GFRAME:
return self.__graph__.__proxy__.get_edge_field_types() | [
"def",
"column_types",
"(",
"self",
")",
":",
"if",
"self",
".",
"__type__",
"==",
"VERTEX_GFRAME",
":",
"return",
"self",
".",
"__graph__",
".",
"__proxy__",
".",
"get_vertex_field_types",
"(",
")",
"elif",
"self",
".",
"__type__",
"==",
"EDGE_GFRAME",
":",
"return",
"self",
".",
"__graph__",
".",
"__proxy__",
".",
"get_edge_field_types",
"(",
")"
] | Returns the column types.
Returns
-------
out : list[type]
Column types of the SFrame. | [
"Returns",
"the",
"column",
"types",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/data_structures/gframe.py#L360-L372 | train |
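The three accessors above (`num_rows`, `column_names`, `column_types`) can be exercised together; the small graph below is assumed for illustration:

```python
import turicreate as tc

g = (tc.SGraph()
     .add_vertices(tc.SFrame({'__id': [1, 2, 3]}))
     .add_edges(tc.SFrame({'__src_id': [1, 2], '__dst_id': [2, 3]})))

print(g.vertices.num_rows())    # 3 (num_vertices from the graph summary)
print(g.edges.num_rows())       # 2 (num_edges from the graph summary)
print(g.edges.column_names())   # ['__src_id', '__dst_id']
print(g.edges.column_types())   # e.g. [int, int]
```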
apple/turicreate | src/unity/python/turicreate/toolkits/regression/_regression.py | create | def create(dataset, target, features=None, validation_set = 'auto',
verbose=True):
"""
Automatically create a suitable regression model based on the provided
training data.
To use specific options of a desired model, use the ``create`` function
of the corresponding model.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : str
The name of the column in ``dataset`` that is the prediction target.
This column must have a numeric type (int/float).
features : list[string], optional
Names of the columns containing features. 'None' (the default) indicates
that all columns except the target variable should be used as features.
The features are columns in the input SFrame that can be of the
following types:
- *Numeric*: values of numeric type integer or float.
- *Categorical*: values of type string.
- *Array*: list of numeric (integer or float) values. Each list element
is treated as a separate feature in the model.
- *Dictionary*: key-value pairs with numeric (integer or float) values
Each key of a dictionary is treated as a separate feature and the
value in the dictionary corresponds to the value of the feature.
Dictionaries are ideal for representing sparse data.
Columns of type *list* are not supported. Convert such feature
columns to type array if all entries in the list are of numeric
types. If the lists contain data of mixed types, separate
them out into different columns.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance. For
each row of the progress table, the chosen metrics are computed for
both the provided training dataset and the validation_set. The format
of this SFrame must be the same as the training set. By default this
argument is set to 'auto' and a validation set is automatically sampled
and used for progress printing. If validation_set is set to None, then
no additional metrics are computed. The default value is 'auto'.
verbose : boolean, optional
If True, print progress information during training.
Returns
-------
out : A trained regression model.
See Also
--------
turicreate.linear_regression.LinearRegression,
turicreate.boosted_trees_regression.BoostedTreesRegression
Examples
--------
.. sourcecode:: python
# Setup the data
>>> import turicreate as tc
>>> data = tc.SFrame('https://static.turi.com/datasets/regression/houses.csv')
# Selects the best model based on your data.
>>> model = tc.regression.create(data, target='price',
... features=['bath', 'bedroom', 'size'])
# Make predictions and evaluate results.
>>> predictions = model.predict(data)
>>> results = model.evaluate(data)
"""
dataset, validation_set = _validate_data(dataset, target, features,
validation_set)
if validation_set is None:
validation_set = _turicreate.SFrame()
model_proxy = _turicreate.extensions.create_automatic_regression_model(
dataset, target, validation_set, {})
return _sl.wrap_model_proxy(model_proxy) | python | def create(dataset, target, features=None, validation_set = 'auto',
verbose=True):
"""
Automatically create a suitable regression model based on the provided
training data.
To use specific options of a desired model, use the ``create`` function
of the corresponding model.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : str
The name of the column in ``dataset`` that is the prediction target.
This column must have a numeric type (int/float).
features : list[string], optional
Names of the columns containing features. 'None' (the default) indicates
that all columns except the target variable should be used as features.
The features are columns in the input SFrame that can be of the
following types:
- *Numeric*: values of numeric type integer or float.
- *Categorical*: values of type string.
- *Array*: list of numeric (integer or float) values. Each list element
is treated as a separate feature in the model.
- *Dictionary*: key-value pairs with numeric (integer or float) values
Each key of a dictionary is treated as a separate feature and the
value in the dictionary corresponds to the value of the feature.
Dictionaries are ideal for representing sparse data.
Columns of type *list* are not supported. Convert such feature
columns to type array if all entries in the list are of numeric
types. If the lists contain data of mixed types, separate
them out into different columns.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance. For
each row of the progress table, the chosen metrics are computed for
both the provided training dataset and the validation_set. The format
of this SFrame must be the same as the training set. By default this
argument is set to 'auto' and a validation set is automatically sampled
and used for progress printing. If validation_set is set to None, then
no additional metrics are computed. The default value is 'auto'.
verbose : boolean, optional
If True, print progress information during training.
Returns
-------
out : A trained regression model.
See Also
--------
turicreate.linear_regression.LinearRegression,
turicreate.boosted_trees_regression.BoostedTreesRegression
Examples
--------
.. sourcecode:: python
# Setup the data
>>> import turicreate as tc
>>> data = tc.SFrame('https://static.turi.com/datasets/regression/houses.csv')
# Selects the best model based on your data.
>>> model = tc.regression.create(data, target='price',
... features=['bath', 'bedroom', 'size'])
# Make predictions and evaluate results.
>>> predictions = model.predict(data)
>>> results = model.evaluate(data)
"""
dataset, validation_set = _validate_data(dataset, target, features,
validation_set)
if validation_set is None:
validation_set = _turicreate.SFrame()
model_proxy = _turicreate.extensions.create_automatic_regression_model(
dataset, target, validation_set, {})
return _sl.wrap_model_proxy(model_proxy) | [
"def",
"create",
"(",
"dataset",
",",
"target",
",",
"features",
"=",
"None",
",",
"validation_set",
"=",
"'auto'",
",",
"verbose",
"=",
"True",
")",
":",
"dataset",
",",
"validation_set",
"=",
"_validate_data",
"(",
"dataset",
",",
"target",
",",
"features",
",",
"validation_set",
")",
"if",
"validation_set",
"is",
"None",
":",
"validation_set",
"=",
"_turicreate",
".",
"SFrame",
"(",
")",
"model_proxy",
"=",
"_turicreate",
".",
"extensions",
".",
"create_automatic_regression_model",
"(",
"dataset",
",",
"target",
",",
"validation_set",
",",
"{",
"}",
")",
"return",
"_sl",
".",
"wrap_model_proxy",
"(",
"model_proxy",
")"
] | Automatically create a suitable regression model based on the provided
training data.
To use specific options of a desired model, use the ``create`` function
of the corresponding model.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : str
The name of the column in ``dataset`` that is the prediction target.
This column must have a numeric type (int/float).
features : list[string], optional
Names of the columns containing features. 'None' (the default) indicates
that all columns except the target variable should be used as features.
The features are columns in the input SFrame that can be of the
following types:
- *Numeric*: values of numeric type integer or float.
- *Categorical*: values of type string.
- *Array*: list of numeric (integer or float) values. Each list element
is treated as a separate feature in the model.
- *Dictionary*: key-value pairs with numeric (integer or float) values
Each key of a dictionary is treated as a separate feature and the
value in the dictionary corresponds to the value of the feature.
Dictionaries are ideal for representing sparse data.
Columns of type *list* are not supported. Convert such feature
columns to type array if all entries in the list are of numeric
types. If the lists contain data of mixed types, separate
them out into different columns.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance. For
each row of the progress table, the chosen metrics are computed for
both the provided training dataset and the validation_set. The format
of this SFrame must be the same as the training set. By default this
argument is set to 'auto' and a validation set is automatically sampled
and used for progress printing. If validation_set is set to None, then
no additional metrics are computed. The default value is 'auto'.
verbose : boolean, optional
If True, print progress information during training.
Returns
-------
out : A trained regression model.
See Also
--------
turicreate.linear_regression.LinearRegression,
turicreate.boosted_trees_regression.BoostedTreesRegression
Examples
--------
.. sourcecode:: python
# Setup the data
>>> import turicreate as tc
>>> data = tc.SFrame('https://static.turi.com/datasets/regression/houses.csv')
# Selects the best model based on your data.
>>> model = tc.regression.create(data, target='price',
... features=['bath', 'bedroom', 'size'])
# Make predictions and evaluate results.
>>> predictions = model.predict(data)
>>> results = model.evaluate(data) | [
"Automatically",
"create",
"a",
"suitable",
"regression",
"model",
"based",
"on",
"the",
"provided",
"training",
"data",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/regression/_regression.py#L14-L116 | train |
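Beyond the doctest above, a hedged sketch of the `validation_set=None` behavior described in the parameter docs (the tiny frame is an assumed toy example):

```python
import turicreate as tc

data = tc.SFrame({'size': [1000., 1500., 2000., 2500., 3000., 3500.],
                  'bedroom': [2, 3, 3, 4, 4, 5],
                  'price': [200e3, 280e3, 360e3, 430e3, 510e3, 590e3]})

# validation_set=None disables the automatic hold-out sampling, so no
# validation metrics are computed or printed during training.
model = tc.regression.create(data, target='price', validation_set=None)
print(model.evaluate(data))
```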
apple/turicreate | src/unity/python/turicreate/meta/asttools/mutators/prune_mutator.py | removable | def removable(self, node):
'''
node is removable only if all of its children are as well.
'''
throw_away = []
for child in self.children(node):
throw_away.append(self.visit(child))
if self.mode == 'exclusive':
return all(throw_away)
elif self.mode == 'inclusive':
return any(throw_away)
else:
raise TypeError("mode must be one of 'exclusive' or 'inclusive'") | python | def removable(self, node):
'''
node is removable only if all of its children are as well.
'''
throw_away = []
for child in self.children(node):
throw_away.append(self.visit(child))
if self.mode == 'exclusive':
return all(throw_away)
elif self.mode == 'inclusive':
return any(throw_away)
else:
raise TypeError("mode must be one of 'exclusive' or 'inclusive'") | [
"def",
"removable",
"(",
"self",
",",
"node",
")",
":",
"throw_away",
"=",
"[",
"]",
"for",
"child",
"in",
"self",
".",
"children",
"(",
"node",
")",
":",
"throw_away",
".",
"append",
"(",
"self",
".",
"visit",
"(",
"child",
")",
")",
"if",
"self",
".",
"mode",
"==",
"'exclusive'",
":",
"return",
"all",
"(",
"throw_away",
")",
"elif",
"self",
".",
"mode",
"==",
"'inclusive'",
":",
"return",
"any",
"(",
"throw_away",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"mode must be one of 'exclusive' or 'inclusive'\"",
")"
] | node is removable only if all of its children are as well. | [
"node",
"is",
"removable",
"only",
"if",
"all",
"of",
"its",
"children",
"are",
"as",
"well",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/meta/asttools/mutators/prune_mutator.py#L17-L30 | train |
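A standalone sketch of the aggregation logic above (plain functions, not the actual visitor class), illustrating how the 'exclusive' and 'inclusive' modes differ:

```python
def combine(child_flags, mode):
    # Mirrors the aggregation above: 'exclusive' requires every child to be
    # removable, 'inclusive' requires at least one.
    if mode == 'exclusive':
        return all(child_flags)
    elif mode == 'inclusive':
        return any(child_flags)
    raise TypeError("mode must be one of 'exclusive' or 'inclusive'")

print(combine([True, False], 'exclusive'))  # False
print(combine([True, False], 'inclusive'))  # True
print(combine([], 'exclusive'))             # True: a childless node is removable
```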
apple/turicreate | src/unity/python/turicreate/meta/asttools/mutators/prune_mutator.py | PruneVisitor.reduce | def reduce(self, body):
'''
remove nodes from a list
'''
i = 0
while i < len(body):
stmnt = body[i]
if self.visit(stmnt):
body.pop(i)
else:
i += 1 | python | def reduce(self, body):
'''
remove nodes from a list
'''
i = 0
while i < len(body):
stmnt = body[i]
if self.visit(stmnt):
body.pop(i)
else:
i += 1 | [
"def",
"reduce",
"(",
"self",
",",
"body",
")",
":",
"i",
"=",
"0",
"while",
"i",
"<",
"len",
"(",
"body",
")",
":",
"stmnt",
"=",
"body",
"[",
"i",
"]",
"if",
"self",
".",
"visit",
"(",
"stmnt",
")",
":",
"body",
".",
"pop",
"(",
"i",
")",
"else",
":",
"i",
"+=",
"1"
] | remove nodes from a list | [
"remove",
"nodes",
"from",
"a",
"list"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/meta/asttools/mutators/prune_mutator.py#L52-L62 | train |
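The loop above is a classic in-place filtering pattern: the index only advances when nothing was removed, so consecutive removable statements are not skipped. A self-contained sketch with a stand-in predicate:

```python
# Stand-in predicate for self.visit(stmnt): remove negative entries in place.
body = [1, -2, -3, 4, -5]
i = 0
while i < len(body):
    if body[i] < 0:
        body.pop(i)   # do not advance: the next element shifted into slot i
    else:
        i += 1
print(body)  # [1, 4]
```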
apple/turicreate | src/unity/python/turicreate/toolkits/audio_analysis/audio_analysis.py | load_audio | def load_audio(path, with_path=True, recursive=True, ignore_failure=True, random_order=False):
"""
Loads WAV file(s) from a path.
Parameters
----------
path : str
Path to WAV files to be loaded.
with_path : bool, optional
Indicates whether a path column is added to the returned SFrame.
recursive : bool, optional
Indicates whether ``load_audio`` should do a recursive directory traversal,
or only load audio files directly under ``path``.
ignore_failure : bool, optional
If True, only print warnings for failed files and keep loading the remaining
audio files.
random_order : bool, optional
Load audio files in random order.
Returns
-------
out : SFrame
Returns an SFrame with either an 'audio' column or both an 'audio' and
a 'path' column. The 'audio' column is a column of dictionaries.
Each dictionary contains two items. One item is the sample rate, in
samples per second (int type). The other item will be the data in a numpy
array. If the wav file has a single channel, the array will have a single
dimension. If there are multiple channels, the array will have shape
(L,C) where L is the number of samples and C is the number of channels.
Examples
--------
>>> audio_path = "~/Documents/myAudioFiles/"
>>> audio_sframe = tc.audio_analysis.load_audio(audio_path, recursive=True)
"""
from scipy.io import wavfile as _wavfile
all_wav_files = []
if _fnmatch(path, '*.wav'): # single file
all_wav_files.append(path)
elif recursive:
for (dir_path, _, file_names) in _os.walk(path):
for cur_file in file_names:
if _fnmatch(cur_file, '*.wav'):
all_wav_files.append(dir_path + '/' + cur_file)
else:
all_wav_files = _glob(path + '/*.wav')
if random_order:
_shuffle(all_wav_files)
result_builder = _tc.SFrameBuilder(column_types=[dict, str], column_names=['audio', 'path'])
for cur_file_path in all_wav_files:
try:
sample_rate, data = _wavfile.read(cur_file_path)
except Exception as e:
error_string = "Could not read {}: {}".format(cur_file_path, e)
if not ignore_failure:
raise _ToolkitError(error_string)
else:
print(error_string)
continue
result_builder.append([{'sample_rate': sample_rate, 'data': data}, cur_file_path])
result = result_builder.close()
if not with_path:
del result['path']
return result | python | def load_audio(path, with_path=True, recursive=True, ignore_failure=True, random_order=False):
"""
Loads WAV file(s) from a path.
Parameters
----------
path : str
Path to WAV files to be loaded.
with_path : bool, optional
Indicates whether a path column is added to the returned SFrame.
recursive : bool, optional
Indicates whether ``load_audio`` should do a recursive directory traversal,
or only load audio files directly under ``path``.
ignore_failure : bool, optional
If True, only print warnings for failed files and keep loading the remaining
audio files.
random_order : bool, optional
Load audio files in random order.
Returns
-------
out : SFrame
Returns an SFrame with either an 'audio' column or both an 'audio' and
a 'path' column. The 'audio' column is a column of dictionaries.
Each dictionary contains two items. One item is the sample rate, in
samples per second (int type). The other item will be the data in a numpy
array. If the wav file has a single channel, the array will have a single
dimension. If there are multiple channels, the array will have shape
(L,C) where L is the number of samples and C is the number of channels.
Examples
--------
>>> audio_path = "~/Documents/myAudioFiles/"
>>> audio_sframe = tc.audio_analysis.load_audio(audio_path, recursive=True)
"""
from scipy.io import wavfile as _wavfile
all_wav_files = []
if _fnmatch(path, '*.wav'): # single file
all_wav_files.append(path)
elif recursive:
for (dir_path, _, file_names) in _os.walk(path):
for cur_file in file_names:
if _fnmatch(cur_file, '*.wav'):
all_wav_files.append(dir_path + '/' + cur_file)
else:
all_wav_files = _glob(path + '/*.wav')
if random_order:
_shuffle(all_wav_files)
result_builder = _tc.SFrameBuilder(column_types=[dict, str], column_names=['audio', 'path'])
for cur_file_path in all_wav_files:
try:
sample_rate, data = _wavfile.read(cur_file_path)
except Exception as e:
error_string = "Could not read {}: {}".format(cur_file_path, e)
if not ignore_failure:
raise _ToolkitError(error_string)
else:
print(error_string)
continue
result_builder.append([{'sample_rate': sample_rate, 'data': data}, cur_file_path])
result = result_builder.close()
if not with_path:
del result['path']
return result | [
"def",
"load_audio",
"(",
"path",
",",
"with_path",
"=",
"True",
",",
"recursive",
"=",
"True",
",",
"ignore_failure",
"=",
"True",
",",
"random_order",
"=",
"False",
")",
":",
"from",
"scipy",
".",
"io",
"import",
"wavfile",
"as",
"_wavfile",
"all_wav_files",
"=",
"[",
"]",
"if",
"_fnmatch",
"(",
"path",
",",
"'*.wav'",
")",
":",
"# single file",
"all_wav_files",
".",
"append",
"(",
"path",
")",
"elif",
"recursive",
":",
"for",
"(",
"dir_path",
",",
"_",
",",
"file_names",
")",
"in",
"_os",
".",
"walk",
"(",
"path",
")",
":",
"for",
"cur_file",
"in",
"file_names",
":",
"if",
"_fnmatch",
"(",
"cur_file",
",",
"'*.wav'",
")",
":",
"all_wav_files",
".",
"append",
"(",
"dir_path",
"+",
"'/'",
"+",
"cur_file",
")",
"else",
":",
"all_wav_files",
"=",
"_glob",
"(",
"path",
"+",
"'/*.wav'",
")",
"if",
"random_order",
":",
"_shuffle",
"(",
"all_wav_files",
")",
"result_builder",
"=",
"_tc",
".",
"SFrameBuilder",
"(",
"column_types",
"=",
"[",
"dict",
",",
"str",
"]",
",",
"column_names",
"=",
"[",
"'audio'",
",",
"'path'",
"]",
")",
"for",
"cur_file_path",
"in",
"all_wav_files",
":",
"try",
":",
"sample_rate",
",",
"data",
"=",
"_wavfile",
".",
"read",
"(",
"cur_file_path",
")",
"except",
"Exception",
"as",
"e",
":",
"error_string",
"=",
"\"Could not read {}: {}\"",
".",
"format",
"(",
"cur_file_path",
",",
"e",
")",
"if",
"not",
"ignore_failure",
":",
"raise",
"_ToolkitError",
"(",
"error_string",
")",
"else",
":",
"print",
"(",
"error_string",
")",
"continue",
"result_builder",
".",
"append",
"(",
"[",
"{",
"'sample_rate'",
":",
"sample_rate",
",",
"'data'",
":",
"data",
"}",
",",
"cur_file_path",
"]",
")",
"result",
"=",
"result_builder",
".",
"close",
"(",
")",
"if",
"not",
"with_path",
":",
"del",
"result",
"[",
"'path'",
"]",
"return",
"result"
] | Loads WAV file(s) from a path.
Parameters
----------
path : str
Path to WAV files to be loaded.
with_path : bool, optional
Indicates whether a path column is added to the returned SFrame.
recursive : bool, optional
Indicates whether ``load_audio`` should do a recursive directory traversal,
or only load audio files directly under ``path``.
ignore_failure : bool, optional
If True, only print warnings for failed files and keep loading the remaining
audio files.
random_order : bool, optional
Load audio files in random order.
Returns
-------
out : SFrame
Returns an SFrame with either an 'audio' column or both an 'audio' and
a 'path' column. The 'audio' column is a column of dictionaries.
Each dictionary contains two items. One item is the sample rate, in
samples per second (int type). The other item will be the data in a numpy
array. If the wav file has a single channel, the array will have a single
dimension. If there are multiple channels, the array will have shape
(L,C) where L is the number of samples and C is the number of channels.
Examples
--------
>>> audio_path = "~/Documents/myAudioFiles/"
>>> audio_sframe = tc.audio_analysis.load_audio(audio_path, recursive=True) | [
"Loads",
"WAV",
"file",
"(",
"s",
")",
"from",
"a",
"path",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/audio_analysis/audio_analysis.py#L21-L95 | train |
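A hedged usage sketch of `load_audio`; the directory path is a placeholder, and the inspected fields match the dictionary layout documented above:

```python
import turicreate as tc

# Placeholder directory; any folder containing .wav files works.
audio = tc.audio_analysis.load_audio('~/Documents/myAudioFiles/', recursive=True)

first = audio['audio'][0]
print(first['sample_rate'])   # samples per second (int)
print(len(first['data']))     # number of samples (mono) or sample frames
print(audio['path'][0])       # present because with_path defaults to True
```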
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/symbol_database.py | SymbolDatabase.RegisterMessage | def RegisterMessage(self, message):
"""Registers the given message type in the local database.
Calls to GetSymbol() and GetMessages() will return messages registered here.
Args:
message: a message.Message, to be registered.
Returns:
The provided message.
"""
desc = message.DESCRIPTOR
self._classes[desc.full_name] = message
self.pool.AddDescriptor(desc)
return message | python | def RegisterMessage(self, message):
"""Registers the given message type in the local database.
Calls to GetSymbol() and GetMessages() will return messages registered here.
Args:
message: a message.Message, to be registered.
Returns:
The provided message.
"""
desc = message.DESCRIPTOR
self._classes[desc.full_name] = message
self.pool.AddDescriptor(desc)
return message | [
"def",
"RegisterMessage",
"(",
"self",
",",
"message",
")",
":",
"desc",
"=",
"message",
".",
"DESCRIPTOR",
"self",
".",
"_classes",
"[",
"desc",
".",
"full_name",
"]",
"=",
"message",
"self",
".",
"pool",
".",
"AddDescriptor",
"(",
"desc",
")",
"return",
"message"
] | Registers the given message type in the local database.
Calls to GetSymbol() and GetMessages() will return messages registered here.
Args:
message: a message.Message, to be registered.
Returns:
The provided message. | [
"Registers",
"the",
"given",
"message",
"type",
"in",
"the",
"local",
"database",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/symbol_database.py#L68-L83 | train |
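For context, generated `_pb2` modules call `RegisterMessage` on the default symbol database at import time; a minimal lookup sketch using the well-known `Timestamp` type:

```python
from google.protobuf import symbol_database
from google.protobuf import timestamp_pb2  # importing a _pb2 module registers its messages

db = symbol_database.Default()
cls = db.GetSymbol('google.protobuf.Timestamp')
assert cls is timestamp_pb2.Timestamp  # the registered class is returned
```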
apple/turicreate | src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/symbol_database.py | SymbolDatabase.GetMessages | def GetMessages(self, files):
# TODO(amauryfa): Fix the differences with MessageFactory.
"""Gets all registered messages from a specified file.
Only messages already created and registered will be returned; (this is the
case for imported _pb2 modules)
But unlike MessageFactory, this version also returns already defined nested
messages, but does not register any message extensions.
Args:
files: The file names to extract messages from.
Returns:
A dictionary mapping proto names to the message classes.
Raises:
KeyError: if a file could not be found.
"""
def _GetAllMessageNames(desc):
"""Walk a message Descriptor and recursively yields all message names."""
yield desc.full_name
for msg_desc in desc.nested_types:
for full_name in _GetAllMessageNames(msg_desc):
yield full_name
result = {}
for file_name in files:
file_desc = self.pool.FindFileByName(file_name)
for msg_desc in file_desc.message_types_by_name.values():
for full_name in _GetAllMessageNames(msg_desc):
try:
result[full_name] = self._classes[full_name]
except KeyError:
# This descriptor has no registered class, skip it.
pass
return result | python | def GetMessages(self, files):
# TODO(amauryfa): Fix the differences with MessageFactory.
"""Gets all registered messages from a specified file.
Only messages already created and registered will be returned; (this is the
case for imported _pb2 modules)
But unlike MessageFactory, this version also returns already defined nested
messages, but does not register any message extensions.
Args:
files: The file names to extract messages from.
Returns:
A dictionary mapping proto names to the message classes.
Raises:
KeyError: if a file could not be found.
"""
def _GetAllMessageNames(desc):
"""Walk a message Descriptor and recursively yields all message names."""
yield desc.full_name
for msg_desc in desc.nested_types:
for full_name in _GetAllMessageNames(msg_desc):
yield full_name
result = {}
for file_name in files:
file_desc = self.pool.FindFileByName(file_name)
for msg_desc in file_desc.message_types_by_name.values():
for full_name in _GetAllMessageNames(msg_desc):
try:
result[full_name] = self._classes[full_name]
except KeyError:
# This descriptor has no registered class, skip it.
pass
return result | [
"def",
"GetMessages",
"(",
"self",
",",
"files",
")",
":",
"# TODO(amauryfa): Fix the differences with MessageFactory.",
"def",
"_GetAllMessageNames",
"(",
"desc",
")",
":",
"\"\"\"Walk a message Descriptor and recursively yields all message names.\"\"\"",
"yield",
"desc",
".",
"full_name",
"for",
"msg_desc",
"in",
"desc",
".",
"nested_types",
":",
"for",
"full_name",
"in",
"_GetAllMessageNames",
"(",
"msg_desc",
")",
":",
"yield",
"full_name",
"result",
"=",
"{",
"}",
"for",
"file_name",
"in",
"files",
":",
"file_desc",
"=",
"self",
".",
"pool",
".",
"FindFileByName",
"(",
"file_name",
")",
"for",
"msg_desc",
"in",
"file_desc",
".",
"message_types_by_name",
".",
"values",
"(",
")",
":",
"for",
"full_name",
"in",
"_GetAllMessageNames",
"(",
"msg_desc",
")",
":",
"try",
":",
"result",
"[",
"full_name",
"]",
"=",
"self",
".",
"_classes",
"[",
"full_name",
"]",
"except",
"KeyError",
":",
"# This descriptor has no registered class, skip it.",
"pass",
"return",
"result"
] | Gets all registered messages from a specified file.
Only messages already created and registered will be returned; (this is the
case for imported _pb2 modules)
But unlike MessageFactory, this version also returns already defined nested
messages, but does not register any message extensions.
Args:
files: The file names to extract messages from.
Returns:
A dictionary mapping proto names to the message classes.
Raises:
KeyError: if a file could not be found. | [
"Gets",
"all",
"registered",
"messages",
"from",
"a",
"specified",
"file",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/symbol_database.py#L137-L173 | train |
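A hedged sketch of `GetMessages`, again leaning on the well-known `Timestamp` type; the file name is protobuf's actual `.proto` path within its package:

```python
from google.protobuf import symbol_database
from google.protobuf import timestamp_pb2  # ensures the file is loaded and registered

db = symbol_database.Default()
msgs = db.GetMessages(['google/protobuf/timestamp.proto'])
print(msgs['google.protobuf.Timestamp'] is timestamp_pb2.Timestamp)  # True
```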
apple/turicreate | src/unity/python/turicreate/toolkits/object_detector/util/_visualization.py | _string_hash | def _string_hash(s):
"""String hash (djb2) with consistency between py2/py3 and persistency between runs (unlike `hash`)."""
h = 5381
for c in s:
h = h * 33 + ord(c)
return h | python | def _string_hash(s):
"""String hash (djb2) with consistency between py2/py3 and persistency between runs (unlike `hash`)."""
h = 5381
for c in s:
h = h * 33 + ord(c)
return h | [
"def",
"_string_hash",
"(",
"s",
")",
":",
"h",
"=",
"5381",
"for",
"c",
"in",
"s",
":",
"h",
"=",
"h",
"*",
"33",
"+",
"ord",
"(",
"c",
")",
"return",
"h"
] | String hash (djb2) with consistency between py2/py3 and persistence between runs (unlike `hash`). | [
"String",
"hash",
"(",
"djb2",
")",
"with",
"consistency",
"between",
"py2",
"/",
"py3",
"and",
"persistency",
"between",
"runs",
"(",
"unlike",
"hash",
")",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/object_detector/util/_visualization.py#L14-L19 | train |
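A standalone copy of the djb2 logic showing its intended use here: a stable label-to-color assignment across runs (the palette is an assumed example):

```python
def string_hash(s):
    # djb2: h = h * 33 + c, seeded with 5381; stable across runs and Python
    # versions, unlike the builtin hash(), which is randomized per process.
    h = 5381
    for c in s:
        h = h * 33 + ord(c)
    return h

palette = ['red', 'green', 'blue', 'orange']
for label in ('car', 'dog', 'car'):
    print(label, palette[string_hash(label) % len(palette)])  # 'car' always maps the same
```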
apple/turicreate | src/unity/python/turicreate/toolkits/object_detector/util/_visualization.py | draw_bounding_boxes | def draw_bounding_boxes(images, annotations, confidence_threshold=0):
"""
Visualizes bounding boxes (ground truth or predictions) by
returning annotated copies of the images.
Parameters
----------
images: SArray or Image
An `SArray` of type `Image`. A single `Image` instance may also be
given.
annotations: SArray or list
An `SArray` of annotations (either output from the
`ObjectDetector.predict` function or ground truth). A single list of
annotations may also be given, provided that it is coupled with a
single image.
confidence_threshold: float
Confidence threshold can limit the number of boxes to draw. By
default, this is set to 0, since the prediction may have already pruned
with an appropriate confidence threshold.
Returns
-------
annotated_images: SArray or Image
Similar to the input `images`, except the images are decorated with
boxes to visualize the object instances.
See also
--------
unstack_annotations
"""
_numeric_param_check_range('confidence_threshold', confidence_threshold, 0.0, 1.0)
from PIL import Image
def draw_single_image(row):
image = row['image']
anns = row['annotations']
if anns is None:
anns = []
elif type(anns) == dict:
anns = [anns]
pil_img = Image.fromarray(image.pixel_data)
_annotate_image(pil_img, anns, confidence_threshold=confidence_threshold)
image = _np.array(pil_img)
FORMAT_RAW = 2
annotated_image = _tc.Image(_image_data=image.tobytes(),
_width=image.shape[1],
_height=image.shape[0],
_channels=image.shape[2],
_format_enum=FORMAT_RAW,
_image_data_size=image.size)
return annotated_image
if isinstance(images, _tc.Image) and isinstance(annotations, list):
return draw_single_image({'image': images, 'annotations': annotations})
else:
return (_tc.SFrame({'image': images, 'annotations': annotations})
.apply(draw_single_image)) | python | def draw_bounding_boxes(images, annotations, confidence_threshold=0):
"""
Visualizes bounding boxes (ground truth or predictions) by
returning annotated copies of the images.
Parameters
----------
images: SArray or Image
An `SArray` of type `Image`. A single `Image` instance may also be
given.
annotations: SArray or list
An `SArray` of annotations (either output from the
`ObjectDetector.predict` function or ground truth). A single list of
annotations may also be given, provided that it is coupled with a
single image.
confidence_threshold: float
Confidence threshold can limit the number of boxes to draw. By
default, this is set to 0, since the prediction may already have been pruned
with an appropriate confidence threshold.
Returns
-------
annotated_images: SArray or Image
Similar to the input `images`, except the images are decorated with
boxes to visualize the object instances.
See also
--------
unstack_annotations
"""
_numeric_param_check_range('confidence_threshold', confidence_threshold, 0.0, 1.0)
from PIL import Image
def draw_single_image(row):
image = row['image']
anns = row['annotations']
if anns is None:
anns = []
elif type(anns) == dict:
anns = [anns]
pil_img = Image.fromarray(image.pixel_data)
_annotate_image(pil_img, anns, confidence_threshold=confidence_threshold)
image = _np.array(pil_img)
FORMAT_RAW = 2
annotated_image = _tc.Image(_image_data=image.tobytes(),
_width=image.shape[1],
_height=image.shape[0],
_channels=image.shape[2],
_format_enum=FORMAT_RAW,
_image_data_size=image.size)
return annotated_image
if isinstance(images, _tc.Image) and isinstance(annotations, list):
return draw_single_image({'image': images, 'annotations': annotations})
else:
return (_tc.SFrame({'image': images, 'annotations': annotations})
.apply(draw_single_image)) | [
"def",
"draw_bounding_boxes",
"(",
"images",
",",
"annotations",
",",
"confidence_threshold",
"=",
"0",
")",
":",
"_numeric_param_check_range",
"(",
"'confidence_threshold'",
",",
"confidence_threshold",
",",
"0.0",
",",
"1.0",
")",
"from",
"PIL",
"import",
"Image",
"def",
"draw_single_image",
"(",
"row",
")",
":",
"image",
"=",
"row",
"[",
"'image'",
"]",
"anns",
"=",
"row",
"[",
"'annotations'",
"]",
"if",
"anns",
"==",
"None",
":",
"anns",
"=",
"[",
"]",
"elif",
"type",
"(",
"anns",
")",
"==",
"dict",
":",
"anns",
"=",
"[",
"anns",
"]",
"pil_img",
"=",
"Image",
".",
"fromarray",
"(",
"image",
".",
"pixel_data",
")",
"_annotate_image",
"(",
"pil_img",
",",
"anns",
",",
"confidence_threshold",
"=",
"confidence_threshold",
")",
"image",
"=",
"_np",
".",
"array",
"(",
"pil_img",
")",
"FORMAT_RAW",
"=",
"2",
"annotated_image",
"=",
"_tc",
".",
"Image",
"(",
"_image_data",
"=",
"image",
".",
"tobytes",
"(",
")",
",",
"_width",
"=",
"image",
".",
"shape",
"[",
"1",
"]",
",",
"_height",
"=",
"image",
".",
"shape",
"[",
"0",
"]",
",",
"_channels",
"=",
"image",
".",
"shape",
"[",
"2",
"]",
",",
"_format_enum",
"=",
"FORMAT_RAW",
",",
"_image_data_size",
"=",
"image",
".",
"size",
")",
"return",
"annotated_image",
"if",
"isinstance",
"(",
"images",
",",
"_tc",
".",
"Image",
")",
"and",
"isinstance",
"(",
"annotations",
",",
"list",
")",
":",
"return",
"draw_single_image",
"(",
"{",
"'image'",
":",
"images",
",",
"'annotations'",
":",
"annotations",
"}",
")",
"else",
":",
"return",
"(",
"_tc",
".",
"SFrame",
"(",
"{",
"'image'",
":",
"images",
",",
"'annotations'",
":",
"annotations",
"}",
")",
".",
"apply",
"(",
"draw_single_image",
")",
")"
] | Visualizes bounding boxes (ground truth or predictions) by
returning annotated copies of the images.
Parameters
----------
images: SArray or Image
An `SArray` of type `Image`. A single `Image` instance may also be
given.
annotations: SArray or list
An `SArray` of annotations (either output from the
`ObjectDetector.predict` function or ground truth). A single list of
annotations may also be given, provided that it is coupled with a
single image.
confidence_threshold: float
The confidence threshold can limit the number of boxes drawn. By
default, this is set to 0, since the predictions may already have been
pruned with an appropriate confidence threshold.
Returns
-------
annotated_images: SArray or Image
Similar to the input `images`, except the images are decorated with
boxes to visualize the object instances.
See also
--------
unstack_annotations | [
"Visualizes",
"bounding",
"boxes",
"(",
"ground",
"truth",
"or",
"predictions",
")",
"by",
"returning",
"annotated",
"copies",
"of",
"the",
"images",
"."
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/object_detector/util/_visualization.py#L94-L151 | train |
apple/turicreate | src/unity/python/turicreate/toolkits/_supervised_learning.py | create | def create(dataset, target, model_name, features=None,
validation_set='auto', distributed='auto',
verbose=True, seed=None, **kwargs):
"""
Create a :class:`~turicreate.toolkits.SupervisedLearningModel`.
This is a generic function that allows you to create any model that
implements SupervisedLearningModel. This function is normally not called
directly; call a specific model's create function instead.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : string
Name of the column containing the target variable. The values in this
column must be 0 or 1, of integer type.
model_name : string
Name of the model
features : list[string], optional
List of feature column names.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance.
For each row of the progress table, the chosen metrics are computed
for both the provided training dataset and the validation_set. The
format of this SFrame must be the same as the training set.
By default this argument is set to 'auto' and a validation set is
automatically sampled and used for progress printing. If
validation_set is set to None, then no additional metrics
are computed. The default value is 'auto'.
distributed: env
The distributed environment
verbose : boolean
Whether to print out messages during training.
seed : int, optional
Seed for random number generation. Set this value to ensure that the
same model is created every time.
kwargs : dict
Additional parameter options that can be passed
"""
# Perform error-checking and trim inputs to specified columns
dataset, validation_set = _validate_data(dataset, target, features,
validation_set)
# Sample a validation set from the training data if requested
if isinstance(validation_set, str):
assert validation_set == 'auto'
if dataset.num_rows() >= 100:
if verbose:
print_validation_track_notification()
dataset, validation_set = dataset.random_split(.95, seed=seed, exact=True)
else:
validation_set = _turicreate.SFrame()
elif validation_set is None:
validation_set = _turicreate.SFrame()
# Sanitize model-specific options
options = {k.lower(): kwargs[k] for k in kwargs}
# Create a model instance and train it
model = _turicreate.extensions.__dict__[model_name]()
with QuietProgress(verbose):
model.train(dataset, target, validation_set, options)
return SupervisedLearningModel(model, model_name) | python | def create(dataset, target, model_name, features=None,
validation_set='auto', distributed='auto',
verbose=True, seed=None, **kwargs):
"""
Create a :class:`~turicreate.toolkits.SupervisedLearningModel`.
This is a generic function that allows you to create any model that
implements SupervisedLearningModel. This function is normally not called
directly; call a specific model's create function instead.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : string
Name of the column containing the target variable. The values in this
column must be 0 or 1, of integer type.
model_name : string
Name of the model
features : list[string], optional
List of feature column names.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance.
For each row of the progress table, the chosen metrics are computed
for both the provided training dataset and the validation_set. The
format of this SFrame must be the same as the training set.
By default this argument is set to 'auto' and a validation set is
automatically sampled and used for progress printing. If
validation_set is set to None, then no additional metrics
are computed. The default value is 'auto'.
distributed: env
The distributed environment
verbose : boolean
Whether to print out messages during training.
seed : int, optional
Seed for random number generation. Set this value to ensure that the
same model is created every time.
kwargs : dict
Additional parameter options that can be passed
"""
# Perform error-checking and trim inputs to specified columns
dataset, validation_set = _validate_data(dataset, target, features,
validation_set)
# Sample a validation set from the training data if requested
if isinstance(validation_set, str):
assert validation_set == 'auto'
if dataset.num_rows() >= 100:
if verbose:
print_validation_track_notification()
dataset, validation_set = dataset.random_split(.95, seed=seed, exact=True)
else:
validation_set = _turicreate.SFrame()
elif validation_set is None:
validation_set = _turicreate.SFrame()
# Sanitize model-specific options
options = {k.lower(): kwargs[k] for k in kwargs}
# Create a model instance and train it
model = _turicreate.extensions.__dict__[model_name]()
with QuietProgress(verbose):
model.train(dataset, target, validation_set, options)
return SupervisedLearningModel(model, model_name) | [
"def",
"create",
"(",
"dataset",
",",
"target",
",",
"model_name",
",",
"features",
"=",
"None",
",",
"validation_set",
"=",
"'auto'",
",",
"distributed",
"=",
"'auto'",
",",
"verbose",
"=",
"True",
",",
"seed",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# Perform error-checking and trim inputs to specified columns",
"dataset",
",",
"validation_set",
"=",
"_validate_data",
"(",
"dataset",
",",
"target",
",",
"features",
",",
"validation_set",
")",
"# Sample a validation set from the training data if requested",
"if",
"isinstance",
"(",
"validation_set",
",",
"str",
")",
":",
"assert",
"validation_set",
"==",
"'auto'",
"if",
"dataset",
".",
"num_rows",
"(",
")",
">=",
"100",
":",
"if",
"verbose",
":",
"print_validation_track_notification",
"(",
")",
"dataset",
",",
"validation_set",
"=",
"dataset",
".",
"random_split",
"(",
".95",
",",
"seed",
"=",
"seed",
",",
"exact",
"=",
"True",
")",
"else",
":",
"validation_set",
"=",
"_turicreate",
".",
"SFrame",
"(",
")",
"elif",
"validation_set",
"is",
"None",
":",
"validation_set",
"=",
"_turicreate",
".",
"SFrame",
"(",
")",
"# Sanitize model-specific options",
"options",
"=",
"{",
"k",
".",
"lower",
"(",
")",
":",
"kwargs",
"[",
"k",
"]",
"for",
"k",
"in",
"kwargs",
"}",
"# Create a model instance and train it",
"model",
"=",
"_turicreate",
".",
"extensions",
".",
"__dict__",
"[",
"model_name",
"]",
"(",
")",
"with",
"QuietProgress",
"(",
"verbose",
")",
":",
"model",
".",
"train",
"(",
"dataset",
",",
"target",
",",
"validation_set",
",",
"options",
")",
"return",
"SupervisedLearningModel",
"(",
"model",
",",
"model_name",
")"
] | Create a :class:`~turicreate.toolkits.SupervisedLearningModel`.
This is a generic function that allows you to create any model that
implements SupervisedLearningModel. This function is normally not called
directly; call a specific model's create function instead.
Parameters
----------
dataset : SFrame
Dataset for training the model.
target : string
Name of the column containing the target variable. The values in this
column must be 0 or 1, of integer type.
model_name : string
Name of the model
features : list[string], optional
List of feature column names.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance.
For each row of the progress table, the chosen metrics are computed
for both the provided training dataset and the validation_set. The
format of this SFrame must be the same as the training set.
By default this argument is set to 'auto' and a validation set is
automatically sampled and used for progress printing. If
validation_set is set to None, then no additional metrics
are computed. The default value is 'auto'.
distributed: env
The distributed environment
verbose : boolean
Whether to print out messages during training.
seed : int, optional
Seed for random number generation. Set this value to ensure that the
same model is created every time.
kwargs : dict
Additional parameter options that can be passed | [
"Create",
"a",
":",
"class",
":",
"~turicreate",
".",
"toolkits",
".",
"SupervisedLearningModel"
] | 74514c3f99e25b46f22c6e02977fe3da69221c2e | https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/unity/python/turicreate/toolkits/_supervised_learning.py#L261-L334 | train |
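The entry above documents an internal dispatcher, so a minimal sketch of how a toolkit-level create would delegate to it follows. The toy data and the extension name 'classifier_logistic_regression' are assumptions; in normal use you would call the specific toolkit's create (e.g. tc.logistic_classifier.create) rather than this generic function.

import turicreate as tc
# Internal module from this record's path; not a documented public entry point.
from turicreate.toolkits._supervised_learning import create as _sl_create

# Hypothetical toy data; the target column holds 0/1 integer labels.
data = tc.SFrame({'x1': [0.1, 0.9, 0.4, 0.8] * 30,
                  'x2': [1.0, 0.2, 0.5, 0.3] * 30,
                  'target': [0, 1, 0, 1] * 30})

# model_name must match a registered extension; the name below is assumed.
model = _sl_create(data, target='target',
                   model_name='classifier_logistic_regression',
                   features=['x1', 'x2'],
                   validation_set=None,  # skip automatic validation sampling
                   max_iterations=5)     # extra kwargs are lowercased into options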