id int32 0 252k | repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 51 19.8k | code_tokens list | docstring stringlengths 3 17.3k | docstring_tokens list | sha stringlengths 40 40 | url stringlengths 87 242 |
|---|---|---|---|---|---|---|---|---|---|---|---|
233,900 | maxpumperla/elephas | elephas/utils/sockets.py | determine_master | def determine_master(port=4000):
"""Determine address of master so that workers
can connect to it. If the environment variable
SPARK_LOCAL_IP is set, that address will be used.
:param port: port on which the application runs
:return: Master address
Example usage:
SPARK_LOCAL_IP=127.0.0.1 spark-submit --master \
local[8] examples/mllib_mlp.py
"""
if os.environ.get('SPARK_LOCAL_IP'):
return os.environ['SPARK_LOCAL_IP'] + ":" + str(port)
else:
return gethostbyname(gethostname()) + ":" + str(port) | python | def determine_master(port=4000):
if os.environ.get('SPARK_LOCAL_IP'):
return os.environ['SPARK_LOCAL_IP'] + ":" + str(port)
else:
return gethostbyname(gethostname()) + ":" + str(port) | [
"def",
"determine_master",
"(",
"port",
"=",
"4000",
")",
":",
"if",
"os",
".",
"environ",
".",
"get",
"(",
"'SPARK_LOCAL_IP'",
")",
":",
"return",
"os",
".",
"environ",
"[",
"'SPARK_LOCAL_IP'",
"]",
"+",
"\":\"",
"+",
"str",
"(",
"port",
")",
"else",
... | Determine address of master so that workers
can connect to it. If the environment variable
SPARK_LOCAL_IP is set, that address will be used.
:param port: port on which the application runs
:return: Master address
Example usage:
SPARK_LOCAL_IP=127.0.0.1 spark-submit --master \
local[8] examples/mllib_mlp.py | [
"Determine",
"address",
"of",
"master",
"so",
"that",
"workers",
"can",
"connect",
"to",
"it",
".",
"If",
"the",
"environment",
"variable",
"SPARK_LOCAL_IP",
"is",
"set",
"that",
"address",
"will",
"be",
"used",
"."
] | 84605acdc9564673c487637dcb27f5def128bcc7 | https://github.com/maxpumperla/elephas/blob/84605acdc9564673c487637dcb27f5def128bcc7/elephas/utils/sockets.py#L6-L21 |
233,901 | maxpumperla/elephas | elephas/utils/sockets.py | _receive_all | def _receive_all(socket, num_bytes):
"""Reads `num_bytes` bytes from the specified socket.
:param socket: open socket instance
:param num_bytes: number of bytes to read
:return: received data
"""
buffer = ''
buffer_size = 0
bytes_left = num_bytes
while buffer_size < num_bytes:
data = socket.recv(bytes_left)
delta = len(data)
buffer_size += delta
bytes_left -= delta
buffer += data
return buffer | python | def _receive_all(socket, num_bytes):
buffer = ''
buffer_size = 0
bytes_left = num_bytes
while buffer_size < num_bytes:
data = socket.recv(bytes_left)
delta = len(data)
buffer_size += delta
bytes_left -= delta
buffer += data
return buffer | [
"def",
"_receive_all",
"(",
"socket",
",",
"num_bytes",
")",
":",
"buffer",
"=",
"''",
"buffer_size",
"=",
"0",
"bytes_left",
"=",
"num_bytes",
"while",
"buffer_size",
"<",
"num_bytes",
":",
"data",
"=",
"socket",
".",
"recv",
"(",
"bytes_left",
")",
"delt... | Reads `num_bytes` bytes from the specified socket.
:param socket: open socket instance
:param num_bytes: number of bytes to read
:return: received data | [
"Reads",
"num_bytes",
"bytes",
"from",
"the",
"specified",
"socket",
"."
] | 84605acdc9564673c487637dcb27f5def128bcc7 | https://github.com/maxpumperla/elephas/blob/84605acdc9564673c487637dcb27f5def128bcc7/elephas/utils/sockets.py#L24-L42 |
233,902 | maxpumperla/elephas | elephas/utils/sockets.py | receive | def receive(socket, num_bytes=20):
"""Receive data frame from open socket.
:param socket: open socket instance
:param num_bytes: number of bytes to read
:return: received data
"""
length = int(_receive_all(socket, num_bytes).decode())
serialized_data = _receive_all(socket, length)
return pickle.loads(serialized_data) | python | def receive(socket, num_bytes=20):
length = int(_receive_all(socket, num_bytes).decode())
serialized_data = _receive_all(socket, length)
return pickle.loads(serialized_data) | [
"def",
"receive",
"(",
"socket",
",",
"num_bytes",
"=",
"20",
")",
":",
"length",
"=",
"int",
"(",
"_receive_all",
"(",
"socket",
",",
"num_bytes",
")",
".",
"decode",
"(",
")",
")",
"serialized_data",
"=",
"_receive_all",
"(",
"socket",
",",
"length",
... | Receive data frame from open socket.
:param socket: open socket instance
:param num_bytes: number of bytes to read
:return: received data | [
"Receive",
"data",
"frame",
"from",
"open",
"socket",
"."
] | 84605acdc9564673c487637dcb27f5def128bcc7 | https://github.com/maxpumperla/elephas/blob/84605acdc9564673c487637dcb27f5def128bcc7/elephas/utils/sockets.py#L45-L55 |
233,903 | maxpumperla/elephas | elephas/utils/sockets.py | send | def send(socket, data, num_bytes=20):
"""Send data to specified socket.
:param socket: open socket instance
:param data: data to send
:param num_bytes: number of bytes to read
:return: received data
"""
pickled_data = pickle.dumps(data, -1)
length = str(len(pickled_data)).zfill(num_bytes)
socket.sendall(length.encode())
socket.sendall(pickled_data) | python | def send(socket, data, num_bytes=20):
pickled_data = pickle.dumps(data, -1)
length = str(len(pickled_data)).zfill(num_bytes)
socket.sendall(length.encode())
socket.sendall(pickled_data) | [
"def",
"send",
"(",
"socket",
",",
"data",
",",
"num_bytes",
"=",
"20",
")",
":",
"pickled_data",
"=",
"pickle",
".",
"dumps",
"(",
"data",
",",
"-",
"1",
")",
"length",
"=",
"str",
"(",
"len",
"(",
"pickled_data",
")",
")",
".",
"zfill",
"(",
"n... | Send data to specified socket.
:param socket: open socket instance
:param data: data to send
:param num_bytes: number of bytes to read
:return: received data | [
"Send",
"data",
"to",
"specified",
"socket",
"."
] | 84605acdc9564673c487637dcb27f5def128bcc7 | https://github.com/maxpumperla/elephas/blob/84605acdc9564673c487637dcb27f5def128bcc7/elephas/utils/sockets.py#L58-L71 |
233,904 | maxpumperla/elephas | elephas/utils/rdd_utils.py | to_java_rdd | def to_java_rdd(jsc, features, labels, batch_size):
"""Convert numpy features and labels into a JavaRDD of
DL4J DataSet type.
:param jsc: JavaSparkContext from pyjnius
:param features: numpy array with features
:param labels: numpy array with labels:
:return: JavaRDD<DataSet>
"""
data_sets = java_classes.ArrayList()
num_batches = int(len(features) / batch_size)
for i in range(num_batches):
xi = ndarray(features[:batch_size].copy())
yi = ndarray(labels[:batch_size].copy())
data_set = java_classes.DataSet(xi.array, yi.array)
data_sets.add(data_set)
features = features[batch_size:]
labels = labels[batch_size:]
return jsc.parallelize(data_sets) | python | def to_java_rdd(jsc, features, labels, batch_size):
data_sets = java_classes.ArrayList()
num_batches = int(len(features) / batch_size)
for i in range(num_batches):
xi = ndarray(features[:batch_size].copy())
yi = ndarray(labels[:batch_size].copy())
data_set = java_classes.DataSet(xi.array, yi.array)
data_sets.add(data_set)
features = features[batch_size:]
labels = labels[batch_size:]
return jsc.parallelize(data_sets) | [
"def",
"to_java_rdd",
"(",
"jsc",
",",
"features",
",",
"labels",
",",
"batch_size",
")",
":",
"data_sets",
"=",
"java_classes",
".",
"ArrayList",
"(",
")",
"num_batches",
"=",
"int",
"(",
"len",
"(",
"features",
")",
"/",
"batch_size",
")",
"for",
"i",
... | Convert numpy features and labels into a JavaRDD of
DL4J DataSet type.
:param jsc: JavaSparkContext from pyjnius
:param features: numpy array with features
:param labels: numpy array with labels:
:return: JavaRDD<DataSet> | [
"Convert",
"numpy",
"features",
"and",
"labels",
"into",
"a",
"JavaRDD",
"of",
"DL4J",
"DataSet",
"type",
"."
] | 84605acdc9564673c487637dcb27f5def128bcc7 | https://github.com/maxpumperla/elephas/blob/84605acdc9564673c487637dcb27f5def128bcc7/elephas/utils/rdd_utils.py#L16-L35 |
233,905 | maxpumperla/elephas | elephas/utils/rdd_utils.py | to_simple_rdd | def to_simple_rdd(sc, features, labels):
"""Convert numpy arrays of features and labels into
an RDD of pairs.
:param sc: Spark context
:param features: numpy array with features
:param labels: numpy array with labels
:return: Spark RDD with feature-label pairs
"""
pairs = [(x, y) for x, y in zip(features, labels)]
return sc.parallelize(pairs) | python | def to_simple_rdd(sc, features, labels):
pairs = [(x, y) for x, y in zip(features, labels)]
return sc.parallelize(pairs) | [
"def",
"to_simple_rdd",
"(",
"sc",
",",
"features",
",",
"labels",
")",
":",
"pairs",
"=",
"[",
"(",
"x",
",",
"y",
")",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"features",
",",
"labels",
")",
"]",
"return",
"sc",
".",
"parallelize",
"(",
"pairs"... | Convert numpy arrays of features and labels into
an RDD of pairs.
:param sc: Spark context
:param features: numpy array with features
:param labels: numpy array with labels
:return: Spark RDD with feature-label pairs | [
"Convert",
"numpy",
"arrays",
"of",
"features",
"and",
"labels",
"into",
"an",
"RDD",
"of",
"pairs",
"."
] | 84605acdc9564673c487637dcb27f5def128bcc7 | https://github.com/maxpumperla/elephas/blob/84605acdc9564673c487637dcb27f5def128bcc7/elephas/utils/rdd_utils.py#L38-L48 |
233,906 | maxpumperla/elephas | elephas/utils/rdd_utils.py | to_labeled_point | def to_labeled_point(sc, features, labels, categorical=False):
"""Convert numpy arrays of features and labels into
a LabeledPoint RDD for MLlib and ML integration.
:param sc: Spark context
:param features: numpy array with features
:param labels: numpy array with labels
:param categorical: boolean, whether labels are already one-hot encoded or not
:return: LabeledPoint RDD with features and labels
"""
labeled_points = []
for x, y in zip(features, labels):
if categorical:
lp = LabeledPoint(np.argmax(y), to_vector(x))
else:
lp = LabeledPoint(y, to_vector(x))
labeled_points.append(lp)
return sc.parallelize(labeled_points) | python | def to_labeled_point(sc, features, labels, categorical=False):
labeled_points = []
for x, y in zip(features, labels):
if categorical:
lp = LabeledPoint(np.argmax(y), to_vector(x))
else:
lp = LabeledPoint(y, to_vector(x))
labeled_points.append(lp)
return sc.parallelize(labeled_points) | [
"def",
"to_labeled_point",
"(",
"sc",
",",
"features",
",",
"labels",
",",
"categorical",
"=",
"False",
")",
":",
"labeled_points",
"=",
"[",
"]",
"for",
"x",
",",
"y",
"in",
"zip",
"(",
"features",
",",
"labels",
")",
":",
"if",
"categorical",
":",
... | Convert numpy arrays of features and labels into
a LabeledPoint RDD for MLlib and ML integration.
:param sc: Spark context
:param features: numpy array with features
:param labels: numpy array with labels
:param categorical: boolean, whether labels are already one-hot encoded or not
:return: LabeledPoint RDD with features and labels | [
"Convert",
"numpy",
"arrays",
"of",
"features",
"and",
"labels",
"into",
"a",
"LabeledPoint",
"RDD",
"for",
"MLlib",
"and",
"ML",
"integration",
"."
] | 84605acdc9564673c487637dcb27f5def128bcc7 | https://github.com/maxpumperla/elephas/blob/84605acdc9564673c487637dcb27f5def128bcc7/elephas/utils/rdd_utils.py#L51-L68 |
233,907 | maxpumperla/elephas | elephas/utils/rdd_utils.py | from_labeled_point | def from_labeled_point(rdd, categorical=False, nb_classes=None):
"""Convert a LabeledPoint RDD back to a pair of numpy arrays
:param rdd: LabeledPoint RDD
:param categorical: boolean, if labels should be one-hot encode when returned
:param nb_classes: optional int, indicating the number of class labels
:return: pair of numpy arrays, features and labels
"""
features = np.asarray(
rdd.map(lambda lp: from_vector(lp.features)).collect())
labels = np.asarray(rdd.map(lambda lp: lp.label).collect(), dtype='int32')
if categorical:
if not nb_classes:
nb_classes = np.max(labels) + 1
temp = np.zeros((len(labels), nb_classes))
for i, label in enumerate(labels):
temp[i, label] = 1.
labels = temp
return features, labels | python | def from_labeled_point(rdd, categorical=False, nb_classes=None):
features = np.asarray(
rdd.map(lambda lp: from_vector(lp.features)).collect())
labels = np.asarray(rdd.map(lambda lp: lp.label).collect(), dtype='int32')
if categorical:
if not nb_classes:
nb_classes = np.max(labels) + 1
temp = np.zeros((len(labels), nb_classes))
for i, label in enumerate(labels):
temp[i, label] = 1.
labels = temp
return features, labels | [
"def",
"from_labeled_point",
"(",
"rdd",
",",
"categorical",
"=",
"False",
",",
"nb_classes",
"=",
"None",
")",
":",
"features",
"=",
"np",
".",
"asarray",
"(",
"rdd",
".",
"map",
"(",
"lambda",
"lp",
":",
"from_vector",
"(",
"lp",
".",
"features",
")"... | Convert a LabeledPoint RDD back to a pair of numpy arrays
:param rdd: LabeledPoint RDD
:param categorical: boolean, if labels should be one-hot encode when returned
:param nb_classes: optional int, indicating the number of class labels
:return: pair of numpy arrays, features and labels | [
"Convert",
"a",
"LabeledPoint",
"RDD",
"back",
"to",
"a",
"pair",
"of",
"numpy",
"arrays"
] | 84605acdc9564673c487637dcb27f5def128bcc7 | https://github.com/maxpumperla/elephas/blob/84605acdc9564673c487637dcb27f5def128bcc7/elephas/utils/rdd_utils.py#L71-L89 |
233,908 | maxpumperla/elephas | elephas/utils/rdd_utils.py | encode_label | def encode_label(label, nb_classes):
"""One-hot encoding of a single label
:param label: class label (int or double without floating point digits)
:param nb_classes: int, number of total classes
:return: one-hot encoded vector
"""
encoded = np.zeros(nb_classes)
encoded[int(label)] = 1.
return encoded | python | def encode_label(label, nb_classes):
encoded = np.zeros(nb_classes)
encoded[int(label)] = 1.
return encoded | [
"def",
"encode_label",
"(",
"label",
",",
"nb_classes",
")",
":",
"encoded",
"=",
"np",
".",
"zeros",
"(",
"nb_classes",
")",
"encoded",
"[",
"int",
"(",
"label",
")",
"]",
"=",
"1.",
"return",
"encoded"
] | One-hot encoding of a single label
:param label: class label (int or double without floating point digits)
:param nb_classes: int, number of total classes
:return: one-hot encoded vector | [
"One",
"-",
"hot",
"encoding",
"of",
"a",
"single",
"label"
] | 84605acdc9564673c487637dcb27f5def128bcc7 | https://github.com/maxpumperla/elephas/blob/84605acdc9564673c487637dcb27f5def128bcc7/elephas/utils/rdd_utils.py#L92-L101 |
233,909 | maxpumperla/elephas | elephas/utils/rdd_utils.py | lp_to_simple_rdd | def lp_to_simple_rdd(lp_rdd, categorical=False, nb_classes=None):
"""Convert a LabeledPoint RDD into an RDD of feature-label pairs
:param lp_rdd: LabeledPoint RDD of features and labels
:param categorical: boolean, if labels should be one-hot encode when returned
:param nb_classes: int, number of total classes
:return: Spark RDD with feature-label pairs
"""
if categorical:
if not nb_classes:
labels = np.asarray(lp_rdd.map(
lambda lp: lp.label).collect(), dtype='int32')
nb_classes = np.max(labels) + 1
rdd = lp_rdd.map(lambda lp: (from_vector(lp.features),
encode_label(lp.label, nb_classes)))
else:
rdd = lp_rdd.map(lambda lp: (from_vector(lp.features), lp.label))
return rdd | python | def lp_to_simple_rdd(lp_rdd, categorical=False, nb_classes=None):
if categorical:
if not nb_classes:
labels = np.asarray(lp_rdd.map(
lambda lp: lp.label).collect(), dtype='int32')
nb_classes = np.max(labels) + 1
rdd = lp_rdd.map(lambda lp: (from_vector(lp.features),
encode_label(lp.label, nb_classes)))
else:
rdd = lp_rdd.map(lambda lp: (from_vector(lp.features), lp.label))
return rdd | [
"def",
"lp_to_simple_rdd",
"(",
"lp_rdd",
",",
"categorical",
"=",
"False",
",",
"nb_classes",
"=",
"None",
")",
":",
"if",
"categorical",
":",
"if",
"not",
"nb_classes",
":",
"labels",
"=",
"np",
".",
"asarray",
"(",
"lp_rdd",
".",
"map",
"(",
"lambda",... | Convert a LabeledPoint RDD into an RDD of feature-label pairs
:param lp_rdd: LabeledPoint RDD of features and labels
:param categorical: boolean, if labels should be one-hot encode when returned
:param nb_classes: int, number of total classes
:return: Spark RDD with feature-label pairs | [
"Convert",
"a",
"LabeledPoint",
"RDD",
"into",
"an",
"RDD",
"of",
"feature",
"-",
"label",
"pairs"
] | 84605acdc9564673c487637dcb27f5def128bcc7 | https://github.com/maxpumperla/elephas/blob/84605acdc9564673c487637dcb27f5def128bcc7/elephas/utils/rdd_utils.py#L104-L121 |
233,910 | lebinh/ngxtop | ngxtop/ngxtop.py | follow | def follow(the_file):
"""
Follow a given file and yield new lines when they are available, like `tail -f`.
"""
with open(the_file) as f:
f.seek(0, 2) # seek to eof
while True:
line = f.readline()
if not line:
time.sleep(0.1) # sleep briefly before trying again
continue
yield line | python | def follow(the_file):
with open(the_file) as f:
f.seek(0, 2) # seek to eof
while True:
line = f.readline()
if not line:
time.sleep(0.1) # sleep briefly before trying again
continue
yield line | [
"def",
"follow",
"(",
"the_file",
")",
":",
"with",
"open",
"(",
"the_file",
")",
"as",
"f",
":",
"f",
".",
"seek",
"(",
"0",
",",
"2",
")",
"# seek to eof",
"while",
"True",
":",
"line",
"=",
"f",
".",
"readline",
"(",
")",
"if",
"not",
"line",
... | Follow a given file and yield new lines when they are available, like `tail -f`. | [
"Follow",
"a",
"given",
"file",
"and",
"yield",
"new",
"lines",
"when",
"they",
"are",
"available",
"like",
"tail",
"-",
"f",
"."
] | 170caa1cd899de051ab961a3e46f7ecfbfed7764 | https://github.com/lebinh/ngxtop/blob/170caa1cd899de051ab961a3e46f7ecfbfed7764/ngxtop/ngxtop.py#L116-L127 |
233,911 | lebinh/ngxtop | ngxtop/ngxtop.py | map_field | def map_field(field, func, dict_sequence):
"""
Apply given function to value of given key in every dictionary in sequence and
set the result as new value for that key.
"""
for item in dict_sequence:
try:
item[field] = func(item.get(field, None))
yield item
except ValueError:
pass | python | def map_field(field, func, dict_sequence):
for item in dict_sequence:
try:
item[field] = func(item.get(field, None))
yield item
except ValueError:
pass | [
"def",
"map_field",
"(",
"field",
",",
"func",
",",
"dict_sequence",
")",
":",
"for",
"item",
"in",
"dict_sequence",
":",
"try",
":",
"item",
"[",
"field",
"]",
"=",
"func",
"(",
"item",
".",
"get",
"(",
"field",
",",
"None",
")",
")",
"yield",
"it... | Apply given function to value of given key in every dictionary in sequence and
set the result as new value for that key. | [
"Apply",
"given",
"function",
"to",
"value",
"of",
"given",
"key",
"in",
"every",
"dictionary",
"in",
"sequence",
"and",
"set",
"the",
"result",
"as",
"new",
"value",
"for",
"that",
"key",
"."
] | 170caa1cd899de051ab961a3e46f7ecfbfed7764 | https://github.com/lebinh/ngxtop/blob/170caa1cd899de051ab961a3e46f7ecfbfed7764/ngxtop/ngxtop.py#L130-L140 |
233,912 | lebinh/ngxtop | ngxtop/ngxtop.py | add_field | def add_field(field, func, dict_sequence):
"""
Apply given function to the record and store result in given field of current record.
Do nothing if record already contains given field.
"""
for item in dict_sequence:
if field not in item:
item[field] = func(item)
yield item | python | def add_field(field, func, dict_sequence):
for item in dict_sequence:
if field not in item:
item[field] = func(item)
yield item | [
"def",
"add_field",
"(",
"field",
",",
"func",
",",
"dict_sequence",
")",
":",
"for",
"item",
"in",
"dict_sequence",
":",
"if",
"field",
"not",
"in",
"item",
":",
"item",
"[",
"field",
"]",
"=",
"func",
"(",
"item",
")",
"yield",
"item"
] | Apply given function to the record and store result in given field of current record.
Do nothing if record already contains given field. | [
"Apply",
"given",
"function",
"to",
"the",
"record",
"and",
"store",
"result",
"in",
"given",
"field",
"of",
"current",
"record",
".",
"Do",
"nothing",
"if",
"record",
"already",
"contains",
"given",
"field",
"."
] | 170caa1cd899de051ab961a3e46f7ecfbfed7764 | https://github.com/lebinh/ngxtop/blob/170caa1cd899de051ab961a3e46f7ecfbfed7764/ngxtop/ngxtop.py#L143-L151 |
233,913 | dedupeio/dedupe | dedupe/canonical.py | getCanonicalRep | def getCanonicalRep(record_cluster):
"""
Given a list of records within a duplicate cluster, constructs a
canonical representation of the cluster by finding canonical
values for each field
"""
canonical_rep = {}
keys = record_cluster[0].keys()
for key in keys:
key_values = []
for record in record_cluster:
# assume non-empty values always better than empty value
# for canonical record
if record[key]:
key_values.append(record[key])
if key_values:
canonical_rep[key] = getCentroid(key_values, comparator)
else:
canonical_rep[key] = ''
return canonical_rep | python | def getCanonicalRep(record_cluster):
canonical_rep = {}
keys = record_cluster[0].keys()
for key in keys:
key_values = []
for record in record_cluster:
# assume non-empty values always better than empty value
# for canonical record
if record[key]:
key_values.append(record[key])
if key_values:
canonical_rep[key] = getCentroid(key_values, comparator)
else:
canonical_rep[key] = ''
return canonical_rep | [
"def",
"getCanonicalRep",
"(",
"record_cluster",
")",
":",
"canonical_rep",
"=",
"{",
"}",
"keys",
"=",
"record_cluster",
"[",
"0",
"]",
".",
"keys",
"(",
")",
"for",
"key",
"in",
"keys",
":",
"key_values",
"=",
"[",
"]",
"for",
"record",
"in",
"record... | Given a list of records within a duplicate cluster, constructs a
canonical representation of the cluster by finding canonical
values for each field | [
"Given",
"a",
"list",
"of",
"records",
"within",
"a",
"duplicate",
"cluster",
"constructs",
"a",
"canonical",
"representation",
"of",
"the",
"cluster",
"by",
"finding",
"canonical",
"values",
"for",
"each",
"field"
] | 9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b | https://github.com/dedupeio/dedupe/blob/9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b/dedupe/canonical.py#L48-L71 |
233,914 | dedupeio/dedupe | dedupe/predicates.py | nearIntegersPredicate | def nearIntegersPredicate(field):
"""return any integers N, N+1, and N-1"""
ints = integers(field)
near_ints = set()
for char in ints:
num = int(char)
near_ints.add(str(num - 1))
near_ints.add(str(num))
near_ints.add(str(num + 1))
return near_ints | python | def nearIntegersPredicate(field):
ints = integers(field)
near_ints = set()
for char in ints:
num = int(char)
near_ints.add(str(num - 1))
near_ints.add(str(num))
near_ints.add(str(num + 1))
return near_ints | [
"def",
"nearIntegersPredicate",
"(",
"field",
")",
":",
"ints",
"=",
"integers",
"(",
"field",
")",
"near_ints",
"=",
"set",
"(",
")",
"for",
"char",
"in",
"ints",
":",
"num",
"=",
"int",
"(",
"char",
")",
"near_ints",
".",
"add",
"(",
"str",
"(",
... | return any integers N, N+1, and N-1 | [
"return",
"any",
"integers",
"N",
"N",
"+",
"1",
"and",
"N",
"-",
"1"
] | 9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b | https://github.com/dedupeio/dedupe/blob/9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b/dedupe/predicates.py#L320-L330 |
233,915 | dedupeio/dedupe | dedupe/core.py | randomPairsMatch | def randomPairsMatch(n_records_A, n_records_B, sample_size):
"""
Return random combinations of indices for record list A and B
"""
n = int(n_records_A * n_records_B)
if sample_size >= n:
random_pairs = numpy.arange(n)
else:
random_pairs = numpy.array(random.sample(range(n), sample_size),
dtype=int)
i, j = numpy.unravel_index(random_pairs, (n_records_A, n_records_B))
return zip(i, j) | python | def randomPairsMatch(n_records_A, n_records_B, sample_size):
n = int(n_records_A * n_records_B)
if sample_size >= n:
random_pairs = numpy.arange(n)
else:
random_pairs = numpy.array(random.sample(range(n), sample_size),
dtype=int)
i, j = numpy.unravel_index(random_pairs, (n_records_A, n_records_B))
return zip(i, j) | [
"def",
"randomPairsMatch",
"(",
"n_records_A",
",",
"n_records_B",
",",
"sample_size",
")",
":",
"n",
"=",
"int",
"(",
"n_records_A",
"*",
"n_records_B",
")",
"if",
"sample_size",
">=",
"n",
":",
"random_pairs",
"=",
"numpy",
".",
"arange",
"(",
"n",
")",
... | Return random combinations of indices for record list A and B | [
"Return",
"random",
"combinations",
"of",
"indices",
"for",
"record",
"list",
"A",
"and",
"B"
] | 9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b | https://github.com/dedupeio/dedupe/blob/9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b/dedupe/core.py#L65-L79 |
233,916 | dedupeio/dedupe | dedupe/api.py | Matching.thresholdBlocks | def thresholdBlocks(self, blocks, recall_weight=1.5): # pragma: nocover
"""
Returns the threshold that maximizes the expected F score, a
weighted average of precision and recall for a sample of
blocked data.
Arguments:
blocks -- Sequence of tuples of records, where each tuple is a
set of records covered by a blocking predicate
recall_weight -- Sets the tradeoff between precision and
recall. I.e. if you care twice as much about
recall as you do precision, set recall_weight
to 2.
"""
candidate_records = itertools.chain.from_iterable(self._blockedPairs(blocks))
probability = core.scoreDuplicates(candidate_records,
self.data_model,
self.classifier,
self.num_cores)['score']
probability = probability.copy()
probability.sort()
probability = probability[::-1]
expected_dupes = numpy.cumsum(probability)
recall = expected_dupes / expected_dupes[-1]
precision = expected_dupes / numpy.arange(1, len(expected_dupes) + 1)
score = recall * precision / (recall + recall_weight ** 2 * precision)
i = numpy.argmax(score)
logger.info('Maximum expected recall and precision')
logger.info('recall: %2.3f', recall[i])
logger.info('precision: %2.3f', precision[i])
logger.info('With threshold: %2.3f', probability[i])
return probability[i] | python | def thresholdBlocks(self, blocks, recall_weight=1.5): # pragma: nocover
candidate_records = itertools.chain.from_iterable(self._blockedPairs(blocks))
probability = core.scoreDuplicates(candidate_records,
self.data_model,
self.classifier,
self.num_cores)['score']
probability = probability.copy()
probability.sort()
probability = probability[::-1]
expected_dupes = numpy.cumsum(probability)
recall = expected_dupes / expected_dupes[-1]
precision = expected_dupes / numpy.arange(1, len(expected_dupes) + 1)
score = recall * precision / (recall + recall_weight ** 2 * precision)
i = numpy.argmax(score)
logger.info('Maximum expected recall and precision')
logger.info('recall: %2.3f', recall[i])
logger.info('precision: %2.3f', precision[i])
logger.info('With threshold: %2.3f', probability[i])
return probability[i] | [
"def",
"thresholdBlocks",
"(",
"self",
",",
"blocks",
",",
"recall_weight",
"=",
"1.5",
")",
":",
"# pragma: nocover",
"candidate_records",
"=",
"itertools",
".",
"chain",
".",
"from_iterable",
"(",
"self",
".",
"_blockedPairs",
"(",
"blocks",
")",
")",
"proba... | Returns the threshold that maximizes the expected F score, a
weighted average of precision and recall for a sample of
blocked data.
Arguments:
blocks -- Sequence of tuples of records, where each tuple is a
set of records covered by a blocking predicate
recall_weight -- Sets the tradeoff between precision and
recall. I.e. if you care twice as much about
recall as you do precision, set recall_weight
to 2. | [
"Returns",
"the",
"threshold",
"that",
"maximizes",
"the",
"expected",
"F",
"score",
"a",
"weighted",
"average",
"of",
"precision",
"and",
"recall",
"for",
"a",
"sample",
"of",
"blocked",
"data",
"."
] | 9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b | https://github.com/dedupeio/dedupe/blob/9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b/dedupe/api.py#L52-L94 |
233,917 | dedupeio/dedupe | dedupe/api.py | DedupeMatching.match | def match(self, data, threshold=0.5, generator=False): # pragma: no cover
"""Identifies records that all refer to the same entity, returns
tuples
containing a set of record ids and a confidence score as a
float between 0 and 1. The record_ids within each set should
refer to the same entity and the confidence score is a measure
of our confidence that all the records in a cluster refer to
the same entity.
This method should only used for small to moderately sized
datasets for larger data, use matchBlocks
Arguments:
data -- Dictionary of records, where the keys are record_ids
and the values are dictionaries with the keys being
field names
threshold -- Number between 0 and 1 (default is .5). We will
consider records as potential duplicates if the
predicted probability of being a duplicate is
above the threshold.
Lowering the number will increase recall,
raising it will increase precision
"""
blocked_pairs = self._blockData(data)
clusters = self.matchBlocks(blocked_pairs, threshold)
if generator:
return clusters
else:
return list(clusters) | python | def match(self, data, threshold=0.5, generator=False): # pragma: no cover
blocked_pairs = self._blockData(data)
clusters = self.matchBlocks(blocked_pairs, threshold)
if generator:
return clusters
else:
return list(clusters) | [
"def",
"match",
"(",
"self",
",",
"data",
",",
"threshold",
"=",
"0.5",
",",
"generator",
"=",
"False",
")",
":",
"# pragma: no cover",
"blocked_pairs",
"=",
"self",
".",
"_blockData",
"(",
"data",
")",
"clusters",
"=",
"self",
".",
"matchBlocks",
"(",
"... | Identifies records that all refer to the same entity, returns
tuples
containing a set of record ids and a confidence score as a
float between 0 and 1. The record_ids within each set should
refer to the same entity and the confidence score is a measure
of our confidence that all the records in a cluster refer to
the same entity.
    This method should only be used for small to moderately sized
    datasets; for larger data, use matchBlocks
Arguments:
data -- Dictionary of records, where the keys are record_ids
and the values are dictionaries with the keys being
field names
threshold -- Number between 0 and 1 (default is .5). We will
consider records as potential duplicates if the
predicted probability of being a duplicate is
above the threshold.
Lowering the number will increase recall,
raising it will increase precision | [
"Identifies",
"records",
"that",
"all",
"refer",
"to",
"the",
"same",
"entity",
"returns",
"tuples"
] | 9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b | https://github.com/dedupeio/dedupe/blob/9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b/dedupe/api.py#L190-L223 |
233,918 | dedupeio/dedupe | dedupe/api.py | ActiveMatching.readTraining | def readTraining(self, training_file):
'''
Read training from previously built training data file object
Arguments:
training_file -- file object containing the training data
'''
logger.info('reading training from file')
training_pairs = json.load(training_file,
cls=serializer.dedupe_decoder)
self.markPairs(training_pairs) | python | def readTraining(self, training_file):
'''
Read training from previously built training data file object
Arguments:
training_file -- file object containing the training data
'''
logger.info('reading training from file')
training_pairs = json.load(training_file,
cls=serializer.dedupe_decoder)
self.markPairs(training_pairs) | [
"def",
"readTraining",
"(",
"self",
",",
"training_file",
")",
":",
"logger",
".",
"info",
"(",
"'reading training from file'",
")",
"training_pairs",
"=",
"json",
".",
"load",
"(",
"training_file",
",",
"cls",
"=",
"serializer",
".",
"dedupe_decoder",
")",
"s... | Read training from previously built training data file object
Arguments:
training_file -- file object containing the training data | [
"Read",
"training",
"from",
"previously",
"built",
"training",
"data",
"file",
"object"
] | 9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b | https://github.com/dedupeio/dedupe/blob/9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b/dedupe/api.py#L635-L647 |
233,919 | dedupeio/dedupe | dedupe/api.py | ActiveMatching.writeTraining | def writeTraining(self, file_obj): # pragma: no cover
"""
Write to a json file that contains labeled examples
Keyword arguments:
file_obj -- file object to write training data to
"""
json.dump(self.training_pairs,
file_obj,
default=serializer._to_json,
tuple_as_array=False,
ensure_ascii=True) | python | def writeTraining(self, file_obj): # pragma: no cover
json.dump(self.training_pairs,
file_obj,
default=serializer._to_json,
tuple_as_array=False,
ensure_ascii=True) | [
"def",
"writeTraining",
"(",
"self",
",",
"file_obj",
")",
":",
"# pragma: no cover",
"json",
".",
"dump",
"(",
"self",
".",
"training_pairs",
",",
"file_obj",
",",
"default",
"=",
"serializer",
".",
"_to_json",
",",
"tuple_as_array",
"=",
"False",
",",
"ens... | Write to a json file that contains labeled examples
Keyword arguments:
file_obj -- file object to write training data to | [
"Write",
"to",
"a",
"json",
"file",
"that",
"contains",
"labeled",
"examples"
] | 9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b | https://github.com/dedupeio/dedupe/blob/9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b/dedupe/api.py#L680-L692 |
233,920 | dedupeio/dedupe | dedupe/api.py | RecordLink.sample | def sample(self, data_1, data_2, sample_size=15000,
blocked_proportion=.5, original_length_1=None,
original_length_2=None):
'''
Draws a random sample of combinations of records from
the first and second datasets, and initializes active
learning with this sample
Arguments:
data_1 -- Dictionary of records from first dataset, where the
keys are record_ids and the values are dictionaries
with the keys being field names
data_2 -- Dictionary of records from second dataset, same
form as data_1
sample_size -- Size of the sample to draw
'''
self._checkData(data_1, data_2)
self.active_learner = self.ActiveLearner(self.data_model)
self.active_learner.sample_product(data_1, data_2,
blocked_proportion,
sample_size,
original_length_1,
original_length_2) | python | def sample(self, data_1, data_2, sample_size=15000,
blocked_proportion=.5, original_length_1=None,
original_length_2=None):
'''
Draws a random sample of combinations of records from
the first and second datasets, and initializes active
learning with this sample
Arguments:
data_1 -- Dictionary of records from first dataset, where the
keys are record_ids and the values are dictionaries
with the keys being field names
data_2 -- Dictionary of records from second dataset, same
form as data_1
sample_size -- Size of the sample to draw
'''
self._checkData(data_1, data_2)
self.active_learner = self.ActiveLearner(self.data_model)
self.active_learner.sample_product(data_1, data_2,
blocked_proportion,
sample_size,
original_length_1,
original_length_2) | [
"def",
"sample",
"(",
"self",
",",
"data_1",
",",
"data_2",
",",
"sample_size",
"=",
"15000",
",",
"blocked_proportion",
"=",
".5",
",",
"original_length_1",
"=",
"None",
",",
"original_length_2",
"=",
"None",
")",
":",
"self",
".",
"_checkData",
"(",
"dat... | Draws a random sample of combinations of records from
the first and second datasets, and initializes active
learning with this sample
Arguments:
data_1 -- Dictionary of records from first dataset, where the
keys are record_ids and the values are dictionaries
with the keys being field names
data_2 -- Dictionary of records from second dataset, same
form as data_1
sample_size -- Size of the sample to draw | [
"Draws",
"a",
"random",
"sample",
"of",
"combinations",
"of",
"records",
"from",
"the",
"first",
"and",
"second",
"datasets",
"and",
"initializes",
"active",
"learning",
"with",
"this",
"sample"
] | 9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b | https://github.com/dedupeio/dedupe/blob/9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b/dedupe/api.py#L816-L841 |
233,921 | dedupeio/dedupe | dedupe/clustering.py | condensedDistance | def condensedDistance(dupes):
'''
Convert the pairwise list of distances in dupes to "condensed
distance matrix" required by the hierarchical clustering
algorithms. Also return a dictionary that maps the distance matrix
to the record_ids.
The formula for an index of the condensed matrix is
index = {N choose 2}-{N-row choose 2} + (col-row-1)
= N*(N-1)/2 - (N-row)*(N-row-1)/2 + col - row - 1
^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^
matrix_length row_step
where (row,col) is index of an uncondensed square N X N distance matrix.
See http://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.squareform.html
'''
candidate_set = numpy.unique(dupes['pairs'])
i_to_id = dict(enumerate(candidate_set))
ids = candidate_set.searchsorted(dupes['pairs'])
row = ids[:, 0]
col = ids[:, 1]
N = len(candidate_set)
matrix_length = N * (N - 1) / 2
row_step = (N - row) * (N - row - 1) / 2
index = matrix_length - row_step + col - row - 1
condensed_distances = numpy.ones(int(matrix_length), 'f4')
condensed_distances[index.astype(int)] = 1 - dupes['score']
return i_to_id, condensed_distances, N | python | def condensedDistance(dupes):
'''
Convert the pairwise list of distances in dupes to "condensed
distance matrix" required by the hierarchical clustering
algorithms. Also return a dictionary that maps the distance matrix
to the record_ids.
The formula for an index of the condensed matrix is
index = {N choose 2}-{N-row choose 2} + (col-row-1)
= N*(N-1)/2 - (N-row)*(N-row-1)/2 + col - row - 1
^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^
matrix_length row_step
where (row,col) is index of an uncondensed square N X N distance matrix.
See http://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.squareform.html
'''
candidate_set = numpy.unique(dupes['pairs'])
i_to_id = dict(enumerate(candidate_set))
ids = candidate_set.searchsorted(dupes['pairs'])
row = ids[:, 0]
col = ids[:, 1]
N = len(candidate_set)
matrix_length = N * (N - 1) / 2
row_step = (N - row) * (N - row - 1) / 2
index = matrix_length - row_step + col - row - 1
condensed_distances = numpy.ones(int(matrix_length), 'f4')
condensed_distances[index.astype(int)] = 1 - dupes['score']
return i_to_id, condensed_distances, N | [
"def",
"condensedDistance",
"(",
"dupes",
")",
":",
"candidate_set",
"=",
"numpy",
".",
"unique",
"(",
"dupes",
"[",
"'pairs'",
"]",
")",
"i_to_id",
"=",
"dict",
"(",
"enumerate",
"(",
"candidate_set",
")",
")",
"ids",
"=",
"candidate_set",
".",
"searchsor... | Convert the pairwise list of distances in dupes to "condensed
distance matrix" required by the hierarchical clustering
algorithms. Also return a dictionary that maps the distance matrix
to the record_ids.
The formula for an index of the condensed matrix is
index = {N choose 2}-{N-row choose 2} + (col-row-1)
= N*(N-1)/2 - (N-row)*(N-row-1)/2 + col - row - 1
^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^
matrix_length row_step
where (row,col) is index of an uncondensed square N X N distance matrix.
See http://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.squareform.html | [
"Convert",
"the",
"pairwise",
"list",
"of",
"distances",
"in",
"dupes",
"to",
"condensed",
"distance",
"matrix",
"required",
"by",
"the",
"hierarchical",
"clustering",
"algorithms",
".",
"Also",
"return",
"a",
"dictionary",
"that",
"maps",
"the",
"distance",
"ma... | 9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b | https://github.com/dedupeio/dedupe/blob/9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b/dedupe/clustering.py#L95-L131 |
233,922 | dedupeio/dedupe | dedupe/clustering.py | cluster | def cluster(dupes, threshold=.5, max_components=30000):
'''
    Takes in a list of duplicate pairs and clusters them into a
    list of records that all refer to the same entity based on a given
threshold
Keyword arguments:
    threshold -- number between 0 and 1 (default is .5). lowering the
number will increase precision, raising it will increase
recall
'''
distance_threshold = 1 - threshold
dupe_sub_graphs = connected_components(dupes, max_components)
for sub_graph in dupe_sub_graphs:
if len(sub_graph) > 1:
i_to_id, condensed_distances, N = condensedDistance(sub_graph)
linkage = fastcluster.linkage(condensed_distances,
method='centroid',
preserve_input=True)
partition = hcluster.fcluster(linkage,
distance_threshold,
criterion='distance')
clusters = defaultdict(list)
for i, cluster_id in enumerate(partition):
clusters[cluster_id].append(i)
for cluster in viewvalues(clusters):
if len(cluster) > 1:
scores = confidences(cluster, condensed_distances, N)
yield tuple(i_to_id[i] for i in cluster), scores
else:
(ids, score), = sub_graph
if score > threshold:
yield tuple(ids), (score,) * 2 | python | def cluster(dupes, threshold=.5, max_components=30000):
'''
    Takes in a list of duplicate pairs and clusters them into a
    list of records that all refer to the same entity based on a given
threshold
Keyword arguments:
    threshold -- number between 0 and 1 (default is .5). lowering the
number will increase precision, raising it will increase
recall
'''
distance_threshold = 1 - threshold
dupe_sub_graphs = connected_components(dupes, max_components)
for sub_graph in dupe_sub_graphs:
if len(sub_graph) > 1:
i_to_id, condensed_distances, N = condensedDistance(sub_graph)
linkage = fastcluster.linkage(condensed_distances,
method='centroid',
preserve_input=True)
partition = hcluster.fcluster(linkage,
distance_threshold,
criterion='distance')
clusters = defaultdict(list)
for i, cluster_id in enumerate(partition):
clusters[cluster_id].append(i)
for cluster in viewvalues(clusters):
if len(cluster) > 1:
scores = confidences(cluster, condensed_distances, N)
yield tuple(i_to_id[i] for i in cluster), scores
else:
(ids, score), = sub_graph
if score > threshold:
yield tuple(ids), (score,) * 2 | [
"def",
"cluster",
"(",
"dupes",
",",
"threshold",
"=",
".5",
",",
"max_components",
"=",
"30000",
")",
":",
"distance_threshold",
"=",
"1",
"-",
"threshold",
"dupe_sub_graphs",
"=",
"connected_components",
"(",
"dupes",
",",
"max_components",
")",
"for",
"sub_... | Takes in a list of duplicate pairs and clusters them in to a
list records that all refer to the same entity based on a given
threshold
Keyword arguments:
threshold -- number betweent 0 and 1 (default is .5). lowering the
number will increase precision, raising it will increase
recall | [
"Takes",
"in",
"a",
"list",
"of",
"duplicate",
"pairs",
"and",
"clusters",
"them",
"in",
"to",
"a",
"list",
"records",
"that",
"all",
"refer",
"to",
"the",
"same",
"entity",
"based",
"on",
"a",
"given",
"threshold"
] | 9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b | https://github.com/dedupeio/dedupe/blob/9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b/dedupe/clustering.py#L134-L174 |
233,923 | dedupeio/dedupe | dedupe/clustering.py | confidences | def confidences(cluster, condensed_distances, d):
'''
We calculate a per record score that is similar to a standard
deviation. The main reason is that these record scores can be
used to calculate the standard deviation of an entire cluster,
which is a reasonable metric for clusters.
'''
scores = dict.fromkeys(cluster, 0.0)
squared_distances = condensed_distances ** 2
for i, j in itertools.combinations(cluster, 2):
index = d * (d - 1) / 2 - (d - i) * (d - i - 1) / 2 + j - i - 1
squared_dist = squared_distances[int(index)]
scores[i] += squared_dist
scores[j] += squared_dist
scores = numpy.array([score for _, score in sorted(scores.items())])
scores /= len(cluster) - 1
scores = numpy.sqrt(scores)
scores = 1 - scores
return scores | python | def confidences(cluster, condensed_distances, d):
'''
We calculate a per record score that is similar to a standard
deviation. The main reason is that these record scores can be
used to calculate the standard deviation of an entire cluster,
which is a reasonable metric for clusters.
'''
scores = dict.fromkeys(cluster, 0.0)
squared_distances = condensed_distances ** 2
for i, j in itertools.combinations(cluster, 2):
index = d * (d - 1) / 2 - (d - i) * (d - i - 1) / 2 + j - i - 1
squared_dist = squared_distances[int(index)]
scores[i] += squared_dist
scores[j] += squared_dist
scores = numpy.array([score for _, score in sorted(scores.items())])
scores /= len(cluster) - 1
scores = numpy.sqrt(scores)
scores = 1 - scores
return scores | [
"def",
"confidences",
"(",
"cluster",
",",
"condensed_distances",
",",
"d",
")",
":",
"scores",
"=",
"dict",
".",
"fromkeys",
"(",
"cluster",
",",
"0.0",
")",
"squared_distances",
"=",
"condensed_distances",
"**",
"2",
"for",
"i",
",",
"j",
"in",
"itertool... | We calculate a per record score that is similar to a standard
deviation. The main reason is that these record scores can be
used to calculate the standard deviation of an entire cluster,
which is a reasonable metric for clusters. | [
"We",
"calculate",
"a",
"per",
"record",
"score",
"that",
"is",
"similar",
"to",
"a",
"standard",
"deviation",
".",
"The",
"main",
"reason",
"is",
"that",
"these",
"record",
"scores",
"can",
"be",
"used",
"to",
"calculate",
"the",
"standard",
"deviation",
... | 9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b | https://github.com/dedupeio/dedupe/blob/9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b/dedupe/clustering.py#L177-L196 |
233,924 | dedupeio/dedupe | dedupe/convenience.py | consoleLabel | def consoleLabel(deduper): # pragma: no cover
'''
Command line interface for presenting and labeling training pairs
by the user
Argument :
A deduper object
'''
finished = False
use_previous = False
fields = unique(field.field
for field
in deduper.data_model.primary_fields)
buffer_len = 1 # Max number of previous operations
examples_buffer = []
uncertain_pairs = []
while not finished:
if use_previous:
record_pair, _ = examples_buffer.pop(0)
use_previous = False
else:
if not uncertain_pairs:
uncertain_pairs = deduper.uncertainPairs()
try:
record_pair = uncertain_pairs.pop()
except IndexError:
break
n_match = (len(deduper.training_pairs['match']) +
sum(label == 'match' for _, label in examples_buffer))
n_distinct = (len(deduper.training_pairs['distinct']) +
sum(label == 'distinct' for _, label in examples_buffer))
for pair in record_pair:
for field in fields:
line = "%s : %s" % (field, pair[field])
print(line, file=sys.stderr)
print(file=sys.stderr)
print("{0}/10 positive, {1}/10 negative".format(n_match, n_distinct),
file=sys.stderr)
print('Do these records refer to the same thing?', file=sys.stderr)
valid_response = False
user_input = ''
while not valid_response:
if examples_buffer:
prompt = '(y)es / (n)o / (u)nsure / (f)inished / (p)revious'
valid_responses = {'y', 'n', 'u', 'f', 'p'}
else:
prompt = '(y)es / (n)o / (u)nsure / (f)inished'
valid_responses = {'y', 'n', 'u', 'f'}
print(prompt, file=sys.stderr)
user_input = input()
if user_input in valid_responses:
valid_response = True
if user_input == 'y':
examples_buffer.insert(0, (record_pair, 'match'))
elif user_input == 'n':
examples_buffer.insert(0, (record_pair, 'distinct'))
elif user_input == 'u':
examples_buffer.insert(0, (record_pair, 'uncertain'))
elif user_input == 'f':
print('Finished labeling', file=sys.stderr)
finished = True
elif user_input == 'p':
use_previous = True
uncertain_pairs.append(record_pair)
if len(examples_buffer) > buffer_len:
record_pair, label = examples_buffer.pop()
if label in ['distinct', 'match']:
examples = {'distinct': [], 'match': []}
examples[label].append(record_pair)
deduper.markPairs(examples)
for record_pair, label in examples_buffer:
if label in ['distinct', 'match']:
examples = {'distinct': [], 'match': []}
examples[label].append(record_pair)
deduper.markPairs(examples) | python | def consoleLabel(deduper): # pragma: no cover
'''
Command line interface for presenting and labeling training pairs
by the user
Argument :
A deduper object
'''
finished = False
use_previous = False
fields = unique(field.field
for field
in deduper.data_model.primary_fields)
buffer_len = 1 # Max number of previous operations
examples_buffer = []
uncertain_pairs = []
while not finished:
if use_previous:
record_pair, _ = examples_buffer.pop(0)
use_previous = False
else:
if not uncertain_pairs:
uncertain_pairs = deduper.uncertainPairs()
try:
record_pair = uncertain_pairs.pop()
except IndexError:
break
n_match = (len(deduper.training_pairs['match']) +
sum(label == 'match' for _, label in examples_buffer))
n_distinct = (len(deduper.training_pairs['distinct']) +
sum(label == 'distinct' for _, label in examples_buffer))
for pair in record_pair:
for field in fields:
line = "%s : %s" % (field, pair[field])
print(line, file=sys.stderr)
print(file=sys.stderr)
print("{0}/10 positive, {1}/10 negative".format(n_match, n_distinct),
file=sys.stderr)
print('Do these records refer to the same thing?', file=sys.stderr)
valid_response = False
user_input = ''
while not valid_response:
if examples_buffer:
prompt = '(y)es / (n)o / (u)nsure / (f)inished / (p)revious'
valid_responses = {'y', 'n', 'u', 'f', 'p'}
else:
prompt = '(y)es / (n)o / (u)nsure / (f)inished'
valid_responses = {'y', 'n', 'u', 'f'}
print(prompt, file=sys.stderr)
user_input = input()
if user_input in valid_responses:
valid_response = True
if user_input == 'y':
examples_buffer.insert(0, (record_pair, 'match'))
elif user_input == 'n':
examples_buffer.insert(0, (record_pair, 'distinct'))
elif user_input == 'u':
examples_buffer.insert(0, (record_pair, 'uncertain'))
elif user_input == 'f':
print('Finished labeling', file=sys.stderr)
finished = True
elif user_input == 'p':
use_previous = True
uncertain_pairs.append(record_pair)
if len(examples_buffer) > buffer_len:
record_pair, label = examples_buffer.pop()
if label in ['distinct', 'match']:
examples = {'distinct': [], 'match': []}
examples[label].append(record_pair)
deduper.markPairs(examples)
for record_pair, label in examples_buffer:
if label in ['distinct', 'match']:
examples = {'distinct': [], 'match': []}
examples[label].append(record_pair)
deduper.markPairs(examples) | [
"def",
"consoleLabel",
"(",
"deduper",
")",
":",
"# pragma: no cover",
"finished",
"=",
"False",
"use_previous",
"=",
"False",
"fields",
"=",
"unique",
"(",
"field",
".",
"field",
"for",
"field",
"in",
"deduper",
".",
"data_model",
".",
"primary_fields",
")",
... | Command line interface for presenting and labeling training pairs
by the user
Argument :
A deduper object | [
"Command",
"line",
"interface",
"for",
"presenting",
"and",
"labeling",
"training",
"pairs",
"by",
"the",
"user"
] | 9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b | https://github.com/dedupeio/dedupe/blob/9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b/dedupe/convenience.py#L19-L105 |
233,925 | dedupeio/dedupe | dedupe/convenience.py | trainingDataLink | def trainingDataLink(data_1, data_2, common_key, training_size=50000): # pragma: nocover
'''
Construct training data for consumption by the ActiveLearning
markPairs method from already linked datasets.
Arguments :
data_1 -- Dictionary of records from first dataset, where the keys
are record_ids and the values are dictionaries with the
keys being field names
data_2 -- Dictionary of records from second dataset, same form as
data_1
common_key -- The name of the record field that uniquely identifies
a match
training_size -- the rough limit of the number of training examples,
defaults to 50000
Warning:
Every match must be identified by the sharing of a common key.
This function assumes that if two records do not share a common key
then they are distinct records.
'''
identified_records = collections.defaultdict(lambda: [[], []])
matched_pairs = set()
distinct_pairs = set()
for record_id, record in data_1.items():
identified_records[record[common_key]][0].append(record_id)
for record_id, record in data_2.items():
identified_records[record[common_key]][1].append(record_id)
for keys_1, keys_2 in identified_records.values():
if keys_1 and keys_2:
matched_pairs.update(itertools.product(keys_1, keys_2))
keys_1 = list(data_1.keys())
keys_2 = list(data_2.keys())
random_pairs = [(keys_1[i], keys_2[j])
for i, j
in randomPairsMatch(len(data_1), len(data_2),
training_size)]
distinct_pairs = (
pair for pair in random_pairs if pair not in matched_pairs)
matched_records = [(data_1[key_1], data_2[key_2])
for key_1, key_2 in matched_pairs]
distinct_records = [(data_1[key_1], data_2[key_2])
for key_1, key_2 in distinct_pairs]
training_pairs = {'match': matched_records,
'distinct': distinct_records}
return training_pairs | python | def trainingDataLink(data_1, data_2, common_key, training_size=50000): # pragma: nocover
'''
Construct training data for consumption by the ActiveLearning
markPairs method from already linked datasets.
Arguments :
data_1 -- Dictionary of records from first dataset, where the keys
are record_ids and the values are dictionaries with the
keys being field names
data_2 -- Dictionary of records from second dataset, same form as
data_1
common_key -- The name of the record field that uniquely identifies
a match
training_size -- the rough limit of the number of training examples,
defaults to 50000
Warning:
Every match must be identified by the sharing of a common key.
This function assumes that if two records do not share a common key
then they are distinct records.
'''
identified_records = collections.defaultdict(lambda: [[], []])
matched_pairs = set()
distinct_pairs = set()
for record_id, record in data_1.items():
identified_records[record[common_key]][0].append(record_id)
for record_id, record in data_2.items():
identified_records[record[common_key]][1].append(record_id)
for keys_1, keys_2 in identified_records.values():
if keys_1 and keys_2:
matched_pairs.update(itertools.product(keys_1, keys_2))
keys_1 = list(data_1.keys())
keys_2 = list(data_2.keys())
random_pairs = [(keys_1[i], keys_2[j])
for i, j
in randomPairsMatch(len(data_1), len(data_2),
training_size)]
distinct_pairs = (
pair for pair in random_pairs if pair not in matched_pairs)
matched_records = [(data_1[key_1], data_2[key_2])
for key_1, key_2 in matched_pairs]
distinct_records = [(data_1[key_1], data_2[key_2])
for key_1, key_2 in distinct_pairs]
training_pairs = {'match': matched_records,
'distinct': distinct_records}
return training_pairs | [
"def",
"trainingDataLink",
"(",
"data_1",
",",
"data_2",
",",
"common_key",
",",
"training_size",
"=",
"50000",
")",
":",
"# pragma: nocover",
"identified_records",
"=",
"collections",
".",
"defaultdict",
"(",
"lambda",
":",
"[",
"[",
"]",
",",
"[",
"]",
"]"... | Construct training data for consumption by the ActiveLearning
markPairs method from already linked datasets.
Arguments :
data_1 -- Dictionary of records from first dataset, where the keys
are record_ids and the values are dictionaries with the
keys being field names
data_2 -- Dictionary of records from second dataset, same form as
data_1
common_key -- The name of the record field that uniquely identifies
a match
training_size -- the rough limit of the number of training examples,
defaults to 50000
Warning:
Every match must be identified by the sharing of a common key.
This function assumes that if two records do not share a common key
then they are distinct records. | [
"Construct",
"training",
"data",
"for",
"consumption",
"by",
"the",
"ActiveLearning",
"markPairs",
"method",
"from",
"already",
"linked",
"datasets",
"."
] | 9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b | https://github.com/dedupeio/dedupe/blob/9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b/dedupe/convenience.py#L108-L167 |
233,926 | dedupeio/dedupe | dedupe/convenience.py | trainingDataDedupe | def trainingDataDedupe(data, common_key, training_size=50000): # pragma: nocover
'''
Construct training data for consumption by the ActiveLearning
markPairs method from an already deduplicated dataset.
Arguments :
data -- Dictionary of records, where the keys are record_ids and
the values are dictionaries with the keys being
field names
common_key -- The name of the record field that uniquely identifies
a match
training_size -- the rough limit of the number of training examples,
defaults to 50000
Warning:
Every match must be identified by the sharing of a common key.
This function assumes that if two records do not share a common key
then they are distinct records.
'''
identified_records = collections.defaultdict(list)
matched_pairs = set()
distinct_pairs = set()
unique_record_ids = set()
# a list of record_ids associated with each common_key
for record_id, record in data.items():
unique_record_ids.add(record_id)
identified_records[record[common_key]].append(record_id)
# all combinations of matched_pairs from each common_key group
for record_ids in identified_records.values():
if len(record_ids) > 1:
matched_pairs.update(itertools.combinations(sorted(record_ids), 2))
# calculate indices using dedupe.core.randomPairs to avoid
# the memory cost of enumerating all possible pairs
unique_record_ids = list(unique_record_ids)
pair_indices = randomPairs(len(unique_record_ids), training_size)
distinct_pairs = set()
for i, j in pair_indices:
distinct_pairs.add((unique_record_ids[i],
unique_record_ids[j]))
distinct_pairs -= matched_pairs
matched_records = [(data[key_1], data[key_2])
for key_1, key_2 in matched_pairs]
distinct_records = [(data[key_1], data[key_2])
for key_1, key_2 in distinct_pairs]
training_pairs = {'match': matched_records,
'distinct': distinct_records}
return training_pairs | python | def trainingDataDedupe(data, common_key, training_size=50000): # pragma: nocover
'''
Construct training data for consumption by the ActiveLearning
markPairs method from an already deduplicated dataset.
Arguments :
data -- Dictionary of records, where the keys are record_ids and
the values are dictionaries with the keys being
field names
common_key -- The name of the record field that uniquely identifies
a match
training_size -- the rough limit of the number of training examples,
defaults to 50000
Warning:
Every match must be identified by the sharing of a common key.
This function assumes that if two records do not share a common key
then they are distinct records.
'''
identified_records = collections.defaultdict(list)
matched_pairs = set()
distinct_pairs = set()
unique_record_ids = set()
# a list of record_ids associated with each common_key
for record_id, record in data.items():
unique_record_ids.add(record_id)
identified_records[record[common_key]].append(record_id)
# all combinations of matched_pairs from each common_key group
for record_ids in identified_records.values():
if len(record_ids) > 1:
matched_pairs.update(itertools.combinations(sorted(record_ids), 2))
# calculate indices using dedupe.core.randomPairs to avoid
# the memory cost of enumerating all possible pairs
unique_record_ids = list(unique_record_ids)
pair_indices = randomPairs(len(unique_record_ids), training_size)
distinct_pairs = set()
for i, j in pair_indices:
distinct_pairs.add((unique_record_ids[i],
unique_record_ids[j]))
distinct_pairs -= matched_pairs
matched_records = [(data[key_1], data[key_2])
for key_1, key_2 in matched_pairs]
distinct_records = [(data[key_1], data[key_2])
for key_1, key_2 in distinct_pairs]
training_pairs = {'match': matched_records,
'distinct': distinct_records}
return training_pairs | [
"def",
"trainingDataDedupe",
"(",
"data",
",",
"common_key",
",",
"training_size",
"=",
"50000",
")",
":",
"# pragma: nocover",
"identified_records",
"=",
"collections",
".",
"defaultdict",
"(",
"list",
")",
"matched_pairs",
"=",
"set",
"(",
")",
"distinct_pairs",... | Construct training data for consumption by the ActiveLearning
markPairs method from an already deduplicated dataset.
Arguments :
data -- Dictionary of records, where the keys are record_ids and
the values are dictionaries with the keys being
field names
common_key -- The name of the record field that uniquely identifies
a match
training_size -- the rough limit of the number of training examples,
defaults to 50000
Warning:
Every match must be identified by the sharing of a common key.
This function assumes that if two records do not share a common key
then they are distinct records. | [
"Construct",
"training",
"data",
"for",
"consumption",
"by",
"the",
"ActiveLearning",
"markPairs",
"method",
"from",
"an",
"already",
"deduplicated",
"dataset",
"."
] | 9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b | https://github.com/dedupeio/dedupe/blob/9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b/dedupe/convenience.py#L170-L228 |
233,927 | dedupeio/dedupe | dedupe/labeler.py | unique | def unique(seq):
"""Return the unique elements of a collection even if those elements are
unhashable and unsortable, like dicts and sets"""
cleaned = []
for each in seq:
if each not in cleaned:
cleaned.append(each)
return cleaned | python | def unique(seq):
cleaned = []
for each in seq:
if each not in cleaned:
cleaned.append(each)
return cleaned | [
"def",
"unique",
"(",
"seq",
")",
":",
"cleaned",
"=",
"[",
"]",
"for",
"each",
"in",
"seq",
":",
"if",
"each",
"not",
"in",
"cleaned",
":",
"cleaned",
".",
"append",
"(",
"each",
")",
"return",
"cleaned"
] | Return the unique elements of a collection even if those elements are
unhashable and unsortable, like dicts and sets | [
"Return",
"the",
"unique",
"elements",
"of",
"a",
"collection",
"even",
"if",
"those",
"elements",
"are",
"unhashable",
"and",
"unsortable",
"like",
"dicts",
"and",
"sets"
] | 9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b | https://github.com/dedupeio/dedupe/blob/9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b/dedupe/labeler.py#L383-L390 |
233,928 | dedupeio/dedupe | dedupe/training.py | BlockLearner.learn | def learn(self, matches, recall):
'''
Takes in a set of training pairs and predicates and tries to find
a good set of blocking rules.
'''
compound_length = 2
dupe_cover = Cover(self.blocker.predicates, matches)
dupe_cover.dominators(cost=self.total_cover)
dupe_cover.compound(compound_length)
comparison_count = self.comparisons(dupe_cover, compound_length)
dupe_cover.dominators(cost=comparison_count, comparison=True)
coverable_dupes = set.union(*viewvalues(dupe_cover))
uncoverable_dupes = [pair for i, pair in enumerate(matches)
if i not in coverable_dupes]
epsilon = int((1.0 - recall) * len(matches))
if len(uncoverable_dupes) > epsilon:
logger.warning(OUT_OF_PREDICATES_WARNING)
logger.debug(uncoverable_dupes)
epsilon = 0
else:
epsilon -= len(uncoverable_dupes)
for pred in dupe_cover:
pred.count = comparison_count[pred]
searcher = BranchBound(len(coverable_dupes) - epsilon, 2500)
final_predicates = searcher.search(dupe_cover)
logger.info('Final predicate set:')
for predicate in final_predicates:
logger.info(predicate)
return final_predicates | python | def learn(self, matches, recall):
'''
Takes in a set of training pairs and predicates and tries to find
a good set of blocking rules.
'''
compound_length = 2
dupe_cover = Cover(self.blocker.predicates, matches)
dupe_cover.dominators(cost=self.total_cover)
dupe_cover.compound(compound_length)
comparison_count = self.comparisons(dupe_cover, compound_length)
dupe_cover.dominators(cost=comparison_count, comparison=True)
coverable_dupes = set.union(*viewvalues(dupe_cover))
uncoverable_dupes = [pair for i, pair in enumerate(matches)
if i not in coverable_dupes]
epsilon = int((1.0 - recall) * len(matches))
if len(uncoverable_dupes) > epsilon:
logger.warning(OUT_OF_PREDICATES_WARNING)
logger.debug(uncoverable_dupes)
epsilon = 0
else:
epsilon -= len(uncoverable_dupes)
for pred in dupe_cover:
pred.count = comparison_count[pred]
searcher = BranchBound(len(coverable_dupes) - epsilon, 2500)
final_predicates = searcher.search(dupe_cover)
logger.info('Final predicate set:')
for predicate in final_predicates:
logger.info(predicate)
return final_predicates | [
"def",
"learn",
"(",
"self",
",",
"matches",
",",
"recall",
")",
":",
"compound_length",
"=",
"2",
"dupe_cover",
"=",
"Cover",
"(",
"self",
".",
"blocker",
".",
"predicates",
",",
"matches",
")",
"dupe_cover",
".",
"dominators",
"(",
"cost",
"=",
"self",... | Takes in a set of training pairs and predicates and tries to find
a good set of blocking rules. | [
"Takes",
"in",
"a",
"set",
"of",
"training",
"pairs",
"and",
"predicates",
"and",
"tries",
"to",
"find",
"a",
"good",
"set",
"of",
"blocking",
"rules",
"."
] | 9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b | https://github.com/dedupeio/dedupe/blob/9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b/dedupe/training.py#L24-L62 |
233,929 | dedupeio/dedupe | dedupe/blocking.py | Blocker.unindex | def unindex(self, data, field):
'''Remove index of a given set of data'''
indices = extractIndices(self.index_fields[field])
for doc in data:
if doc:
for _, index, preprocess in indices:
index.unindex(preprocess(doc))
for index_type, index, _ in indices:
index._index.initSearch()
for predicate in self.index_fields[field][index_type]:
logger.debug("Canopy: %s", str(predicate))
predicate.index = index | python | def unindex(self, data, field):
'''Remove index of a given set of data'''
indices = extractIndices(self.index_fields[field])
for doc in data:
if doc:
for _, index, preprocess in indices:
index.unindex(preprocess(doc))
for index_type, index, _ in indices:
index._index.initSearch()
for predicate in self.index_fields[field][index_type]:
logger.debug("Canopy: %s", str(predicate))
predicate.index = index | [
"def",
"unindex",
"(",
"self",
",",
"data",
",",
"field",
")",
":",
"indices",
"=",
"extractIndices",
"(",
"self",
".",
"index_fields",
"[",
"field",
"]",
")",
"for",
"doc",
"in",
"data",
":",
"if",
"doc",
":",
"for",
"_",
",",
"index",
",",
"prepr... | Remove index of a given set of data | [
"Remove",
"index",
"of",
"a",
"given",
"set",
"of",
"data"
] | 9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b | https://github.com/dedupeio/dedupe/blob/9f7c9f84473a4bcacf0f2b11152d8ed3eb35d48b/dedupe/blocking.py#L80-L95 |
233,930 | marcgibbons/django-rest-swagger | example_app/snippets/models.py | Snippet.save | def save(self, *args, **kwargs):
"""
Use the `pygments` library to create a highlighted HTML
representation of the code snippet.
"""
lexer = get_lexer_by_name(self.language)
linenos = self.linenos and 'table' or False
options = self.title and {'title': self.title} or {}
formatter = HtmlFormatter(style=self.style, linenos=linenos,
full=True, **options)
self.highlighted = highlight(self.code, lexer, formatter)
super(Snippet, self).save(*args, **kwargs)
# limit the number of instances retained
snippets = Snippet.objects.all()
if len(snippets) > 100:
snippets[0].delete() | python | def save(self, *args, **kwargs):
lexer = get_lexer_by_name(self.language)
linenos = self.linenos and 'table' or False
options = self.title and {'title': self.title} or {}
formatter = HtmlFormatter(style=self.style, linenos=linenos,
full=True, **options)
self.highlighted = highlight(self.code, lexer, formatter)
super(Snippet, self).save(*args, **kwargs)
# limit the number of instances retained
snippets = Snippet.objects.all()
if len(snippets) > 100:
snippets[0].delete() | [
"def",
"save",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"lexer",
"=",
"get_lexer_by_name",
"(",
"self",
".",
"language",
")",
"linenos",
"=",
"self",
".",
"linenos",
"and",
"'table'",
"or",
"False",
"options",
"=",
"self",
".... | Use the `pygments` library to create a highlighted HTML
representation of the code snippet. | [
"Use",
"the",
"pygments",
"library",
"to",
"create",
"a",
"highlighted",
"HTML",
"representation",
"of",
"the",
"code",
"snippet",
"."
] | 102d22eaefb7898342ba2fb5af5618b9e3a32f1d | https://github.com/marcgibbons/django-rest-swagger/blob/102d22eaefb7898342ba2fb5af5618b9e3a32f1d/example_app/snippets/models.py#L38-L54 |
233,931 | adafruit/Adafruit_Blinka | src/adafruit_blinka/microcontroller/generic_linux/i2c.py | I2C.scan | def scan(self):
"""Try to read a byte from each address, if you get an OSError it means the device isnt there"""
found = []
for addr in range(0,0x80):
try:
self._i2c_bus.read_byte(addr)
except OSError:
continue
found.append(addr)
return found | python | def scan(self):
found = []
for addr in range(0,0x80):
try:
self._i2c_bus.read_byte(addr)
except OSError:
continue
found.append(addr)
return found | [
"def",
"scan",
"(",
"self",
")",
":",
"found",
"=",
"[",
"]",
"for",
"addr",
"in",
"range",
"(",
"0",
",",
"0x80",
")",
":",
"try",
":",
"self",
".",
"_i2c_bus",
".",
"read_byte",
"(",
"addr",
")",
"except",
"OSError",
":",
"continue",
"found",
"... | Try to read a byte from each address, if you get an OSError it means the device isnt there | [
"Try",
"to",
"read",
"a",
"byte",
"from",
"each",
"address",
"if",
"you",
"get",
"an",
"OSError",
"it",
"means",
"the",
"device",
"isnt",
"there"
] | b4a2b3bf7d8cc88477027b827bd0a8e9b19588ff | https://github.com/adafruit/Adafruit_Blinka/blob/b4a2b3bf7d8cc88477027b827bd0a8e9b19588ff/src/adafruit_blinka/microcontroller/generic_linux/i2c.py#L24-L33 |
233,932 | adafruit/Adafruit_Blinka | src/adafruit_blinka/microcontroller/bcm283x/pulseio/PulseIn.py | final | def final():
"""In case the program is cancelled or quit, we need to clean up the PulseIn
helper process and also the message queue, this is called at exit to do so"""
if DEBUG:
print("Cleaning up message queues", queues)
print("Cleaning up processes", procs)
for q in queues:
q.remove()
for proc in procs:
proc.terminate() | python | def final():
if DEBUG:
print("Cleaning up message queues", queues)
print("Cleaning up processes", procs)
for q in queues:
q.remove()
for proc in procs:
proc.terminate() | [
"def",
"final",
"(",
")",
":",
"if",
"DEBUG",
":",
"print",
"(",
"\"Cleaning up message queues\"",
",",
"queues",
")",
"print",
"(",
"\"Cleaning up processes\"",
",",
"procs",
")",
"for",
"q",
"in",
"queues",
":",
"q",
".",
"remove",
"(",
")",
"for",
"pr... | In case the program is cancelled or quit, we need to clean up the PulseIn
helper process and also the message queue, this is called at exit to do so | [
"In",
"case",
"the",
"program",
"is",
"cancelled",
"or",
"quit",
"we",
"need",
"to",
"clean",
"up",
"the",
"PulseIn",
"helper",
"process",
"and",
"also",
"the",
"message",
"queue",
"this",
"is",
"called",
"at",
"exit",
"to",
"do",
"so"
] | b4a2b3bf7d8cc88477027b827bd0a8e9b19588ff | https://github.com/adafruit/Adafruit_Blinka/blob/b4a2b3bf7d8cc88477027b827bd0a8e9b19588ff/src/adafruit_blinka/microcontroller/bcm283x/pulseio/PulseIn.py#L16-L25 |
233,933 | adafruit/Adafruit_Blinka | src/adafruit_blinka/microcontroller/bcm283x/pulseio/PulseIn.py | PulseIn._wait_receive_msg | def _wait_receive_msg(self, timeout=0.25, type=2):
"""Internal helper that will wait for new messages of a given type,
and throw an exception on timeout"""
stamp = time.monotonic()
while (time.monotonic() - stamp) < timeout:
try:
message = self._mq.receive(block=False, type=2)
return message
except sysv_ipc.BusyError:
time.sleep(0.001) # wait a bit then retry!
# uh-oh timed out
raise RuntimeError("Timed out waiting for PulseIn message") | python | def _wait_receive_msg(self, timeout=0.25, type=2):
stamp = time.monotonic()
while (time.monotonic() - stamp) < timeout:
try:
message = self._mq.receive(block=False, type=2)
return message
except sysv_ipc.BusyError:
time.sleep(0.001) # wait a bit then retry!
# uh-oh timed out
raise RuntimeError("Timed out waiting for PulseIn message") | [
"def",
"_wait_receive_msg",
"(",
"self",
",",
"timeout",
"=",
"0.25",
",",
"type",
"=",
"2",
")",
":",
"stamp",
"=",
"time",
".",
"monotonic",
"(",
")",
"while",
"(",
"time",
".",
"monotonic",
"(",
")",
"-",
"stamp",
")",
"<",
"timeout",
":",
"try"... | Internal helper that will wait for new messages of a given type,
and throw an exception on timeout | [
"Internal",
"helper",
"that",
"will",
"wait",
"for",
"new",
"messages",
"of",
"a",
"given",
"type",
"and",
"throw",
"an",
"exception",
"on",
"timeout"
] | b4a2b3bf7d8cc88477027b827bd0a8e9b19588ff | https://github.com/adafruit/Adafruit_Blinka/blob/b4a2b3bf7d8cc88477027b827bd0a8e9b19588ff/src/adafruit_blinka/microcontroller/bcm283x/pulseio/PulseIn.py#L69-L80 |
233,934 | adafruit/Adafruit_Blinka | src/adafruit_blinka/microcontroller/bcm283x/pulseio/PulseIn.py | PulseIn.deinit | def deinit(self):
"""Deinitialises the PulseIn and releases any hardware and software
resources for reuse."""
# Clean up after ourselves
self._process.terminate()
procs.remove(self._process)
self._mq.remove()
queues.remove(self._mq) | python | def deinit(self):
# Clean up after ourselves
self._process.terminate()
procs.remove(self._process)
self._mq.remove()
queues.remove(self._mq) | [
"def",
"deinit",
"(",
"self",
")",
":",
"# Clean up after ourselves",
"self",
".",
"_process",
".",
"terminate",
"(",
")",
"procs",
".",
"remove",
"(",
"self",
".",
"_process",
")",
"self",
".",
"_mq",
".",
"remove",
"(",
")",
"queues",
".",
"remove",
... | Deinitialises the PulseIn and releases any hardware and software
resources for reuse. | [
"Deinitialises",
"the",
"PulseIn",
"and",
"releases",
"any",
"hardware",
"and",
"software",
"resources",
"for",
"reuse",
"."
] | b4a2b3bf7d8cc88477027b827bd0a8e9b19588ff | https://github.com/adafruit/Adafruit_Blinka/blob/b4a2b3bf7d8cc88477027b827bd0a8e9b19588ff/src/adafruit_blinka/microcontroller/bcm283x/pulseio/PulseIn.py#L82-L89 |
233,935 | adafruit/Adafruit_Blinka | src/adafruit_blinka/microcontroller/bcm283x/pulseio/PulseIn.py | PulseIn.resume | def resume(self, trigger_duration=0):
"""Resumes pulse capture after an optional trigger pulse."""
if trigger_duration != 0:
self._mq.send("t%d" % trigger_duration, True, type=1)
else:
self._mq.send("r", True, type=1)
self._paused = False | python | def resume(self, trigger_duration=0):
if trigger_duration != 0:
self._mq.send("t%d" % trigger_duration, True, type=1)
else:
self._mq.send("r", True, type=1)
self._paused = False | [
"def",
"resume",
"(",
"self",
",",
"trigger_duration",
"=",
"0",
")",
":",
"if",
"trigger_duration",
"!=",
"0",
":",
"self",
".",
"_mq",
".",
"send",
"(",
"\"t%d\"",
"%",
"trigger_duration",
",",
"True",
",",
"type",
"=",
"1",
")",
"else",
":",
"self... | Resumes pulse capture after an optional trigger pulse. | [
"Resumes",
"pulse",
"capture",
"after",
"an",
"optional",
"trigger",
"pulse",
"."
] | b4a2b3bf7d8cc88477027b827bd0a8e9b19588ff | https://github.com/adafruit/Adafruit_Blinka/blob/b4a2b3bf7d8cc88477027b827bd0a8e9b19588ff/src/adafruit_blinka/microcontroller/bcm283x/pulseio/PulseIn.py#L99-L105 |
233,936 | adafruit/Adafruit_Blinka | src/adafruit_blinka/microcontroller/bcm283x/pulseio/PulseIn.py | PulseIn.pause | def pause(self):
"""Pause pulse capture"""
self._mq.send("p", True, type=1)
self._paused = True | python | def pause(self):
self._mq.send("p", True, type=1)
self._paused = True | [
"def",
"pause",
"(",
"self",
")",
":",
"self",
".",
"_mq",
".",
"send",
"(",
"\"p\"",
",",
"True",
",",
"type",
"=",
"1",
")",
"self",
".",
"_paused",
"=",
"True"
] | Pause pulse capture | [
"Pause",
"pulse",
"capture"
] | b4a2b3bf7d8cc88477027b827bd0a8e9b19588ff | https://github.com/adafruit/Adafruit_Blinka/blob/b4a2b3bf7d8cc88477027b827bd0a8e9b19588ff/src/adafruit_blinka/microcontroller/bcm283x/pulseio/PulseIn.py#L107-L110 |
233,937 | adafruit/Adafruit_Blinka | src/adafruit_blinka/microcontroller/bcm283x/pulseio/PulseIn.py | PulseIn.popleft | def popleft(self):
"""Removes and returns the oldest read pulse."""
self._mq.send("^", True, type=1)
message = self._wait_receive_msg()
reply = int(message[0].decode('utf-8'))
#print(reply)
if reply == -1:
raise IndexError("pop from empty list")
return reply | python | def popleft(self):
self._mq.send("^", True, type=1)
message = self._wait_receive_msg()
reply = int(message[0].decode('utf-8'))
#print(reply)
if reply == -1:
raise IndexError("pop from empty list")
return reply | [
"def",
"popleft",
"(",
"self",
")",
":",
"self",
".",
"_mq",
".",
"send",
"(",
"\"^\"",
",",
"True",
",",
"type",
"=",
"1",
")",
"message",
"=",
"self",
".",
"_wait_receive_msg",
"(",
")",
"reply",
"=",
"int",
"(",
"message",
"[",
"0",
"]",
".",
... | Removes and returns the oldest read pulse. | [
"Removes",
"and",
"returns",
"the",
"oldest",
"read",
"pulse",
"."
] | b4a2b3bf7d8cc88477027b827bd0a8e9b19588ff | https://github.com/adafruit/Adafruit_Blinka/blob/b4a2b3bf7d8cc88477027b827bd0a8e9b19588ff/src/adafruit_blinka/microcontroller/bcm283x/pulseio/PulseIn.py#L128-L136 |
233,938 | jupyter-widgets/ipyleaflet | ipyleaflet/leaflet.py | LayerGroup._validate_layers | def _validate_layers(self, proposal):
'''Validate layers list.
Makes sure only one instance of any given layer can exist in the
layers list.
'''
self._layer_ids = [l.model_id for l in proposal.value]
if len(set(self._layer_ids)) != len(self._layer_ids):
raise LayerException('duplicate layer detected, only use each layer once')
return proposal.value | python | def _validate_layers(self, proposal):
'''Validate layers list.
Makes sure only one instance of any given layer can exist in the
layers list.
'''
self._layer_ids = [l.model_id for l in proposal.value]
if len(set(self._layer_ids)) != len(self._layer_ids):
raise LayerException('duplicate layer detected, only use each layer once')
return proposal.value | [
"def",
"_validate_layers",
"(",
"self",
",",
"proposal",
")",
":",
"self",
".",
"_layer_ids",
"=",
"[",
"l",
".",
"model_id",
"for",
"l",
"in",
"proposal",
".",
"value",
"]",
"if",
"len",
"(",
"set",
"(",
"self",
".",
"_layer_ids",
")",
")",
"!=",
... | Validate layers list.
Makes sure only one instance of any given layer can exist in the
layers list. | [
"Validate",
"layers",
"list",
"."
] | 74488d4699a5663fc28aabf94ebf08d956a30598 | https://github.com/jupyter-widgets/ipyleaflet/blob/74488d4699a5663fc28aabf94ebf08d956a30598/ipyleaflet/leaflet.py#L431-L440 |
233,939 | jupyter-widgets/ipyleaflet | ipyleaflet/leaflet.py | GeoJSON.on_hover | def on_hover(self, callback, remove=False):
'''
The hover callback takes an unpacked set of keyword arguments.
'''
self._hover_callbacks.register_callback(callback, remove=remove) | python | def on_hover(self, callback, remove=False):
'''
The hover callback takes an unpacked set of keyword arguments.
'''
self._hover_callbacks.register_callback(callback, remove=remove) | [
"def",
"on_hover",
"(",
"self",
",",
"callback",
",",
"remove",
"=",
"False",
")",
":",
"self",
".",
"_hover_callbacks",
".",
"register_callback",
"(",
"callback",
",",
"remove",
"=",
"remove",
")"
] | The hover callback takes an unpacked set of keyword arguments. | [
"The",
"hover",
"callback",
"takes",
"an",
"unpacked",
"set",
"of",
"keyword",
"arguments",
"."
] | 74488d4699a5663fc28aabf94ebf08d956a30598 | https://github.com/jupyter-widgets/ipyleaflet/blob/74488d4699a5663fc28aabf94ebf08d956a30598/ipyleaflet/leaflet.py#L490-L494 |
233,940 | jupyter-widgets/ipyleaflet | ipyleaflet/leaflet.py | Map._validate_controls | def _validate_controls(self, proposal):
'''Validate controls list.
Makes sure only one instance of any given layer can exist in the
controls list.
'''
self._control_ids = [c.model_id for c in proposal.value]
if len(set(self._control_ids)) != len(self._control_ids):
raise ControlException('duplicate control detected, only use each control once')
return proposal.value | python | def _validate_controls(self, proposal):
'''Validate controls list.
Makes sure only one instance of any given layer can exist in the
controls list.
'''
self._control_ids = [c.model_id for c in proposal.value]
if len(set(self._control_ids)) != len(self._control_ids):
raise ControlException('duplicate control detected, only use each control once')
return proposal.value | [
"def",
"_validate_controls",
"(",
"self",
",",
"proposal",
")",
":",
"self",
".",
"_control_ids",
"=",
"[",
"c",
".",
"model_id",
"for",
"c",
"in",
"proposal",
".",
"value",
"]",
"if",
"len",
"(",
"set",
"(",
"self",
".",
"_control_ids",
")",
")",
"!... | Validate controls list.
Makes sure only one instance of any given layer can exist in the
controls list. | [
"Validate",
"controls",
"list",
"."
] | 74488d4699a5663fc28aabf94ebf08d956a30598 | https://github.com/jupyter-widgets/ipyleaflet/blob/74488d4699a5663fc28aabf94ebf08d956a30598/ipyleaflet/leaflet.py#L881-L890 |
233,941 | mbedmicro/pyOCD | pyocd/debug/context.py | DebugContext.read_core_register | def read_core_register(self, reg):
"""
read CPU register
Unpack floating point register values
"""
regIndex = register_name_to_index(reg)
regValue = self.read_core_register_raw(regIndex)
# Convert int to float.
if is_single_float_register(regIndex):
regValue = conversion.u32_to_float32(regValue)
elif is_double_float_register(regIndex):
regValue = conversion.u64_to_float64(regValue)
return regValue | python | def read_core_register(self, reg):
regIndex = register_name_to_index(reg)
regValue = self.read_core_register_raw(regIndex)
# Convert int to float.
if is_single_float_register(regIndex):
regValue = conversion.u32_to_float32(regValue)
elif is_double_float_register(regIndex):
regValue = conversion.u64_to_float64(regValue)
return regValue | [
"def",
"read_core_register",
"(",
"self",
",",
"reg",
")",
":",
"regIndex",
"=",
"register_name_to_index",
"(",
"reg",
")",
"regValue",
"=",
"self",
".",
"read_core_register_raw",
"(",
"regIndex",
")",
"# Convert int to float.",
"if",
"is_single_float_register",
"("... | read CPU register
Unpack floating point register values | [
"read",
"CPU",
"register",
"Unpack",
"floating",
"point",
"register",
"values"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/debug/context.py#L58-L70 |
233,942 | mbedmicro/pyOCD | pyocd/debug/context.py | DebugContext.write_core_register | def write_core_register(self, reg, data):
"""
write a CPU register.
Will need to pack floating point register values before writing.
"""
regIndex = register_name_to_index(reg)
# Convert float to int.
if is_single_float_register(regIndex) and type(data) is float:
data = conversion.float32_to_u32(data)
elif is_double_float_register(regIndex) and type(data) is float:
data = conversion.float64_to_u64(data)
self.write_core_register_raw(regIndex, data) | python | def write_core_register(self, reg, data):
regIndex = register_name_to_index(reg)
# Convert float to int.
if is_single_float_register(regIndex) and type(data) is float:
data = conversion.float32_to_u32(data)
elif is_double_float_register(regIndex) and type(data) is float:
data = conversion.float64_to_u64(data)
self.write_core_register_raw(regIndex, data) | [
"def",
"write_core_register",
"(",
"self",
",",
"reg",
",",
"data",
")",
":",
"regIndex",
"=",
"register_name_to_index",
"(",
"reg",
")",
"# Convert float to int.",
"if",
"is_single_float_register",
"(",
"regIndex",
")",
"and",
"type",
"(",
"data",
")",
"is",
... | write a CPU register.
Will need to pack floating point register values before writing. | [
"write",
"a",
"CPU",
"register",
".",
"Will",
"need",
"to",
"pack",
"floating",
"point",
"register",
"values",
"before",
"writing",
"."
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/debug/context.py#L84-L95 |
233,943 | mbedmicro/pyOCD | pyocd/probe/cmsis_dap_probe.py | CMSISDAPProbe.connect | def connect(self, protocol=None):
"""Initialize DAP IO pins for JTAG or SWD"""
# Convert protocol to port enum.
if protocol is not None:
port = self.PORT_MAP[protocol]
else:
port = DAPAccess.PORT.DEFAULT
try:
self._link.connect(port)
except DAPAccess.Error as exc:
six.raise_from(self._convert_exception(exc), exc)
# Read the current mode and save it.
actualMode = self._link.get_swj_mode()
self._protocol = self.PORT_MAP[actualMode]
self._invalidate_cached_registers() | python | def connect(self, protocol=None):
# Convert protocol to port enum.
if protocol is not None:
port = self.PORT_MAP[protocol]
else:
port = DAPAccess.PORT.DEFAULT
try:
self._link.connect(port)
except DAPAccess.Error as exc:
six.raise_from(self._convert_exception(exc), exc)
# Read the current mode and save it.
actualMode = self._link.get_swj_mode()
self._protocol = self.PORT_MAP[actualMode]
self._invalidate_cached_registers() | [
"def",
"connect",
"(",
"self",
",",
"protocol",
"=",
"None",
")",
":",
"# Convert protocol to port enum.",
"if",
"protocol",
"is",
"not",
"None",
":",
"port",
"=",
"self",
".",
"PORT_MAP",
"[",
"protocol",
"]",
"else",
":",
"port",
"=",
"DAPAccess",
".",
... | Initialize DAP IO pins for JTAG or SWD | [
"Initialize",
"DAP",
"IO",
"pins",
"for",
"JTAG",
"or",
"SWD"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/cmsis_dap_probe.py#L165-L182 |
233,944 | mbedmicro/pyOCD | pyocd/probe/cmsis_dap_probe.py | CMSISDAPProbe.set_clock | def set_clock(self, frequency):
"""Set the frequency for JTAG and SWD in Hz
This function is safe to call before connect is called.
"""
try:
self._link.set_clock(frequency)
except DAPAccess.Error as exc:
six.raise_from(self._convert_exception(exc), exc) | python | def set_clock(self, frequency):
try:
self._link.set_clock(frequency)
except DAPAccess.Error as exc:
six.raise_from(self._convert_exception(exc), exc) | [
"def",
"set_clock",
"(",
"self",
",",
"frequency",
")",
":",
"try",
":",
"self",
".",
"_link",
".",
"set_clock",
"(",
"frequency",
")",
"except",
"DAPAccess",
".",
"Error",
"as",
"exc",
":",
"six",
".",
"raise_from",
"(",
"self",
".",
"_convert_exception... | Set the frequency for JTAG and SWD in Hz
This function is safe to call before connect is called. | [
"Set",
"the",
"frequency",
"for",
"JTAG",
"and",
"SWD",
"in",
"Hz"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/cmsis_dap_probe.py#L201-L209 |
233,945 | mbedmicro/pyOCD | pyocd/probe/cmsis_dap_probe.py | CMSISDAPProbe.reset | def reset(self):
"""Reset the target"""
try:
self._invalidate_cached_registers()
self._link.reset()
except DAPAccess.Error as exc:
six.raise_from(self._convert_exception(exc), exc) | python | def reset(self):
try:
self._invalidate_cached_registers()
self._link.reset()
except DAPAccess.Error as exc:
six.raise_from(self._convert_exception(exc), exc) | [
"def",
"reset",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"_invalidate_cached_registers",
"(",
")",
"self",
".",
"_link",
".",
"reset",
"(",
")",
"except",
"DAPAccess",
".",
"Error",
"as",
"exc",
":",
"six",
".",
"raise_from",
"(",
"self",
".",
... | Reset the target | [
"Reset",
"the",
"target"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/cmsis_dap_probe.py#L211-L217 |
233,946 | mbedmicro/pyOCD | pyocd/probe/cmsis_dap_probe.py | CMSISDAPProbe.assert_reset | def assert_reset(self, asserted):
"""Assert or de-assert target reset line"""
try:
self._invalidate_cached_registers()
self._link.assert_reset(asserted)
except DAPAccess.Error as exc:
six.raise_from(self._convert_exception(exc), exc) | python | def assert_reset(self, asserted):
try:
self._invalidate_cached_registers()
self._link.assert_reset(asserted)
except DAPAccess.Error as exc:
six.raise_from(self._convert_exception(exc), exc) | [
"def",
"assert_reset",
"(",
"self",
",",
"asserted",
")",
":",
"try",
":",
"self",
".",
"_invalidate_cached_registers",
"(",
")",
"self",
".",
"_link",
".",
"assert_reset",
"(",
"asserted",
")",
"except",
"DAPAccess",
".",
"Error",
"as",
"exc",
":",
"six",... | Assert or de-assert target reset line | [
"Assert",
"or",
"de",
"-",
"assert",
"target",
"reset",
"line"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/cmsis_dap_probe.py#L219-L225 |
233,947 | mbedmicro/pyOCD | pyocd/debug/svd/parser.py | _get_text | def _get_text(node, tag, default=None):
"""Get the text for the provided tag from the provided node"""
try:
return node.find(tag).text
except AttributeError:
return default | python | def _get_text(node, tag, default=None):
try:
return node.find(tag).text
except AttributeError:
return default | [
"def",
"_get_text",
"(",
"node",
",",
"tag",
",",
"default",
"=",
"None",
")",
":",
"try",
":",
"return",
"node",
".",
"find",
"(",
"tag",
")",
".",
"text",
"except",
"AttributeError",
":",
"return",
"default"
] | Get the text for the provided tag from the provided node | [
"Get",
"the",
"text",
"for",
"the",
"provided",
"tag",
"from",
"the",
"provided",
"node"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/debug/svd/parser.py#L33-L38 |
233,948 | mbedmicro/pyOCD | pyocd/coresight/cortex_m.py | CortexM.init | def init(self):
"""
Cortex M initialization. The bus must be accessible when this method is called.
"""
if not self.call_delegate('will_start_debug_core', core=self):
if self.halt_on_connect:
self.halt()
self._read_core_type()
self._check_for_fpu()
self.build_target_xml()
self.sw_bp.init()
self.call_delegate('did_start_debug_core', core=self) | python | def init(self):
if not self.call_delegate('will_start_debug_core', core=self):
if self.halt_on_connect:
self.halt()
self._read_core_type()
self._check_for_fpu()
self.build_target_xml()
self.sw_bp.init()
self.call_delegate('did_start_debug_core', core=self) | [
"def",
"init",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"call_delegate",
"(",
"'will_start_debug_core'",
",",
"core",
"=",
"self",
")",
":",
"if",
"self",
".",
"halt_on_connect",
":",
"self",
".",
"halt",
"(",
")",
"self",
".",
"_read_core_type",... | Cortex M initialization. The bus must be accessible when this method is called. | [
"Cortex",
"M",
"initialization",
".",
"The",
"bus",
"must",
"be",
"accessible",
"when",
"this",
"method",
"is",
"called",
"."
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/coresight/cortex_m.py#L478-L490 |
233,949 | mbedmicro/pyOCD | pyocd/coresight/cortex_m.py | CortexM.write_memory | def write_memory(self, addr, value, transfer_size=32):
"""
write a memory location.
By default the transfer size is a word
"""
self.ap.write_memory(addr, value, transfer_size) | python | def write_memory(self, addr, value, transfer_size=32):
self.ap.write_memory(addr, value, transfer_size) | [
"def",
"write_memory",
"(",
"self",
",",
"addr",
",",
"value",
",",
"transfer_size",
"=",
"32",
")",
":",
"self",
".",
"ap",
".",
"write_memory",
"(",
"addr",
",",
"value",
",",
"transfer_size",
")"
] | write a memory location.
By default the transfer size is a word | [
"write",
"a",
"memory",
"location",
".",
"By",
"default",
"the",
"transfer",
"size",
"is",
"a",
"word"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/coresight/cortex_m.py#L592-L597 |
233,950 | mbedmicro/pyOCD | pyocd/coresight/cortex_m.py | CortexM.read_memory | def read_memory(self, addr, transfer_size=32, now=True):
"""
read a memory location. By default, a word will
be read
"""
result = self.ap.read_memory(addr, transfer_size, now)
# Read callback returned for async reads.
def read_memory_cb():
return self.bp_manager.filter_memory(addr, transfer_size, result())
if now:
return self.bp_manager.filter_memory(addr, transfer_size, result)
else:
return read_memory_cb | python | def read_memory(self, addr, transfer_size=32, now=True):
result = self.ap.read_memory(addr, transfer_size, now)
# Read callback returned for async reads.
def read_memory_cb():
return self.bp_manager.filter_memory(addr, transfer_size, result())
if now:
return self.bp_manager.filter_memory(addr, transfer_size, result)
else:
return read_memory_cb | [
"def",
"read_memory",
"(",
"self",
",",
"addr",
",",
"transfer_size",
"=",
"32",
",",
"now",
"=",
"True",
")",
":",
"result",
"=",
"self",
".",
"ap",
".",
"read_memory",
"(",
"addr",
",",
"transfer_size",
",",
"now",
")",
"# Read callback returned for asyn... | read a memory location. By default, a word will
be read | [
"read",
"a",
"memory",
"location",
".",
"By",
"default",
"a",
"word",
"will",
"be",
"read"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/coresight/cortex_m.py#L599-L613 |
233,951 | mbedmicro/pyOCD | pyocd/coresight/cortex_m.py | CortexM.read_memory_block8 | def read_memory_block8(self, addr, size):
"""
read a block of unaligned bytes in memory. Returns
an array of byte values
"""
data = self.ap.read_memory_block8(addr, size)
return self.bp_manager.filter_memory_unaligned_8(addr, size, data) | python | def read_memory_block8(self, addr, size):
data = self.ap.read_memory_block8(addr, size)
return self.bp_manager.filter_memory_unaligned_8(addr, size, data) | [
"def",
"read_memory_block8",
"(",
"self",
",",
"addr",
",",
"size",
")",
":",
"data",
"=",
"self",
".",
"ap",
".",
"read_memory_block8",
"(",
"addr",
",",
"size",
")",
"return",
"self",
".",
"bp_manager",
".",
"filter_memory_unaligned_8",
"(",
"addr",
",",... | read a block of unaligned bytes in memory. Returns
an array of byte values | [
"read",
"a",
"block",
"of",
"unaligned",
"bytes",
"in",
"memory",
".",
"Returns",
"an",
"array",
"of",
"byte",
"values"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/coresight/cortex_m.py#L615-L621 |
233,952 | mbedmicro/pyOCD | pyocd/coresight/cortex_m.py | CortexM.read_memory_block32 | def read_memory_block32(self, addr, size):
"""
read a block of aligned words in memory. Returns
an array of word values
"""
data = self.ap.read_memory_block32(addr, size)
return self.bp_manager.filter_memory_aligned_32(addr, size, data) | python | def read_memory_block32(self, addr, size):
data = self.ap.read_memory_block32(addr, size)
return self.bp_manager.filter_memory_aligned_32(addr, size, data) | [
"def",
"read_memory_block32",
"(",
"self",
",",
"addr",
",",
"size",
")",
":",
"data",
"=",
"self",
".",
"ap",
".",
"read_memory_block32",
"(",
"addr",
",",
"size",
")",
"return",
"self",
".",
"bp_manager",
".",
"filter_memory_aligned_32",
"(",
"addr",
","... | read a block of aligned words in memory. Returns
an array of word values | [
"read",
"a",
"block",
"of",
"aligned",
"words",
"in",
"memory",
".",
"Returns",
"an",
"array",
"of",
"word",
"values"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/coresight/cortex_m.py#L635-L641 |
233,953 | mbedmicro/pyOCD | pyocd/coresight/cortex_m.py | CortexM.halt | def halt(self):
"""
halt the core
"""
self.notify(Notification(event=Target.EVENT_PRE_HALT, source=self, data=Target.HALT_REASON_USER))
self.write_memory(CortexM.DHCSR, CortexM.DBGKEY | CortexM.C_DEBUGEN | CortexM.C_HALT)
self.flush()
self.notify(Notification(event=Target.EVENT_POST_HALT, source=self, data=Target.HALT_REASON_USER)) | python | def halt(self):
self.notify(Notification(event=Target.EVENT_PRE_HALT, source=self, data=Target.HALT_REASON_USER))
self.write_memory(CortexM.DHCSR, CortexM.DBGKEY | CortexM.C_DEBUGEN | CortexM.C_HALT)
self.flush()
self.notify(Notification(event=Target.EVENT_POST_HALT, source=self, data=Target.HALT_REASON_USER)) | [
"def",
"halt",
"(",
"self",
")",
":",
"self",
".",
"notify",
"(",
"Notification",
"(",
"event",
"=",
"Target",
".",
"EVENT_PRE_HALT",
",",
"source",
"=",
"self",
",",
"data",
"=",
"Target",
".",
"HALT_REASON_USER",
")",
")",
"self",
".",
"write_memory",
... | halt the core | [
"halt",
"the",
"core"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/coresight/cortex_m.py#L643-L650 |
233,954 | mbedmicro/pyOCD | pyocd/coresight/cortex_m.py | CortexM.step | def step(self, disable_interrupts=True, start=0, end=0):
"""
perform an instruction level step. This function preserves the previous
interrupt mask state
"""
# Was 'if self.get_state() != TARGET_HALTED:'
# but now value of dhcsr is saved
dhcsr = self.read_memory(CortexM.DHCSR)
if not (dhcsr & (CortexM.C_STEP | CortexM.C_HALT)):
logging.error('cannot step: target not halted')
return
self.notify(Notification(event=Target.EVENT_PRE_RUN, source=self, data=Target.RUN_TYPE_STEP))
self.clear_debug_cause_bits()
# Save previous interrupt mask state
interrupts_masked = (CortexM.C_MASKINTS & dhcsr) != 0
# Mask interrupts - C_HALT must be set when changing to C_MASKINTS
if not interrupts_masked and disable_interrupts:
self.write_memory(CortexM.DHCSR, CortexM.DBGKEY | CortexM.C_DEBUGEN | CortexM.C_HALT | CortexM.C_MASKINTS)
# Single step using current C_MASKINTS setting
while True:
if disable_interrupts or interrupts_masked:
self.write_memory(CortexM.DHCSR, CortexM.DBGKEY | CortexM.C_DEBUGEN | CortexM.C_MASKINTS | CortexM.C_STEP)
else:
self.write_memory(CortexM.DHCSR, CortexM.DBGKEY | CortexM.C_DEBUGEN | CortexM.C_STEP)
# Wait for halt to auto set (This should be done before the first read)
while not self.read_memory(CortexM.DHCSR) & CortexM.C_HALT:
pass
# Range is empty, 'range step' will degenerate to 'step'
if start == end:
break
# Read program counter and compare to [start, end)
program_counter = self.read_core_register(CORE_REGISTER['pc'])
if program_counter < start or end <= program_counter:
break
# Check other stop reasons
if self.read_memory(CortexM.DFSR) & (CortexM.DFSR_DWTTRAP | CortexM.DFSR_BKPT):
break
# Restore interrupt mask state
if not interrupts_masked and disable_interrupts:
# Unmask interrupts - C_HALT must be set when changing to C_MASKINTS
self.write_memory(CortexM.DHCSR, CortexM.DBGKEY | CortexM.C_DEBUGEN | CortexM.C_HALT)
self.flush()
self._run_token += 1
self.notify(Notification(event=Target.EVENT_POST_RUN, source=self, data=Target.RUN_TYPE_STEP)) | python | def step(self, disable_interrupts=True, start=0, end=0):
# Was 'if self.get_state() != TARGET_HALTED:'
# but now value of dhcsr is saved
dhcsr = self.read_memory(CortexM.DHCSR)
if not (dhcsr & (CortexM.C_STEP | CortexM.C_HALT)):
logging.error('cannot step: target not halted')
return
self.notify(Notification(event=Target.EVENT_PRE_RUN, source=self, data=Target.RUN_TYPE_STEP))
self.clear_debug_cause_bits()
# Save previous interrupt mask state
interrupts_masked = (CortexM.C_MASKINTS & dhcsr) != 0
# Mask interrupts - C_HALT must be set when changing to C_MASKINTS
if not interrupts_masked and disable_interrupts:
self.write_memory(CortexM.DHCSR, CortexM.DBGKEY | CortexM.C_DEBUGEN | CortexM.C_HALT | CortexM.C_MASKINTS)
# Single step using current C_MASKINTS setting
while True:
if disable_interrupts or interrupts_masked:
self.write_memory(CortexM.DHCSR, CortexM.DBGKEY | CortexM.C_DEBUGEN | CortexM.C_MASKINTS | CortexM.C_STEP)
else:
self.write_memory(CortexM.DHCSR, CortexM.DBGKEY | CortexM.C_DEBUGEN | CortexM.C_STEP)
# Wait for halt to auto set (This should be done before the first read)
while not self.read_memory(CortexM.DHCSR) & CortexM.C_HALT:
pass
# Range is empty, 'range step' will degenerate to 'step'
if start == end:
break
# Read program counter and compare to [start, end)
program_counter = self.read_core_register(CORE_REGISTER['pc'])
if program_counter < start or end <= program_counter:
break
# Check other stop reasons
if self.read_memory(CortexM.DFSR) & (CortexM.DFSR_DWTTRAP | CortexM.DFSR_BKPT):
break
# Restore interrupt mask state
if not interrupts_masked and disable_interrupts:
# Unmask interrupts - C_HALT must be set when changing to C_MASKINTS
self.write_memory(CortexM.DHCSR, CortexM.DBGKEY | CortexM.C_DEBUGEN | CortexM.C_HALT)
self.flush()
self._run_token += 1
self.notify(Notification(event=Target.EVENT_POST_RUN, source=self, data=Target.RUN_TYPE_STEP)) | [
"def",
"step",
"(",
"self",
",",
"disable_interrupts",
"=",
"True",
",",
"start",
"=",
"0",
",",
"end",
"=",
"0",
")",
":",
"# Was 'if self.get_state() != TARGET_HALTED:'",
"# but now value of dhcsr is saved",
"dhcsr",
"=",
"self",
".",
"read_memory",
"(",
"Cortex... | perform an instruction level step. This function preserves the previous
interrupt mask state | [
"perform",
"an",
"instruction",
"level",
"step",
".",
"This",
"function",
"preserves",
"the",
"previous",
"interrupt",
"mask",
"state"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/coresight/cortex_m.py#L652-L708 |
233,955 | mbedmicro/pyOCD | pyocd/coresight/cortex_m.py | CortexM.reset_and_halt | def reset_and_halt(self, reset_type=None):
"""
perform a reset and stop the core on the reset handler
"""
delegateResult = self.call_delegate('set_reset_catch', core=self, reset_type=reset_type)
# halt the target
if not delegateResult:
self.halt()
# Save CortexM.DEMCR
demcr = self.read_memory(CortexM.DEMCR)
# enable the vector catch
if not delegateResult:
self.write_memory(CortexM.DEMCR, demcr | CortexM.DEMCR_VC_CORERESET)
self.reset(reset_type)
# wait until the unit resets
with timeout.Timeout(2.0) as t_o:
while t_o.check():
if self.get_state() not in (Target.TARGET_RESET, Target.TARGET_RUNNING):
break
sleep(0.01)
# Make sure the thumb bit is set in XPSR in case the reset handler
# points to an invalid address.
xpsr = self.read_core_register('xpsr')
if xpsr & self.XPSR_THUMB == 0:
self.write_core_register('xpsr', xpsr | self.XPSR_THUMB)
self.call_delegate('clear_reset_catch', core=self, reset_type=reset_type)
# restore vector catch setting
self.write_memory(CortexM.DEMCR, demcr) | python | def reset_and_halt(self, reset_type=None):
delegateResult = self.call_delegate('set_reset_catch', core=self, reset_type=reset_type)
# halt the target
if not delegateResult:
self.halt()
# Save CortexM.DEMCR
demcr = self.read_memory(CortexM.DEMCR)
# enable the vector catch
if not delegateResult:
self.write_memory(CortexM.DEMCR, demcr | CortexM.DEMCR_VC_CORERESET)
self.reset(reset_type)
# wait until the unit resets
with timeout.Timeout(2.0) as t_o:
while t_o.check():
if self.get_state() not in (Target.TARGET_RESET, Target.TARGET_RUNNING):
break
sleep(0.01)
# Make sure the thumb bit is set in XPSR in case the reset handler
# points to an invalid address.
xpsr = self.read_core_register('xpsr')
if xpsr & self.XPSR_THUMB == 0:
self.write_core_register('xpsr', xpsr | self.XPSR_THUMB)
self.call_delegate('clear_reset_catch', core=self, reset_type=reset_type)
# restore vector catch setting
self.write_memory(CortexM.DEMCR, demcr) | [
"def",
"reset_and_halt",
"(",
"self",
",",
"reset_type",
"=",
"None",
")",
":",
"delegateResult",
"=",
"self",
".",
"call_delegate",
"(",
"'set_reset_catch'",
",",
"core",
"=",
"self",
",",
"reset_type",
"=",
"reset_type",
")",
"# halt the target",
"if",
"not"... | perform a reset and stop the core on the reset handler | [
"perform",
"a",
"reset",
"and",
"stop",
"the",
"core",
"on",
"the",
"reset",
"handler"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/coresight/cortex_m.py#L889-L925 |
233,956 | mbedmicro/pyOCD | pyocd/coresight/cortex_m.py | CortexM.resume | def resume(self):
"""
resume the execution
"""
if self.get_state() != Target.TARGET_HALTED:
logging.debug('cannot resume: target not halted')
return
self.notify(Notification(event=Target.EVENT_PRE_RUN, source=self, data=Target.RUN_TYPE_RESUME))
self._run_token += 1
self.clear_debug_cause_bits()
self.write_memory(CortexM.DHCSR, CortexM.DBGKEY | CortexM.C_DEBUGEN)
self.flush()
self.notify(Notification(event=Target.EVENT_POST_RUN, source=self, data=Target.RUN_TYPE_RESUME)) | python | def resume(self):
if self.get_state() != Target.TARGET_HALTED:
logging.debug('cannot resume: target not halted')
return
self.notify(Notification(event=Target.EVENT_PRE_RUN, source=self, data=Target.RUN_TYPE_RESUME))
self._run_token += 1
self.clear_debug_cause_bits()
self.write_memory(CortexM.DHCSR, CortexM.DBGKEY | CortexM.C_DEBUGEN)
self.flush()
self.notify(Notification(event=Target.EVENT_POST_RUN, source=self, data=Target.RUN_TYPE_RESUME)) | [
"def",
"resume",
"(",
"self",
")",
":",
"if",
"self",
".",
"get_state",
"(",
")",
"!=",
"Target",
".",
"TARGET_HALTED",
":",
"logging",
".",
"debug",
"(",
"'cannot resume: target not halted'",
")",
"return",
"self",
".",
"notify",
"(",
"Notification",
"(",
... | resume the execution | [
"resume",
"the",
"execution"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/coresight/cortex_m.py#L956-L968 |
233,957 | mbedmicro/pyOCD | pyocd/coresight/cortex_m.py | CortexM.read_core_registers_raw | def read_core_registers_raw(self, reg_list):
"""
Read one or more core registers
Read core registers in reg_list and return a list of values.
If any register in reg_list is a string, find the number
associated to this register in the lookup table CORE_REGISTER.
"""
# convert to index only
reg_list = [register_name_to_index(reg) for reg in reg_list]
# Sanity check register values
for reg in reg_list:
if reg not in CORE_REGISTER.values():
raise ValueError("unknown reg: %d" % reg)
elif is_fpu_register(reg) and (not self.has_fpu):
raise ValueError("attempt to read FPU register without FPU")
# Handle doubles.
doubles = [reg for reg in reg_list if is_double_float_register(reg)]
hasDoubles = len(doubles) > 0
if hasDoubles:
originalRegList = reg_list
# Strip doubles from reg_list.
reg_list = [reg for reg in reg_list if not is_double_float_register(reg)]
# Read float regs required to build doubles.
singleRegList = []
for reg in doubles:
singleRegList += (-reg, -reg + 1)
singleValues = self.read_core_registers_raw(singleRegList)
# Begin all reads and writes
dhcsr_cb_list = []
reg_cb_list = []
for reg in reg_list:
if is_cfbp_subregister(reg):
reg = CORE_REGISTER['cfbp']
elif is_psr_subregister(reg):
reg = CORE_REGISTER['xpsr']
# write id in DCRSR
self.write_memory(CortexM.DCRSR, reg)
# Technically, we need to poll S_REGRDY in DHCSR here before reading DCRDR. But
# we're running so slow compared to the target that it's not necessary.
# Read it and assert that S_REGRDY is set
dhcsr_cb = self.read_memory(CortexM.DHCSR, now=False)
reg_cb = self.read_memory(CortexM.DCRDR, now=False)
dhcsr_cb_list.append(dhcsr_cb)
reg_cb_list.append(reg_cb)
# Read all results
reg_vals = []
for reg, reg_cb, dhcsr_cb in zip(reg_list, reg_cb_list, dhcsr_cb_list):
dhcsr_val = dhcsr_cb()
assert dhcsr_val & CortexM.S_REGRDY
val = reg_cb()
# Special handling for registers that are combined into a single DCRSR number.
if is_cfbp_subregister(reg):
val = (val >> ((-reg - 1) * 8)) & 0xff
elif is_psr_subregister(reg):
val &= sysm_to_psr_mask(reg)
reg_vals.append(val)
# Merge double regs back into result list.
if hasDoubles:
results = []
for reg in originalRegList:
# Double
if is_double_float_register(reg):
doubleIndex = doubles.index(reg)
singleLow = singleValues[doubleIndex * 2]
singleHigh = singleValues[doubleIndex * 2 + 1]
double = (singleHigh << 32) | singleLow
results.append(double)
# Other register
else:
results.append(reg_vals[reg_list.index(reg)])
reg_vals = results
return reg_vals | python | def read_core_registers_raw(self, reg_list):
# convert to index only
reg_list = [register_name_to_index(reg) for reg in reg_list]
# Sanity check register values
for reg in reg_list:
if reg not in CORE_REGISTER.values():
raise ValueError("unknown reg: %d" % reg)
elif is_fpu_register(reg) and (not self.has_fpu):
raise ValueError("attempt to read FPU register without FPU")
# Handle doubles.
doubles = [reg for reg in reg_list if is_double_float_register(reg)]
hasDoubles = len(doubles) > 0
if hasDoubles:
originalRegList = reg_list
# Strip doubles from reg_list.
reg_list = [reg for reg in reg_list if not is_double_float_register(reg)]
# Read float regs required to build doubles.
singleRegList = []
for reg in doubles:
singleRegList += (-reg, -reg + 1)
singleValues = self.read_core_registers_raw(singleRegList)
# Begin all reads and writes
dhcsr_cb_list = []
reg_cb_list = []
for reg in reg_list:
if is_cfbp_subregister(reg):
reg = CORE_REGISTER['cfbp']
elif is_psr_subregister(reg):
reg = CORE_REGISTER['xpsr']
# write id in DCRSR
self.write_memory(CortexM.DCRSR, reg)
# Technically, we need to poll S_REGRDY in DHCSR here before reading DCRDR. But
# we're running so slow compared to the target that it's not necessary.
# Read it and assert that S_REGRDY is set
dhcsr_cb = self.read_memory(CortexM.DHCSR, now=False)
reg_cb = self.read_memory(CortexM.DCRDR, now=False)
dhcsr_cb_list.append(dhcsr_cb)
reg_cb_list.append(reg_cb)
# Read all results
reg_vals = []
for reg, reg_cb, dhcsr_cb in zip(reg_list, reg_cb_list, dhcsr_cb_list):
dhcsr_val = dhcsr_cb()
assert dhcsr_val & CortexM.S_REGRDY
val = reg_cb()
# Special handling for registers that are combined into a single DCRSR number.
if is_cfbp_subregister(reg):
val = (val >> ((-reg - 1) * 8)) & 0xff
elif is_psr_subregister(reg):
val &= sysm_to_psr_mask(reg)
reg_vals.append(val)
# Merge double regs back into result list.
if hasDoubles:
results = []
for reg in originalRegList:
# Double
if is_double_float_register(reg):
doubleIndex = doubles.index(reg)
singleLow = singleValues[doubleIndex * 2]
singleHigh = singleValues[doubleIndex * 2 + 1]
double = (singleHigh << 32) | singleLow
results.append(double)
# Other register
else:
results.append(reg_vals[reg_list.index(reg)])
reg_vals = results
return reg_vals | [
"def",
"read_core_registers_raw",
"(",
"self",
",",
"reg_list",
")",
":",
"# convert to index only",
"reg_list",
"=",
"[",
"register_name_to_index",
"(",
"reg",
")",
"for",
"reg",
"in",
"reg_list",
"]",
"# Sanity check register values",
"for",
"reg",
"in",
"reg_list... | Read one or more core registers
Read core registers in reg_list and return a list of values.
If any register in reg_list is a string, find the number
associated to this register in the lookup table CORE_REGISTER. | [
"Read",
"one",
"or",
"more",
"core",
"registers"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/coresight/cortex_m.py#L996-L1081 |
233,958 | mbedmicro/pyOCD | pyocd/coresight/cortex_m.py | CortexM.write_core_registers_raw | def write_core_registers_raw(self, reg_list, data_list):
"""
Write one or more core registers
Write core registers in reg_list with the associated value in
data_list. If any register in reg_list is a string, find the number
associated to this register in the lookup table CORE_REGISTER.
"""
assert len(reg_list) == len(data_list)
# convert to index only
reg_list = [register_name_to_index(reg) for reg in reg_list]
# Sanity check register values
for reg in reg_list:
if reg not in CORE_REGISTER.values():
raise ValueError("unknown reg: %d" % reg)
elif is_fpu_register(reg) and (not self.has_fpu):
raise ValueError("attempt to write FPU register without FPU")
# Read special register if it is present in the list and
# convert doubles to single float register writes.
cfbpValue = None
xpsrValue = None
reg_data_list = []
for reg, data in zip(reg_list, data_list):
if is_double_float_register(reg):
# Replace double with two single float register writes. For instance,
# a write of D2 gets converted to writes to S4 and S5.
singleLow = data & 0xffffffff
singleHigh = (data >> 32) & 0xffffffff
reg_data_list += [(-reg, singleLow), (-reg + 1, singleHigh)]
elif is_cfbp_subregister(reg) and cfbpValue is None:
cfbpValue = self.read_core_register_raw(CORE_REGISTER['cfbp'])
elif is_psr_subregister(reg) and xpsrValue is None:
xpsrValue = self.read_core_register_raw(CORE_REGISTER['xpsr'])
else:
# Other register, just copy directly.
reg_data_list.append((reg, data))
# Write out registers
dhcsr_cb_list = []
for reg, data in reg_data_list:
if is_cfbp_subregister(reg):
# Mask in the new special register value so we don't modify the other register
# values that share the same DCRSR number.
shift = (-reg - 1) * 8
mask = 0xffffffff ^ (0xff << shift)
data = (cfbpValue & mask) | ((data & 0xff) << shift)
cfbpValue = data # update special register for other writes that might be in the list
reg = CORE_REGISTER['cfbp']
elif is_psr_subregister(reg):
mask = sysm_to_psr_mask(reg)
data = (xpsrValue & (0xffffffff ^ mask)) | (data & mask)
xpsrValue = data
reg = CORE_REGISTER['xpsr']
# write DCRDR
self.write_memory(CortexM.DCRDR, data)
# write id in DCRSR and flag to start write transfer
self.write_memory(CortexM.DCRSR, reg | CortexM.DCRSR_REGWnR)
# Technically, we need to poll S_REGRDY in DHCSR here to ensure the
# register write has completed.
# Read it and assert that S_REGRDY is set
dhcsr_cb = self.read_memory(CortexM.DHCSR, now=False)
dhcsr_cb_list.append(dhcsr_cb)
# Make sure S_REGRDY was set for all register
# writes
for dhcsr_cb in dhcsr_cb_list:
dhcsr_val = dhcsr_cb()
assert dhcsr_val & CortexM.S_REGRDY | python | def write_core_registers_raw(self, reg_list, data_list):
assert len(reg_list) == len(data_list)
# convert to index only
reg_list = [register_name_to_index(reg) for reg in reg_list]
# Sanity check register values
for reg in reg_list:
if reg not in CORE_REGISTER.values():
raise ValueError("unknown reg: %d" % reg)
elif is_fpu_register(reg) and (not self.has_fpu):
raise ValueError("attempt to write FPU register without FPU")
# Read special register if it is present in the list and
# convert doubles to single float register writes.
cfbpValue = None
xpsrValue = None
reg_data_list = []
for reg, data in zip(reg_list, data_list):
if is_double_float_register(reg):
# Replace double with two single float register writes. For instance,
# a write of D2 gets converted to writes to S4 and S5.
singleLow = data & 0xffffffff
singleHigh = (data >> 32) & 0xffffffff
reg_data_list += [(-reg, singleLow), (-reg + 1, singleHigh)]
elif is_cfbp_subregister(reg) and cfbpValue is None:
cfbpValue = self.read_core_register_raw(CORE_REGISTER['cfbp'])
elif is_psr_subregister(reg) and xpsrValue is None:
xpsrValue = self.read_core_register_raw(CORE_REGISTER['xpsr'])
else:
# Other register, just copy directly.
reg_data_list.append((reg, data))
# Write out registers
dhcsr_cb_list = []
for reg, data in reg_data_list:
if is_cfbp_subregister(reg):
# Mask in the new special register value so we don't modify the other register
# values that share the same DCRSR number.
shift = (-reg - 1) * 8
mask = 0xffffffff ^ (0xff << shift)
data = (cfbpValue & mask) | ((data & 0xff) << shift)
cfbpValue = data # update special register for other writes that might be in the list
reg = CORE_REGISTER['cfbp']
elif is_psr_subregister(reg):
mask = sysm_to_psr_mask(reg)
data = (xpsrValue & (0xffffffff ^ mask)) | (data & mask)
xpsrValue = data
reg = CORE_REGISTER['xpsr']
# write DCRDR
self.write_memory(CortexM.DCRDR, data)
# write id in DCRSR and flag to start write transfer
self.write_memory(CortexM.DCRSR, reg | CortexM.DCRSR_REGWnR)
# Technically, we need to poll S_REGRDY in DHCSR here to ensure the
# register write has completed.
# Read it and assert that S_REGRDY is set
dhcsr_cb = self.read_memory(CortexM.DHCSR, now=False)
dhcsr_cb_list.append(dhcsr_cb)
# Make sure S_REGRDY was set for all register
# writes
for dhcsr_cb in dhcsr_cb_list:
dhcsr_val = dhcsr_cb()
assert dhcsr_val & CortexM.S_REGRDY | [
"def",
"write_core_registers_raw",
"(",
"self",
",",
"reg_list",
",",
"data_list",
")",
":",
"assert",
"len",
"(",
"reg_list",
")",
"==",
"len",
"(",
"data_list",
")",
"# convert to index only",
"reg_list",
"=",
"[",
"register_name_to_index",
"(",
"reg",
")",
... | Write one or more core registers
Write core registers in reg_list with the associated value in
data_list. If any register in reg_list is a string, find the number
associated to this register in the lookup table CORE_REGISTER. | [
"Write",
"one",
"or",
"more",
"core",
"registers"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/coresight/cortex_m.py#L1104-L1176 |
233,959 | mbedmicro/pyOCD | pyocd/coresight/cortex_m.py | CortexM.set_watchpoint | def set_watchpoint(self, addr, size, type):
"""
set a hardware watchpoint
"""
return self.dwt.set_watchpoint(addr, size, type) | python | def set_watchpoint(self, addr, size, type):
return self.dwt.set_watchpoint(addr, size, type) | [
"def",
"set_watchpoint",
"(",
"self",
",",
"addr",
",",
"size",
",",
"type",
")",
":",
"return",
"self",
".",
"dwt",
".",
"set_watchpoint",
"(",
"addr",
",",
"size",
",",
"type",
")"
] | set a hardware watchpoint | [
"set",
"a",
"hardware",
"watchpoint"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/coresight/cortex_m.py#L1199-L1203 |
233,960 | mbedmicro/pyOCD | pyocd/coresight/cortex_m.py | CortexM.remove_watchpoint | def remove_watchpoint(self, addr, size, type):
"""
remove a hardware watchpoint
"""
return self.dwt.remove_watchpoint(addr, size, type) | python | def remove_watchpoint(self, addr, size, type):
return self.dwt.remove_watchpoint(addr, size, type) | [
"def",
"remove_watchpoint",
"(",
"self",
",",
"addr",
",",
"size",
",",
"type",
")",
":",
"return",
"self",
".",
"dwt",
".",
"remove_watchpoint",
"(",
"addr",
",",
"size",
",",
"type",
")"
] | remove a hardware watchpoint | [
"remove",
"a",
"hardware",
"watchpoint"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/coresight/cortex_m.py#L1205-L1209 |
233,961 | mbedmicro/pyOCD | pyocd/target/builtin/target_CC3220SF.py | Flash_cc3220sf.init | def init(self):
"""
Download the flash algorithm in RAM
"""
self.target.halt()
self.target.reset_and_halt()
# update core register to execute the init subroutine
result = self._call_function_and_wait(self.flash_algo['pc_init'], init=True)
# check the return code
if result != 0:
logging.error('init error: %i', result)
# erase the cookie which take up one page
self.erase_sector(0x01000000)
time.sleep(.5)
#do a hardware reset which will put the pc looping in rom
self.target.dp.reset()
time.sleep(1.3)
# reconnect to the board
self.target.dp.init()
self.target.dp.power_up_debug()
self.target.halt()
self.target.reset_and_halt()
# update core register to execute the init subroutine
result = self._call_function_and_wait(self.flash_algo['pc_init'], init=True)
# check the return code
if result != 0:
logging.error('init error: %i', result) | python | def init(self):
self.target.halt()
self.target.reset_and_halt()
# update core register to execute the init subroutine
result = self._call_function_and_wait(self.flash_algo['pc_init'], init=True)
# check the return code
if result != 0:
logging.error('init error: %i', result)
# erase the cookie which take up one page
self.erase_sector(0x01000000)
time.sleep(.5)
#do a hardware reset which will put the pc looping in rom
self.target.dp.reset()
time.sleep(1.3)
# reconnect to the board
self.target.dp.init()
self.target.dp.power_up_debug()
self.target.halt()
self.target.reset_and_halt()
# update core register to execute the init subroutine
result = self._call_function_and_wait(self.flash_algo['pc_init'], init=True)
# check the return code
if result != 0:
logging.error('init error: %i', result) | [
"def",
"init",
"(",
"self",
")",
":",
"self",
".",
"target",
".",
"halt",
"(",
")",
"self",
".",
"target",
".",
"reset_and_halt",
"(",
")",
"# update core register to execute the init subroutine",
"result",
"=",
"self",
".",
"_call_function_and_wait",
"(",
"self... | Download the flash algorithm in RAM | [
"Download",
"the",
"flash",
"algorithm",
"in",
"RAM"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/target/builtin/target_CC3220SF.py#L75-L111 |
233,962 | mbedmicro/pyOCD | pyocd/debug/svd/model.py | _check_type | def _check_type(value, expected_type):
"""Perform type checking on the provided value
This is a helper that will raise ``TypeError`` if the provided value is
not an instance of the provided type. This method should be used sparingly
but can be good for preventing problems earlier when you want to restrict
duck typing to make the types of fields more obvious.
If the value passed the type check it will be returned from the call.
"""
if not isinstance(value, expected_type):
raise TypeError("Value {value!r} has unexpected type {actual_type!r}, expected {expected_type!r}".format(
value=value,
expected_type=expected_type,
actual_type=type(value),
))
return value | python | def _check_type(value, expected_type):
if not isinstance(value, expected_type):
raise TypeError("Value {value!r} has unexpected type {actual_type!r}, expected {expected_type!r}".format(
value=value,
expected_type=expected_type,
actual_type=type(value),
))
return value | [
"def",
"_check_type",
"(",
"value",
",",
"expected_type",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"expected_type",
")",
":",
"raise",
"TypeError",
"(",
"\"Value {value!r} has unexpected type {actual_type!r}, expected {expected_type!r}\"",
".",
"format",
... | Perform type checking on the provided value
This is a helper that will raise ``TypeError`` if the provided value is
not an instance of the provided type. This method should be used sparingly
but can be good for preventing problems earlier when you want to restrict
duck typing to make the types of fields more obvious.
If the value passed the type check it will be returned from the call. | [
"Perform",
"type",
"checking",
"on",
"the",
"provided",
"value"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/debug/svd/model.py#L26-L42 |
233,963 | mbedmicro/pyOCD | pyocd/probe/stlink/detect/windows.py | _get_cached_mounted_points | def _get_cached_mounted_points():
"""! Get the volumes present on the system
@return List of mount points and their associated target id
Ex. [{ 'mount_point': 'D:', 'target_id_usb_id': 'xxxx'}, ...]
"""
result = []
try:
# Open the registry key for mounted devices
mounted_devices_key = winreg.OpenKey(
winreg.HKEY_LOCAL_MACHINE, "SYSTEM\\MountedDevices"
)
for v in _iter_vals(mounted_devices_key):
# Valid entries have the following format: \DosDevices\D:
if "DosDevices" not in v[0]:
continue
volume_string = v[1].decode("utf-16le", "ignore")
if not _is_mbed_volume(volume_string):
continue
mount_point_match = re.match(".*\\\\(.:)$", v[0])
if not mount_point_match:
logger.debug("Invalid disk pattern for entry %s, skipping", v[0])
continue
mount_point = mount_point_match.group(1)
result.append({"mount_point": mount_point, "volume_string": volume_string})
except OSError:
logger.error('Failed to open "MountedDevices" in registry')
return result | python | def _get_cached_mounted_points():
result = []
try:
# Open the registry key for mounted devices
mounted_devices_key = winreg.OpenKey(
winreg.HKEY_LOCAL_MACHINE, "SYSTEM\\MountedDevices"
)
for v in _iter_vals(mounted_devices_key):
# Valid entries have the following format: \DosDevices\D:
if "DosDevices" not in v[0]:
continue
volume_string = v[1].decode("utf-16le", "ignore")
if not _is_mbed_volume(volume_string):
continue
mount_point_match = re.match(".*\\\\(.:)$", v[0])
if not mount_point_match:
logger.debug("Invalid disk pattern for entry %s, skipping", v[0])
continue
mount_point = mount_point_match.group(1)
result.append({"mount_point": mount_point, "volume_string": volume_string})
except OSError:
logger.error('Failed to open "MountedDevices" in registry')
return result | [
"def",
"_get_cached_mounted_points",
"(",
")",
":",
"result",
"=",
"[",
"]",
"try",
":",
"# Open the registry key for mounted devices",
"mounted_devices_key",
"=",
"winreg",
".",
"OpenKey",
"(",
"winreg",
".",
"HKEY_LOCAL_MACHINE",
",",
"\"SYSTEM\\\\MountedDevices\"",
"... | ! Get the volumes present on the system
@return List of mount points and their associated target id
Ex. [{ 'mount_point': 'D:', 'target_id_usb_id': 'xxxx'}, ...] | [
"!",
"Get",
"the",
"volumes",
"present",
"on",
"the",
"system"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/stlink/detect/windows.py#L64-L96 |
233,964 | mbedmicro/pyOCD | pyocd/probe/stlink/detect/windows.py | _vid_pid_path_to_usb_info | def _vid_pid_path_to_usb_info(vid_pid_path):
"""! Provide the vendor ID and product ID of a device based on its entry in the registry
@return Returns {'vendor_id': '<vendor ID>', 'product': '<product ID>'}
@details If the vendor ID or product ID can't be determined, they will be returned
as None.
"""
result = {"vendor_id": None, "product_id": None}
for component in vid_pid_path.split("&"):
component_part = component.lower().split("_")
if len(component_part) != 2:
logger.debug("Unexpected VID/PID string structure %s", component)
break
if component_part[0] == "vid":
result["vendor_id"] = component_part[1]
elif component_part[0] == "pid":
result["product_id"] = component_part[1]
return result | python | def _vid_pid_path_to_usb_info(vid_pid_path):
result = {"vendor_id": None, "product_id": None}
for component in vid_pid_path.split("&"):
component_part = component.lower().split("_")
if len(component_part) != 2:
logger.debug("Unexpected VID/PID string structure %s", component)
break
if component_part[0] == "vid":
result["vendor_id"] = component_part[1]
elif component_part[0] == "pid":
result["product_id"] = component_part[1]
return result | [
"def",
"_vid_pid_path_to_usb_info",
"(",
"vid_pid_path",
")",
":",
"result",
"=",
"{",
"\"vendor_id\"",
":",
"None",
",",
"\"product_id\"",
":",
"None",
"}",
"for",
"component",
"in",
"vid_pid_path",
".",
"split",
"(",
"\"&\"",
")",
":",
"component_part",
"=",... | ! Provide the vendor ID and product ID of a device based on its entry in the registry
@return Returns {'vendor_id': '<vendor ID>', 'product': '<product ID>'}
@details If the vendor ID or product ID can't be determined, they will be returned
as None. | [
"!",
"Provide",
"the",
"vendor",
"ID",
"and",
"product",
"ID",
"of",
"a",
"device",
"based",
"on",
"its",
"entry",
"in",
"the",
"registry"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/stlink/detect/windows.py#L189-L209 |
233,965 | mbedmicro/pyOCD | pyocd/probe/stlink/detect/windows.py | _iter_keys_as_str | def _iter_keys_as_str(key):
"""! Iterate over subkeys of a key returning subkey as string
"""
for i in range(winreg.QueryInfoKey(key)[0]):
yield winreg.EnumKey(key, i) | python | def _iter_keys_as_str(key):
for i in range(winreg.QueryInfoKey(key)[0]):
yield winreg.EnumKey(key, i) | [
"def",
"_iter_keys_as_str",
"(",
"key",
")",
":",
"for",
"i",
"in",
"range",
"(",
"winreg",
".",
"QueryInfoKey",
"(",
"key",
")",
"[",
"0",
"]",
")",
":",
"yield",
"winreg",
".",
"EnumKey",
"(",
"key",
",",
"i",
")"
] | ! Iterate over subkeys of a key returning subkey as string | [
"!",
"Iterate",
"over",
"subkeys",
"of",
"a",
"key",
"returning",
"subkey",
"as",
"string"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/stlink/detect/windows.py#L212-L216 |
233,966 | mbedmicro/pyOCD | pyocd/probe/stlink/detect/windows.py | _iter_keys | def _iter_keys(key):
"""! Iterate over subkeys of a key
"""
for i in range(winreg.QueryInfoKey(key)[0]):
yield winreg.OpenKey(key, winreg.EnumKey(key, i)) | python | def _iter_keys(key):
for i in range(winreg.QueryInfoKey(key)[0]):
yield winreg.OpenKey(key, winreg.EnumKey(key, i)) | [
"def",
"_iter_keys",
"(",
"key",
")",
":",
"for",
"i",
"in",
"range",
"(",
"winreg",
".",
"QueryInfoKey",
"(",
"key",
")",
"[",
"0",
"]",
")",
":",
"yield",
"winreg",
".",
"OpenKey",
"(",
"key",
",",
"winreg",
".",
"EnumKey",
"(",
"key",
",",
"i"... | ! Iterate over subkeys of a key | [
"!",
"Iterate",
"over",
"subkeys",
"of",
"a",
"key"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/stlink/detect/windows.py#L219-L223 |
233,967 | mbedmicro/pyOCD | pyocd/probe/stlink/detect/windows.py | _iter_vals | def _iter_vals(key):
"""! Iterate over values of a key
"""
for i in range(winreg.QueryInfoKey(key)[1]):
yield winreg.EnumValue(key, i) | python | def _iter_vals(key):
for i in range(winreg.QueryInfoKey(key)[1]):
yield winreg.EnumValue(key, i) | [
"def",
"_iter_vals",
"(",
"key",
")",
":",
"for",
"i",
"in",
"range",
"(",
"winreg",
".",
"QueryInfoKey",
"(",
"key",
")",
"[",
"1",
"]",
")",
":",
"yield",
"winreg",
".",
"EnumValue",
"(",
"key",
",",
"i",
")"
] | ! Iterate over values of a key | [
"!",
"Iterate",
"over",
"values",
"of",
"a",
"key"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/stlink/detect/windows.py#L226-L230 |
233,968 | mbedmicro/pyOCD | pyocd/probe/stlink/detect/windows.py | MbedLsToolsWindows.mount_point_ready | def mount_point_ready(self, path):
"""! Check if a mount point is ready for file operations
@return Returns True if the given path exists, False otherwise
@details Calling the Windows command `dir` instead of using the python
`os.path.exists`. The latter causes a Python error box to appear claiming
there is "No Disk" for some devices that are in the ejected state. Calling
`dir` prevents this since it uses the Windows API to determine if the
device is ready before accessing the file system.
"""
stdout, stderr, retcode = self._run_cli_process("dir %s" % path)
result = True if retcode == 0 else False
return result | python | def mount_point_ready(self, path):
stdout, stderr, retcode = self._run_cli_process("dir %s" % path)
result = True if retcode == 0 else False
return result | [
"def",
"mount_point_ready",
"(",
"self",
",",
"path",
")",
":",
"stdout",
",",
"stderr",
",",
"retcode",
"=",
"self",
".",
"_run_cli_process",
"(",
"\"dir %s\"",
"%",
"path",
")",
"result",
"=",
"True",
"if",
"retcode",
"==",
"0",
"else",
"False",
"retur... | ! Check if a mount point is ready for file operations
@return Returns True if the given path exists, False otherwise
@details Calling the Windows command `dir` instead of using the python
`os.path.exists`. The latter causes a Python error box to appear claiming
there is "No Disk" for some devices that are in the ejected state. Calling
`dir` prevents this since it uses the Windows API to determine if the
device is ready before accessing the file system. | [
"!",
"Check",
"if",
"a",
"mount",
"point",
"is",
"ready",
"for",
"file",
"operations"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/stlink/detect/windows.py#L478-L490 |
233,969 | mbedmicro/pyOCD | pyocd/target/pack/flash_algo.py | PackFlashAlgo._create_algo_bin | def _create_algo_bin(self, ro_rw_zi):
"""Create a binary blob of the flash algo which can execute from ram"""
sect_ro, sect_rw, sect_zi = ro_rw_zi
algo_size = sect_ro.length + sect_rw.length + sect_zi.length
algo_data = bytearray(algo_size)
for section in (sect_ro, sect_rw):
start = section.start
size = section.length
data = section.data
assert len(data) == size
algo_data[start:start + size] = data
return algo_data | python | def _create_algo_bin(self, ro_rw_zi):
sect_ro, sect_rw, sect_zi = ro_rw_zi
algo_size = sect_ro.length + sect_rw.length + sect_zi.length
algo_data = bytearray(algo_size)
for section in (sect_ro, sect_rw):
start = section.start
size = section.length
data = section.data
assert len(data) == size
algo_data[start:start + size] = data
return algo_data | [
"def",
"_create_algo_bin",
"(",
"self",
",",
"ro_rw_zi",
")",
":",
"sect_ro",
",",
"sect_rw",
",",
"sect_zi",
"=",
"ro_rw_zi",
"algo_size",
"=",
"sect_ro",
".",
"length",
"+",
"sect_rw",
".",
"length",
"+",
"sect_zi",
".",
"length",
"algo_data",
"=",
"byte... | Create a binary blob of the flash algo which can execute from ram | [
"Create",
"a",
"binary",
"blob",
"of",
"the",
"flash",
"algo",
"which",
"can",
"execute",
"from",
"ram"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/target/pack/flash_algo.py#L232-L243 |
233,970 | mbedmicro/pyOCD | pyocd/target/pack/flash_algo.py | PackFlashInfo._sector_and_sz_itr | def _sector_and_sz_itr(self, elf, data_start):
"""Iterator which returns starting address and sector size"""
for entry_start in itertools.count(data_start, self.FLASH_SECTORS_STRUCT_SIZE):
data = elf.read(entry_start, self.FLASH_SECTORS_STRUCT_SIZE)
size, start = struct.unpack(self.FLASH_SECTORS_STRUCT, data)
start_and_size = start, size
if start_and_size == (self.SECTOR_END, self.SECTOR_END):
return
yield start_and_size | python | def _sector_and_sz_itr(self, elf, data_start):
for entry_start in itertools.count(data_start, self.FLASH_SECTORS_STRUCT_SIZE):
data = elf.read(entry_start, self.FLASH_SECTORS_STRUCT_SIZE)
size, start = struct.unpack(self.FLASH_SECTORS_STRUCT, data)
start_and_size = start, size
if start_and_size == (self.SECTOR_END, self.SECTOR_END):
return
yield start_and_size | [
"def",
"_sector_and_sz_itr",
"(",
"self",
",",
"elf",
",",
"data_start",
")",
":",
"for",
"entry_start",
"in",
"itertools",
".",
"count",
"(",
"data_start",
",",
"self",
".",
"FLASH_SECTORS_STRUCT_SIZE",
")",
":",
"data",
"=",
"elf",
".",
"read",
"(",
"ent... | Iterator which returns starting address and sector size | [
"Iterator",
"which",
"returns",
"starting",
"address",
"and",
"sector",
"size"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/target/pack/flash_algo.py#L292-L300 |
233,971 | mbedmicro/pyOCD | pyocd/probe/stlink/detect/darwin.py | _prune | def _prune(current, keys):
""" Reduce the amount of data we have to sift through to only
include the specified keys, and children that contain the
specified keys
"""
pruned_current = {k: current[k] for k in keys if k in current}
pruned_children = list(
filter(
None, [_prune(c, keys) for c in current.get("IORegistryEntryChildren", [])]
)
)
keep_current = any(k in current for k in keys) or pruned_children
if keep_current:
if pruned_children:
pruned_current["IORegistryEntryChildren"] = pruned_children
return pruned_current
else:
return {} | python | def _prune(current, keys):
pruned_current = {k: current[k] for k in keys if k in current}
pruned_children = list(
filter(
None, [_prune(c, keys) for c in current.get("IORegistryEntryChildren", [])]
)
)
keep_current = any(k in current for k in keys) or pruned_children
if keep_current:
if pruned_children:
pruned_current["IORegistryEntryChildren"] = pruned_children
return pruned_current
else:
return {} | [
"def",
"_prune",
"(",
"current",
",",
"keys",
")",
":",
"pruned_current",
"=",
"{",
"k",
":",
"current",
"[",
"k",
"]",
"for",
"k",
"in",
"keys",
"if",
"k",
"in",
"current",
"}",
"pruned_children",
"=",
"list",
"(",
"filter",
"(",
"None",
",",
"[",... | Reduce the amount of data we have to sift through to only
include the specified keys, and children that contain the
specified keys | [
"Reduce",
"the",
"amount",
"of",
"data",
"we",
"have",
"to",
"sift",
"through",
"to",
"only",
"include",
"the",
"specified",
"keys",
"and",
"children",
"that",
"contain",
"the",
"specified",
"keys"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/stlink/detect/darwin.py#L46-L63 |
233,972 | mbedmicro/pyOCD | pyocd/gdbserver/context_facade.py | GDBDebugContextFacade.get_register_context | def get_register_context(self):
"""
return hexadecimal dump of registers as expected by GDB
"""
logging.debug("GDB getting register context")
resp = b''
reg_num_list = [reg.reg_num for reg in self._register_list]
vals = self._context.read_core_registers_raw(reg_num_list)
#print("Vals: %s" % vals)
for reg, regValue in zip(self._register_list, vals):
if reg.bitsize == 64:
resp += six.b(conversion.u64_to_hex16le(regValue))
else:
resp += six.b(conversion.u32_to_hex8le(regValue))
logging.debug("GDB reg: %s = 0x%X", reg.name, regValue)
return resp | python | def get_register_context(self):
logging.debug("GDB getting register context")
resp = b''
reg_num_list = [reg.reg_num for reg in self._register_list]
vals = self._context.read_core_registers_raw(reg_num_list)
#print("Vals: %s" % vals)
for reg, regValue in zip(self._register_list, vals):
if reg.bitsize == 64:
resp += six.b(conversion.u64_to_hex16le(regValue))
else:
resp += six.b(conversion.u32_to_hex8le(regValue))
logging.debug("GDB reg: %s = 0x%X", reg.name, regValue)
return resp | [
"def",
"get_register_context",
"(",
"self",
")",
":",
"logging",
".",
"debug",
"(",
"\"GDB getting register context\"",
")",
"resp",
"=",
"b''",
"reg_num_list",
"=",
"[",
"reg",
".",
"reg_num",
"for",
"reg",
"in",
"self",
".",
"_register_list",
"]",
"vals",
... | return hexadecimal dump of registers as expected by GDB | [
"return",
"hexadecimal",
"dump",
"of",
"registers",
"as",
"expected",
"by",
"GDB"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/gdbserver/context_facade.py#L60-L76 |
233,973 | mbedmicro/pyOCD | pyocd/gdbserver/context_facade.py | GDBDebugContextFacade.set_register_context | def set_register_context(self, data):
"""
Set registers from GDB hexadecimal string.
"""
logging.debug("GDB setting register context")
reg_num_list = []
reg_data_list = []
for reg in self._register_list:
if reg.bitsize == 64:
regValue = conversion.hex16_to_u64be(data)
data = data[16:]
else:
regValue = conversion.hex8_to_u32be(data)
data = data[8:]
reg_num_list.append(reg.reg_num)
reg_data_list.append(regValue)
logging.debug("GDB reg: %s = 0x%X", reg.name, regValue)
self._context.write_core_registers_raw(reg_num_list, reg_data_list) | python | def set_register_context(self, data):
logging.debug("GDB setting register context")
reg_num_list = []
reg_data_list = []
for reg in self._register_list:
if reg.bitsize == 64:
regValue = conversion.hex16_to_u64be(data)
data = data[16:]
else:
regValue = conversion.hex8_to_u32be(data)
data = data[8:]
reg_num_list.append(reg.reg_num)
reg_data_list.append(regValue)
logging.debug("GDB reg: %s = 0x%X", reg.name, regValue)
self._context.write_core_registers_raw(reg_num_list, reg_data_list) | [
"def",
"set_register_context",
"(",
"self",
",",
"data",
")",
":",
"logging",
".",
"debug",
"(",
"\"GDB setting register context\"",
")",
"reg_num_list",
"=",
"[",
"]",
"reg_data_list",
"=",
"[",
"]",
"for",
"reg",
"in",
"self",
".",
"_register_list",
":",
"... | Set registers from GDB hexadecimal string. | [
"Set",
"registers",
"from",
"GDB",
"hexadecimal",
"string",
"."
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/gdbserver/context_facade.py#L78-L95 |
233,974 | mbedmicro/pyOCD | pyocd/gdbserver/context_facade.py | GDBDebugContextFacade.set_register | def set_register(self, reg, data):
"""
Set single register from GDB hexadecimal string.
reg parameter is the index of register in targetXML sent to GDB.
"""
if reg < 0:
return
elif reg < len(self._register_list):
regName = self._register_list[reg].name
regBits = self._register_list[reg].bitsize
if regBits == 64:
value = conversion.hex16_to_u64be(data)
else:
value = conversion.hex8_to_u32be(data)
logging.debug("GDB: write reg %s: 0x%X", regName, value)
self._context.write_core_register_raw(regName, value) | python | def set_register(self, reg, data):
if reg < 0:
return
elif reg < len(self._register_list):
regName = self._register_list[reg].name
regBits = self._register_list[reg].bitsize
if regBits == 64:
value = conversion.hex16_to_u64be(data)
else:
value = conversion.hex8_to_u32be(data)
logging.debug("GDB: write reg %s: 0x%X", regName, value)
self._context.write_core_register_raw(regName, value) | [
"def",
"set_register",
"(",
"self",
",",
"reg",
",",
"data",
")",
":",
"if",
"reg",
"<",
"0",
":",
"return",
"elif",
"reg",
"<",
"len",
"(",
"self",
".",
"_register_list",
")",
":",
"regName",
"=",
"self",
".",
"_register_list",
"[",
"reg",
"]",
".... | Set single register from GDB hexadecimal string.
reg parameter is the index of register in targetXML sent to GDB. | [
"Set",
"single",
"register",
"from",
"GDB",
"hexadecimal",
"string",
".",
"reg",
"parameter",
"is",
"the",
"index",
"of",
"register",
"in",
"targetXML",
"sent",
"to",
"GDB",
"."
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/gdbserver/context_facade.py#L97-L112 |
233,975 | mbedmicro/pyOCD | pyocd/probe/pydapaccess/dap_access_cmsis_dap.py | _get_interfaces | def _get_interfaces():
"""Get the connected USB devices"""
# Get CMSIS-DAPv1 interfaces.
v1_interfaces = INTERFACE[USB_BACKEND].get_all_connected_interfaces()
# Get CMSIS-DAPv2 interfaces.
v2_interfaces = INTERFACE[USB_BACKEND_V2].get_all_connected_interfaces()
# Prefer v2 over v1 if a device provides both.
devices_in_both = [v1 for v1 in v1_interfaces for v2 in v2_interfaces
if _get_unique_id(v1) == _get_unique_id(v2)]
for dev in devices_in_both:
v1_interfaces.remove(dev)
# Return the combined list.
return v1_interfaces + v2_interfaces | python | def _get_interfaces():
# Get CMSIS-DAPv1 interfaces.
v1_interfaces = INTERFACE[USB_BACKEND].get_all_connected_interfaces()
# Get CMSIS-DAPv2 interfaces.
v2_interfaces = INTERFACE[USB_BACKEND_V2].get_all_connected_interfaces()
# Prefer v2 over v1 if a device provides both.
devices_in_both = [v1 for v1 in v1_interfaces for v2 in v2_interfaces
if _get_unique_id(v1) == _get_unique_id(v2)]
for dev in devices_in_both:
v1_interfaces.remove(dev)
# Return the combined list.
return v1_interfaces + v2_interfaces | [
"def",
"_get_interfaces",
"(",
")",
":",
"# Get CMSIS-DAPv1 interfaces.",
"v1_interfaces",
"=",
"INTERFACE",
"[",
"USB_BACKEND",
"]",
".",
"get_all_connected_interfaces",
"(",
")",
"# Get CMSIS-DAPv2 interfaces.",
"v2_interfaces",
"=",
"INTERFACE",
"[",
"USB_BACKEND_V2",
... | Get the connected USB devices | [
"Get",
"the",
"connected",
"USB",
"devices"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py#L51-L66 |
233,976 | mbedmicro/pyOCD | pyocd/probe/pydapaccess/dap_access_cmsis_dap.py | _Transfer.add_response | def add_response(self, data):
"""
Add data read from the remote device to this object.
The size of data added must match exactly the size
that get_data_size returns.
"""
assert len(data) == self._size_bytes
result = []
for i in range(0, self._size_bytes, 4):
word = ((data[0 + i] << 0) | (data[1 + i] << 8) |
(data[2 + i] << 16) | (data[3 + i] << 24))
result.append(word)
self._result = result | python | def add_response(self, data):
assert len(data) == self._size_bytes
result = []
for i in range(0, self._size_bytes, 4):
word = ((data[0 + i] << 0) | (data[1 + i] << 8) |
(data[2 + i] << 16) | (data[3 + i] << 24))
result.append(word)
self._result = result | [
"def",
"add_response",
"(",
"self",
",",
"data",
")",
":",
"assert",
"len",
"(",
"data",
")",
"==",
"self",
".",
"_size_bytes",
"result",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"self",
".",
"_size_bytes",
",",
"4",
")",
":",
"wo... | Add data read from the remote device to this object.
The size of data added must match exactly the size
that get_data_size returns. | [
"Add",
"data",
"read",
"from",
"the",
"remote",
"device",
"to",
"this",
"object",
"."
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py#L107-L120 |
233,977 | mbedmicro/pyOCD | pyocd/probe/pydapaccess/dap_access_cmsis_dap.py | _Transfer.get_result | def get_result(self):
"""
Get the result of this transfer.
"""
while self._result is None:
if len(self.daplink._commands_to_read) > 0:
self.daplink._read_packet()
else:
assert not self.daplink._crnt_cmd.get_empty()
self.daplink.flush()
if self._error is not None:
# Pylint is confused and thinks self._error is None
# since that is what it is initialized to.
# Supress warnings for this.
# pylint: disable=raising-bad-type
raise self._error
assert self._result is not None
return self._result | python | def get_result(self):
while self._result is None:
if len(self.daplink._commands_to_read) > 0:
self.daplink._read_packet()
else:
assert not self.daplink._crnt_cmd.get_empty()
self.daplink.flush()
if self._error is not None:
# Pylint is confused and thinks self._error is None
# since that is what it is initialized to.
# Supress warnings for this.
# pylint: disable=raising-bad-type
raise self._error
assert self._result is not None
return self._result | [
"def",
"get_result",
"(",
"self",
")",
":",
"while",
"self",
".",
"_result",
"is",
"None",
":",
"if",
"len",
"(",
"self",
".",
"daplink",
".",
"_commands_to_read",
")",
">",
"0",
":",
"self",
".",
"daplink",
".",
"_read_packet",
"(",
")",
"else",
":"... | Get the result of this transfer. | [
"Get",
"the",
"result",
"of",
"this",
"transfer",
"."
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py#L129-L148 |
233,978 | mbedmicro/pyOCD | pyocd/probe/pydapaccess/dap_access_cmsis_dap.py | _Command._get_free_words | def _get_free_words(self, blockAllowed, isRead):
"""
Return the number of words free in the transmit packet
"""
if blockAllowed:
# DAP_TransferBlock request packet:
# BYTE | BYTE *****| SHORT**********| BYTE *************| WORD *********|
# > 0x06 | DAP Index | Transfer Count | Transfer Request | Transfer Data |
# ******|***********|****************|*******************|+++++++++++++++|
send = self._size - 5 - 4 * self._write_count
# DAP_TransferBlock response packet:
# BYTE | SHORT *********| BYTE *************| WORD *********|
# < 0x06 | Transfer Count | Transfer Response | Transfer Data |
# ******|****************|*******************|+++++++++++++++|
recv = self._size - 4 - 4 * self._read_count
if isRead:
return recv // 4
else:
return send // 4
else:
# DAP_Transfer request packet:
# BYTE | BYTE *****| BYTE **********| BYTE *************| WORD *********|
# > 0x05 | DAP Index | Transfer Count | Transfer Request | Transfer Data |
# ******|***********|****************|+++++++++++++++++++++++++++++++++++|
send = self._size - 3 - 1 * self._read_count - 5 * self._write_count
# DAP_Transfer response packet:
# BYTE | BYTE **********| BYTE *************| WORD *********|
# < 0x05 | Transfer Count | Transfer Response | Transfer Data |
# ******|****************|*******************|+++++++++++++++|
recv = self._size - 3 - 4 * self._read_count
if isRead:
# 1 request byte in request packet, 4 data bytes in response packet
return min(send, recv // 4)
else:
# 1 request byte + 4 data bytes
return send // 5 | python | def _get_free_words(self, blockAllowed, isRead):
if blockAllowed:
# DAP_TransferBlock request packet:
# BYTE | BYTE *****| SHORT**********| BYTE *************| WORD *********|
# > 0x06 | DAP Index | Transfer Count | Transfer Request | Transfer Data |
# ******|***********|****************|*******************|+++++++++++++++|
send = self._size - 5 - 4 * self._write_count
# DAP_TransferBlock response packet:
# BYTE | SHORT *********| BYTE *************| WORD *********|
# < 0x06 | Transfer Count | Transfer Response | Transfer Data |
# ******|****************|*******************|+++++++++++++++|
recv = self._size - 4 - 4 * self._read_count
if isRead:
return recv // 4
else:
return send // 4
else:
# DAP_Transfer request packet:
# BYTE | BYTE *****| BYTE **********| BYTE *************| WORD *********|
# > 0x05 | DAP Index | Transfer Count | Transfer Request | Transfer Data |
# ******|***********|****************|+++++++++++++++++++++++++++++++++++|
send = self._size - 3 - 1 * self._read_count - 5 * self._write_count
# DAP_Transfer response packet:
# BYTE | BYTE **********| BYTE *************| WORD *********|
# < 0x05 | Transfer Count | Transfer Response | Transfer Data |
# ******|****************|*******************|+++++++++++++++|
recv = self._size - 3 - 4 * self._read_count
if isRead:
# 1 request byte in request packet, 4 data bytes in response packet
return min(send, recv // 4)
else:
# 1 request byte + 4 data bytes
return send // 5 | [
"def",
"_get_free_words",
"(",
"self",
",",
"blockAllowed",
",",
"isRead",
")",
":",
"if",
"blockAllowed",
":",
"# DAP_TransferBlock request packet:",
"# BYTE | BYTE *****| SHORT**********| BYTE *************| WORD *********|",
"# > 0x06 | DAP Index | Transfer Count | Transfer Reques... | Return the number of words free in the transmit packet | [
"Return",
"the",
"number",
"of",
"words",
"free",
"in",
"the",
"transmit",
"packet"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py#L176-L215 |
233,979 | mbedmicro/pyOCD | pyocd/probe/pydapaccess/dap_access_cmsis_dap.py | _Command.add | def add(self, count, request, data, dap_index):
"""
Add a single or block register transfer operation to this command
"""
assert self._data_encoded is False
if self._dap_index is None:
self._dap_index = dap_index
assert self._dap_index == dap_index
if self._block_request is None:
self._block_request = request
elif request != self._block_request:
self._block_allowed = False
assert not self._block_allowed or self._block_request == request
if request & READ:
self._read_count += count
else:
self._write_count += count
self._data.append((count, request, data))
if LOG_PACKET_BUILDS:
self._logger.debug("add(%d, %02x:%s) -> [wc=%d, rc=%d, ba=%d]" %
(count, request, 'r' if (request & READ) else 'w', self._write_count, self._read_count, self._block_allowed)) | python | def add(self, count, request, data, dap_index):
assert self._data_encoded is False
if self._dap_index is None:
self._dap_index = dap_index
assert self._dap_index == dap_index
if self._block_request is None:
self._block_request = request
elif request != self._block_request:
self._block_allowed = False
assert not self._block_allowed or self._block_request == request
if request & READ:
self._read_count += count
else:
self._write_count += count
self._data.append((count, request, data))
if LOG_PACKET_BUILDS:
self._logger.debug("add(%d, %02x:%s) -> [wc=%d, rc=%d, ba=%d]" %
(count, request, 'r' if (request & READ) else 'w', self._write_count, self._read_count, self._block_allowed)) | [
"def",
"add",
"(",
"self",
",",
"count",
",",
"request",
",",
"data",
",",
"dap_index",
")",
":",
"assert",
"self",
".",
"_data_encoded",
"is",
"False",
"if",
"self",
".",
"_dap_index",
"is",
"None",
":",
"self",
".",
"_dap_index",
"=",
"dap_index",
"a... | Add a single or block register transfer operation to this command | [
"Add",
"a",
"single",
"or",
"block",
"register",
"transfer",
"operation",
"to",
"this",
"command"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py#L261-L284 |
233,980 | mbedmicro/pyOCD | pyocd/probe/pydapaccess/dap_access_cmsis_dap.py | _Command.decode_data | def decode_data(self, data):
"""
Decode the response data
"""
assert self.get_empty() is False
assert self._data_encoded is True
if self._block_allowed:
data = self._decode_transfer_block_data(data)
else:
data = self._decode_transfer_data(data)
return data | python | def decode_data(self, data):
assert self.get_empty() is False
assert self._data_encoded is True
if self._block_allowed:
data = self._decode_transfer_block_data(data)
else:
data = self._decode_transfer_data(data)
return data | [
"def",
"decode_data",
"(",
"self",
",",
"data",
")",
":",
"assert",
"self",
".",
"get_empty",
"(",
")",
"is",
"False",
"assert",
"self",
".",
"_data_encoded",
"is",
"True",
"if",
"self",
".",
"_block_allowed",
":",
"data",
"=",
"self",
".",
"_decode_tran... | Decode the response data | [
"Decode",
"the",
"response",
"data"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py#L429-L439 |
233,981 | mbedmicro/pyOCD | pyocd/probe/pydapaccess/dap_access_cmsis_dap.py | DAPAccessCMSISDAP.get_connected_devices | def get_connected_devices():
"""
Return an array of all mbed boards connected
"""
all_daplinks = []
all_interfaces = _get_interfaces()
for interface in all_interfaces:
try:
new_daplink = DAPAccessCMSISDAP(None, interface=interface)
all_daplinks.append(new_daplink)
except DAPAccessIntf.TransferError:
logger = logging.getLogger(__name__)
logger.error('Failed to get unique id', exc_info=session.Session.get_current().log_tracebacks)
return all_daplinks | python | def get_connected_devices():
all_daplinks = []
all_interfaces = _get_interfaces()
for interface in all_interfaces:
try:
new_daplink = DAPAccessCMSISDAP(None, interface=interface)
all_daplinks.append(new_daplink)
except DAPAccessIntf.TransferError:
logger = logging.getLogger(__name__)
logger.error('Failed to get unique id', exc_info=session.Session.get_current().log_tracebacks)
return all_daplinks | [
"def",
"get_connected_devices",
"(",
")",
":",
"all_daplinks",
"=",
"[",
"]",
"all_interfaces",
"=",
"_get_interfaces",
"(",
")",
"for",
"interface",
"in",
"all_interfaces",
":",
"try",
":",
"new_daplink",
"=",
"DAPAccessCMSISDAP",
"(",
"None",
",",
"interface",... | Return an array of all mbed boards connected | [
"Return",
"an",
"array",
"of",
"all",
"mbed",
"boards",
"connected"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py#L452-L465 |
233,982 | mbedmicro/pyOCD | pyocd/probe/pydapaccess/dap_access_cmsis_dap.py | DAPAccessCMSISDAP.set_deferred_transfer | def set_deferred_transfer(self, enable):
"""
Allow transfers to be delayed and buffered
By default deferred transfers are turned off. All reads and
writes will be completed by the time the function returns.
When enabled packets are buffered and sent all at once, which
increases speed. When memory is written to, the transfer
might take place immediately, or might take place on a future
memory write. This means that an invalid write could cause an
exception to occur on a later, unrelated write. To guarantee
that previous writes are complete call the flush() function.
The behaviour of read operations is determined by the modes
READ_START, READ_NOW and READ_END. The option READ_NOW is the
default and will cause the read to flush all previous writes,
and read the data immediately. To improve performance, multiple
reads can be made using READ_START and finished later with READ_NOW.
This allows the reads to be buffered and sent at once. Note - All
READ_ENDs must be called before a call using READ_NOW can be made.
"""
if self._deferred_transfer and not enable:
self.flush()
self._deferred_transfer = enable | python | def set_deferred_transfer(self, enable):
if self._deferred_transfer and not enable:
self.flush()
self._deferred_transfer = enable | [
"def",
"set_deferred_transfer",
"(",
"self",
",",
"enable",
")",
":",
"if",
"self",
".",
"_deferred_transfer",
"and",
"not",
"enable",
":",
"self",
".",
"flush",
"(",
")",
"self",
".",
"_deferred_transfer",
"=",
"enable"
] | Allow transfers to be delayed and buffered
By default deferred transfers are turned off. All reads and
writes will be completed by the time the function returns.
When enabled packets are buffered and sent all at once, which
increases speed. When memory is written to, the transfer
might take place immediately, or might take place on a future
memory write. This means that an invalid write could cause an
exception to occur on a later, unrelated write. To guarantee
that previous writes are complete call the flush() function.
The behaviour of read operations is determined by the modes
READ_START, READ_NOW and READ_END. The option READ_NOW is the
default and will cause the read to flush all previous writes,
and read the data immediately. To improve performance, multiple
reads can be made using READ_START and finished later with READ_NOW.
This allows the reads to be buffered and sent at once. Note - All
READ_ENDs must be called before a call using READ_NOW can be made. | [
"Allow",
"transfers",
"to",
"be",
"delayed",
"and",
"buffered"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py#L626-L650 |
233,983 | mbedmicro/pyOCD | pyocd/probe/pydapaccess/dap_access_cmsis_dap.py | DAPAccessCMSISDAP._init_deferred_buffers | def _init_deferred_buffers(self):
"""
Initialize or reinitalize all the deferred transfer buffers
Calling this method will drop all pending transactions
so use with care.
"""
# List of transfers that have been started, but
# not completed (started by write_reg, read_reg,
# reg_write_repeat and reg_read_repeat)
self._transfer_list = collections.deque()
# The current packet - this can contain multiple
# different transfers
self._crnt_cmd = _Command(self._packet_size)
# Packets that have been sent but not read
self._commands_to_read = collections.deque()
# Buffer for data returned for completed commands.
# This data will be added to transfers
self._command_response_buf = bytearray() | python | def _init_deferred_buffers(self):
# List of transfers that have been started, but
# not completed (started by write_reg, read_reg,
# reg_write_repeat and reg_read_repeat)
self._transfer_list = collections.deque()
# The current packet - this can contain multiple
# different transfers
self._crnt_cmd = _Command(self._packet_size)
# Packets that have been sent but not read
self._commands_to_read = collections.deque()
# Buffer for data returned for completed commands.
# This data will be added to transfers
self._command_response_buf = bytearray() | [
"def",
"_init_deferred_buffers",
"(",
"self",
")",
":",
"# List of transfers that have been started, but",
"# not completed (started by write_reg, read_reg,",
"# reg_write_repeat and reg_read_repeat)",
"self",
".",
"_transfer_list",
"=",
"collections",
".",
"deque",
"(",
")",
"# ... | Initialize or reinitalize all the deferred transfer buffers
Calling this method will drop all pending transactions
so use with care. | [
"Initialize",
"or",
"reinitalize",
"all",
"the",
"deferred",
"transfer",
"buffers"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py#L851-L869 |
233,984 | mbedmicro/pyOCD | pyocd/probe/pydapaccess/dap_access_cmsis_dap.py | DAPAccessCMSISDAP._read_packet | def _read_packet(self):
"""
Reads and decodes a single packet
Reads a single packet from the device and
stores the data from it in the current Command
object
"""
# Grab command, send it and decode response
cmd = self._commands_to_read.popleft()
try:
raw_data = self._interface.read()
raw_data = bytearray(raw_data)
decoded_data = cmd.decode_data(raw_data)
except Exception as exception:
self._abort_all_transfers(exception)
raise
decoded_data = bytearray(decoded_data)
self._command_response_buf.extend(decoded_data)
# Attach data to transfers
pos = 0
while True:
size_left = len(self._command_response_buf) - pos
if size_left == 0:
# If size left is 0 then the transfer list might
# be empty, so don't try to access element 0
break
transfer = self._transfer_list[0]
size = transfer.get_data_size()
if size > size_left:
break
self._transfer_list.popleft()
data = self._command_response_buf[pos:pos + size]
pos += size
transfer.add_response(data)
# Remove used data from _command_response_buf
if pos > 0:
self._command_response_buf = self._command_response_buf[pos:] | python | def _read_packet(self):
# Grab command, send it and decode response
cmd = self._commands_to_read.popleft()
try:
raw_data = self._interface.read()
raw_data = bytearray(raw_data)
decoded_data = cmd.decode_data(raw_data)
except Exception as exception:
self._abort_all_transfers(exception)
raise
decoded_data = bytearray(decoded_data)
self._command_response_buf.extend(decoded_data)
# Attach data to transfers
pos = 0
while True:
size_left = len(self._command_response_buf) - pos
if size_left == 0:
# If size left is 0 then the transfer list might
# be empty, so don't try to access element 0
break
transfer = self._transfer_list[0]
size = transfer.get_data_size()
if size > size_left:
break
self._transfer_list.popleft()
data = self._command_response_buf[pos:pos + size]
pos += size
transfer.add_response(data)
# Remove used data from _command_response_buf
if pos > 0:
self._command_response_buf = self._command_response_buf[pos:] | [
"def",
"_read_packet",
"(",
"self",
")",
":",
"# Grab command, send it and decode response",
"cmd",
"=",
"self",
".",
"_commands_to_read",
".",
"popleft",
"(",
")",
"try",
":",
"raw_data",
"=",
"self",
".",
"_interface",
".",
"read",
"(",
")",
"raw_data",
"=",... | Reads and decodes a single packet
Reads a single packet from the device and
stores the data from it in the current Command
object | [
"Reads",
"and",
"decodes",
"a",
"single",
"packet"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py#L871-L912 |
233,985 | mbedmicro/pyOCD | pyocd/probe/pydapaccess/dap_access_cmsis_dap.py | DAPAccessCMSISDAP._send_packet | def _send_packet(self):
"""
Send a single packet to the interface
This function guarentees that the number of packets
that are stored in daplink's buffer (the number of
packets written but not read) does not exceed the
number supported by the given device.
"""
cmd = self._crnt_cmd
if cmd.get_empty():
return
max_packets = self._interface.get_packet_count()
if len(self._commands_to_read) >= max_packets:
self._read_packet()
data = cmd.encode_data()
try:
self._interface.write(list(data))
except Exception as exception:
self._abort_all_transfers(exception)
raise
self._commands_to_read.append(cmd)
self._crnt_cmd = _Command(self._packet_size) | python | def _send_packet(self):
cmd = self._crnt_cmd
if cmd.get_empty():
return
max_packets = self._interface.get_packet_count()
if len(self._commands_to_read) >= max_packets:
self._read_packet()
data = cmd.encode_data()
try:
self._interface.write(list(data))
except Exception as exception:
self._abort_all_transfers(exception)
raise
self._commands_to_read.append(cmd)
self._crnt_cmd = _Command(self._packet_size) | [
"def",
"_send_packet",
"(",
"self",
")",
":",
"cmd",
"=",
"self",
".",
"_crnt_cmd",
"if",
"cmd",
".",
"get_empty",
"(",
")",
":",
"return",
"max_packets",
"=",
"self",
".",
"_interface",
".",
"get_packet_count",
"(",
")",
"if",
"len",
"(",
"self",
".",... | Send a single packet to the interface
This function guarentees that the number of packets
that are stored in daplink's buffer (the number of
packets written but not read) does not exceed the
number supported by the given device. | [
"Send",
"a",
"single",
"packet",
"to",
"the",
"interface"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py#L914-L937 |
233,986 | mbedmicro/pyOCD | pyocd/probe/pydapaccess/dap_access_cmsis_dap.py | DAPAccessCMSISDAP._write | def _write(self, dap_index, transfer_count,
transfer_request, transfer_data):
"""
Write one or more commands
"""
assert dap_index == 0 # dap index currently unsupported
assert isinstance(transfer_count, six.integer_types)
assert isinstance(transfer_request, six.integer_types)
assert transfer_data is None or len(transfer_data) > 0
# Create transfer and add to transfer list
transfer = None
if transfer_request & READ:
transfer = _Transfer(self, dap_index, transfer_count,
transfer_request, transfer_data)
self._transfer_list.append(transfer)
# Build physical packet by adding it to command
cmd = self._crnt_cmd
is_read = transfer_request & READ
size_to_transfer = transfer_count
trans_data_pos = 0
while size_to_transfer > 0:
# Get the size remaining in the current packet for the given request.
size = cmd.get_request_space(size_to_transfer, transfer_request, dap_index)
# This request doesn't fit in the packet so send it.
if size == 0:
if LOG_PACKET_BUILDS:
self._logger.debug("_write: send packet [size==0]")
self._send_packet()
cmd = self._crnt_cmd
continue
# Add request to packet.
if transfer_data is None:
data = None
else:
data = transfer_data[trans_data_pos:trans_data_pos + size]
cmd.add(size, transfer_request, data, dap_index)
size_to_transfer -= size
trans_data_pos += size
# Packet has been filled so send it
if cmd.get_full():
if LOG_PACKET_BUILDS:
self._logger.debug("_write: send packet [full]")
self._send_packet()
cmd = self._crnt_cmd
if not self._deferred_transfer:
self.flush()
return transfer | python | def _write(self, dap_index, transfer_count,
transfer_request, transfer_data):
assert dap_index == 0 # dap index currently unsupported
assert isinstance(transfer_count, six.integer_types)
assert isinstance(transfer_request, six.integer_types)
assert transfer_data is None or len(transfer_data) > 0
# Create transfer and add to transfer list
transfer = None
if transfer_request & READ:
transfer = _Transfer(self, dap_index, transfer_count,
transfer_request, transfer_data)
self._transfer_list.append(transfer)
# Build physical packet by adding it to command
cmd = self._crnt_cmd
is_read = transfer_request & READ
size_to_transfer = transfer_count
trans_data_pos = 0
while size_to_transfer > 0:
# Get the size remaining in the current packet for the given request.
size = cmd.get_request_space(size_to_transfer, transfer_request, dap_index)
# This request doesn't fit in the packet so send it.
if size == 0:
if LOG_PACKET_BUILDS:
self._logger.debug("_write: send packet [size==0]")
self._send_packet()
cmd = self._crnt_cmd
continue
# Add request to packet.
if transfer_data is None:
data = None
else:
data = transfer_data[trans_data_pos:trans_data_pos + size]
cmd.add(size, transfer_request, data, dap_index)
size_to_transfer -= size
trans_data_pos += size
# Packet has been filled so send it
if cmd.get_full():
if LOG_PACKET_BUILDS:
self._logger.debug("_write: send packet [full]")
self._send_packet()
cmd = self._crnt_cmd
if not self._deferred_transfer:
self.flush()
return transfer | [
"def",
"_write",
"(",
"self",
",",
"dap_index",
",",
"transfer_count",
",",
"transfer_request",
",",
"transfer_data",
")",
":",
"assert",
"dap_index",
"==",
"0",
"# dap index currently unsupported",
"assert",
"isinstance",
"(",
"transfer_count",
",",
"six",
".",
"... | Write one or more commands | [
"Write",
"one",
"or",
"more",
"commands"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py#L939-L992 |
233,987 | mbedmicro/pyOCD | pyocd/probe/pydapaccess/dap_access_cmsis_dap.py | DAPAccessCMSISDAP._jtag_to_swd | def _jtag_to_swd(self):
"""
Send the command to switch from SWD to jtag
"""
data = [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]
self._protocol.swj_sequence(data)
data = [0x9e, 0xe7]
self._protocol.swj_sequence(data)
data = [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]
self._protocol.swj_sequence(data)
data = [0x00]
self._protocol.swj_sequence(data) | python | def _jtag_to_swd(self):
data = [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]
self._protocol.swj_sequence(data)
data = [0x9e, 0xe7]
self._protocol.swj_sequence(data)
data = [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]
self._protocol.swj_sequence(data)
data = [0x00]
self._protocol.swj_sequence(data) | [
"def",
"_jtag_to_swd",
"(",
"self",
")",
":",
"data",
"=",
"[",
"0xff",
",",
"0xff",
",",
"0xff",
",",
"0xff",
",",
"0xff",
",",
"0xff",
",",
"0xff",
"]",
"self",
".",
"_protocol",
".",
"swj_sequence",
"(",
"data",
")",
"data",
"=",
"[",
"0x9e",
... | Send the command to switch from SWD to jtag | [
"Send",
"the",
"command",
"to",
"switch",
"from",
"SWD",
"to",
"jtag"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py#L994-L1008 |
233,988 | mbedmicro/pyOCD | pyocd/probe/pydapaccess/dap_access_cmsis_dap.py | DAPAccessCMSISDAP._abort_all_transfers | def _abort_all_transfers(self, exception):
"""
Abort any ongoing transfers and clear all buffers
"""
pending_reads = len(self._commands_to_read)
# invalidate _transfer_list
for transfer in self._transfer_list:
transfer.add_error(exception)
# clear all deferred buffers
self._init_deferred_buffers()
# finish all pending reads and ignore the data
# Only do this if the error is a tranfer error.
# Otherwise this could cause another exception
if isinstance(exception, DAPAccessIntf.TransferError):
for _ in range(pending_reads):
self._interface.read() | python | def _abort_all_transfers(self, exception):
pending_reads = len(self._commands_to_read)
# invalidate _transfer_list
for transfer in self._transfer_list:
transfer.add_error(exception)
# clear all deferred buffers
self._init_deferred_buffers()
# finish all pending reads and ignore the data
# Only do this if the error is a tranfer error.
# Otherwise this could cause another exception
if isinstance(exception, DAPAccessIntf.TransferError):
for _ in range(pending_reads):
self._interface.read() | [
"def",
"_abort_all_transfers",
"(",
"self",
",",
"exception",
")",
":",
"pending_reads",
"=",
"len",
"(",
"self",
".",
"_commands_to_read",
")",
"# invalidate _transfer_list",
"for",
"transfer",
"in",
"self",
".",
"_transfer_list",
":",
"transfer",
".",
"add_error... | Abort any ongoing transfers and clear all buffers | [
"Abort",
"any",
"ongoing",
"transfers",
"and",
"clear",
"all",
"buffers"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/pydapaccess/dap_access_cmsis_dap.py#L1010-L1025 |
233,989 | mbedmicro/pyOCD | pyocd/target/builtin/target_nRF51822_xxAA.py | NRF51.resetn | def resetn(self):
"""
reset a core. After a call to this function, the core
is running
"""
#Regular reset will kick NRF out of DBG mode
logging.debug("target_nrf51.reset: enable reset pin")
self.write_memory(RESET, RESET_ENABLE)
#reset
logging.debug("target_nrf51.reset: trigger nRST pin")
self.reset() | python | def resetn(self):
#Regular reset will kick NRF out of DBG mode
logging.debug("target_nrf51.reset: enable reset pin")
self.write_memory(RESET, RESET_ENABLE)
#reset
logging.debug("target_nrf51.reset: trigger nRST pin")
self.reset() | [
"def",
"resetn",
"(",
"self",
")",
":",
"#Regular reset will kick NRF out of DBG mode",
"logging",
".",
"debug",
"(",
"\"target_nrf51.reset: enable reset pin\"",
")",
"self",
".",
"write_memory",
"(",
"RESET",
",",
"RESET_ENABLE",
")",
"#reset",
"logging",
".",
"debug... | reset a core. After a call to this function, the core
is running | [
"reset",
"a",
"core",
".",
"After",
"a",
"call",
"to",
"this",
"function",
"the",
"core",
"is",
"running"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/target/builtin/target_nRF51822_xxAA.py#L67-L77 |
233,990 | mbedmicro/pyOCD | pyocd/probe/stlink/detect/base.py | StlinkDetectBase.list_mbeds | def list_mbeds(self):
""" List details of connected devices
@return Returns list of structures with detailed info about each mbed
@details Function returns list of dictionaries with mbed attributes
'mount_point', TargetID name etc.
Function returns mbed list with platform names if possible
"""
platform_count = {}
candidates = list(self.find_candidates())
result = []
for device in candidates:
if not device.get("mount_point", None):
continue
device["target_id"] = device["target_id_usb_id"]
self._update_device_from_fs(device)
result.append(device)
return result | python | def list_mbeds(self):
platform_count = {}
candidates = list(self.find_candidates())
result = []
for device in candidates:
if not device.get("mount_point", None):
continue
device["target_id"] = device["target_id_usb_id"]
self._update_device_from_fs(device)
result.append(device)
return result | [
"def",
"list_mbeds",
"(",
"self",
")",
":",
"platform_count",
"=",
"{",
"}",
"candidates",
"=",
"list",
"(",
"self",
".",
"find_candidates",
"(",
")",
")",
"result",
"=",
"[",
"]",
"for",
"device",
"in",
"candidates",
":",
"if",
"not",
"device",
".",
... | List details of connected devices
@return Returns list of structures with detailed info about each mbed
@details Function returns list of dictionaries with mbed attributes
'mount_point', TargetID name etc.
Function returns mbed list with platform names if possible | [
"List",
"details",
"of",
"connected",
"devices"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/stlink/detect/base.py#L52-L69 |
233,991 | mbedmicro/pyOCD | pyocd/probe/stlink/detect/base.py | StlinkDetectBase._update_device_from_fs | def _update_device_from_fs(self, device):
""" Updates the device information based on files from its 'mount_point'
@param device Dictionary containing device information
"""
try:
directory_entries = listdir(device["mount_point"])
# Always try to update using daplink compatible boards processself.
# This is done for backwards compatibility.
lowercase_directory_entries = [e.lower() for e in directory_entries]
if self.MBED_HTM_NAME.lower() in lowercase_directory_entries:
self._update_device_from_htm(device)
except (OSError, IOError) as e:
logger.warning(
'Marking device with mount point "%s" as unmounted due to the '
"following error: %s",
device["mount_point"],
e,
)
device["mount_point"] = None | python | def _update_device_from_fs(self, device):
try:
directory_entries = listdir(device["mount_point"])
# Always try to update using daplink compatible boards processself.
# This is done for backwards compatibility.
lowercase_directory_entries = [e.lower() for e in directory_entries]
if self.MBED_HTM_NAME.lower() in lowercase_directory_entries:
self._update_device_from_htm(device)
except (OSError, IOError) as e:
logger.warning(
'Marking device with mount point "%s" as unmounted due to the '
"following error: %s",
device["mount_point"],
e,
)
device["mount_point"] = None | [
"def",
"_update_device_from_fs",
"(",
"self",
",",
"device",
")",
":",
"try",
":",
"directory_entries",
"=",
"listdir",
"(",
"device",
"[",
"\"mount_point\"",
"]",
")",
"# Always try to update using daplink compatible boards processself.",
"# This is done for backwards compat... | Updates the device information based on files from its 'mount_point'
@param device Dictionary containing device information | [
"Updates",
"the",
"device",
"information",
"based",
"on",
"files",
"from",
"its",
"mount_point"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/stlink/detect/base.py#L71-L91 |
233,992 | mbedmicro/pyOCD | pyocd/probe/stlink/detect/base.py | StlinkDetectBase._read_htm_ids | def _read_htm_ids(self, mount_point):
"""! Function scans mbed.htm to get information about TargetID.
@param mount_point mbed mount point (disk / drive letter)
@return Function returns targetID, in case of failure returns None.
@details Note: This function should be improved to scan variety of boards'
mbed.htm files
"""
result = {}
target_id = None
for line in self._htm_lines(mount_point):
target_id = target_id or self._target_id_from_htm(line)
return target_id, result | python | def _read_htm_ids(self, mount_point):
result = {}
target_id = None
for line in self._htm_lines(mount_point):
target_id = target_id or self._target_id_from_htm(line)
return target_id, result | [
"def",
"_read_htm_ids",
"(",
"self",
",",
"mount_point",
")",
":",
"result",
"=",
"{",
"}",
"target_id",
"=",
"None",
"for",
"line",
"in",
"self",
".",
"_htm_lines",
"(",
"mount_point",
")",
":",
"target_id",
"=",
"target_id",
"or",
"self",
".",
"_target... | ! Function scans mbed.htm to get information about TargetID.
@param mount_point mbed mount point (disk / drive letter)
@return Function returns targetID, in case of failure returns None.
@details Note: This function should be improved to scan variety of boards'
mbed.htm files | [
"!",
"Function",
"scans",
"mbed",
".",
"htm",
"to",
"get",
"information",
"about",
"TargetID",
"."
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/stlink/detect/base.py#L109-L120 |
233,993 | mbedmicro/pyOCD | pyocd/probe/stlink/detect/base.py | StlinkDetectBase._target_id_from_htm | def _target_id_from_htm(self, line):
"""! Extract Target id from htm line.
@return Target id or None
"""
# Detecting modern mbed.htm file format
m = re.search("\\?code=([a-fA-F0-9]+)", line)
if m:
result = m.groups()[0]
return result
# Last resort, we can try to see if old mbed.htm format is there
m = re.search("\\?auth=([a-fA-F0-9]+)", line)
if m:
result = m.groups()[0]
return result
return None | python | def _target_id_from_htm(self, line):
# Detecting modern mbed.htm file format
m = re.search("\\?code=([a-fA-F0-9]+)", line)
if m:
result = m.groups()[0]
return result
# Last resort, we can try to see if old mbed.htm format is there
m = re.search("\\?auth=([a-fA-F0-9]+)", line)
if m:
result = m.groups()[0]
return result
return None | [
"def",
"_target_id_from_htm",
"(",
"self",
",",
"line",
")",
":",
"# Detecting modern mbed.htm file format",
"m",
"=",
"re",
".",
"search",
"(",
"\"\\\\?code=([a-fA-F0-9]+)\"",
",",
"line",
")",
"if",
"m",
":",
"result",
"=",
"m",
".",
"groups",
"(",
")",
"[... | ! Extract Target id from htm line.
@return Target id or None | [
"!",
"Extract",
"Target",
"id",
"from",
"htm",
"line",
"."
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/stlink/detect/base.py#L128-L143 |
233,994 | mbedmicro/pyOCD | pyocd/probe/stlink/detect/base.py | StlinkDetectBase._run_cli_process | def _run_cli_process(cmd, shell=True):
"""! Runs command as a process and return stdout, stderr and ret code
@param cmd Command to execute
@return Tuple of (stdout, stderr, returncode)
"""
from subprocess import Popen, PIPE
p = Popen(cmd, shell=shell, stdout=PIPE, stderr=PIPE)
_stdout, _stderr = p.communicate()
return _stdout, _stderr, p.returncode | python | def _run_cli_process(cmd, shell=True):
from subprocess import Popen, PIPE
p = Popen(cmd, shell=shell, stdout=PIPE, stderr=PIPE)
_stdout, _stderr = p.communicate()
return _stdout, _stderr, p.returncode | [
"def",
"_run_cli_process",
"(",
"cmd",
",",
"shell",
"=",
"True",
")",
":",
"from",
"subprocess",
"import",
"Popen",
",",
"PIPE",
"p",
"=",
"Popen",
"(",
"cmd",
",",
"shell",
"=",
"shell",
",",
"stdout",
"=",
"PIPE",
",",
"stderr",
"=",
"PIPE",
")",
... | ! Runs command as a process and return stdout, stderr and ret code
@param cmd Command to execute
@return Tuple of (stdout, stderr, returncode) | [
"!",
"Runs",
"command",
"as",
"a",
"process",
"and",
"return",
"stdout",
"stderr",
"and",
"ret",
"code"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/stlink/detect/base.py#L151-L160 |
233,995 | mbedmicro/pyOCD | pyocd/probe/stlink/detect/factory.py | create_mbed_detector | def create_mbed_detector(**kwargs):
"""! Factory used to create host OS specific mbed-lstools object
:param kwargs: keyword arguments to pass along to the constructors
@return Returns MbedLsTools object or None if host OS is not supported
"""
host_os = platform.system()
if host_os == "Windows":
from .windows import StlinkDetectWindows
return StlinkDetectWindows(**kwargs)
elif host_os == "Linux":
from .linux import StlinkDetectLinuxGeneric
return StlinkDetectLinuxGeneric(**kwargs)
elif host_os == "Darwin":
from .darwin import StlinkDetectDarwin
return StlinkDetectDarwin(**kwargs)
else:
return None | python | def create_mbed_detector(**kwargs):
host_os = platform.system()
if host_os == "Windows":
from .windows import StlinkDetectWindows
return StlinkDetectWindows(**kwargs)
elif host_os == "Linux":
from .linux import StlinkDetectLinuxGeneric
return StlinkDetectLinuxGeneric(**kwargs)
elif host_os == "Darwin":
from .darwin import StlinkDetectDarwin
return StlinkDetectDarwin(**kwargs)
else:
return None | [
"def",
"create_mbed_detector",
"(",
"*",
"*",
"kwargs",
")",
":",
"host_os",
"=",
"platform",
".",
"system",
"(",
")",
"if",
"host_os",
"==",
"\"Windows\"",
":",
"from",
".",
"windows",
"import",
"StlinkDetectWindows",
"return",
"StlinkDetectWindows",
"(",
"*"... | ! Factory used to create host OS specific mbed-lstools object
:param kwargs: keyword arguments to pass along to the constructors
@return Returns MbedLsTools object or None if host OS is not supported | [
"!",
"Factory",
"used",
"to",
"create",
"host",
"OS",
"specific",
"mbed",
"-",
"lstools",
"object"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/stlink/detect/factory.py#L22-L43 |
233,996 | mbedmicro/pyOCD | pyocd/coresight/ap.py | _locked | def _locked(func):
"""! Decorator to automatically lock an AccessPort method."""
def _locking(self, *args, **kwargs):
try:
self.lock()
return func(self, *args, **kwargs)
finally:
self.unlock()
return _locking | python | def _locked(func):
def _locking(self, *args, **kwargs):
try:
self.lock()
return func(self, *args, **kwargs)
finally:
self.unlock()
return _locking | [
"def",
"_locked",
"(",
"func",
")",
":",
"def",
"_locking",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"self",
".",
"lock",
"(",
")",
"return",
"func",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
... | ! Decorator to automatically lock an AccessPort method. | [
"!",
"Decorator",
"to",
"automatically",
"lock",
"an",
"AccessPort",
"method",
"."
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/coresight/ap.py#L136-L144 |
233,997 | mbedmicro/pyOCD | pyocd/probe/stlink/detect/linux.py | StlinkDetectLinuxGeneric._dev_by_id | def _dev_by_id(self, device_type):
"""! Get a dict, USBID -> device, for a device class
@param device_type The type of devices to search. For exmaple, "serial"
looks for all serial devices connected to this computer
@return A dict: Device USBID -> device file in /dev
"""
dir = os.path.join("/dev", device_type, "by-id")
if os.path.isdir(dir):
to_ret = dict(
self._hex_ids([os.path.join(dir, f) for f in os.listdir(dir)])
)
return to_ret
else:
logger.error(
"Could not get %s devices by id. "
"This could be because your Linux distribution "
"does not use udev, or does not create /dev/%s/by-id "
"symlinks. Please submit an issue to github.com/"
"armmbed/mbed-ls.",
device_type,
device_type,
)
return {} | python | def _dev_by_id(self, device_type):
dir = os.path.join("/dev", device_type, "by-id")
if os.path.isdir(dir):
to_ret = dict(
self._hex_ids([os.path.join(dir, f) for f in os.listdir(dir)])
)
return to_ret
else:
logger.error(
"Could not get %s devices by id. "
"This could be because your Linux distribution "
"does not use udev, or does not create /dev/%s/by-id "
"symlinks. Please submit an issue to github.com/"
"armmbed/mbed-ls.",
device_type,
device_type,
)
return {} | [
"def",
"_dev_by_id",
"(",
"self",
",",
"device_type",
")",
":",
"dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"\"/dev\"",
",",
"device_type",
",",
"\"by-id\"",
")",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"dir",
")",
":",
"to_ret",
"=",
"dic... | ! Get a dict, USBID -> device, for a device class
@param device_type The type of devices to search. For exmaple, "serial"
looks for all serial devices connected to this computer
@return A dict: Device USBID -> device file in /dev | [
"!",
"Get",
"a",
"dict",
"USBID",
"-",
">",
"device",
"for",
"a",
"device",
"class"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/stlink/detect/linux.py#L63-L85 |
233,998 | mbedmicro/pyOCD | pyocd/probe/stlink/detect/linux.py | StlinkDetectLinuxGeneric._hex_ids | def _hex_ids(self, dev_list):
"""! Build a USBID map for a device list
@param disk_list List of disks in a system with USBID decoration
@return Returns map USBID -> device file in /dev
@details Uses regular expressions to get a USBID (TargeTIDs) a "by-id"
symbolic link
"""
for dl in dev_list:
match = self.nlp.search(dl)
if match:
yield match.group("usbid"), _readlink(dl) | python | def _hex_ids(self, dev_list):
for dl in dev_list:
match = self.nlp.search(dl)
if match:
yield match.group("usbid"), _readlink(dl) | [
"def",
"_hex_ids",
"(",
"self",
",",
"dev_list",
")",
":",
"for",
"dl",
"in",
"dev_list",
":",
"match",
"=",
"self",
".",
"nlp",
".",
"search",
"(",
"dl",
")",
"if",
"match",
":",
"yield",
"match",
".",
"group",
"(",
"\"usbid\"",
")",
",",
"_readli... | ! Build a USBID map for a device list
@param disk_list List of disks in a system with USBID decoration
@return Returns map USBID -> device file in /dev
@details Uses regular expressions to get a USBID (TargeTIDs) a "by-id"
symbolic link | [
"!",
"Build",
"a",
"USBID",
"map",
"for",
"a",
"device",
"list"
] | 41a174718a9739f3cbe785c2ba21cb7fd1310c6f | https://github.com/mbedmicro/pyOCD/blob/41a174718a9739f3cbe785c2ba21cb7fd1310c6f/pyocd/probe/stlink/detect/linux.py#L100-L110 |
233,999 | influxdata/influxdb-python | influxdb/line_protocol.py | _get_unicode | def _get_unicode(data, force=False):
"""Try to return a text aka unicode object from the given data."""
if isinstance(data, binary_type):
return data.decode('utf-8')
elif data is None:
return ''
elif force:
if PY2:
return unicode(data)
else:
return str(data)
else:
return data | python | def _get_unicode(data, force=False):
if isinstance(data, binary_type):
return data.decode('utf-8')
elif data is None:
return ''
elif force:
if PY2:
return unicode(data)
else:
return str(data)
else:
return data | [
"def",
"_get_unicode",
"(",
"data",
",",
"force",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"binary_type",
")",
":",
"return",
"data",
".",
"decode",
"(",
"'utf-8'",
")",
"elif",
"data",
"is",
"None",
":",
"return",
"''",
"elif",
... | Try to return a text aka unicode object from the given data. | [
"Try",
"to",
"return",
"a",
"text",
"aka",
"unicode",
"object",
"from",
"the",
"given",
"data",
"."
] | d5d12499f3755199d5eedd8b363450f1cf4073bd | https://github.com/influxdata/influxdb-python/blob/d5d12499f3755199d5eedd8b363450f1cf4073bd/influxdb/line_protocol.py#L104-L116 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.