repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
danielhrisca/asammdf | asammdf/blocks/utils.py | count_channel_groups | def count_channel_groups(stream, include_channels=False):
""" count all channel groups as fast as possible. This is used to provide
reliable progress information when loading a file using the GUI
Parameters
----------
stream : file handle
opened file handle
include_channels : bool
also count channels
Returns
-------
count : int
channel group count
"""
count = 0
ch_count = 0
stream.seek(64)
blk_id = stream.read(2)
if blk_id == b"HD":
version = 3
else:
blk_id += stream.read(2)
if blk_id == b"##HD":
version = 4
else:
raise MdfException(f'"{stream.name}" is not a valid MDF file')
if version >= 4:
stream.seek(88, 0)
dg_addr = UINT64_u(stream.read(8))[0]
while dg_addr:
stream.seek(dg_addr + 32)
cg_addr = UINT64_u(stream.read(8))[0]
while cg_addr:
count += 1
if include_channels:
stream.seek(cg_addr + 32)
ch_addr = UINT64_u(stream.read(8))[0]
while ch_addr:
ch_count += 1
stream.seek(ch_addr + 24)
ch_addr = UINT64_u(stream.read(8))[0]
stream.seek(cg_addr + 24)
cg_addr = UINT64_u(stream.read(8))[0]
stream.seek(dg_addr + 24)
dg_addr = UINT64_u(stream.read(8))[0]
else:
stream.seek(68, 0)
dg_addr = UINT32_u(stream.read(4))[0]
while dg_addr:
stream.seek(dg_addr + 8)
cg_addr = UINT32_u(stream.read(4))[0]
while cg_addr:
count += 1
if include_channels:
stream.seek(cg_addr + 8)
ch_addr = UINT32_u(stream.read(4))[0]
while ch_addr:
ch_count += 1
stream.seek(ch_addr + 4)
ch_addr = UINT32_u(stream.read(4))[0]
stream.seek(cg_addr + 4)
cg_addr = UINT32_u(stream.read(4))[0]
stream.seek(dg_addr + 4)
dg_addr = UINT32_u(stream.read(4))[0]
return count, ch_count | python | def count_channel_groups(stream, include_channels=False):
""" count all channel groups as fast as possible. This is used to provide
reliable progress information when loading a file using the GUI
Parameters
----------
stream : file handle
opened file handle
include_channels : bool
also count channels
Returns
-------
count : int
channel group count
"""
count = 0
ch_count = 0
stream.seek(64)
blk_id = stream.read(2)
if blk_id == b"HD":
version = 3
else:
blk_id += stream.read(2)
if blk_id == b"##HD":
version = 4
else:
raise MdfException(f'"{stream.name}" is not a valid MDF file')
if version >= 4:
stream.seek(88, 0)
dg_addr = UINT64_u(stream.read(8))[0]
while dg_addr:
stream.seek(dg_addr + 32)
cg_addr = UINT64_u(stream.read(8))[0]
while cg_addr:
count += 1
if include_channels:
stream.seek(cg_addr + 32)
ch_addr = UINT64_u(stream.read(8))[0]
while ch_addr:
ch_count += 1
stream.seek(ch_addr + 24)
ch_addr = UINT64_u(stream.read(8))[0]
stream.seek(cg_addr + 24)
cg_addr = UINT64_u(stream.read(8))[0]
stream.seek(dg_addr + 24)
dg_addr = UINT64_u(stream.read(8))[0]
else:
stream.seek(68, 0)
dg_addr = UINT32_u(stream.read(4))[0]
while dg_addr:
stream.seek(dg_addr + 8)
cg_addr = UINT32_u(stream.read(4))[0]
while cg_addr:
count += 1
if include_channels:
stream.seek(cg_addr + 8)
ch_addr = UINT32_u(stream.read(4))[0]
while ch_addr:
ch_count += 1
stream.seek(ch_addr + 4)
ch_addr = UINT32_u(stream.read(4))[0]
stream.seek(cg_addr + 4)
cg_addr = UINT32_u(stream.read(4))[0]
stream.seek(dg_addr + 4)
dg_addr = UINT32_u(stream.read(4))[0]
return count, ch_count | [
"def",
"count_channel_groups",
"(",
"stream",
",",
"include_channels",
"=",
"False",
")",
":",
"count",
"=",
"0",
"ch_count",
"=",
"0",
"stream",
".",
"seek",
"(",
"64",
")",
"blk_id",
"=",
"stream",
".",
"read",
"(",
"2",
")",
"if",
"blk_id",
"==",
... | count all channel groups as fast as possible. This is used to provide
reliable progress information when loading a file using the GUI
Parameters
----------
stream : file handle
opened file handle
include_channels : bool
also count channels
Returns
-------
count : int
channel group count | [
"count",
"all",
"channel",
"groups",
"as",
"fast",
"as",
"possible",
".",
"This",
"is",
"used",
"to",
"provide",
"reliable",
"progress",
"information",
"when",
"loading",
"a",
"file",
"using",
"the",
"GUI"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/utils.py#L646-L720 | train | 221,700 |
danielhrisca/asammdf | asammdf/blocks/utils.py | validate_version_argument | def validate_version_argument(version, hint=4):
""" validate the version argument against the supported MDF versions. The
default version used depends on the hint MDF major revision
Parameters
----------
version : str
requested MDF version
hint : int
MDF revision hint
Returns
-------
valid_version : str
valid version
"""
if version not in SUPPORTED_VERSIONS:
if hint == 2:
valid_version = "2.14"
elif hint == 3:
valid_version = "3.30"
else:
valid_version = "4.10"
message = (
'Unknown mdf version "{}".'
" The available versions are {};"
' automatically using version "{}"'
)
message = message.format(version, SUPPORTED_VERSIONS, valid_version)
logger.warning(message)
else:
valid_version = version
return valid_version | python | def validate_version_argument(version, hint=4):
""" validate the version argument against the supported MDF versions. The
default version used depends on the hint MDF major revision
Parameters
----------
version : str
requested MDF version
hint : int
MDF revision hint
Returns
-------
valid_version : str
valid version
"""
if version not in SUPPORTED_VERSIONS:
if hint == 2:
valid_version = "2.14"
elif hint == 3:
valid_version = "3.30"
else:
valid_version = "4.10"
message = (
'Unknown mdf version "{}".'
" The available versions are {};"
' automatically using version "{}"'
)
message = message.format(version, SUPPORTED_VERSIONS, valid_version)
logger.warning(message)
else:
valid_version = version
return valid_version | [
"def",
"validate_version_argument",
"(",
"version",
",",
"hint",
"=",
"4",
")",
":",
"if",
"version",
"not",
"in",
"SUPPORTED_VERSIONS",
":",
"if",
"hint",
"==",
"2",
":",
"valid_version",
"=",
"\"2.14\"",
"elif",
"hint",
"==",
"3",
":",
"valid_version",
"... | validate the version argument against the supported MDF versions. The
default version used depends on the hint MDF major revision
Parameters
----------
version : str
requested MDF version
hint : int
MDF revision hint
Returns
-------
valid_version : str
valid version | [
"validate",
"the",
"version",
"argument",
"against",
"the",
"supported",
"MDF",
"versions",
".",
"The",
"default",
"version",
"used",
"depends",
"on",
"the",
"hint",
"MDF",
"major",
"revision"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/utils.py#L723-L756 | train | 221,701 |
danielhrisca/asammdf | asammdf/blocks/utils.py | cut_video_stream | def cut_video_stream(stream, start, end, fmt):
""" cut video stream from `start` to `end` time
Parameters
----------
stream : bytes
video file content
start : float
start time
end : float
end time
Returns
-------
result : bytes
content of cut video
"""
with TemporaryDirectory() as tmp:
in_file = Path(tmp) / f"in{fmt}"
out_file = Path(tmp) / f"out{fmt}"
in_file.write_bytes(stream)
try:
ret = subprocess.run(
[
"ffmpeg",
"-ss",
f"{start}",
"-i",
f"{in_file}",
"-to",
f"{end}",
"-c",
"copy",
f"{out_file}",
],
capture_output=True,
)
except FileNotFoundError:
result = stream
else:
if ret.returncode:
result = stream
else:
result = out_file.read_bytes()
return result | python | def cut_video_stream(stream, start, end, fmt):
""" cut video stream from `start` to `end` time
Parameters
----------
stream : bytes
video file content
start : float
start time
end : float
end time
Returns
-------
result : bytes
content of cut video
"""
with TemporaryDirectory() as tmp:
in_file = Path(tmp) / f"in{fmt}"
out_file = Path(tmp) / f"out{fmt}"
in_file.write_bytes(stream)
try:
ret = subprocess.run(
[
"ffmpeg",
"-ss",
f"{start}",
"-i",
f"{in_file}",
"-to",
f"{end}",
"-c",
"copy",
f"{out_file}",
],
capture_output=True,
)
except FileNotFoundError:
result = stream
else:
if ret.returncode:
result = stream
else:
result = out_file.read_bytes()
return result | [
"def",
"cut_video_stream",
"(",
"stream",
",",
"start",
",",
"end",
",",
"fmt",
")",
":",
"with",
"TemporaryDirectory",
"(",
")",
"as",
"tmp",
":",
"in_file",
"=",
"Path",
"(",
"tmp",
")",
"/",
"f\"in{fmt}\"",
"out_file",
"=",
"Path",
"(",
"tmp",
")",
... | cut video stream from `start` to `end` time
Parameters
----------
stream : bytes
video file content
start : float
start time
end : float
end time
Returns
-------
result : bytes
content of cut video | [
"cut",
"video",
"stream",
"from",
"start",
"to",
"end",
"time"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/utils.py#L878-L926 | train | 221,702 |
danielhrisca/asammdf | asammdf/blocks/utils.py | components | def components(channel, channel_name, unique_names, prefix="", master=None):
""" yield pandas Series and unique name based on the ndarray object
Parameters
----------
channel : numpy.ndarray
channel to be used foir Series
channel_name : str
channel name
unique_names : UniqueDB
unique names object
prefix : str
prefix used in case of nested recarrays
Returns
-------
name, series : (str, pandas.Series)
tuple of unqiue name and Series object
"""
names = channel.dtype.names
# channel arrays
if names[0] == channel_name:
name = names[0]
if prefix:
name_ = unique_names.get_unique_name(f"{prefix}.{name}")
else:
name_ = unique_names.get_unique_name(name)
values = channel[name]
if len(values.shape) > 1:
arr = [values]
types = [("", values.dtype, values.shape[1:])]
values = fromarrays(arr, dtype=types)
del arr
yield name_, Series(values, index=master, dtype="O")
for name in names[1:]:
values = channel[name]
axis_name = unique_names.get_unique_name(f"{name_}.{name}")
if len(values.shape) > 1:
arr = [values]
types = [("", values.dtype, values.shape[1:])]
values = fromarrays(arr, dtype=types)
del arr
yield axis_name, Series(values, index=master, dtype="O")
# structure composition
else:
for name in channel.dtype.names:
values = channel[name]
if values.dtype.names:
yield from components(
values, name, unique_names,
prefix=f"{prefix}.{channel_name}" if prefix else f"{channel_name}",
master=master
)
else:
name_ = unique_names.get_unique_name(
f"{prefix}.{channel_name}.{name}" if prefix else f"{channel_name}.{name}"
)
if len(values.shape) > 1:
arr = [values]
types = [("", values.dtype, values.shape[1:])]
values = fromarrays(arr, dtype=types)
del arr
yield name_, Series(values, index=master) | python | def components(channel, channel_name, unique_names, prefix="", master=None):
""" yield pandas Series and unique name based on the ndarray object
Parameters
----------
channel : numpy.ndarray
channel to be used foir Series
channel_name : str
channel name
unique_names : UniqueDB
unique names object
prefix : str
prefix used in case of nested recarrays
Returns
-------
name, series : (str, pandas.Series)
tuple of unqiue name and Series object
"""
names = channel.dtype.names
# channel arrays
if names[0] == channel_name:
name = names[0]
if prefix:
name_ = unique_names.get_unique_name(f"{prefix}.{name}")
else:
name_ = unique_names.get_unique_name(name)
values = channel[name]
if len(values.shape) > 1:
arr = [values]
types = [("", values.dtype, values.shape[1:])]
values = fromarrays(arr, dtype=types)
del arr
yield name_, Series(values, index=master, dtype="O")
for name in names[1:]:
values = channel[name]
axis_name = unique_names.get_unique_name(f"{name_}.{name}")
if len(values.shape) > 1:
arr = [values]
types = [("", values.dtype, values.shape[1:])]
values = fromarrays(arr, dtype=types)
del arr
yield axis_name, Series(values, index=master, dtype="O")
# structure composition
else:
for name in channel.dtype.names:
values = channel[name]
if values.dtype.names:
yield from components(
values, name, unique_names,
prefix=f"{prefix}.{channel_name}" if prefix else f"{channel_name}",
master=master
)
else:
name_ = unique_names.get_unique_name(
f"{prefix}.{channel_name}.{name}" if prefix else f"{channel_name}.{name}"
)
if len(values.shape) > 1:
arr = [values]
types = [("", values.dtype, values.shape[1:])]
values = fromarrays(arr, dtype=types)
del arr
yield name_, Series(values, index=master) | [
"def",
"components",
"(",
"channel",
",",
"channel_name",
",",
"unique_names",
",",
"prefix",
"=",
"\"\"",
",",
"master",
"=",
"None",
")",
":",
"names",
"=",
"channel",
".",
"dtype",
".",
"names",
"# channel arrays",
"if",
"names",
"[",
"0",
"]",
"==",
... | yield pandas Series and unique name based on the ndarray object
Parameters
----------
channel : numpy.ndarray
channel to be used foir Series
channel_name : str
channel name
unique_names : UniqueDB
unique names object
prefix : str
prefix used in case of nested recarrays
Returns
-------
name, series : (str, pandas.Series)
tuple of unqiue name and Series object | [
"yield",
"pandas",
"Series",
"and",
"unique",
"name",
"based",
"on",
"the",
"ndarray",
"object"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/utils.py#L1035-L1104 | train | 221,703 |
danielhrisca/asammdf | asammdf/blocks/utils.py | master_using_raster | def master_using_raster(mdf, raster, endpoint=False):
""" get single master based on the raster
Parameters
----------
mdf : asammdf.MDF
measurement object
raster : float
new raster
endpoint=False : bool
include maximum time stamp in the new master
Returns
-------
master : np.array
new master
"""
if not raster:
master = np.array([], dtype='<f8')
else:
t_min = []
t_max = []
for i, group in enumerate(mdf.groups):
cycles_nr = group.channel_group.cycles_nr
if cycles_nr:
master_min = mdf.get_master(
i,
record_offset=0,
record_count=1,
)
if len(master_min):
t_min.append(master_min[0])
mdf._master_channel_cache.clear()
master_max = mdf.get_master(
i,
record_offset=cycles_nr-1,
record_count=1,
)
if len(master_max):
t_max.append(master_max[0])
mdf._master_channel_cache.clear()
if t_min:
t_min = np.amin(t_min)
t_max = np.amax(t_max)
num = float(np.float32((t_max - t_min) / raster))
if int(num) == num:
master = np.linspace(t_min, t_max, int(num) + 1)
else:
master = np.arange(t_min, t_max, raster)
if endpoint:
master = np.concatenate([master, [t_max]])
else:
master = np.array([], dtype='<f8')
return master | python | def master_using_raster(mdf, raster, endpoint=False):
""" get single master based on the raster
Parameters
----------
mdf : asammdf.MDF
measurement object
raster : float
new raster
endpoint=False : bool
include maximum time stamp in the new master
Returns
-------
master : np.array
new master
"""
if not raster:
master = np.array([], dtype='<f8')
else:
t_min = []
t_max = []
for i, group in enumerate(mdf.groups):
cycles_nr = group.channel_group.cycles_nr
if cycles_nr:
master_min = mdf.get_master(
i,
record_offset=0,
record_count=1,
)
if len(master_min):
t_min.append(master_min[0])
mdf._master_channel_cache.clear()
master_max = mdf.get_master(
i,
record_offset=cycles_nr-1,
record_count=1,
)
if len(master_max):
t_max.append(master_max[0])
mdf._master_channel_cache.clear()
if t_min:
t_min = np.amin(t_min)
t_max = np.amax(t_max)
num = float(np.float32((t_max - t_min) / raster))
if int(num) == num:
master = np.linspace(t_min, t_max, int(num) + 1)
else:
master = np.arange(t_min, t_max, raster)
if endpoint:
master = np.concatenate([master, [t_max]])
else:
master = np.array([], dtype='<f8')
return master | [
"def",
"master_using_raster",
"(",
"mdf",
",",
"raster",
",",
"endpoint",
"=",
"False",
")",
":",
"if",
"not",
"raster",
":",
"master",
"=",
"np",
".",
"array",
"(",
"[",
"]",
",",
"dtype",
"=",
"'<f8'",
")",
"else",
":",
"t_min",
"=",
"[",
"]",
... | get single master based on the raster
Parameters
----------
mdf : asammdf.MDF
measurement object
raster : float
new raster
endpoint=False : bool
include maximum time stamp in the new master
Returns
-------
master : np.array
new master | [
"get",
"single",
"master",
"based",
"on",
"the",
"raster"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/utils.py#L1178-L1237 | train | 221,704 |
danielhrisca/asammdf | asammdf/blocks/utils.py | ChannelsDB.add | def add(self, channel_name, entry):
""" add name to channels database and check if it contains a source
path
Parameters
----------
channel_name : str
name that needs to be added to the database
entry : tuple
(group index, channel index) pair
"""
if channel_name:
if channel_name not in self:
self[channel_name] = [entry]
else:
self[channel_name].append(entry)
if "\\" in channel_name:
channel_name = channel_name.split("\\")[0]
if channel_name not in self:
self[channel_name] = [entry]
else:
self[channel_name].append(entry) | python | def add(self, channel_name, entry):
""" add name to channels database and check if it contains a source
path
Parameters
----------
channel_name : str
name that needs to be added to the database
entry : tuple
(group index, channel index) pair
"""
if channel_name:
if channel_name not in self:
self[channel_name] = [entry]
else:
self[channel_name].append(entry)
if "\\" in channel_name:
channel_name = channel_name.split("\\")[0]
if channel_name not in self:
self[channel_name] = [entry]
else:
self[channel_name].append(entry) | [
"def",
"add",
"(",
"self",
",",
"channel_name",
",",
"entry",
")",
":",
"if",
"channel_name",
":",
"if",
"channel_name",
"not",
"in",
"self",
":",
"self",
"[",
"channel_name",
"]",
"=",
"[",
"entry",
"]",
"else",
":",
"self",
"[",
"channel_name",
"]",
... | add name to channels database and check if it contains a source
path
Parameters
----------
channel_name : str
name that needs to be added to the database
entry : tuple
(group index, channel index) pair | [
"add",
"name",
"to",
"channels",
"database",
"and",
"check",
"if",
"it",
"contains",
"a",
"source",
"path"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/utils.py#L768-L792 | train | 221,705 |
danielhrisca/asammdf | asammdf/blocks/utils.py | UniqueDB.get_unique_name | def get_unique_name(self, name):
""" returns an available unique name
Parameters
----------
name : str
name to be made unique
Returns
-------
unique_name : str
new unique name
"""
if name not in self._db:
self._db[name] = 0
return name
else:
index = self._db[name]
self._db[name] = index + 1
return f"{name}_{index}" | python | def get_unique_name(self, name):
""" returns an available unique name
Parameters
----------
name : str
name to be made unique
Returns
-------
unique_name : str
new unique name
"""
if name not in self._db:
self._db[name] = 0
return name
else:
index = self._db[name]
self._db[name] = index + 1
return f"{name}_{index}" | [
"def",
"get_unique_name",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
"not",
"in",
"self",
".",
"_db",
":",
"self",
".",
"_db",
"[",
"name",
"]",
"=",
"0",
"return",
"name",
"else",
":",
"index",
"=",
"self",
".",
"_db",
"[",
"name",
"]",
... | returns an available unique name
Parameters
----------
name : str
name to be made unique
Returns
-------
unique_name : str
new unique name | [
"returns",
"an",
"available",
"unique",
"name"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/utils.py#L854-L875 | train | 221,706 |
danielhrisca/asammdf | asammdf/mdf.py | MDF.iter_get | def iter_get(
self,
name=None,
group=None,
index=None,
raster=None,
samples_only=False,
raw=False,
):
""" iterator over a channel
This is usefull in case of large files with a small number of channels.
If the *raster* keyword argument is not *None* the output is
interpolated accordingly
Parameters
----------
name : string
name of channel
group : int
0-based group index
index : int
0-based channel index
raster : float
time raster in seconds
samples_only : bool
if *True* return only the channel samples as numpy array; if
*False* return a *Signal* object
raw : bool
return channel samples without appling the conversion rule; default
`False`
"""
gp_nr, ch_nr = self._validate_channel_selection(name, group, index)
grp = self.groups[gp_nr]
data = self._load_data(grp)
for fragment in data:
yield self.get(
group=gp_nr,
index=ch_nr,
raster=raster,
samples_only=samples_only,
data=fragment,
raw=raw,
) | python | def iter_get(
self,
name=None,
group=None,
index=None,
raster=None,
samples_only=False,
raw=False,
):
""" iterator over a channel
This is usefull in case of large files with a small number of channels.
If the *raster* keyword argument is not *None* the output is
interpolated accordingly
Parameters
----------
name : string
name of channel
group : int
0-based group index
index : int
0-based channel index
raster : float
time raster in seconds
samples_only : bool
if *True* return only the channel samples as numpy array; if
*False* return a *Signal* object
raw : bool
return channel samples without appling the conversion rule; default
`False`
"""
gp_nr, ch_nr = self._validate_channel_selection(name, group, index)
grp = self.groups[gp_nr]
data = self._load_data(grp)
for fragment in data:
yield self.get(
group=gp_nr,
index=ch_nr,
raster=raster,
samples_only=samples_only,
data=fragment,
raw=raw,
) | [
"def",
"iter_get",
"(",
"self",
",",
"name",
"=",
"None",
",",
"group",
"=",
"None",
",",
"index",
"=",
"None",
",",
"raster",
"=",
"None",
",",
"samples_only",
"=",
"False",
",",
"raw",
"=",
"False",
",",
")",
":",
"gp_nr",
",",
"ch_nr",
"=",
"s... | iterator over a channel
This is usefull in case of large files with a small number of channels.
If the *raster* keyword argument is not *None* the output is
interpolated accordingly
Parameters
----------
name : string
name of channel
group : int
0-based group index
index : int
0-based channel index
raster : float
time raster in seconds
samples_only : bool
if *True* return only the channel samples as numpy array; if
*False* return a *Signal* object
raw : bool
return channel samples without appling the conversion rule; default
`False` | [
"iterator",
"over",
"a",
"channel"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/mdf.py#L1770-L1818 | train | 221,707 |
danielhrisca/asammdf | asammdf/mdf.py | MDF.whereis | def whereis(self, channel):
""" get ocurrences of channel name in the file
Parameters
----------
channel : str
channel name string
Returns
-------
ocurrences : tuple
Examples
--------
>>> mdf = MDF(file_name)
>>> mdf.whereis('VehicleSpeed') # "VehicleSpeed" exists in the file
((1, 2), (2, 4))
>>> mdf.whereis('VehicleSPD') # "VehicleSPD" doesn't exist in the file
()
"""
if channel in self:
return tuple(self.channels_db[channel])
else:
return tuple() | python | def whereis(self, channel):
""" get ocurrences of channel name in the file
Parameters
----------
channel : str
channel name string
Returns
-------
ocurrences : tuple
Examples
--------
>>> mdf = MDF(file_name)
>>> mdf.whereis('VehicleSpeed') # "VehicleSpeed" exists in the file
((1, 2), (2, 4))
>>> mdf.whereis('VehicleSPD') # "VehicleSPD" doesn't exist in the file
()
"""
if channel in self:
return tuple(self.channels_db[channel])
else:
return tuple() | [
"def",
"whereis",
"(",
"self",
",",
"channel",
")",
":",
"if",
"channel",
"in",
"self",
":",
"return",
"tuple",
"(",
"self",
".",
"channels_db",
"[",
"channel",
"]",
")",
"else",
":",
"return",
"tuple",
"(",
")"
] | get ocurrences of channel name in the file
Parameters
----------
channel : str
channel name string
Returns
-------
ocurrences : tuple
Examples
--------
>>> mdf = MDF(file_name)
>>> mdf.whereis('VehicleSpeed') # "VehicleSpeed" exists in the file
((1, 2), (2, 4))
>>> mdf.whereis('VehicleSPD') # "VehicleSPD" doesn't exist in the file
() | [
"get",
"ocurrences",
"of",
"channel",
"name",
"in",
"the",
"file"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/mdf.py#L2966-L2991 | train | 221,708 |
danielhrisca/asammdf | asammdf/blocks/mdf_v4.py | MDF4.get_invalidation_bits | def get_invalidation_bits(self, group_index, channel, fragment):
""" get invalidation indexes for the channel
Parameters
----------
group_index : int
group index
channel : Channel
channel object
fragment : (bytes, int)
(fragment bytes, fragment offset)
Returns
-------
invalidation_bits : iterable
iterable of valid channel indexes; if all are valid `None` is
returned
"""
group = self.groups[group_index]
dtypes = group.types
data_bytes, offset, _count = fragment
try:
invalidation = self._invalidation_cache[(group_index, offset, _count)]
except KeyError:
record = group.record
if record is None:
dtypes = group.types
if dtypes.itemsize:
record = fromstring(data_bytes, dtype=dtypes)
else:
record = None
invalidation = record["invalidation_bytes"].copy()
self._invalidation_cache[(group_index, offset, _count)] = invalidation
ch_invalidation_pos = channel.pos_invalidation_bit
pos_byte, pos_offset = divmod(ch_invalidation_pos, 8)
mask = 1 << pos_offset
invalidation_bits = invalidation[:, pos_byte] & mask
invalidation_bits = invalidation_bits.astype(bool)
return invalidation_bits | python | def get_invalidation_bits(self, group_index, channel, fragment):
""" get invalidation indexes for the channel
Parameters
----------
group_index : int
group index
channel : Channel
channel object
fragment : (bytes, int)
(fragment bytes, fragment offset)
Returns
-------
invalidation_bits : iterable
iterable of valid channel indexes; if all are valid `None` is
returned
"""
group = self.groups[group_index]
dtypes = group.types
data_bytes, offset, _count = fragment
try:
invalidation = self._invalidation_cache[(group_index, offset, _count)]
except KeyError:
record = group.record
if record is None:
dtypes = group.types
if dtypes.itemsize:
record = fromstring(data_bytes, dtype=dtypes)
else:
record = None
invalidation = record["invalidation_bytes"].copy()
self._invalidation_cache[(group_index, offset, _count)] = invalidation
ch_invalidation_pos = channel.pos_invalidation_bit
pos_byte, pos_offset = divmod(ch_invalidation_pos, 8)
mask = 1 << pos_offset
invalidation_bits = invalidation[:, pos_byte] & mask
invalidation_bits = invalidation_bits.astype(bool)
return invalidation_bits | [
"def",
"get_invalidation_bits",
"(",
"self",
",",
"group_index",
",",
"channel",
",",
"fragment",
")",
":",
"group",
"=",
"self",
".",
"groups",
"[",
"group_index",
"]",
"dtypes",
"=",
"group",
".",
"types",
"data_bytes",
",",
"offset",
",",
"_count",
"=",... | get invalidation indexes for the channel
Parameters
----------
group_index : int
group index
channel : Channel
channel object
fragment : (bytes, int)
(fragment bytes, fragment offset)
Returns
-------
invalidation_bits : iterable
iterable of valid channel indexes; if all are valid `None` is
returned | [
"get",
"invalidation",
"indexes",
"for",
"the",
"channel"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/mdf_v4.py#L2182-L2227 | train | 221,709 |
danielhrisca/asammdf | asammdf/blocks/mdf_v4.py | MDF4.configure | def configure(
self,
*,
read_fragment_size=None,
write_fragment_size=None,
use_display_names=None,
single_bit_uint_as_bool=None,
integer_interpolation=None,
):
""" configure MDF parameters
Parameters
----------
read_fragment_size : int
size hint of split data blocks, default 8MB; if the initial size is
smaller, then no data list is used. The actual split size depends on
the data groups' records size
write_fragment_size : int
size hint of split data blocks, default 4MB; if the initial size is
smaller, then no data list is used. The actual split size depends on
the data groups' records size. Maximum size is 4MB to ensure
compatibility with CANape
use_display_names : bool
search for display name in the Channel XML comment
single_bit_uint_as_bool : bool
return single bit channels are np.bool arrays
integer_interpolation : int
interpolation mode for integer channels:
* 0 - repeat previous sample
* 1 - use linear interpolation
"""
if read_fragment_size is not None:
self._read_fragment_size = int(read_fragment_size)
if write_fragment_size:
self._write_fragment_size = min(int(write_fragment_size), 4 * 2 ** 20)
if use_display_names is not None:
self._use_display_names = bool(use_display_names)
if single_bit_uint_as_bool is not None:
self._single_bit_uint_as_bool = bool(single_bit_uint_as_bool)
if integer_interpolation in (0, 1):
self._integer_interpolation = int(integer_interpolation) | python | def configure(
self,
*,
read_fragment_size=None,
write_fragment_size=None,
use_display_names=None,
single_bit_uint_as_bool=None,
integer_interpolation=None,
):
""" configure MDF parameters
Parameters
----------
read_fragment_size : int
size hint of split data blocks, default 8MB; if the initial size is
smaller, then no data list is used. The actual split size depends on
the data groups' records size
write_fragment_size : int
size hint of split data blocks, default 4MB; if the initial size is
smaller, then no data list is used. The actual split size depends on
the data groups' records size. Maximum size is 4MB to ensure
compatibility with CANape
use_display_names : bool
search for display name in the Channel XML comment
single_bit_uint_as_bool : bool
return single bit channels are np.bool arrays
integer_interpolation : int
interpolation mode for integer channels:
* 0 - repeat previous sample
* 1 - use linear interpolation
"""
if read_fragment_size is not None:
self._read_fragment_size = int(read_fragment_size)
if write_fragment_size:
self._write_fragment_size = min(int(write_fragment_size), 4 * 2 ** 20)
if use_display_names is not None:
self._use_display_names = bool(use_display_names)
if single_bit_uint_as_bool is not None:
self._single_bit_uint_as_bool = bool(single_bit_uint_as_bool)
if integer_interpolation in (0, 1):
self._integer_interpolation = int(integer_interpolation) | [
"def",
"configure",
"(",
"self",
",",
"*",
",",
"read_fragment_size",
"=",
"None",
",",
"write_fragment_size",
"=",
"None",
",",
"use_display_names",
"=",
"None",
",",
"single_bit_uint_as_bool",
"=",
"None",
",",
"integer_interpolation",
"=",
"None",
",",
")",
... | configure MDF parameters
Parameters
----------
read_fragment_size : int
size hint of split data blocks, default 8MB; if the initial size is
smaller, then no data list is used. The actual split size depends on
the data groups' records size
write_fragment_size : int
size hint of split data blocks, default 4MB; if the initial size is
smaller, then no data list is used. The actual split size depends on
the data groups' records size. Maximum size is 4MB to ensure
compatibility with CANape
use_display_names : bool
search for display name in the Channel XML comment
single_bit_uint_as_bool : bool
return single bit channels are np.bool arrays
integer_interpolation : int
interpolation mode for integer channels:
* 0 - repeat previous sample
* 1 - use linear interpolation | [
"configure",
"MDF",
"parameters"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/mdf_v4.py#L2229-L2276 | train | 221,710 |
def extract_attachment(self, address=None, index=None):
    """ extract attachment data by original address or by index. If it is an
    embedded attachment, then this method creates the new file according to
    the attachment file name information

    Parameters
    ----------
    address : int
        attachment original block address; default *None*
    index : int
        attachment index; default *None*

    Returns
    -------
    data : (bytes, pathlib.Path)
        tuple of attachment data and path

    """
    if address is None and index is None:
        return b"", Path("")

    if address is not None:
        index = self._attachments_map[address]
    attachment = self.attachments[index]

    current_path = Path.cwd()
    file_path = Path(attachment.file_name or "embedded")
    try:
        os.chdir(self.name.resolve().parent)

        flags = attachment.flags

        # for embedded attachments extract data and create new files
        if flags & v4c.FLAG_AT_EMBEDDED:
            data = attachment.extract()
            return data, file_path
        else:
            # for external attachments read the file and return the content
            if flags & v4c.FLAG_AT_MD5_VALID:
                original_path = file_path
                # context manager: the previous code leaked this file handle
                with open(original_path, "rb") as f:
                    data = f.read()
                file_path = Path(f"FROM_{original_path}")
                md5_worker = md5()
                md5_worker.update(data)
                md5_sum = md5_worker.digest()
                if attachment["md5_sum"] == md5_sum:
                    if attachment.mime.startswith("text"):
                        # re-read the *original* file in text mode so text
                        # attachments are returned as str; the previous code
                        # re-opened the renamed "FROM_*" path, which does not
                        # exist on disk
                        with open(original_path, "r") as f:
                            data = f.read()
                    return data, file_path
                else:
                    # md5 mismatch: warn and fall through (returns None, as
                    # the original implementation did)
                    message = (
                        f'ATBLOCK md5sum="{attachment["md5_sum"]}" '
                        f"and external attachment data ({file_path}) "
                        f'md5sum="{md5_sum}"'
                    )
                    logger.warning(message)
            else:
                if attachment.mime.startswith("text"):
                    mode = "r"
                else:
                    mode = "rb"
                with open(file_path, mode) as f:
                    file_path = Path(f"FROM_{file_path}")
                    data = f.read()
                return data, file_path
    except Exception as err:
        message = "Exception during attachment extraction: " + repr(err)
        logger.warning(message)
        return b"", file_path
    finally:
        # restore the working directory on every exit path (the previous
        # code only restored it when an exception was raised)
        os.chdir(current_path)
""" extract attachment data by original address or by index. If it is an embedded attachment,
then this method creates the new file according to the attachment file
name information
Parameters
----------
address : int
attachment index; default *None*
index : int
attachment index; default *None*
Returns
-------
data : (bytes, pathlib.Path)
tuple of attachment data and path
"""
if address is None and index is None:
return b"", Path("")
if address is not None:
index = self._attachments_map[address]
attachment = self.attachments[index]
current_path = Path.cwd()
file_path = Path(attachment.file_name or "embedded")
try:
os.chdir(self.name.resolve().parent)
flags = attachment.flags
# for embedded attachments extrat data and create new files
if flags & v4c.FLAG_AT_EMBEDDED:
data = attachment.extract()
return data, file_path
else:
# for external attachments read the file and return the content
if flags & v4c.FLAG_AT_MD5_VALID:
data = open(file_path, "rb").read()
file_path = Path(f"FROM_{file_path}")
md5_worker = md5()
md5_worker.update(data)
md5_sum = md5_worker.digest()
if attachment["md5_sum"] == md5_sum:
if attachment.mime.startswith("text"):
with open(file_path, "r") as f:
data = f.read()
return data, file_path
else:
message = (
f'ATBLOCK md5sum="{attachment["md5_sum"]}" '
f"and external attachment data ({file_path}) "
f'md5sum="{md5_sum}"'
)
logger.warning(message)
else:
if attachment.mime.startswith("text"):
mode = "r"
else:
mode = "rb"
with open(file_path, mode) as f:
file_path = Path(f"FROM_{file_path}")
data = f.read()
return data, file_path
except Exception as err:
os.chdir(current_path)
message = "Exception during attachment extraction: " + repr(err)
logger.warning(message)
return b"", file_path | [
"def",
"extract_attachment",
"(",
"self",
",",
"address",
"=",
"None",
",",
"index",
"=",
"None",
")",
":",
"if",
"address",
"is",
"None",
"and",
"index",
"is",
"None",
":",
"return",
"b\"\"",
",",
"Path",
"(",
"\"\"",
")",
"if",
"address",
"is",
"no... | extract attachment data by original address or by index. If it is an embedded attachment,
then this method creates the new file according to the attachment file
name information
Parameters
----------
address : int
attachment index; default *None*
index : int
attachment index; default *None*
Returns
-------
data : (bytes, pathlib.Path)
tuple of attachment data and path | [
"extract",
"attachment",
"data",
"by",
"original",
"address",
"or",
"by",
"index",
".",
"If",
"it",
"is",
"an",
"embedded",
"attachment",
"then",
"this",
"method",
"creates",
"the",
"new",
"file",
"according",
"to",
"the",
"attachment",
"file",
"name",
"info... | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/mdf_v4.py#L3567-L3637 | train | 221,711 |
def get_channel_name(self, group, index):
    """Gets channel name.

    Parameters
    ----------
    group : int
        0-based group index
    index : int
        0-based channel index

    Returns
    -------
    name : str
        found channel name

    """
    group_index, channel_index = self._validate_channel_selection(
        None, group, index
    )
    return self.groups[group_index].channels[channel_index].name
"""Gets channel name.
Parameters
----------
group : int
0-based group index
index : int
0-based channel index
Returns
-------
name : str
found channel name
"""
gp_nr, ch_nr = self._validate_channel_selection(None, group, index)
return self.groups[gp_nr].channels[ch_nr].name | [
"def",
"get_channel_name",
"(",
"self",
",",
"group",
",",
"index",
")",
":",
"gp_nr",
",",
"ch_nr",
"=",
"self",
".",
"_validate_channel_selection",
"(",
"None",
",",
"group",
",",
"index",
")",
"return",
"self",
".",
"groups",
"[",
"gp_nr",
"]",
".",
... | Gets channel name.
Parameters
----------
group : int
0-based group index
index : int
0-based channel index
Returns
-------
name : str
found channel name | [
"Gets",
"channel",
"name",
"."
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/mdf_v4.py#L5803-L5821 | train | 221,712 |
def get_channel_unit(self, name=None, group=None, index=None):
    """Gets channel unit.

    Channel can be specified in two ways:

    * using the first positional argument *name*

      * if there are multiple occurrences for this channel then the
        *group* and *index* arguments can be used to select a specific
        group.
      * if there are multiple occurrences for this channel and either the
        *group* or *index* arguments is None then a warning is issued

    * using the group number (keyword argument *group*) and the channel
      number (keyword argument *index*). Use *info* method for group and
      channel numbers

    Parameters
    ----------
    name : string
        name of channel
    group : int
        0-based group index
    index : int
        0-based channel index

    Returns
    -------
    unit : str
        found channel unit

    """
    gp_index, ch_index = self._validate_channel_selection(name, group, index)
    channel = self.groups[gp_index].channels[ch_index]

    # prefer the conversion block unit when one is present and non-empty,
    # otherwise fall back to the channel's own unit
    conversion = channel.conversion
    if conversion and conversion.unit:
        return conversion.unit
    return channel.unit or ""
"""Gets channel unit.
Channel can be specified in two ways:
* using the first positional argument *name*
* if there are multiple occurrences for this channel then the
*group* and *index* arguments can be used to select a specific
group.
* if there are multiple occurrences for this channel and either the
*group* or *index* arguments is None then a warning is issued
* using the group number (keyword argument *group*) and the channel
number (keyword argument *index*). Use *info* method for group and
channel numbers
If the *raster* keyword argument is not *None* the output is
interpolated accordingly.
Parameters
----------
name : string
name of channel
group : int
0-based group index
index : int
0-based channel index
Returns
-------
unit : str
found channel unit
"""
gp_nr, ch_nr = self._validate_channel_selection(name, group, index)
grp = self.groups[gp_nr]
channel = grp.channels[ch_nr]
conversion = channel.conversion
unit = conversion and conversion.unit or channel.unit or ""
return unit | [
"def",
"get_channel_unit",
"(",
"self",
",",
"name",
"=",
"None",
",",
"group",
"=",
"None",
",",
"index",
"=",
"None",
")",
":",
"gp_nr",
",",
"ch_nr",
"=",
"self",
".",
"_validate_channel_selection",
"(",
"name",
",",
"group",
",",
"index",
")",
"grp... | Gets channel unit.
Channel can be specified in two ways:
* using the first positional argument *name*
* if there are multiple occurrences for this channel then the
*group* and *index* arguments can be used to select a specific
group.
* if there are multiple occurrences for this channel and either the
*group* or *index* arguments is None then a warning is issued
* using the group number (keyword argument *group*) and the channel
number (keyword argument *index*). Use *info* method for group and
channel numbers
If the *raster* keyword argument is not *None* the output is
interpolated accordingly.
Parameters
----------
name : string
name of channel
group : int
0-based group index
index : int
0-based channel index
Returns
-------
unit : str
found channel unit | [
"Gets",
"channel",
"unit",
"."
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/mdf_v4.py#L5835-L5881 | train | 221,713 |
def get_channel_comment(self, name=None, group=None, index=None):
    """Gets channel comment.

    Channel can be specified in two ways:

    * using the first positional argument *name*

      * if there are multiple occurrences for this channel then the
        *group* and *index* arguments can be used to select a specific
        group.
      * if there are multiple occurrences for this channel and either the
        *group* or *index* arguments is None then a warning is issued

    * using the group number (keyword argument *group*) and the channel
      number (keyword argument *index*). Use *info* method for group and
      channel numbers

    Parameters
    ----------
    name : string
        name of channel
    group : int
        0-based group index
    index : int
        0-based channel index

    Returns
    -------
    comment : str
        found channel comment

    """
    gp_index, ch_index = self._validate_channel_selection(name, group, index)
    comment = self.groups[gp_index].channels[ch_index].comment
    return extract_cncomment_xml(comment)
"""Gets channel comment.
Channel can be specified in two ways:
* using the first positional argument *name*
* if there are multiple occurrences for this channel then the
*group* and *index* arguments can be used to select a specific
group.
* if there are multiple occurrences for this channel and either the
*group* or *index* arguments is None then a warning is issued
* using the group number (keyword argument *group*) and the channel
number (keyword argument *index*). Use *info* method for group and
channel numbers
If the *raster* keyword argument is not *None* the output is
interpolated accordingly.
Parameters
----------
name : string
name of channel
group : int
0-based group index
index : int
0-based channel index
Returns
-------
comment : str
found channel comment
"""
gp_nr, ch_nr = self._validate_channel_selection(name, group, index)
grp = self.groups[gp_nr]
channel = grp.channels[ch_nr]
return extract_cncomment_xml(channel.comment) | [
"def",
"get_channel_comment",
"(",
"self",
",",
"name",
"=",
"None",
",",
"group",
"=",
"None",
",",
"index",
"=",
"None",
")",
":",
"gp_nr",
",",
"ch_nr",
"=",
"self",
".",
"_validate_channel_selection",
"(",
"name",
",",
"group",
",",
"index",
")",
"... | Gets channel comment.
Channel can be specified in two ways:
* using the first positional argument *name*
* if there are multiple occurrences for this channel then the
*group* and *index* arguments can be used to select a specific
group.
* if there are multiple occurrences for this channel and either the
*group* or *index* arguments is None then a warning is issued
* using the group number (keyword argument *group*) and the channel
number (keyword argument *index*). Use *info* method for group and
channel numbers
If the *raster* keyword argument is not *None* the output is
interpolated accordingly.
Parameters
----------
name : string
name of channel
group : int
0-based group index
index : int
0-based channel index
Returns
-------
comment : str
found channel comment | [
"Gets",
"channel",
"comment",
"."
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/mdf_v4.py#L5883-L5925 | train | 221,714 |
danielhrisca/asammdf | benchmarks/bench.py | _cmd_line_parser | def _cmd_line_parser():
'''
return a command line parser. It is used when generating the documentation
'''
parser = argparse.ArgumentParser()
parser.add_argument('--path',
help=('path to test files, '
'if not provided the script folder is used'))
parser.add_argument('--text_output',
action='store_true',
help='option to save the results to text file')
parser.add_argument('--format',
default='rst',
nargs='?',
choices=['rst', 'md'],
help='text formatting')
return parser | python | def _cmd_line_parser():
'''
return a command line parser. It is used when generating the documentation
'''
parser = argparse.ArgumentParser()
parser.add_argument('--path',
help=('path to test files, '
'if not provided the script folder is used'))
parser.add_argument('--text_output',
action='store_true',
help='option to save the results to text file')
parser.add_argument('--format',
default='rst',
nargs='?',
choices=['rst', 'md'],
help='text formatting')
return parser | [
"def",
"_cmd_line_parser",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
")",
"parser",
".",
"add_argument",
"(",
"'--path'",
",",
"help",
"=",
"(",
"'path to test files, '",
"'if not provided the script folder is used'",
")",
")",
"parser",
... | return a command line parser. It is used when generating the documentation | [
"return",
"a",
"command",
"line",
"parser",
".",
"It",
"is",
"used",
"when",
"generating",
"the",
"documentation"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/benchmarks/bench.py#L1100-L1118 | train | 221,715 |
def append(self, item):
    """Append *item* to the list and echo it to stdout.

    Printing happens before the append so results are visible as soon as
    they are produced.
    """
    print(item)
    super(MyList, self).append(item)
""" append item and print it to stdout """
print(item)
super(MyList, self).append(item) | [
"def",
"append",
"(",
"self",
",",
"item",
")",
":",
"print",
"(",
"item",
")",
"super",
"(",
"MyList",
",",
"self",
")",
".",
"append",
"(",
"item",
")"
] | append item and print it to stdout | [
"append",
"item",
"and",
"print",
"it",
"to",
"stdout"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/benchmarks/bench.py#L42-L45 | train | 221,716 |
def extend(self, items):
    """Extend the list with *items* and echo them to stdout.

    All items are printed in one call, joined with the newline separator,
    before being appended to the list.
    """
    print('\n'.join(items))
    super(MyList, self).extend(items)
""" extend items and print them to stdout
using the new line separator
"""
print('\n'.join(items))
super(MyList, self).extend(items) | [
"def",
"extend",
"(",
"self",
",",
"items",
")",
":",
"print",
"(",
"'\\n'",
".",
"join",
"(",
"items",
")",
")",
"super",
"(",
"MyList",
",",
"self",
")",
".",
"extend",
"(",
"items",
")"
] | extend items and print them to stdout
using the new line separator | [
"extend",
"items",
"and",
"print",
"them",
"to",
"stdout",
"using",
"the",
"new",
"line",
"separator"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/benchmarks/bench.py#L47-L52 | train | 221,717 |
def extend(self, other):
    """ extend signal with samples from another signal

    Parameters
    ----------
    other : Signal

    Returns
    -------
    signal : Signal
        new extended *Signal*; if *other* is empty, *self* is returned
        unchanged

    """
    # time stamp of the last sample in this signal (0 when empty)
    if len(self.timestamps):
        last_stamp = self.timestamps[-1]
    else:
        last_stamp = 0
    if len(other):
        other_first_sample = other.timestamps[0]
        # shift the other signal's time base past ours when the two time
        # ranges would otherwise overlap
        if last_stamp >= other_first_sample:
            timestamps = other.timestamps + last_stamp
        else:
            timestamps = other.timestamps

        # merge invalidation bits: a side without them contributes
        # all-valid (zeros); None only when neither side has them
        if self.invalidation_bits is None and other.invalidation_bits is None:
            invalidation_bits = None
        elif self.invalidation_bits is None and other.invalidation_bits is not None:
            invalidation_bits = np.concatenate(
                (np.zeros(len(self), dtype=bool), other.invalidation_bits)
            )
        elif self.invalidation_bits is not None and other.invalidation_bits is None:
            invalidation_bits = np.concatenate(
                (self.invalidation_bits, np.zeros(len(other), dtype=bool))
            )
        else:
            invalidation_bits = np.append(
                self.invalidation_bits, other.invalidation_bits
            )

        # build a new Signal; metadata (unit, name, conversion, ...) is
        # taken from *self*
        result = Signal(
            np.append(self.samples, other.samples, axis=0),
            np.append(self.timestamps, timestamps),
            self.unit,
            self.name,
            self.conversion,
            self.comment,
            self.raw,
            self.master_metadata,
            self.display_name,
            self.attachment,
            self.source,
            self.bit_count,
            self.stream_sync,
            invalidation_bits=invalidation_bits,
            encoding=self.encoding,
        )
    else:
        result = self

    return result
""" extend signal with samples from another signal
Parameters
----------
other : Signal
Returns
-------
signal : Signal
new extended *Signal*
"""
if len(self.timestamps):
last_stamp = self.timestamps[-1]
else:
last_stamp = 0
if len(other):
other_first_sample = other.timestamps[0]
if last_stamp >= other_first_sample:
timestamps = other.timestamps + last_stamp
else:
timestamps = other.timestamps
if self.invalidation_bits is None and other.invalidation_bits is None:
invalidation_bits = None
elif self.invalidation_bits is None and other.invalidation_bits is not None:
invalidation_bits = np.concatenate(
(np.zeros(len(self), dtype=bool), other.invalidation_bits)
)
elif self.invalidation_bits is not None and other.invalidation_bits is None:
invalidation_bits = np.concatenate(
(self.invalidation_bits, np.zeros(len(other), dtype=bool))
)
else:
invalidation_bits = np.append(
self.invalidation_bits, other.invalidation_bits
)
result = Signal(
np.append(self.samples, other.samples, axis=0),
np.append(self.timestamps, timestamps),
self.unit,
self.name,
self.conversion,
self.comment,
self.raw,
self.master_metadata,
self.display_name,
self.attachment,
self.source,
self.bit_count,
self.stream_sync,
invalidation_bits=invalidation_bits,
encoding=self.encoding,
)
else:
result = self
return result | [
"def",
"extend",
"(",
"self",
",",
"other",
")",
":",
"if",
"len",
"(",
"self",
".",
"timestamps",
")",
":",
"last_stamp",
"=",
"self",
".",
"timestamps",
"[",
"-",
"1",
"]",
"else",
":",
"last_stamp",
"=",
"0",
"if",
"len",
"(",
"other",
")",
":... | extend signal with samples from another signal
Parameters
----------
other : Signal
Returns
-------
signal : Signal
new extended *Signal* | [
"extend",
"signal",
"with",
"samples",
"from",
"another",
"signal"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/signal.py#L670-L729 | train | 221,718 |
def physical(self):
    """
    get the physical samples values

    Returns
    -------
    phys : Signal
        new *Signal* with physical values

    """
    # apply the conversion only when the samples are raw and a conversion
    # block exists; otherwise hand back a copy of the samples as-is
    if self.raw and self.conversion is not None:
        samples = self.conversion.convert(self.samples)
    else:
        samples = self.samples.copy()

    return Signal(
        samples,
        self.timestamps.copy(),
        unit=self.unit,
        name=self.name,
        conversion=self.conversion,
        raw=False,
        master_metadata=self.master_metadata,
        display_name=self.display_name,
        attachment=self.attachment,
        stream_sync=self.stream_sync,
        invalidation_bits=self.invalidation_bits,
        source=self.source,
        encoding=self.encoding,
    )
"""
get the physical samples values
Returns
-------
phys : Signal
new *Signal* with physical values
"""
if not self.raw or self.conversion is None:
samples = self.samples.copy()
else:
samples = self.conversion.convert(self.samples)
return Signal(
samples,
self.timestamps.copy(),
unit=self.unit,
name=self.name,
conversion=self.conversion,
raw=False,
master_metadata=self.master_metadata,
display_name=self.display_name,
attachment=self.attachment,
stream_sync=self.stream_sync,
invalidation_bits=self.invalidation_bits,
source=self.source,
encoding=self.encoding,
) | [
"def",
"physical",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"raw",
"or",
"self",
".",
"conversion",
"is",
"None",
":",
"samples",
"=",
"self",
".",
"samples",
".",
"copy",
"(",
")",
"else",
":",
"samples",
"=",
"self",
".",
"conversion",
".... | get the physical samples values
Returns
-------
phys : Signal
new *Signal* with physical values | [
"get",
"the",
"physical",
"samples",
"values"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/signal.py#L1094-L1124 | train | 221,719 |
def extract(self):
    """extract attachment data

    Returns
    -------
    data : bytes
        the embedded attachment payload; implicitly *None* for external
        attachments or when the md5 check fails

    """
    flags = self.flags

    # external attachments are not handled here
    if not flags & v4c.FLAG_AT_EMBEDDED:
        logger.warning("external attachments not supported")
        return

    if flags & v4c.FLAG_AT_COMPRESSED_EMBEDDED:
        data = decompress(self.embedded_data)
    else:
        data = self.embedded_data

    # no checksum stored -> return the payload directly
    if not flags & v4c.FLAG_AT_MD5_VALID:
        return data

    md5_worker = md5()
    md5_worker.update(data)
    md5_sum = md5_worker.digest()
    if self.md5_sum == md5_sum:
        return data

    message = f"ATBLOCK md5sum={self.md5_sum} and embedded data md5sum={md5_sum}"
    logger.warning(message)
"""extract attachment data
Returns
-------
data : bytes
"""
if self.flags & v4c.FLAG_AT_EMBEDDED:
if self.flags & v4c.FLAG_AT_COMPRESSED_EMBEDDED:
data = decompress(self.embedded_data)
else:
data = self.embedded_data
if self.flags & v4c.FLAG_AT_MD5_VALID:
md5_worker = md5()
md5_worker.update(data)
md5_sum = md5_worker.digest()
if self.md5_sum == md5_sum:
return data
else:
message = f"ATBLOCK md5sum={self.md5_sum} and embedded data md5sum={md5_sum}"
logger.warning(message)
else:
return data
else:
logger.warning("external attachments not supported") | [
"def",
"extract",
"(",
"self",
")",
":",
"if",
"self",
".",
"flags",
"&",
"v4c",
".",
"FLAG_AT_EMBEDDED",
":",
"if",
"self",
".",
"flags",
"&",
"v4c",
".",
"FLAG_AT_COMPRESSED_EMBEDDED",
":",
"data",
"=",
"decompress",
"(",
"self",
".",
"embedded_data",
... | extract attachment data
Returns
-------
data : bytes | [
"extract",
"attachment",
"data"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/v4_blocks.py#L236-L260 | train | 221,720 |
def start_time(self):
    """ getter and setter the measurement start timestamp

    Returns
    -------
    timestamp : datetime.datetime
        start timestamp

    """
    # abs_time is stored in nanoseconds
    seconds = self.abs_time / 10 ** 9
    if self.time_flags & v4c.FLAG_HD_LOCAL_TIME:
        # local-time flag set: naive datetime in local time
        return datetime.fromtimestamp(seconds)
    return datetime.fromtimestamp(seconds, timezone.utc)
""" getter and setter the measurement start timestamp
Returns
-------
timestamp : datetime.datetime
start timestamp
"""
timestamp = self.abs_time / 10 ** 9
if self.time_flags & v4c.FLAG_HD_LOCAL_TIME:
timestamp = datetime.fromtimestamp(timestamp)
else:
timestamp = datetime.fromtimestamp(timestamp, timezone.utc)
return timestamp | [
"def",
"start_time",
"(",
"self",
")",
":",
"timestamp",
"=",
"self",
".",
"abs_time",
"/",
"10",
"**",
"9",
"if",
"self",
".",
"time_flags",
"&",
"v4c",
".",
"FLAG_HD_LOCAL_TIME",
":",
"timestamp",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"timestamp",
... | getter and setter the measurement start timestamp
Returns
-------
timestamp : datetime.datetime
start timestamp | [
"getter",
"and",
"setter",
"the",
"measurement",
"start",
"timestamp"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/v4_blocks.py#L4366-L4382 | train | 221,721 |
def _prepare_record(self, group):
    """ compute record dtype and parents dict for this group

    Parameters
    ----------
    group : dict
        MDF group dict

    Returns
    -------
    parents, dtypes : dict, numpy.dtype
        mapping of channels to record fields, record fields dtype

    """
    parents, dtypes = group.parents, group.types
    if parents is None:
        # NOTE: the previous implementation selected a stream handle
        # (self._file / self._tempfile) here but never used it; that dead
        # code was removed.
        grp = group
        # record size in bits
        record_size = grp.channel_group.samples_byte_nr << 3
        next_byte_aligned_position = 0
        types = []
        current_parent = ""
        parent_start_offset = 0
        parents = {}
        group_channels = UniqueDB()

        # the channels are first sorted ascending (see __lt__ method of Channel
        # class): a channel with lower start offset is smaller, when two
        # channels have the same start offset the one with higher bit size is
        # considered smaller. The reason is that when the numpy record is built
        # and there are overlapping channels, the parent fields must be bigger
        # (bit size) than the embedded channels. For each channel the parent
        # dict will have a (parent name, bit offset) pair: the channel value is
        # computed using the values from the parent field, and the bit offset,
        # which is the channel's bit offset within the parent bytes.
        # This means all parents will have themselves as parent, and bit offset
        # of 0. Gaps in the records are also considered. Non standard integer
        # sizes are adjusted to the first higher standard integer size (eg. uint
        # of 28bits will be adjusted to 32bits)

        sortedchannels = sorted(enumerate(grp.channels), key=lambda i: i[1])
        for original_index, new_ch in sortedchannels:
            # skip channels with channel dependencies from the numpy record
            if new_ch.component_addr:
                continue

            start_offset = new_ch.start_offset
            try:
                additional_byte_offset = new_ch.additional_byte_offset
                start_offset += 8 * additional_byte_offset
            except AttributeError:
                pass

            bit_offset = start_offset % 8
            data_type = new_ch.data_type
            bit_count = new_ch.bit_count
            name = new_ch.name

            # handle multiple occurrence of same channel name
            name = group_channels.get_unique_name(name)

            if start_offset >= next_byte_aligned_position:
                parent_start_offset = (start_offset // 8) * 8

                # check if there are byte gaps in the record
                gap = (parent_start_offset - next_byte_aligned_position) // 8
                if gap:
                    types.append(("", f"V{gap}"))

                # adjust size to 1, 2, 4 or 8 bytes for nonstandard integers
                size = bit_offset + bit_count
                if data_type == v23c.DATA_TYPE_STRING:
                    next_byte_aligned_position = parent_start_offset + size
                    if next_byte_aligned_position <= record_size:
                        dtype_pair = (name, get_fmt_v3(data_type, size))
                        types.append(dtype_pair)
                        parents[original_index] = name, bit_offset
                    else:
                        next_byte_aligned_position = parent_start_offset

                elif data_type == v23c.DATA_TYPE_BYTEARRAY:
                    next_byte_aligned_position = parent_start_offset + size
                    if next_byte_aligned_position <= record_size:
                        dtype_pair = (name, get_fmt_v3(data_type, size))
                        types.append(dtype_pair)
                        parents[original_index] = name, bit_offset
                    else:
                        next_byte_aligned_position = parent_start_offset

                else:
                    if size > 32:
                        next_byte_aligned_position = parent_start_offset + 64
                    elif size > 16:
                        next_byte_aligned_position = parent_start_offset + 32
                    elif size > 8:
                        next_byte_aligned_position = parent_start_offset + 16
                    else:
                        next_byte_aligned_position = parent_start_offset + 8

                    if next_byte_aligned_position <= record_size:
                        dtype_pair = (name, get_fmt_v3(data_type, size))
                        types.append(dtype_pair)
                        parents[original_index] = name, bit_offset
                    else:
                        next_byte_aligned_position = parent_start_offset

                current_parent = name
            else:
                # channel fully contained in the current parent field:
                # record it as (parent name, bit offset inside parent)
                max_overlapping = next_byte_aligned_position - start_offset
                if max_overlapping >= bit_count:
                    parents[original_index] = (
                        current_parent,
                        start_offset - parent_start_offset,
                    )
            if next_byte_aligned_position > record_size:
                break

        # trailing gap up to the declared record size
        gap = (record_size - next_byte_aligned_position) // 8
        if gap:
            dtype_pair = ("", f"V{gap}")
            types.append(dtype_pair)

        dtypes = dtype(types)

        # cache the result on the group for subsequent calls
        group.parents, group.types = parents, dtypes

    return parents, dtypes
""" compute record dtype and parents dict for this group
Parameters
----------
group : dict
MDF group dict
Returns
-------
parents, dtypes : dict, numpy.dtype
mapping of channels to records fields, records fiels dtype
"""
parents, dtypes = group.parents, group.types
if parents is None:
if group.data_location == v23c.LOCATION_ORIGINAL_FILE:
stream = self._file
else:
stream = self._tempfile
grp = group
record_size = grp.channel_group.samples_byte_nr << 3
next_byte_aligned_position = 0
types = []
current_parent = ""
parent_start_offset = 0
parents = {}
group_channels = UniqueDB()
# the channels are first sorted ascending (see __lt__ method of Channel
# class): a channel with lower start offset is smaller, when two
# channels havethe same start offset the one with higer bit size is
# considered smaller. The reason is that when the numpy record is built
# and there are overlapping channels, the parent fields mustbe bigger
# (bit size) than the embedded channels. For each channel the parent
# dict will have a (parent name, bit offset) pair: the channel value is
# computed using the values from the parent field, and the bit offset,
# which is the channel's bit offset within the parent bytes.
# This means all parents will have themselves as parent, and bit offset
# of 0. Gaps in the records are also considered. Non standard integers
# size is adjusted to the first higher standard integer size (eq. uint
# of 28bits will be adjusted to 32bits)
sortedchannels = sorted(enumerate(grp.channels), key=lambda i: i[1])
for original_index, new_ch in sortedchannels:
# skip channels with channel dependencies from the numpy record
if new_ch.component_addr:
continue
start_offset = new_ch.start_offset
try:
additional_byte_offset = new_ch.additional_byte_offset
start_offset += 8 * additional_byte_offset
except AttributeError:
pass
bit_offset = start_offset % 8
data_type = new_ch.data_type
bit_count = new_ch.bit_count
name = new_ch.name
# handle multiple occurance of same channel name
name = group_channels.get_unique_name(name)
if start_offset >= next_byte_aligned_position:
parent_start_offset = (start_offset // 8) * 8
# check if there are byte gaps in the record
gap = (parent_start_offset - next_byte_aligned_position) // 8
if gap:
types.append(("", f"V{gap}"))
# adjust size to 1, 2, 4 or 8 bytes for nonstandard integers
size = bit_offset + bit_count
if data_type == v23c.DATA_TYPE_STRING:
next_byte_aligned_position = parent_start_offset + size
if next_byte_aligned_position <= record_size:
dtype_pair = (name, get_fmt_v3(data_type, size))
types.append(dtype_pair)
parents[original_index] = name, bit_offset
else:
next_byte_aligned_position = parent_start_offset
elif data_type == v23c.DATA_TYPE_BYTEARRAY:
next_byte_aligned_position = parent_start_offset + size
if next_byte_aligned_position <= record_size:
dtype_pair = (name, get_fmt_v3(data_type, size))
types.append(dtype_pair)
parents[original_index] = name, bit_offset
else:
next_byte_aligned_position = parent_start_offset
else:
if size > 32:
next_byte_aligned_position = parent_start_offset + 64
elif size > 16:
next_byte_aligned_position = parent_start_offset + 32
elif size > 8:
next_byte_aligned_position = parent_start_offset + 16
else:
next_byte_aligned_position = parent_start_offset + 8
if next_byte_aligned_position <= record_size:
dtype_pair = (name, get_fmt_v3(data_type, size))
types.append(dtype_pair)
parents[original_index] = name, bit_offset
else:
next_byte_aligned_position = parent_start_offset
current_parent = name
else:
max_overlapping = next_byte_aligned_position - start_offset
if max_overlapping >= bit_count:
parents[original_index] = (
current_parent,
start_offset - parent_start_offset,
)
if next_byte_aligned_position > record_size:
break
gap = (record_size - next_byte_aligned_position) // 8
if gap:
dtype_pair = ("", f"V{gap}")
types.append(dtype_pair)
dtypes = dtype(types)
group.parents, group.types = parents, dtypes
return parents, dtypes | [
"def",
"_prepare_record",
"(",
"self",
",",
"group",
")",
":",
"parents",
",",
"dtypes",
"=",
"group",
".",
"parents",
",",
"group",
".",
"types",
"if",
"parents",
"is",
"None",
":",
"if",
"group",
".",
"data_location",
"==",
"v23c",
".",
"LOCATION_ORIGI... | compute record dtype and parents dict for this group
Parameters
----------
group : dict
MDF group dict
Returns
-------
parents, dtypes : dict, numpy.dtype
mapping of channels to records fields, records fiels dtype | [
"compute",
"record",
"dtype",
"and",
"parents",
"dict",
"for",
"this",
"group"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/mdf_v3.py#L346-L475 | train | 221,722 |
danielhrisca/asammdf | asammdf/blocks/mdf_v3.py | MDF3.close | def close(self):
""" if the MDF was created with memory='minimum' and new
channels have been appended, then this must be called just before the
object is not used anymore to clean-up the temporary file
"""
if self._tempfile is not None:
self._tempfile.close()
if self._file is not None and not self._from_filelike:
self._file.close() | python | def close(self):
""" if the MDF was created with memory='minimum' and new
channels have been appended, then this must be called just before the
object is not used anymore to clean-up the temporary file
"""
if self._tempfile is not None:
self._tempfile.close()
if self._file is not None and not self._from_filelike:
self._file.close() | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"_tempfile",
"is",
"not",
"None",
":",
"self",
".",
"_tempfile",
".",
"close",
"(",
")",
"if",
"self",
".",
"_file",
"is",
"not",
"None",
"and",
"not",
"self",
".",
"_from_filelike",
":",
"s... | if the MDF was created with memory='minimum' and new
channels have been appended, then this must be called just before the
object is not used anymore to clean-up the temporary file | [
"if",
"the",
"MDF",
"was",
"created",
"with",
"memory",
"=",
"minimum",
"and",
"new",
"channels",
"have",
"been",
"appended",
"then",
"this",
"must",
"be",
"called",
"just",
"before",
"the",
"object",
"is",
"not",
"used",
"anymore",
"to",
"clean",
"-",
"... | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/mdf_v3.py#L2114-L2123 | train | 221,723 |
danielhrisca/asammdf | asammdf/blocks/mdf_v3.py | MDF3.iter_get_triggers | def iter_get_triggers(self):
""" generator that yields triggers
Returns
-------
trigger_info : dict
trigger information with the following keys:
* comment : trigger comment
* time : trigger time
* pre_time : trigger pre time
* post_time : trigger post time
* index : trigger index
* group : data group index of trigger
"""
for i, gp in enumerate(self.groups):
trigger = gp.trigger
if trigger:
for j in range(trigger["trigger_events_nr"]):
trigger_info = {
"comment": trigger.comment,
"index": j,
"group": i,
"time": trigger[f"trigger_{j}_time"],
"pre_time": trigger[f"trigger_{j}_pretime"],
"post_time": trigger[f"trigger_{j}_posttime"],
}
yield trigger_info | python | def iter_get_triggers(self):
""" generator that yields triggers
Returns
-------
trigger_info : dict
trigger information with the following keys:
* comment : trigger comment
* time : trigger time
* pre_time : trigger pre time
* post_time : trigger post time
* index : trigger index
* group : data group index of trigger
"""
for i, gp in enumerate(self.groups):
trigger = gp.trigger
if trigger:
for j in range(trigger["trigger_events_nr"]):
trigger_info = {
"comment": trigger.comment,
"index": j,
"group": i,
"time": trigger[f"trigger_{j}_time"],
"pre_time": trigger[f"trigger_{j}_pretime"],
"post_time": trigger[f"trigger_{j}_posttime"],
}
yield trigger_info | [
"def",
"iter_get_triggers",
"(",
"self",
")",
":",
"for",
"i",
",",
"gp",
"in",
"enumerate",
"(",
"self",
".",
"groups",
")",
":",
"trigger",
"=",
"gp",
".",
"trigger",
"if",
"trigger",
":",
"for",
"j",
"in",
"range",
"(",
"trigger",
"[",
"\"trigger_... | generator that yields triggers
Returns
-------
trigger_info : dict
trigger information with the following keys:
* comment : trigger comment
* time : trigger time
* pre_time : trigger pre time
* post_time : trigger post time
* index : trigger index
* group : data group index of trigger | [
"generator",
"that",
"yields",
"triggers"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/blocks/mdf_v3.py#L3069-L3097 | train | 221,724 |
danielhrisca/asammdf | asammdf/gui/widgets/formated_axis.py | FormatedAxis.setLabel | def setLabel(self, text=None, units=None, unitPrefix=None, **args):
""" overwrites pyqtgraph setLabel
"""
show_label = False
if text is not None:
self.labelText = text
show_label = True
if units is not None:
self.labelUnits = units
show_label = True
if show_label:
self.showLabel()
if unitPrefix is not None:
self.labelUnitPrefix = unitPrefix
if len(args) > 0:
self.labelStyle = args
self.label.setHtml(self.labelString())
self._adjustSize()
self.picture = None
self.update() | python | def setLabel(self, text=None, units=None, unitPrefix=None, **args):
""" overwrites pyqtgraph setLabel
"""
show_label = False
if text is not None:
self.labelText = text
show_label = True
if units is not None:
self.labelUnits = units
show_label = True
if show_label:
self.showLabel()
if unitPrefix is not None:
self.labelUnitPrefix = unitPrefix
if len(args) > 0:
self.labelStyle = args
self.label.setHtml(self.labelString())
self._adjustSize()
self.picture = None
self.update() | [
"def",
"setLabel",
"(",
"self",
",",
"text",
"=",
"None",
",",
"units",
"=",
"None",
",",
"unitPrefix",
"=",
"None",
",",
"*",
"*",
"args",
")",
":",
"show_label",
"=",
"False",
"if",
"text",
"is",
"not",
"None",
":",
"self",
".",
"labelText",
"=",... | overwrites pyqtgraph setLabel | [
"overwrites",
"pyqtgraph",
"setLabel"
] | 3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66 | https://github.com/danielhrisca/asammdf/blob/3c7a1fd19c957ceebe4dcdbb2abf00806c2bdb66/asammdf/gui/widgets/formated_axis.py#L52-L72 | train | 221,725 |
bernardopires/django-tenant-schemas | tenant_schemas/utils.py | clean_tenant_url | def clean_tenant_url(url_string):
"""
Removes the TENANT_TOKEN from a particular string
"""
if hasattr(settings, 'PUBLIC_SCHEMA_URLCONF'):
if (settings.PUBLIC_SCHEMA_URLCONF and
url_string.startswith(settings.PUBLIC_SCHEMA_URLCONF)):
url_string = url_string[len(settings.PUBLIC_SCHEMA_URLCONF):]
return url_string | python | def clean_tenant_url(url_string):
"""
Removes the TENANT_TOKEN from a particular string
"""
if hasattr(settings, 'PUBLIC_SCHEMA_URLCONF'):
if (settings.PUBLIC_SCHEMA_URLCONF and
url_string.startswith(settings.PUBLIC_SCHEMA_URLCONF)):
url_string = url_string[len(settings.PUBLIC_SCHEMA_URLCONF):]
return url_string | [
"def",
"clean_tenant_url",
"(",
"url_string",
")",
":",
"if",
"hasattr",
"(",
"settings",
",",
"'PUBLIC_SCHEMA_URLCONF'",
")",
":",
"if",
"(",
"settings",
".",
"PUBLIC_SCHEMA_URLCONF",
"and",
"url_string",
".",
"startswith",
"(",
"settings",
".",
"PUBLIC_SCHEMA_UR... | Removes the TENANT_TOKEN from a particular string | [
"Removes",
"the",
"TENANT_TOKEN",
"from",
"a",
"particular",
"string"
] | 75faf00834e1fb7ed017949bfb54531f6329a8dd | https://github.com/bernardopires/django-tenant-schemas/blob/75faf00834e1fb7ed017949bfb54531f6329a8dd/tenant_schemas/utils.py#L53-L61 | train | 221,726 |
bernardopires/django-tenant-schemas | tenant_schemas/utils.py | app_labels | def app_labels(apps_list):
"""
Returns a list of app labels of the given apps_list, now properly handles
new Django 1.7+ application registry.
https://docs.djangoproject.com/en/1.8/ref/applications/#django.apps.AppConfig.label
"""
if AppConfig is None:
return [app.split('.')[-1] for app in apps_list]
return [AppConfig.create(app).label for app in apps_list] | python | def app_labels(apps_list):
"""
Returns a list of app labels of the given apps_list, now properly handles
new Django 1.7+ application registry.
https://docs.djangoproject.com/en/1.8/ref/applications/#django.apps.AppConfig.label
"""
if AppConfig is None:
return [app.split('.')[-1] for app in apps_list]
return [AppConfig.create(app).label for app in apps_list] | [
"def",
"app_labels",
"(",
"apps_list",
")",
":",
"if",
"AppConfig",
"is",
"None",
":",
"return",
"[",
"app",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"for",
"app",
"in",
"apps_list",
"]",
"return",
"[",
"AppConfig",
".",
"create",
"(",
"a... | Returns a list of app labels of the given apps_list, now properly handles
new Django 1.7+ application registry.
https://docs.djangoproject.com/en/1.8/ref/applications/#django.apps.AppConfig.label | [
"Returns",
"a",
"list",
"of",
"app",
"labels",
"of",
"the",
"given",
"apps_list",
"now",
"properly",
"handles",
"new",
"Django",
"1",
".",
"7",
"+",
"application",
"registry",
"."
] | 75faf00834e1fb7ed017949bfb54531f6329a8dd | https://github.com/bernardopires/django-tenant-schemas/blob/75faf00834e1fb7ed017949bfb54531f6329a8dd/tenant_schemas/utils.py#L109-L118 | train | 221,727 |
bernardopires/django-tenant-schemas | tenant_schemas/storage.py | TenantStorageMixin.path | def path(self, name):
"""
Look for files in subdirectory of MEDIA_ROOT using the tenant's
domain_url value as the specifier.
"""
if name is None:
name = ''
try:
location = safe_join(self.location, connection.tenant.domain_url)
except AttributeError:
location = self.location
try:
path = safe_join(location, name)
except ValueError:
raise SuspiciousOperation(
"Attempted access to '%s' denied." % name)
return os.path.normpath(path) | python | def path(self, name):
"""
Look for files in subdirectory of MEDIA_ROOT using the tenant's
domain_url value as the specifier.
"""
if name is None:
name = ''
try:
location = safe_join(self.location, connection.tenant.domain_url)
except AttributeError:
location = self.location
try:
path = safe_join(location, name)
except ValueError:
raise SuspiciousOperation(
"Attempted access to '%s' denied." % name)
return os.path.normpath(path) | [
"def",
"path",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
"is",
"None",
":",
"name",
"=",
"''",
"try",
":",
"location",
"=",
"safe_join",
"(",
"self",
".",
"location",
",",
"connection",
".",
"tenant",
".",
"domain_url",
")",
"except",
"Attrib... | Look for files in subdirectory of MEDIA_ROOT using the tenant's
domain_url value as the specifier. | [
"Look",
"for",
"files",
"in",
"subdirectory",
"of",
"MEDIA_ROOT",
"using",
"the",
"tenant",
"s",
"domain_url",
"value",
"as",
"the",
"specifier",
"."
] | 75faf00834e1fb7ed017949bfb54531f6329a8dd | https://github.com/bernardopires/django-tenant-schemas/blob/75faf00834e1fb7ed017949bfb54531f6329a8dd/tenant_schemas/storage.py#L27-L43 | train | 221,728 |
bernardopires/django-tenant-schemas | tenant_schemas/management/commands/tenant_command.py | Command.run_from_argv | def run_from_argv(self, argv):
"""
Changes the option_list to use the options from the wrapped command.
Adds schema parameter to specify which schema will be used when
executing the wrapped command.
"""
# load the command object.
try:
app_name = get_commands()[argv[2]]
except KeyError:
raise CommandError("Unknown command: %r" % argv[2])
if isinstance(app_name, BaseCommand):
# if the command is already loaded, use it directly.
klass = app_name
else:
klass = load_command_class(app_name, argv[2])
# Ugly, but works. Delete tenant_command from the argv, parse the schema manually
# and forward the rest of the arguments to the actual command being wrapped.
del argv[1]
schema_parser = argparse.ArgumentParser()
schema_parser.add_argument("-s", "--schema", dest="schema_name", help="specify tenant schema")
schema_namespace, args = schema_parser.parse_known_args(argv)
tenant = self.get_tenant_from_options_or_interactive(schema_name=schema_namespace.schema_name)
connection.set_tenant(tenant)
klass.run_from_argv(args) | python | def run_from_argv(self, argv):
"""
Changes the option_list to use the options from the wrapped command.
Adds schema parameter to specify which schema will be used when
executing the wrapped command.
"""
# load the command object.
try:
app_name = get_commands()[argv[2]]
except KeyError:
raise CommandError("Unknown command: %r" % argv[2])
if isinstance(app_name, BaseCommand):
# if the command is already loaded, use it directly.
klass = app_name
else:
klass = load_command_class(app_name, argv[2])
# Ugly, but works. Delete tenant_command from the argv, parse the schema manually
# and forward the rest of the arguments to the actual command being wrapped.
del argv[1]
schema_parser = argparse.ArgumentParser()
schema_parser.add_argument("-s", "--schema", dest="schema_name", help="specify tenant schema")
schema_namespace, args = schema_parser.parse_known_args(argv)
tenant = self.get_tenant_from_options_or_interactive(schema_name=schema_namespace.schema_name)
connection.set_tenant(tenant)
klass.run_from_argv(args) | [
"def",
"run_from_argv",
"(",
"self",
",",
"argv",
")",
":",
"# load the command object.",
"try",
":",
"app_name",
"=",
"get_commands",
"(",
")",
"[",
"argv",
"[",
"2",
"]",
"]",
"except",
"KeyError",
":",
"raise",
"CommandError",
"(",
"\"Unknown command: %r\""... | Changes the option_list to use the options from the wrapped command.
Adds schema parameter to specify which schema will be used when
executing the wrapped command. | [
"Changes",
"the",
"option_list",
"to",
"use",
"the",
"options",
"from",
"the",
"wrapped",
"command",
".",
"Adds",
"schema",
"parameter",
"to",
"specify",
"which",
"schema",
"will",
"be",
"used",
"when",
"executing",
"the",
"wrapped",
"command",
"."
] | 75faf00834e1fb7ed017949bfb54531f6329a8dd | https://github.com/bernardopires/django-tenant-schemas/blob/75faf00834e1fb7ed017949bfb54531f6329a8dd/tenant_schemas/management/commands/tenant_command.py#L11-L38 | train | 221,729 |
bernardopires/django-tenant-schemas | tenant_schemas/management/commands/__init__.py | BaseTenantCommand.handle | def handle(self, *args, **options):
"""
Iterates a command over all registered schemata.
"""
if options['schema_name']:
# only run on a particular schema
connection.set_schema_to_public()
self.execute_command(get_tenant_model().objects.get(schema_name=options['schema_name']), self.COMMAND_NAME,
*args, **options)
else:
for tenant in get_tenant_model().objects.all():
if not (options['skip_public'] and tenant.schema_name == get_public_schema_name()):
self.execute_command(tenant, self.COMMAND_NAME, *args, **options) | python | def handle(self, *args, **options):
"""
Iterates a command over all registered schemata.
"""
if options['schema_name']:
# only run on a particular schema
connection.set_schema_to_public()
self.execute_command(get_tenant_model().objects.get(schema_name=options['schema_name']), self.COMMAND_NAME,
*args, **options)
else:
for tenant in get_tenant_model().objects.all():
if not (options['skip_public'] and tenant.schema_name == get_public_schema_name()):
self.execute_command(tenant, self.COMMAND_NAME, *args, **options) | [
"def",
"handle",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"options",
")",
":",
"if",
"options",
"[",
"'schema_name'",
"]",
":",
"# only run on a particular schema",
"connection",
".",
"set_schema_to_public",
"(",
")",
"self",
".",
"execute_command",
"(",
... | Iterates a command over all registered schemata. | [
"Iterates",
"a",
"command",
"over",
"all",
"registered",
"schemata",
"."
] | 75faf00834e1fb7ed017949bfb54531f6329a8dd | https://github.com/bernardopires/django-tenant-schemas/blob/75faf00834e1fb7ed017949bfb54531f6329a8dd/tenant_schemas/management/commands/__init__.py#L69-L81 | train | 221,730 |
bernardopires/django-tenant-schemas | tenant_schemas/postgresql_backend/base.py | DatabaseWrapper._cursor | def _cursor(self, name=None):
"""
Here it happens. We hope every Django db operation using PostgreSQL
must go through this to get the cursor handle. We change the path.
"""
if name:
# Only supported and required by Django 1.11 (server-side cursor)
cursor = super(DatabaseWrapper, self)._cursor(name=name)
else:
cursor = super(DatabaseWrapper, self)._cursor()
# optionally limit the number of executions - under load, the execution
# of `set search_path` can be quite time consuming
if (not get_limit_set_calls()) or not self.search_path_set:
# Actual search_path modification for the cursor. Database will
# search schemata from left to right when looking for the object
# (table, index, sequence, etc.).
if not self.schema_name:
raise ImproperlyConfigured("Database schema not set. Did you forget "
"to call set_schema() or set_tenant()?")
_check_schema_name(self.schema_name)
public_schema_name = get_public_schema_name()
search_paths = []
if self.schema_name == public_schema_name:
search_paths = [public_schema_name]
elif self.include_public_schema:
search_paths = [self.schema_name, public_schema_name]
else:
search_paths = [self.schema_name]
search_paths.extend(EXTRA_SEARCH_PATHS)
if name:
# Named cursor can only be used once
cursor_for_search_path = self.connection.cursor()
else:
# Reuse
cursor_for_search_path = cursor
# In the event that an error already happened in this transaction and we are going
# to rollback we should just ignore database error when setting the search_path
# if the next instruction is not a rollback it will just fail also, so
# we do not have to worry that it's not the good one
try:
cursor_for_search_path.execute('SET search_path = {0}'.format(','.join(search_paths)))
except (django.db.utils.DatabaseError, psycopg2.InternalError):
self.search_path_set = False
else:
self.search_path_set = True
if name:
cursor_for_search_path.close()
return cursor | python | def _cursor(self, name=None):
"""
Here it happens. We hope every Django db operation using PostgreSQL
must go through this to get the cursor handle. We change the path.
"""
if name:
# Only supported and required by Django 1.11 (server-side cursor)
cursor = super(DatabaseWrapper, self)._cursor(name=name)
else:
cursor = super(DatabaseWrapper, self)._cursor()
# optionally limit the number of executions - under load, the execution
# of `set search_path` can be quite time consuming
if (not get_limit_set_calls()) or not self.search_path_set:
# Actual search_path modification for the cursor. Database will
# search schemata from left to right when looking for the object
# (table, index, sequence, etc.).
if not self.schema_name:
raise ImproperlyConfigured("Database schema not set. Did you forget "
"to call set_schema() or set_tenant()?")
_check_schema_name(self.schema_name)
public_schema_name = get_public_schema_name()
search_paths = []
if self.schema_name == public_schema_name:
search_paths = [public_schema_name]
elif self.include_public_schema:
search_paths = [self.schema_name, public_schema_name]
else:
search_paths = [self.schema_name]
search_paths.extend(EXTRA_SEARCH_PATHS)
if name:
# Named cursor can only be used once
cursor_for_search_path = self.connection.cursor()
else:
# Reuse
cursor_for_search_path = cursor
# In the event that an error already happened in this transaction and we are going
# to rollback we should just ignore database error when setting the search_path
# if the next instruction is not a rollback it will just fail also, so
# we do not have to worry that it's not the good one
try:
cursor_for_search_path.execute('SET search_path = {0}'.format(','.join(search_paths)))
except (django.db.utils.DatabaseError, psycopg2.InternalError):
self.search_path_set = False
else:
self.search_path_set = True
if name:
cursor_for_search_path.close()
return cursor | [
"def",
"_cursor",
"(",
"self",
",",
"name",
"=",
"None",
")",
":",
"if",
"name",
":",
"# Only supported and required by Django 1.11 (server-side cursor)",
"cursor",
"=",
"super",
"(",
"DatabaseWrapper",
",",
"self",
")",
".",
"_cursor",
"(",
"name",
"=",
"name",... | Here it happens. We hope every Django db operation using PostgreSQL
must go through this to get the cursor handle. We change the path. | [
"Here",
"it",
"happens",
".",
"We",
"hope",
"every",
"Django",
"db",
"operation",
"using",
"PostgreSQL",
"must",
"go",
"through",
"this",
"to",
"get",
"the",
"cursor",
"handle",
".",
"We",
"change",
"the",
"path",
"."
] | 75faf00834e1fb7ed017949bfb54531f6329a8dd | https://github.com/bernardopires/django-tenant-schemas/blob/75faf00834e1fb7ed017949bfb54531f6329a8dd/tenant_schemas/postgresql_backend/base.py#L112-L166 | train | 221,731 |
bernardopires/django-tenant-schemas | tenant_schemas/apps.py | best_practice | def best_practice(app_configs, **kwargs):
"""
Test for configuration recommendations. These are best practices, they
avoid hard to find bugs and unexpected behaviour.
"""
if app_configs is None:
app_configs = apps.get_app_configs()
# Take the app_configs and turn them into *old style* application names.
# This is what we expect in the SHARED_APPS and TENANT_APPS settings.
INSTALLED_APPS = [
config.name
for config in app_configs
]
if not hasattr(settings, 'TENANT_APPS'):
return [Critical('TENANT_APPS setting not set')]
if not hasattr(settings, 'TENANT_MODEL'):
return [Critical('TENANT_MODEL setting not set')]
if not hasattr(settings, 'SHARED_APPS'):
return [Critical('SHARED_APPS setting not set')]
if 'tenant_schemas.routers.TenantSyncRouter' not in settings.DATABASE_ROUTERS:
return [
Critical("DATABASE_ROUTERS setting must contain "
"'tenant_schemas.routers.TenantSyncRouter'.")
]
errors = []
django_index = next(i for i, s in enumerate(INSTALLED_APPS) if s.startswith('django.'))
if INSTALLED_APPS.index('tenant_schemas') > django_index:
errors.append(
Warning("You should put 'tenant_schemas' before any django "
"core applications in INSTALLED_APPS.",
obj="django.conf.settings",
hint="This is necessary to overwrite built-in django "
"management commands with their schema-aware "
"implementations.",
id="tenant_schemas.W001"))
if not settings.TENANT_APPS:
errors.append(
Error("TENANT_APPS is empty.",
hint="Maybe you don't need this app?",
id="tenant_schemas.E001"))
if hasattr(settings, 'PG_EXTRA_SEARCH_PATHS'):
if get_public_schema_name() in settings.PG_EXTRA_SEARCH_PATHS:
errors.append(Critical(
"%s can not be included on PG_EXTRA_SEARCH_PATHS."
% get_public_schema_name()))
# make sure no tenant schema is in settings.PG_EXTRA_SEARCH_PATHS
invalid_schemas = set(settings.PG_EXTRA_SEARCH_PATHS).intersection(
get_tenant_model().objects.all().values_list('schema_name', flat=True))
if invalid_schemas:
errors.append(Critical(
"Do not include tenant schemas (%s) on PG_EXTRA_SEARCH_PATHS."
% ", ".join(sorted(invalid_schemas))))
if not settings.SHARED_APPS:
errors.append(
Warning("SHARED_APPS is empty.",
id="tenant_schemas.W002"))
if not set(settings.TENANT_APPS).issubset(INSTALLED_APPS):
delta = set(settings.TENANT_APPS).difference(INSTALLED_APPS)
errors.append(
Error("You have TENANT_APPS that are not in INSTALLED_APPS",
hint=[a for a in settings.TENANT_APPS if a in delta],
id="tenant_schemas.E002"))
if not set(settings.SHARED_APPS).issubset(INSTALLED_APPS):
delta = set(settings.SHARED_APPS).difference(INSTALLED_APPS)
errors.append(
Error("You have SHARED_APPS that are not in INSTALLED_APPS",
hint=[a for a in settings.SHARED_APPS if a in delta],
id="tenant_schemas.E003"))
if not isinstance(default_storage, TenantStorageMixin):
errors.append(Warning(
"Your default storage engine is not tenant aware.",
hint="Set settings.DEFAULT_FILE_STORAGE to "
"'tenant_schemas.storage.TenantFileSystemStorage'",
id="tenant_schemas.W003"
))
return errors | python | def best_practice(app_configs, **kwargs):
"""
Test for configuration recommendations. These are best practices, they
avoid hard to find bugs and unexpected behaviour.
"""
if app_configs is None:
app_configs = apps.get_app_configs()
# Take the app_configs and turn them into *old style* application names.
# This is what we expect in the SHARED_APPS and TENANT_APPS settings.
INSTALLED_APPS = [
config.name
for config in app_configs
]
if not hasattr(settings, 'TENANT_APPS'):
return [Critical('TENANT_APPS setting not set')]
if not hasattr(settings, 'TENANT_MODEL'):
return [Critical('TENANT_MODEL setting not set')]
if not hasattr(settings, 'SHARED_APPS'):
return [Critical('SHARED_APPS setting not set')]
if 'tenant_schemas.routers.TenantSyncRouter' not in settings.DATABASE_ROUTERS:
return [
Critical("DATABASE_ROUTERS setting must contain "
"'tenant_schemas.routers.TenantSyncRouter'.")
]
errors = []
django_index = next(i for i, s in enumerate(INSTALLED_APPS) if s.startswith('django.'))
if INSTALLED_APPS.index('tenant_schemas') > django_index:
errors.append(
Warning("You should put 'tenant_schemas' before any django "
"core applications in INSTALLED_APPS.",
obj="django.conf.settings",
hint="This is necessary to overwrite built-in django "
"management commands with their schema-aware "
"implementations.",
id="tenant_schemas.W001"))
if not settings.TENANT_APPS:
errors.append(
Error("TENANT_APPS is empty.",
hint="Maybe you don't need this app?",
id="tenant_schemas.E001"))
if hasattr(settings, 'PG_EXTRA_SEARCH_PATHS'):
if get_public_schema_name() in settings.PG_EXTRA_SEARCH_PATHS:
errors.append(Critical(
"%s can not be included on PG_EXTRA_SEARCH_PATHS."
% get_public_schema_name()))
# make sure no tenant schema is in settings.PG_EXTRA_SEARCH_PATHS
invalid_schemas = set(settings.PG_EXTRA_SEARCH_PATHS).intersection(
get_tenant_model().objects.all().values_list('schema_name', flat=True))
if invalid_schemas:
errors.append(Critical(
"Do not include tenant schemas (%s) on PG_EXTRA_SEARCH_PATHS."
% ", ".join(sorted(invalid_schemas))))
if not settings.SHARED_APPS:
errors.append(
Warning("SHARED_APPS is empty.",
id="tenant_schemas.W002"))
if not set(settings.TENANT_APPS).issubset(INSTALLED_APPS):
delta = set(settings.TENANT_APPS).difference(INSTALLED_APPS)
errors.append(
Error("You have TENANT_APPS that are not in INSTALLED_APPS",
hint=[a for a in settings.TENANT_APPS if a in delta],
id="tenant_schemas.E002"))
if not set(settings.SHARED_APPS).issubset(INSTALLED_APPS):
delta = set(settings.SHARED_APPS).difference(INSTALLED_APPS)
errors.append(
Error("You have SHARED_APPS that are not in INSTALLED_APPS",
hint=[a for a in settings.SHARED_APPS if a in delta],
id="tenant_schemas.E003"))
if not isinstance(default_storage, TenantStorageMixin):
errors.append(Warning(
"Your default storage engine is not tenant aware.",
hint="Set settings.DEFAULT_FILE_STORAGE to "
"'tenant_schemas.storage.TenantFileSystemStorage'",
id="tenant_schemas.W003"
))
return errors | [
"def",
"best_practice",
"(",
"app_configs",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"app_configs",
"is",
"None",
":",
"app_configs",
"=",
"apps",
".",
"get_app_configs",
"(",
")",
"# Take the app_configs and turn them into *old style* application names.",
"# This is wh... | Test for configuration recommendations. These are best practices, they
avoid hard to find bugs and unexpected behaviour. | [
"Test",
"for",
"configuration",
"recommendations",
".",
"These",
"are",
"best",
"practices",
"they",
"avoid",
"hard",
"to",
"find",
"bugs",
"and",
"unexpected",
"behaviour",
"."
] | 75faf00834e1fb7ed017949bfb54531f6329a8dd | https://github.com/bernardopires/django-tenant-schemas/blob/75faf00834e1fb7ed017949bfb54531f6329a8dd/tenant_schemas/apps.py#L14-L104 | train | 221,732 |
google/openhtf | openhtf/plugs/device_wrapping.py | short_repr | def short_repr(obj, max_len=40):
"""Returns a short, term-friendly string representation of the object.
Args:
obj: An object for which to return a string representation.
max_len: Maximum length of the returned string. Longer reprs will be turned
into a brief descriptive string giving the type and length of obj.
"""
obj_repr = repr(obj)
if len(obj_repr) <= max_len:
return obj_repr
return '<{} of length {}>'.format(type(obj).__name__, len(obj_repr)) | python | def short_repr(obj, max_len=40):
"""Returns a short, term-friendly string representation of the object.
Args:
obj: An object for which to return a string representation.
max_len: Maximum length of the returned string. Longer reprs will be turned
into a brief descriptive string giving the type and length of obj.
"""
obj_repr = repr(obj)
if len(obj_repr) <= max_len:
return obj_repr
return '<{} of length {}>'.format(type(obj).__name__, len(obj_repr)) | [
"def",
"short_repr",
"(",
"obj",
",",
"max_len",
"=",
"40",
")",
":",
"obj_repr",
"=",
"repr",
"(",
"obj",
")",
"if",
"len",
"(",
"obj_repr",
")",
"<=",
"max_len",
":",
"return",
"obj_repr",
"return",
"'<{} of length {}>'",
".",
"format",
"(",
"type",
... | Returns a short, term-friendly string representation of the object.
Args:
obj: An object for which to return a string representation.
max_len: Maximum length of the returned string. Longer reprs will be turned
into a brief descriptive string giving the type and length of obj. | [
"Returns",
"a",
"short",
"term",
"-",
"friendly",
"string",
"representation",
"of",
"the",
"object",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/device_wrapping.py#L29-L40 | train | 221,733 |
google/openhtf | openhtf/core/measurements.py | measures | def measures(*measurements, **kwargs):
"""Decorator-maker used to declare measurements for phases.
See the measurements module docstring for examples of usage.
Args:
measurements: Measurement objects to declare, or a string name from which
to create a Measurement.
kwargs: Keyword arguments to pass to Measurement constructor if we're
constructing one. Note that if kwargs are provided, the length
of measurements must be 1, and that value must be a string containing
the measurement name. For valid kwargs, see the definition of the
Measurement class.
Returns:
A decorator that declares the measurement(s) for the decorated phase.
"""
def _maybe_make(meas):
"""Turn strings into Measurement objects if necessary."""
if isinstance(meas, Measurement):
return meas
elif isinstance(meas, six.string_types):
return Measurement(meas, **kwargs)
raise InvalidMeasurementType('Expected Measurement or string', meas)
# In case we're declaring a measurement inline, we can only declare one.
if kwargs and len(measurements) != 1:
raise InvalidMeasurementType(
'If @measures kwargs are provided, a single measurement name must be '
'provided as a positional arg first.')
# Unlikely, but let's make sure we don't allow overriding initial outcome.
if 'outcome' in kwargs:
raise ValueError('Cannot specify outcome in measurement declaration!')
measurements = [_maybe_make(meas) for meas in measurements]
# 'measurements' is guaranteed to be a list of Measurement objects here.
def decorate(wrapped_phase):
"""Phase decorator to be returned."""
phase = phase_descriptor.PhaseDescriptor.wrap_or_copy(wrapped_phase)
duplicate_names = (set(m.name for m in measurements) &
set(m.name for m in phase.measurements))
if duplicate_names:
raise DuplicateNameError('Measurement names duplicated', duplicate_names)
phase.measurements.extend(measurements)
return phase
return decorate | python | def measures(*measurements, **kwargs):
"""Decorator-maker used to declare measurements for phases.
See the measurements module docstring for examples of usage.
Args:
measurements: Measurement objects to declare, or a string name from which
to create a Measurement.
kwargs: Keyword arguments to pass to Measurement constructor if we're
constructing one. Note that if kwargs are provided, the length
of measurements must be 1, and that value must be a string containing
the measurement name. For valid kwargs, see the definition of the
Measurement class.
Returns:
A decorator that declares the measurement(s) for the decorated phase.
"""
def _maybe_make(meas):
"""Turn strings into Measurement objects if necessary."""
if isinstance(meas, Measurement):
return meas
elif isinstance(meas, six.string_types):
return Measurement(meas, **kwargs)
raise InvalidMeasurementType('Expected Measurement or string', meas)
# In case we're declaring a measurement inline, we can only declare one.
if kwargs and len(measurements) != 1:
raise InvalidMeasurementType(
'If @measures kwargs are provided, a single measurement name must be '
'provided as a positional arg first.')
# Unlikely, but let's make sure we don't allow overriding initial outcome.
if 'outcome' in kwargs:
raise ValueError('Cannot specify outcome in measurement declaration!')
measurements = [_maybe_make(meas) for meas in measurements]
# 'measurements' is guaranteed to be a list of Measurement objects here.
def decorate(wrapped_phase):
"""Phase decorator to be returned."""
phase = phase_descriptor.PhaseDescriptor.wrap_or_copy(wrapped_phase)
duplicate_names = (set(m.name for m in measurements) &
set(m.name for m in phase.measurements))
if duplicate_names:
raise DuplicateNameError('Measurement names duplicated', duplicate_names)
phase.measurements.extend(measurements)
return phase
return decorate | [
"def",
"measures",
"(",
"*",
"measurements",
",",
"*",
"*",
"kwargs",
")",
":",
"def",
"_maybe_make",
"(",
"meas",
")",
":",
"\"\"\"Turn strings into Measurement objects if necessary.\"\"\"",
"if",
"isinstance",
"(",
"meas",
",",
"Measurement",
")",
":",
"return",... | Decorator-maker used to declare measurements for phases.
See the measurements module docstring for examples of usage.
Args:
measurements: Measurement objects to declare, or a string name from which
to create a Measurement.
kwargs: Keyword arguments to pass to Measurement constructor if we're
constructing one. Note that if kwargs are provided, the length
of measurements must be 1, and that value must be a string containing
the measurement name. For valid kwargs, see the definition of the
Measurement class.
Returns:
A decorator that declares the measurement(s) for the decorated phase. | [
"Decorator",
"-",
"maker",
"used",
"to",
"declare",
"measurements",
"for",
"phases",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/core/measurements.py#L614-L662 | train | 221,734 |
google/openhtf | openhtf/core/measurements.py | Measurement.set_notification_callback | def set_notification_callback(self, notification_cb):
"""Set the notifier we'll call when measurements are set."""
self._notification_cb = notification_cb
if not notification_cb and self.dimensions:
self.measured_value.notify_value_set = None
return self | python | def set_notification_callback(self, notification_cb):
"""Set the notifier we'll call when measurements are set."""
self._notification_cb = notification_cb
if not notification_cb and self.dimensions:
self.measured_value.notify_value_set = None
return self | [
"def",
"set_notification_callback",
"(",
"self",
",",
"notification_cb",
")",
":",
"self",
".",
"_notification_cb",
"=",
"notification_cb",
"if",
"not",
"notification_cb",
"and",
"self",
".",
"dimensions",
":",
"self",
".",
"measured_value",
".",
"notify_value_set",... | Set the notifier we'll call when measurements are set. | [
"Set",
"the",
"notifier",
"we",
"ll",
"call",
"when",
"measurements",
"are",
"set",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/core/measurements.py#L171-L176 | train | 221,735 |
google/openhtf | openhtf/core/measurements.py | Measurement._maybe_make_unit_desc | def _maybe_make_unit_desc(self, unit_desc):
"""Return the UnitDescriptor or convert a string to one."""
if isinstance(unit_desc, str) or unit_desc is None:
unit_desc = units.Unit(unit_desc)
if not isinstance(unit_desc, units.UnitDescriptor):
raise TypeError('Invalid units for measurement %s: %s' % (self.name,
unit_desc))
return unit_desc | python | def _maybe_make_unit_desc(self, unit_desc):
"""Return the UnitDescriptor or convert a string to one."""
if isinstance(unit_desc, str) or unit_desc is None:
unit_desc = units.Unit(unit_desc)
if not isinstance(unit_desc, units.UnitDescriptor):
raise TypeError('Invalid units for measurement %s: %s' % (self.name,
unit_desc))
return unit_desc | [
"def",
"_maybe_make_unit_desc",
"(",
"self",
",",
"unit_desc",
")",
":",
"if",
"isinstance",
"(",
"unit_desc",
",",
"str",
")",
"or",
"unit_desc",
"is",
"None",
":",
"unit_desc",
"=",
"units",
".",
"Unit",
"(",
"unit_desc",
")",
"if",
"not",
"isinstance",
... | Return the UnitDescriptor or convert a string to one. | [
"Return",
"the",
"UnitDescriptor",
"or",
"convert",
"a",
"string",
"to",
"one",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/core/measurements.py#L191-L198 | train | 221,736 |
google/openhtf | openhtf/core/measurements.py | Measurement._maybe_make_dimension | def _maybe_make_dimension(self, dimension):
"""Return a `measurements.Dimension` instance."""
# For backwards compatibility the argument can be either a Dimension, a
# string or a `units.UnitDescriptor`.
if isinstance(dimension, Dimension):
return dimension
if isinstance(dimension, units.UnitDescriptor):
return Dimension.from_unit_descriptor(dimension)
if isinstance(dimension, str):
return Dimension.from_string(dimension)
raise TypeError('Cannot convert %s to a dimension', dimension) | python | def _maybe_make_dimension(self, dimension):
"""Return a `measurements.Dimension` instance."""
# For backwards compatibility the argument can be either a Dimension, a
# string or a `units.UnitDescriptor`.
if isinstance(dimension, Dimension):
return dimension
if isinstance(dimension, units.UnitDescriptor):
return Dimension.from_unit_descriptor(dimension)
if isinstance(dimension, str):
return Dimension.from_string(dimension)
raise TypeError('Cannot convert %s to a dimension', dimension) | [
"def",
"_maybe_make_dimension",
"(",
"self",
",",
"dimension",
")",
":",
"# For backwards compatibility the argument can be either a Dimension, a",
"# string or a `units.UnitDescriptor`.",
"if",
"isinstance",
"(",
"dimension",
",",
"Dimension",
")",
":",
"return",
"dimension",
... | Return a `measurements.Dimension` instance. | [
"Return",
"a",
"measurements",
".",
"Dimension",
"instance",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/core/measurements.py#L200-L211 | train | 221,737 |
google/openhtf | openhtf/core/measurements.py | Measurement.with_dimensions | def with_dimensions(self, *dimensions):
"""Declare dimensions for this Measurement, returns self for chaining."""
self.dimensions = tuple(
self._maybe_make_dimension(dim) for dim in dimensions)
self._cached = None
return self | python | def with_dimensions(self, *dimensions):
"""Declare dimensions for this Measurement, returns self for chaining."""
self.dimensions = tuple(
self._maybe_make_dimension(dim) for dim in dimensions)
self._cached = None
return self | [
"def",
"with_dimensions",
"(",
"self",
",",
"*",
"dimensions",
")",
":",
"self",
".",
"dimensions",
"=",
"tuple",
"(",
"self",
".",
"_maybe_make_dimension",
"(",
"dim",
")",
"for",
"dim",
"in",
"dimensions",
")",
"self",
".",
"_cached",
"=",
"None",
"ret... | Declare dimensions for this Measurement, returns self for chaining. | [
"Declare",
"dimensions",
"for",
"this",
"Measurement",
"returns",
"self",
"for",
"chaining",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/core/measurements.py#L218-L223 | train | 221,738 |
google/openhtf | openhtf/core/measurements.py | Measurement.with_validator | def with_validator(self, validator):
"""Add a validator callback to this Measurement, chainable."""
if not callable(validator):
raise ValueError('Validator must be callable', validator)
self.validators.append(validator)
self._cached = None
return self | python | def with_validator(self, validator):
"""Add a validator callback to this Measurement, chainable."""
if not callable(validator):
raise ValueError('Validator must be callable', validator)
self.validators.append(validator)
self._cached = None
return self | [
"def",
"with_validator",
"(",
"self",
",",
"validator",
")",
":",
"if",
"not",
"callable",
"(",
"validator",
")",
":",
"raise",
"ValueError",
"(",
"'Validator must be callable'",
",",
"validator",
")",
"self",
".",
"validators",
".",
"append",
"(",
"validator"... | Add a validator callback to this Measurement, chainable. | [
"Add",
"a",
"validator",
"callback",
"to",
"this",
"Measurement",
"chainable",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/core/measurements.py#L225-L231 | train | 221,739 |
google/openhtf | openhtf/core/measurements.py | Measurement.with_args | def with_args(self, **kwargs):
"""String substitution for names and docstrings."""
validators = [
validator.with_args(**kwargs)
if hasattr(validator, 'with_args') else validator
for validator in self.validators
]
return mutablerecords.CopyRecord(
self, name=util.format_string(self.name, kwargs),
docstring=util.format_string(self.docstring, kwargs),
validators=validators,
_cached=None,
) | python | def with_args(self, **kwargs):
"""String substitution for names and docstrings."""
validators = [
validator.with_args(**kwargs)
if hasattr(validator, 'with_args') else validator
for validator in self.validators
]
return mutablerecords.CopyRecord(
self, name=util.format_string(self.name, kwargs),
docstring=util.format_string(self.docstring, kwargs),
validators=validators,
_cached=None,
) | [
"def",
"with_args",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"validators",
"=",
"[",
"validator",
".",
"with_args",
"(",
"*",
"*",
"kwargs",
")",
"if",
"hasattr",
"(",
"validator",
",",
"'with_args'",
")",
"else",
"validator",
"for",
"validator",
... | String substitution for names and docstrings. | [
"String",
"substitution",
"for",
"names",
"and",
"docstrings",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/core/measurements.py#L233-L245 | train | 221,740 |
google/openhtf | openhtf/core/measurements.py | Measurement.validate | def validate(self):
"""Validate this measurement and update its 'outcome' field."""
# PASS if all our validators return True, otherwise FAIL.
try:
if all(v(self.measured_value.value) for v in self.validators):
self.outcome = Outcome.PASS
else:
self.outcome = Outcome.FAIL
return self
except Exception as e: # pylint: disable=bare-except
_LOG.error('Validation for measurement %s raised an exception %s.',
self.name, e)
self.outcome = Outcome.FAIL
raise
finally:
if self._cached:
self._cached['outcome'] = self.outcome.name | python | def validate(self):
"""Validate this measurement and update its 'outcome' field."""
# PASS if all our validators return True, otherwise FAIL.
try:
if all(v(self.measured_value.value) for v in self.validators):
self.outcome = Outcome.PASS
else:
self.outcome = Outcome.FAIL
return self
except Exception as e: # pylint: disable=bare-except
_LOG.error('Validation for measurement %s raised an exception %s.',
self.name, e)
self.outcome = Outcome.FAIL
raise
finally:
if self._cached:
self._cached['outcome'] = self.outcome.name | [
"def",
"validate",
"(",
"self",
")",
":",
"# PASS if all our validators return True, otherwise FAIL.",
"try",
":",
"if",
"all",
"(",
"v",
"(",
"self",
".",
"measured_value",
".",
"value",
")",
"for",
"v",
"in",
"self",
".",
"validators",
")",
":",
"self",
".... | Validate this measurement and update its 'outcome' field. | [
"Validate",
"this",
"measurement",
"and",
"update",
"its",
"outcome",
"field",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/core/measurements.py#L260-L276 | train | 221,741 |
google/openhtf | openhtf/core/measurements.py | Measurement.as_base_types | def as_base_types(self):
"""Convert this measurement to a dict of basic types."""
if not self._cached:
# Create the single cache file the first time this is called.
self._cached = {
'name': self.name,
'outcome': self.outcome.name,
}
if self.validators:
self._cached['validators'] = data.convert_to_base_types(
tuple(str(v) for v in self.validators))
if self.dimensions:
self._cached['dimensions'] = data.convert_to_base_types(self.dimensions)
if self.units:
self._cached['units'] = data.convert_to_base_types(self.units)
if self.docstring:
self._cached['docstring'] = self.docstring
if self.measured_value.is_value_set:
self._cached['measured_value'] = self.measured_value.basetype_value()
return self._cached | python | def as_base_types(self):
"""Convert this measurement to a dict of basic types."""
if not self._cached:
# Create the single cache file the first time this is called.
self._cached = {
'name': self.name,
'outcome': self.outcome.name,
}
if self.validators:
self._cached['validators'] = data.convert_to_base_types(
tuple(str(v) for v in self.validators))
if self.dimensions:
self._cached['dimensions'] = data.convert_to_base_types(self.dimensions)
if self.units:
self._cached['units'] = data.convert_to_base_types(self.units)
if self.docstring:
self._cached['docstring'] = self.docstring
if self.measured_value.is_value_set:
self._cached['measured_value'] = self.measured_value.basetype_value()
return self._cached | [
"def",
"as_base_types",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_cached",
":",
"# Create the single cache file the first time this is called.",
"self",
".",
"_cached",
"=",
"{",
"'name'",
":",
"self",
".",
"name",
",",
"'outcome'",
":",
"self",
".",
... | Convert this measurement to a dict of basic types. | [
"Convert",
"this",
"measurement",
"to",
"a",
"dict",
"of",
"basic",
"types",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/core/measurements.py#L278-L297 | train | 221,742 |
google/openhtf | openhtf/core/measurements.py | Measurement.to_dataframe | def to_dataframe(self, columns=None):
"""Convert a multi-dim to a pandas dataframe."""
if not isinstance(self.measured_value, DimensionedMeasuredValue):
raise TypeError(
'Only a dimensioned measurement can be converted to a DataFrame')
if columns is None:
columns = [d.name for d in self.dimensions]
columns += [self.units.name if self.units else 'value']
dataframe = self.measured_value.to_dataframe(columns)
return dataframe | python | def to_dataframe(self, columns=None):
"""Convert a multi-dim to a pandas dataframe."""
if not isinstance(self.measured_value, DimensionedMeasuredValue):
raise TypeError(
'Only a dimensioned measurement can be converted to a DataFrame')
if columns is None:
columns = [d.name for d in self.dimensions]
columns += [self.units.name if self.units else 'value']
dataframe = self.measured_value.to_dataframe(columns)
return dataframe | [
"def",
"to_dataframe",
"(",
"self",
",",
"columns",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"self",
".",
"measured_value",
",",
"DimensionedMeasuredValue",
")",
":",
"raise",
"TypeError",
"(",
"'Only a dimensioned measurement can be converted to a DataF... | Convert a multi-dim to a pandas dataframe. | [
"Convert",
"a",
"multi",
"-",
"dim",
"to",
"a",
"pandas",
"dataframe",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/core/measurements.py#L299-L311 | train | 221,743 |
google/openhtf | openhtf/core/measurements.py | MeasuredValue.set | def set(self, value):
"""Set the value for this measurement, with some sanity checks."""
if self.is_value_set:
# While we want to *allow* re-setting previously set measurements, we'd
# rather promote the use of multidimensional measurements instead of
# discarding data, so we make this somewhat chatty.
_LOG.warning(
'Overriding previous measurement %s value of %s with %s, the old '
'value will be lost. Use a dimensioned measurement if you need to '
'save multiple values.', self.name, self.stored_value, value)
if value is None:
_LOG.warning('Measurement %s is set to None', self.name)
self.stored_value = value
self._cached_value = data.convert_to_base_types(value)
self.is_value_set = True | python | def set(self, value):
"""Set the value for this measurement, with some sanity checks."""
if self.is_value_set:
# While we want to *allow* re-setting previously set measurements, we'd
# rather promote the use of multidimensional measurements instead of
# discarding data, so we make this somewhat chatty.
_LOG.warning(
'Overriding previous measurement %s value of %s with %s, the old '
'value will be lost. Use a dimensioned measurement if you need to '
'save multiple values.', self.name, self.stored_value, value)
if value is None:
_LOG.warning('Measurement %s is set to None', self.name)
self.stored_value = value
self._cached_value = data.convert_to_base_types(value)
self.is_value_set = True | [
"def",
"set",
"(",
"self",
",",
"value",
")",
":",
"if",
"self",
".",
"is_value_set",
":",
"# While we want to *allow* re-setting previously set measurements, we'd",
"# rather promote the use of multidimensional measurements instead of",
"# discarding data, so we make this somewhat cha... | Set the value for this measurement, with some sanity checks. | [
"Set",
"the",
"value",
"for",
"this",
"measurement",
"with",
"some",
"sanity",
"checks",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/core/measurements.py#L354-L368 | train | 221,744 |
google/openhtf | openhtf/core/measurements.py | Dimension.from_string | def from_string(cls, string):
"""Convert a string into a Dimension"""
# Note: There is some ambiguity as to whether the string passed is intended
# to become a unit looked up by name or suffix, or a Dimension descriptor.
if string in units.UNITS_BY_ALL:
return cls(description=string, unit=units.Unit(string))
else:
return cls(description=string) | python | def from_string(cls, string):
"""Convert a string into a Dimension"""
# Note: There is some ambiguity as to whether the string passed is intended
# to become a unit looked up by name or suffix, or a Dimension descriptor.
if string in units.UNITS_BY_ALL:
return cls(description=string, unit=units.Unit(string))
else:
return cls(description=string) | [
"def",
"from_string",
"(",
"cls",
",",
"string",
")",
":",
"# Note: There is some ambiguity as to whether the string passed is intended",
"# to become a unit looked up by name or suffix, or a Dimension descriptor.",
"if",
"string",
"in",
"units",
".",
"UNITS_BY_ALL",
":",
"return",... | Convert a string into a Dimension | [
"Convert",
"a",
"string",
"into",
"a",
"Dimension"
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/core/measurements.py#L404-L411 | train | 221,745 |
google/openhtf | openhtf/core/measurements.py | DimensionedMeasuredValue.value | def value(self):
"""The values stored in this record.
Returns:
A list of tuples; the last element of each tuple will be the measured
value, the other elements will be the associated coordinates. The tuples
are output in the order in which they were set.
"""
if not self.is_value_set:
raise MeasurementNotSetError('Measurement not yet set', self.name)
return [dimensions + (value,) for dimensions, value in
six.iteritems(self.value_dict)] | python | def value(self):
"""The values stored in this record.
Returns:
A list of tuples; the last element of each tuple will be the measured
value, the other elements will be the associated coordinates. The tuples
are output in the order in which they were set.
"""
if not self.is_value_set:
raise MeasurementNotSetError('Measurement not yet set', self.name)
return [dimensions + (value,) for dimensions, value in
six.iteritems(self.value_dict)] | [
"def",
"value",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_value_set",
":",
"raise",
"MeasurementNotSetError",
"(",
"'Measurement not yet set'",
",",
"self",
".",
"name",
")",
"return",
"[",
"dimensions",
"+",
"(",
"value",
",",
")",
"for",
"dime... | The values stored in this record.
Returns:
A list of tuples; the last element of each tuple will be the measured
value, the other elements will be the associated coordinates. The tuples
are output in the order in which they were set. | [
"The",
"values",
"stored",
"in",
"this",
"record",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/core/measurements.py#L505-L516 | train | 221,746 |
google/openhtf | openhtf/core/measurements.py | DimensionedMeasuredValue.to_dataframe | def to_dataframe(self, columns=None):
"""Converts to a `pandas.DataFrame`"""
if not self.is_value_set:
raise ValueError('Value must be set before converting to a DataFrame.')
if not pandas:
raise RuntimeError('Install pandas to convert to pandas.DataFrame')
return pandas.DataFrame.from_records(self.value, columns=columns) | python | def to_dataframe(self, columns=None):
"""Converts to a `pandas.DataFrame`"""
if not self.is_value_set:
raise ValueError('Value must be set before converting to a DataFrame.')
if not pandas:
raise RuntimeError('Install pandas to convert to pandas.DataFrame')
return pandas.DataFrame.from_records(self.value, columns=columns) | [
"def",
"to_dataframe",
"(",
"self",
",",
"columns",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"is_value_set",
":",
"raise",
"ValueError",
"(",
"'Value must be set before converting to a DataFrame.'",
")",
"if",
"not",
"pandas",
":",
"raise",
"RuntimeError",... | Converts to a `pandas.DataFrame` | [
"Converts",
"to",
"a",
"pandas",
".",
"DataFrame"
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/core/measurements.py#L525-L531 | train | 221,747 |
google/openhtf | openhtf/core/measurements.py | Collection._assert_valid_key | def _assert_valid_key(self, name):
"""Raises if name is not a valid measurement."""
if name not in self._measurements:
raise NotAMeasurementError('Not a measurement', name, self._measurements) | python | def _assert_valid_key(self, name):
"""Raises if name is not a valid measurement."""
if name not in self._measurements:
raise NotAMeasurementError('Not a measurement', name, self._measurements) | [
"def",
"_assert_valid_key",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
"not",
"in",
"self",
".",
"_measurements",
":",
"raise",
"NotAMeasurementError",
"(",
"'Not a measurement'",
",",
"name",
",",
"self",
".",
"_measurements",
")"
] | Raises if name is not a valid measurement. | [
"Raises",
"if",
"name",
"is",
"not",
"a",
"valid",
"measurement",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/core/measurements.py#L579-L582 | train | 221,748 |
google/openhtf | openhtf/plugs/usb/adb_device.py | AdbDevice.install | def install(self, apk_path, destination_dir=None, timeout_ms=None):
"""Install apk to device.
Doesn't support verifier file, instead allows destination directory to be
overridden.
Arguments:
apk_path: Local path to apk to install.
destination_dir: Optional destination directory. Use /system/app/ for
persistent applications.
timeout_ms: Expected timeout for pushing and installing.
Returns:
The pm install output.
"""
if not destination_dir:
destination_dir = '/data/local/tmp/'
basename = os.path.basename(apk_path)
destination_path = destination_dir + basename
self.push(apk_path, destination_path, timeout_ms=timeout_ms)
return self.Shell('pm install -r "%s"' % destination_path,
timeout_ms=timeout_ms) | python | def install(self, apk_path, destination_dir=None, timeout_ms=None):
"""Install apk to device.
Doesn't support verifier file, instead allows destination directory to be
overridden.
Arguments:
apk_path: Local path to apk to install.
destination_dir: Optional destination directory. Use /system/app/ for
persistent applications.
timeout_ms: Expected timeout for pushing and installing.
Returns:
The pm install output.
"""
if not destination_dir:
destination_dir = '/data/local/tmp/'
basename = os.path.basename(apk_path)
destination_path = destination_dir + basename
self.push(apk_path, destination_path, timeout_ms=timeout_ms)
return self.Shell('pm install -r "%s"' % destination_path,
timeout_ms=timeout_ms) | [
"def",
"install",
"(",
"self",
",",
"apk_path",
",",
"destination_dir",
"=",
"None",
",",
"timeout_ms",
"=",
"None",
")",
":",
"if",
"not",
"destination_dir",
":",
"destination_dir",
"=",
"'/data/local/tmp/'",
"basename",
"=",
"os",
".",
"path",
".",
"basena... | Install apk to device.
Doesn't support verifier file, instead allows destination directory to be
overridden.
Arguments:
apk_path: Local path to apk to install.
destination_dir: Optional destination directory. Use /system/app/ for
persistent applications.
timeout_ms: Expected timeout for pushing and installing.
Returns:
The pm install output. | [
"Install",
"apk",
"to",
"device",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/adb_device.py#L111-L132 | train | 221,749 |
google/openhtf | openhtf/plugs/usb/adb_device.py | AdbDevice.push | def push(self, source_file, device_filename, timeout_ms=None):
"""Push source_file to file on device.
Arguments:
source_file: Either a filename or file-like object to push to the device.
If a filename, will set the remote mtime to match the local mtime,
otherwise will use the current time.
device_filename: The filename on the device to write to.
timeout_ms: Expected timeout for any part of the push.
"""
mtime = 0
if isinstance(source_file, six.string_types):
mtime = os.path.getmtime(source_file)
source_file = open(source_file)
self.filesync_service.send(
source_file, device_filename, mtime=mtime,
timeout=timeouts.PolledTimeout.from_millis(timeout_ms)) | python | def push(self, source_file, device_filename, timeout_ms=None):
"""Push source_file to file on device.
Arguments:
source_file: Either a filename or file-like object to push to the device.
If a filename, will set the remote mtime to match the local mtime,
otherwise will use the current time.
device_filename: The filename on the device to write to.
timeout_ms: Expected timeout for any part of the push.
"""
mtime = 0
if isinstance(source_file, six.string_types):
mtime = os.path.getmtime(source_file)
source_file = open(source_file)
self.filesync_service.send(
source_file, device_filename, mtime=mtime,
timeout=timeouts.PolledTimeout.from_millis(timeout_ms)) | [
"def",
"push",
"(",
"self",
",",
"source_file",
",",
"device_filename",
",",
"timeout_ms",
"=",
"None",
")",
":",
"mtime",
"=",
"0",
"if",
"isinstance",
"(",
"source_file",
",",
"six",
".",
"string_types",
")",
":",
"mtime",
"=",
"os",
".",
"path",
"."... | Push source_file to file on device.
Arguments:
source_file: Either a filename or file-like object to push to the device.
If a filename, will set the remote mtime to match the local mtime,
otherwise will use the current time.
device_filename: The filename on the device to write to.
timeout_ms: Expected timeout for any part of the push. | [
"Push",
"source_file",
"to",
"file",
"on",
"device",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/adb_device.py#L134-L151 | train | 221,750 |
google/openhtf | openhtf/plugs/usb/adb_device.py | AdbDevice.pull | def pull(self, device_filename, dest_file=None, timeout_ms=None):
"""Pull file from device.
Arguments:
device_filename: The filename on the device to pull.
dest_file: If set, a filename or writable file-like object.
timeout_ms: Expected timeout for the pull.
Returns:
The file data if dest_file is not set, None otherwise.
"""
should_return_data = dest_file is None
if isinstance(dest_file, six.string_types):
dest_file = open(dest_file, 'w')
elif dest_file is None:
dest_file = six.StringIO()
self.filesync_service.recv(device_filename, dest_file,
timeouts.PolledTimeout.from_millis(timeout_ms))
if should_return_data:
return dest_file.getvalue() | python | def pull(self, device_filename, dest_file=None, timeout_ms=None):
"""Pull file from device.
Arguments:
device_filename: The filename on the device to pull.
dest_file: If set, a filename or writable file-like object.
timeout_ms: Expected timeout for the pull.
Returns:
The file data if dest_file is not set, None otherwise.
"""
should_return_data = dest_file is None
if isinstance(dest_file, six.string_types):
dest_file = open(dest_file, 'w')
elif dest_file is None:
dest_file = six.StringIO()
self.filesync_service.recv(device_filename, dest_file,
timeouts.PolledTimeout.from_millis(timeout_ms))
if should_return_data:
return dest_file.getvalue() | [
"def",
"pull",
"(",
"self",
",",
"device_filename",
",",
"dest_file",
"=",
"None",
",",
"timeout_ms",
"=",
"None",
")",
":",
"should_return_data",
"=",
"dest_file",
"is",
"None",
"if",
"isinstance",
"(",
"dest_file",
",",
"six",
".",
"string_types",
")",
"... | Pull file from device.
Arguments:
device_filename: The filename on the device to pull.
dest_file: If set, a filename or writable file-like object.
timeout_ms: Expected timeout for the pull.
Returns:
The file data if dest_file is not set, None otherwise. | [
"Pull",
"file",
"from",
"device",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/adb_device.py#L153-L172 | train | 221,751 |
google/openhtf | openhtf/plugs/usb/adb_device.py | AdbDevice.list | def list(self, device_path, timeout_ms=None):
"""Yield filesync_service.DeviceFileStat objects for directory contents."""
return self.filesync_service.list(
device_path, timeouts.PolledTimeout.from_millis(timeout_ms)) | python | def list(self, device_path, timeout_ms=None):
"""Yield filesync_service.DeviceFileStat objects for directory contents."""
return self.filesync_service.list(
device_path, timeouts.PolledTimeout.from_millis(timeout_ms)) | [
"def",
"list",
"(",
"self",
",",
"device_path",
",",
"timeout_ms",
"=",
"None",
")",
":",
"return",
"self",
".",
"filesync_service",
".",
"list",
"(",
"device_path",
",",
"timeouts",
".",
"PolledTimeout",
".",
"from_millis",
"(",
"timeout_ms",
")",
")"
] | Yield filesync_service.DeviceFileStat objects for directory contents. | [
"Yield",
"filesync_service",
".",
"DeviceFileStat",
"objects",
"for",
"directory",
"contents",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/adb_device.py#L174-L177 | train | 221,752 |
def _check_remote_command(self, destination, timeout_ms, success_msgs=None):
  """Open a stream to destination, check for remote errors.

  Used for reboot, remount, and root services.  If this method returns, the
  command was successful, otherwise an appropriate error will have been
  raised.

  Args:
    destination: Stream destination to open.
    timeout_ms: Timeout in milliseconds for the operation.
    success_msgs: If provided, a list of messages that, if returned from the
      device, indicate success, so don't treat them as errors.

  Raises:
    AdbRemoteError: If the remote command fails, will contain any message we
      got back from the device.
    AdbStreamUnavailableError: The service requested isn't supported.
  """
  timeout = timeouts.PolledTimeout.from_millis(timeout_ms)
  stream = self._adb_connection.open_stream(destination, timeout)
  if not stream:
    raise usb_exceptions.AdbStreamUnavailableError(
        'Service %s not supported', destination)
  try:
    message = stream.read(timeout_ms=timeout)
    # Some commands report success messages, ignore them.  Guard against the
    # default success_msgs=None, which previously raised TypeError when
    # iterated here.
    if any(m in message for m in success_msgs or ()):
      return
  except usb_exceptions.CommonUsbError:
    if destination.startswith('reboot:'):
      # We expect this if the device is rebooting.
      return
    raise
  raise usb_exceptions.AdbRemoteError('Device message: %s', message)
"def",
"_check_remote_command",
"(",
"self",
",",
"destination",
",",
"timeout_ms",
",",
"success_msgs",
"=",
"None",
")",
":",
"timeout",
"=",
"timeouts",
".",
"PolledTimeout",
".",
"from_millis",
"(",
"timeout_ms",
")",
"stream",
"=",
"self",
".",
"_adb_conn... | Open a stream to destination, check for remote errors.
Used for reboot, remount, and root services. If this method returns, the
command was successful, otherwise an appropriate error will have been
raised.
Args:
destination: Stream destination to open.
timeout_ms: Timeout in milliseconds for the operation.
success_msgs: If provided, a list of messages that, if returned from the
device, indicate success, so don't treat them as errors.
Raises:
AdbRemoteError: If the remote command fails, will contain any message we
got back from the device.
AdbStreamUnavailableError: The service requested isn't supported. | [
"Open",
"a",
"stream",
"to",
"destination",
"check",
"for",
"remote",
"errors",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/adb_device.py#L191-L224 | train | 221,753 |
def send(query,
         address=DEFAULT_ADDRESS,
         port=DEFAULT_PORT,
         ttl=DEFAULT_TTL,
         local_only=False,
         timeout_s=2):
  """Send a query to the given multicast socket and yield responses.

  Args:
    query: The string query to send.
    address: Multicast IP address component of the socket to send to.
    port: Multicast UDP port component of the socket to send to.
    ttl: TTL for multicast messages. 1 to keep traffic in-network.
    local_only: If True, the outgoing interface is set to localhost so that
      no packets leave this host.
    timeout_s: Seconds to wait for responses.

  Yields:
    Responses that arrive before the timeout expires, as tuples of
    (sender_address, message).
  """
  # Set up the socket as a UDP Multicast socket with the given timeout.
  sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
  sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
  if local_only:
    # Set outgoing interface to localhost to ensure no packets leave this host.
    sock.setsockopt(
        socket.IPPROTO_IP,
        socket.IP_MULTICAST_IF,
        struct.pack('!L', LOCALHOST_ADDRESS))
  sock.settimeout(timeout_s)
  sock.sendto(query.encode('utf-8'), (address, port))

  # Set up our thread-safe Queue for handling responses.
  recv_queue = queue.Queue()
  def _handle_responses():
    # Runs in a background thread; drains responses into recv_queue until the
    # socket timeout fires, then enqueues None as the end-of-stream sentinel.
    while True:
      try:
        data, address = sock.recvfrom(MAX_MESSAGE_BYTES)
        data = data.decode('utf-8')
      except socket.timeout:
        recv_queue.put(None)
        break
      else:
        _LOG.debug('Multicast response to query "%s": %s:%s',
                   query, address[0], data)
        recv_queue.put((address[0], str(data)))

  # Yield responses as they come in, giving up once timeout expires.
  response_thread = threading.Thread(target=_handle_responses)
  response_thread.start()
  while response_thread.is_alive():
    recv_tuple = recv_queue.get()
    if not recv_tuple:
      # None sentinel: the receiver timed out, stop yielding.
      break
    yield recv_tuple
  response_thread.join()
"def",
"send",
"(",
"query",
",",
"address",
"=",
"DEFAULT_ADDRESS",
",",
"port",
"=",
"DEFAULT_PORT",
",",
"ttl",
"=",
"DEFAULT_TTL",
",",
"local_only",
"=",
"False",
",",
"timeout_s",
"=",
"2",
")",
":",
"# Set up the socket as a UDP Multicast socket with the gi... | Sends a query to the given multicast socket and returns responses.
Args:
query: The string query to send.
address: Multicast IP address component of the socket to send to.
port: Multicast UDP port component of the socket to send to.
ttl: TTL for multicast messages. 1 to keep traffic in-network.
timeout_s: Seconds to wait for responses.
Returns: A set of all responses that arrived before the timeout expired.
Responses are tuples of (sender_address, message). | [
"Sends",
"a",
"query",
"to",
"the",
"given",
"multicast",
"socket",
"and",
"returns",
"responses",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/multicast.py#L135-L188 | train | 221,754 |
def run(self):
  """Listen for pings until stopped, replying via self._callback."""
  self._live = True
  self._sock.settimeout(self.LISTEN_TIMEOUT_S)
  # Passing in INADDR_ANY means the kernel will choose the default interface.
  # The localhost address is used to receive messages sent in "local_only"
  # mode and the default address is used to receive all other messages.
  for interface_ip in (socket.INADDR_ANY, LOCALHOST_ADDRESS):
    self._sock.setsockopt(
        socket.IPPROTO_IP,
        socket.IP_ADD_MEMBERSHIP,
        # IP_ADD_MEMBERSHIP takes the 8-byte group address followed by the IP
        # assigned to the interface on which to listen.
        struct.pack('!4sL', socket.inet_aton(self.address), interface_ip))
  if sys.platform == 'darwin':
    # macOS requires SO_REUSEPORT (not just SO_REUSEADDR) for multiple
    # listeners to share the same multicast port.
    self._sock.setsockopt(socket.SOL_SOCKET,
                          socket.SO_REUSEPORT,
                          1)  # Allow multiple listeners to bind.
  else:
    self._sock.setsockopt(socket.SOL_SOCKET,
                          socket.SO_REUSEADDR,
                          1)  # Allow multiple listeners to bind.
  self._sock.bind((self.address, self.port))
  # Main receive loop; _live is cleared externally to stop the listener, and
  # the socket timeout ensures we re-check the flag periodically.
  while self._live:
    try:
      data, address = self._sock.recvfrom(MAX_MESSAGE_BYTES)
      data = data.decode('utf-8')
      log_line = 'Received multicast message from %s: %s' % (address, data)
      response = self._callback(data)
      if response is not None:
        log_line += ', responding with %s bytes' % len(response)
        # Send replies out-of-band instead of with the same multicast socket
        # so that multiple processes on the same host can listen for
        # requests and reply (if they all try to use the multicast socket
        # to reply, they conflict and this sendto fails).
        response = response.encode('utf-8')
        socket.socket(socket.AF_INET, socket.SOCK_DGRAM).sendto(
            response, address)
      _LOG.debug(log_line)
    except socket.timeout:
      pass
    except socket.error:
      _LOG.debug('Error receiving multicast message', exc_info=True)
"def",
"run",
"(",
"self",
")",
":",
"self",
".",
"_live",
"=",
"True",
"self",
".",
"_sock",
".",
"settimeout",
"(",
"self",
".",
"LISTEN_TIMEOUT_S",
")",
"# Passing in INADDR_ANY means the kernel will choose the default interface.",
"# The localhost address is used to r... | Listen for pings until stopped. | [
"Listen",
"for",
"pings",
"until",
"stopped",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/util/multicast.py#L87-L132 | train | 221,755 |
def _handle_progress(self, total, progress_callback):  # pylint: disable=no-self-use
  """Coroutine: accumulate sent increments and report (current, total)."""
  transferred = 0
  while True:
    # Each value sent into this generator is a progress increment.
    transferred += yield
    try:
      progress_callback(transferred, total)
    except Exception:  # pylint: disable=broad-except
      # A user-supplied callback must never kill the transfer; log and go on.
      _LOG.exception('Progress callback raised an exception. %s',
                     progress_callback)
"def",
"_handle_progress",
"(",
"self",
",",
"total",
",",
"progress_callback",
")",
":",
"# pylint: disable=no-self-use",
"current",
"=",
"0",
"while",
"True",
":",
"current",
"+=",
"yield",
"try",
":",
"progress_callback",
"(",
"current",
",",
"total",
")",
... | Calls the callback with the current progress and total . | [
"Calls",
"the",
"callback",
"with",
"the",
"current",
"progress",
"and",
"total",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/fastboot_protocol.py#L162-L172 | train | 221,756 |
def _simple_command(self, command, arg=None, **kwargs):
  """Send a simple command and return the protocol's handled responses."""
  protocol = self._protocol
  protocol.send_command(command, arg)
  return protocol.handle_simple_responses(**kwargs)
"def",
"_simple_command",
"(",
"self",
",",
"command",
",",
"arg",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_protocol",
".",
"send_command",
"(",
"command",
",",
"arg",
")",
"return",
"self",
".",
"_protocol",
".",
"handle_simple_res... | Send a simple command. | [
"Send",
"a",
"simple",
"command",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/fastboot_protocol.py#L210-L213 | train | 221,757 |
def _discover(**kwargs):
  """Yields info about station servers announcing themselves via multicast.

  Args:
    **kwargs: Forwarded to multicast.send (address, port, ttl, etc.).

  Yields:
    StationInfo tuples for each well-formed response received.
  """
  query = station_server.MULTICAST_QUERY
  for host, response in multicast.send(query, **kwargs):
    try:
      result = json.loads(response)
    except ValueError:
      _LOG.warn('Received bad JSON over multicast from %s: %s', host, response)
      # Skip this response; previously execution fell through and referenced
      # `result`, which is unbound (NameError) or stale from a prior loop
      # iteration.
      continue
    try:
      yield StationInfo(result['cell'], host, result['port'],
                        result['station_id'], 'ONLINE',
                        result.get('test_description'),
                        result['test_name'])
    except KeyError:
      if 'last_activity_time_millis' in result:
        _LOG.debug('Received old station API response on multicast. Ignoring.')
      else:
        _LOG.warn('Received bad multicast response from %s: %s', host, response)
"def",
"_discover",
"(",
"*",
"*",
"kwargs",
")",
":",
"query",
"=",
"station_server",
".",
"MULTICAST_QUERY",
"for",
"host",
",",
"response",
"in",
"multicast",
".",
"send",
"(",
"query",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"result",
"=",
... | Yields info about station servers announcing themselves via multicast. | [
"Yields",
"info",
"about",
"station",
"servers",
"announcing",
"themselves",
"via",
"multicast",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/output/servers/dashboard_server.py#L35-L52 | train | 221,758 |
def update_stations(cls, station_info_list):
  """Called by the station discovery loop to update the station map."""
  with cls.station_map_lock:
    # Mark every previously-known station unreachable; freshly discovered
    # stations below will overwrite their entries.
    for key in list(cls.station_map):
      cls.station_map[key] = cls.station_map[key]._replace(
          status='UNREACHABLE')
    for info in station_info_list:
      cls.station_map['%s:%s' % (info.host, info.port)] = info
"def",
"update_stations",
"(",
"cls",
",",
"station_info_list",
")",
":",
"with",
"cls",
".",
"station_map_lock",
":",
"# By default, assume old stations are unreachable.",
"for",
"host_port",
",",
"station_info",
"in",
"six",
".",
"iteritems",
"(",
"cls",
".",
"sta... | Called by the station discovery loop to update the station map. | [
"Called",
"by",
"the",
"station",
"discovery",
"loop",
"to",
"update",
"the",
"station",
"map",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/output/servers/dashboard_server.py#L79-L89 | train | 221,759 |
def publish_if_new(cls):
  """If the station map has changed, publish the new information."""
  new_message = cls.make_message()
  if new_message == cls.last_message:
    # Nothing changed since the last publish; skip the broadcast.
    return
  super(DashboardPubSub, cls).publish(new_message)
  cls.last_message = new_message
"def",
"publish_if_new",
"(",
"cls",
")",
":",
"message",
"=",
"cls",
".",
"make_message",
"(",
")",
"if",
"message",
"!=",
"cls",
".",
"last_message",
":",
"super",
"(",
"DashboardPubSub",
",",
"cls",
")",
".",
"publish",
"(",
"message",
")",
"cls",
"... | If the station map has changed, publish the new information. | [
"If",
"the",
"station",
"map",
"has",
"changed",
"publish",
"the",
"new",
"information",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/output/servers/dashboard_server.py#L92-L97 | train | 221,760 |
google/openhtf | examples/phase_groups.py | run_basic_group | def run_basic_group():
"""Run the basic phase group example.
In this example, there are no terminal phases; all phases are run.
"""
test = htf.Test(htf.PhaseGroup(
setup=[setup_phase],
main=[main_phase],
teardown=[teardown_phase],
))
test.execute() | python | def run_basic_group():
"""Run the basic phase group example.
In this example, there are no terminal phases; all phases are run.
"""
test = htf.Test(htf.PhaseGroup(
setup=[setup_phase],
main=[main_phase],
teardown=[teardown_phase],
))
test.execute() | [
"def",
"run_basic_group",
"(",
")",
":",
"test",
"=",
"htf",
".",
"Test",
"(",
"htf",
".",
"PhaseGroup",
"(",
"setup",
"=",
"[",
"setup_phase",
"]",
",",
"main",
"=",
"[",
"main_phase",
"]",
",",
"teardown",
"=",
"[",
"teardown_phase",
"]",
",",
")",... | Run the basic phase group example.
In this example, there are no terminal phases; all phases are run. | [
"Run",
"the",
"basic",
"phase",
"group",
"example",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/examples/phase_groups.py#L54-L64 | train | 221,761 |
google/openhtf | examples/phase_groups.py | run_setup_error_group | def run_setup_error_group():
"""Run the phase group example where an error occurs in a setup phase.
The terminal setup phase shortcuts the test. The main phases are
skipped. The PhaseGroup is not entered, so the teardown phases are also
skipped.
"""
test = htf.Test(htf.PhaseGroup(
setup=[error_setup_phase],
main=[main_phase],
teardown=[teardown_phase],
))
test.execute() | python | def run_setup_error_group():
"""Run the phase group example where an error occurs in a setup phase.
The terminal setup phase shortcuts the test. The main phases are
skipped. The PhaseGroup is not entered, so the teardown phases are also
skipped.
"""
test = htf.Test(htf.PhaseGroup(
setup=[error_setup_phase],
main=[main_phase],
teardown=[teardown_phase],
))
test.execute() | [
"def",
"run_setup_error_group",
"(",
")",
":",
"test",
"=",
"htf",
".",
"Test",
"(",
"htf",
".",
"PhaseGroup",
"(",
"setup",
"=",
"[",
"error_setup_phase",
"]",
",",
"main",
"=",
"[",
"main_phase",
"]",
",",
"teardown",
"=",
"[",
"teardown_phase",
"]",
... | Run the phase group example where an error occurs in a setup phase.
The terminal setup phase shortcuts the test. The main phases are
skipped. The PhaseGroup is not entered, so the teardown phases are also
skipped. | [
"Run",
"the",
"phase",
"group",
"example",
"where",
"an",
"error",
"occurs",
"in",
"a",
"setup",
"phase",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/examples/phase_groups.py#L67-L79 | train | 221,762 |
google/openhtf | examples/phase_groups.py | run_main_error_group | def run_main_error_group():
"""Run the phase group example where an error occurs in a main phase.
The main phase in this example is terminal. The PhaseGroup was entered
because the setup phases ran without error, so the teardown phases are run.
The other main phase is skipped.
"""
test = htf.Test(htf.PhaseGroup(
setup=[setup_phase],
main=[error_main_phase, main_phase],
teardown=[teardown_phase],
))
test.execute() | python | def run_main_error_group():
"""Run the phase group example where an error occurs in a main phase.
The main phase in this example is terminal. The PhaseGroup was entered
because the setup phases ran without error, so the teardown phases are run.
The other main phase is skipped.
"""
test = htf.Test(htf.PhaseGroup(
setup=[setup_phase],
main=[error_main_phase, main_phase],
teardown=[teardown_phase],
))
test.execute() | [
"def",
"run_main_error_group",
"(",
")",
":",
"test",
"=",
"htf",
".",
"Test",
"(",
"htf",
".",
"PhaseGroup",
"(",
"setup",
"=",
"[",
"setup_phase",
"]",
",",
"main",
"=",
"[",
"error_main_phase",
",",
"main_phase",
"]",
",",
"teardown",
"=",
"[",
"tea... | Run the phase group example where an error occurs in a main phase.
The main phase in this example is terminal. The PhaseGroup was entered
because the setup phases ran without error, so the teardown phases are run.
The other main phase is skipped. | [
"Run",
"the",
"phase",
"group",
"example",
"where",
"an",
"error",
"occurs",
"in",
"a",
"main",
"phase",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/examples/phase_groups.py#L82-L94 | train | 221,763 |
google/openhtf | examples/phase_groups.py | run_nested_groups | def run_nested_groups():
"""Run the nested groups example.
This example shows a PhaseGroup in a PhaseGroup. No phase is terminal, so all
are run in the order;
main_phase
inner_main_phase
inner_teardown_phase
teardown_phase
"""
test = htf.Test(
htf.PhaseGroup(
main=[
main_phase,
htf.PhaseGroup.with_teardown(inner_teardown_phase)(
inner_main_phase),
],
teardown=[teardown_phase]
)
)
test.execute() | python | def run_nested_groups():
"""Run the nested groups example.
This example shows a PhaseGroup in a PhaseGroup. No phase is terminal, so all
are run in the order;
main_phase
inner_main_phase
inner_teardown_phase
teardown_phase
"""
test = htf.Test(
htf.PhaseGroup(
main=[
main_phase,
htf.PhaseGroup.with_teardown(inner_teardown_phase)(
inner_main_phase),
],
teardown=[teardown_phase]
)
)
test.execute() | [
"def",
"run_nested_groups",
"(",
")",
":",
"test",
"=",
"htf",
".",
"Test",
"(",
"htf",
".",
"PhaseGroup",
"(",
"main",
"=",
"[",
"main_phase",
",",
"htf",
".",
"PhaseGroup",
".",
"with_teardown",
"(",
"inner_teardown_phase",
")",
"(",
"inner_main_phase",
... | Run the nested groups example.
This example shows a PhaseGroup in a PhaseGroup. No phase is terminal, so all
are run in the order;
main_phase
inner_main_phase
inner_teardown_phase
teardown_phase | [
"Run",
"the",
"nested",
"groups",
"example",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/examples/phase_groups.py#L97-L117 | train | 221,764 |
google/openhtf | examples/phase_groups.py | run_nested_error_groups | def run_nested_error_groups():
"""Run nested groups example where an error occurs in nested main phase.
In this example, the first main phase in the nested PhaseGroup errors out.
The other inner main phase is skipped, as is the outer main phase. Both
PhaseGroups were entered, so both teardown phases are run.
"""
test = htf.Test(
htf.PhaseGroup(
main=[
htf.PhaseGroup.with_teardown(inner_teardown_phase)(
error_main_phase, main_phase),
main_phase,
],
teardown=[teardown_phase],
)
)
test.execute() | python | def run_nested_error_groups():
"""Run nested groups example where an error occurs in nested main phase.
In this example, the first main phase in the nested PhaseGroup errors out.
The other inner main phase is skipped, as is the outer main phase. Both
PhaseGroups were entered, so both teardown phases are run.
"""
test = htf.Test(
htf.PhaseGroup(
main=[
htf.PhaseGroup.with_teardown(inner_teardown_phase)(
error_main_phase, main_phase),
main_phase,
],
teardown=[teardown_phase],
)
)
test.execute() | [
"def",
"run_nested_error_groups",
"(",
")",
":",
"test",
"=",
"htf",
".",
"Test",
"(",
"htf",
".",
"PhaseGroup",
"(",
"main",
"=",
"[",
"htf",
".",
"PhaseGroup",
".",
"with_teardown",
"(",
"inner_teardown_phase",
")",
"(",
"error_main_phase",
",",
"main_phas... | Run nested groups example where an error occurs in nested main phase.
In this example, the first main phase in the nested PhaseGroup errors out.
The other inner main phase is skipped, as is the outer main phase. Both
PhaseGroups were entered, so both teardown phases are run. | [
"Run",
"nested",
"groups",
"example",
"where",
"an",
"error",
"occurs",
"in",
"nested",
"main",
"phase",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/examples/phase_groups.py#L120-L137 | train | 221,765 |
google/openhtf | bin/units_from_xls.py | main | def main():
"""Main entry point for UNECE code .xls parsing."""
parser = argparse.ArgumentParser(
description='Reads in a .xls file and generates a units module for '
'OpenHTF.',
prog='python units_from_xls.py')
parser.add_argument('xlsfile', type=str,
help='the .xls file to parse')
parser.add_argument(
'--outfile',
type=str,
default=os.path.join(os.path.dirname(__file__), os.path.pardir,
'openhtf','util', 'units.py'),
help='where to put the generated .py file.')
args = parser.parse_args()
if not os.path.exists(args.xlsfile):
print('Unable to locate the file "%s".' % args.xlsfile)
parser.print_help()
sys.exit()
unit_defs = unit_defs_from_sheet(
xlrd.open_workbook(args.xlsfile).sheet_by_name(SHEET_NAME),
COLUMN_NAMES)
_, tmp_path = mkstemp()
with open(tmp_path, 'w') as new_file:
new_file.write(PRE)
new_file.writelines(
[line.encode('utf8', 'replace') for line in unit_defs])
new_file.write(POST)
new_file.flush()
os.remove(args.outfile)
shutil.move(tmp_path, args.outfile) | python | def main():
"""Main entry point for UNECE code .xls parsing."""
parser = argparse.ArgumentParser(
description='Reads in a .xls file and generates a units module for '
'OpenHTF.',
prog='python units_from_xls.py')
parser.add_argument('xlsfile', type=str,
help='the .xls file to parse')
parser.add_argument(
'--outfile',
type=str,
default=os.path.join(os.path.dirname(__file__), os.path.pardir,
'openhtf','util', 'units.py'),
help='where to put the generated .py file.')
args = parser.parse_args()
if not os.path.exists(args.xlsfile):
print('Unable to locate the file "%s".' % args.xlsfile)
parser.print_help()
sys.exit()
unit_defs = unit_defs_from_sheet(
xlrd.open_workbook(args.xlsfile).sheet_by_name(SHEET_NAME),
COLUMN_NAMES)
_, tmp_path = mkstemp()
with open(tmp_path, 'w') as new_file:
new_file.write(PRE)
new_file.writelines(
[line.encode('utf8', 'replace') for line in unit_defs])
new_file.write(POST)
new_file.flush()
os.remove(args.outfile)
shutil.move(tmp_path, args.outfile) | [
"def",
"main",
"(",
")",
":",
"parser",
"=",
"argparse",
".",
"ArgumentParser",
"(",
"description",
"=",
"'Reads in a .xls file and generates a units module for '",
"'OpenHTF.'",
",",
"prog",
"=",
"'python units_from_xls.py'",
")",
"parser",
".",
"add_argument",
"(",
... | Main entry point for UNECE code .xls parsing. | [
"Main",
"entry",
"point",
"for",
"UNECE",
"code",
".",
"xls",
"parsing",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/bin/units_from_xls.py#L169-L203 | train | 221,766 |
google/openhtf | bin/units_from_xls.py | unit_defs_from_sheet | def unit_defs_from_sheet(sheet, column_names):
"""A generator that parses a worksheet containing UNECE code definitions.
Args:
sheet: An xldr.sheet object representing a UNECE code worksheet.
column_names: A list/tuple with the expected column names corresponding to
the unit name, code and suffix in that order.
Yields: Lines of Python source code that define OpenHTF Unit objects.
"""
seen = set()
try:
col_indices = {}
rows = sheet.get_rows()
# Find the indices for the columns we care about.
for idx, cell in enumerate(six.next(rows)):
if cell.value in column_names:
col_indices[cell.value] = idx
# loop over all remaining rows and pull out units.
for row in rows:
name = row[col_indices[column_names[0]]].value.replace("'", r'\'')
code = row[col_indices[column_names[1]]].value
suffix = row[col_indices[column_names[2]]].value.replace("'", r'\'')
key = unit_key_from_name(name)
if key in seen:
continue
seen.add(key)
# Split on ' or ' to support the units like '% or pct'
for suffix in suffix.split(' or '):
yield "%s = UnitDescriptor('%s', '%s', '''%s''')\n" % (
key, name, code, suffix)
yield "ALL_UNITS.append(%s)\n" % key
except xlrd.XLRDError:
sys.stdout.write('Unable to process the .xls file.') | python | def unit_defs_from_sheet(sheet, column_names):
"""A generator that parses a worksheet containing UNECE code definitions.
Args:
sheet: An xldr.sheet object representing a UNECE code worksheet.
column_names: A list/tuple with the expected column names corresponding to
the unit name, code and suffix in that order.
Yields: Lines of Python source code that define OpenHTF Unit objects.
"""
seen = set()
try:
col_indices = {}
rows = sheet.get_rows()
# Find the indices for the columns we care about.
for idx, cell in enumerate(six.next(rows)):
if cell.value in column_names:
col_indices[cell.value] = idx
# loop over all remaining rows and pull out units.
for row in rows:
name = row[col_indices[column_names[0]]].value.replace("'", r'\'')
code = row[col_indices[column_names[1]]].value
suffix = row[col_indices[column_names[2]]].value.replace("'", r'\'')
key = unit_key_from_name(name)
if key in seen:
continue
seen.add(key)
# Split on ' or ' to support the units like '% or pct'
for suffix in suffix.split(' or '):
yield "%s = UnitDescriptor('%s', '%s', '''%s''')\n" % (
key, name, code, suffix)
yield "ALL_UNITS.append(%s)\n" % key
except xlrd.XLRDError:
sys.stdout.write('Unable to process the .xls file.') | [
"def",
"unit_defs_from_sheet",
"(",
"sheet",
",",
"column_names",
")",
":",
"seen",
"=",
"set",
"(",
")",
"try",
":",
"col_indices",
"=",
"{",
"}",
"rows",
"=",
"sheet",
".",
"get_rows",
"(",
")",
"# Find the indices for the columns we care about.",
"for",
"id... | A generator that parses a worksheet containing UNECE code definitions.
Args:
sheet: An xldr.sheet object representing a UNECE code worksheet.
column_names: A list/tuple with the expected column names corresponding to
the unit name, code and suffix in that order.
Yields: Lines of Python source code that define OpenHTF Unit objects. | [
"A",
"generator",
"that",
"parses",
"a",
"worksheet",
"containing",
"UNECE",
"code",
"definitions",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/bin/units_from_xls.py#L206-L242 | train | 221,767 |
def unit_key_from_name(name):
  """Derive a legal Python identifier from a unit name, for use as a key."""
  key = name
  # Apply every configured substitution to the raw unit name.
  for old_text, new_text in six.iteritems(UNIT_KEY_REPLACEMENTS):
    key = key.replace(old_text, new_text)
  # Uppercase first, then squash runs of underscores down to a single one.
  return re.sub(r'_+', '_', key.upper())
def make_wire_commands(*ids):
  """Build lookup tables between ADB command strings and wire encodings.

  Each four-character command ID is encoded as a little-endian unsigned
  integer whose bytes are the ASCII values of the command characters.

  Args:
    *ids: Four-character command strings, e.g. 'CNXN', 'AUTH'.

  Returns:
    A two-tuple (cmd_to_wire, wire_to_cmd) of dicts mapping command
    strings to their integer wire values and back again.
  """
  cmd_to_wire = {
      cmd: sum(ord(c) << (i * 8) for i, c in enumerate(cmd)) for cmd in ids
  }
  # Invert the mapping; wire values are unique because the commands are.
  # Plain dict.items() behaves identically under Python 2 and 3 here, so
  # the six.iteritems indirection is unnecessary.
  wire_to_cmd = {wire: cmd for cmd, wire in cmd_to_wire.items()}
  return cmd_to_wire, wire_to_cmd
def to_adb_message(self, data):
  """Combine this raw header with its payload into a full AdbMessage.

  Raises AdbDataIntegrityError when the payload length or CRC32 does not
  match what the header promised.
  """
  command = AdbMessage.WIRE_TO_CMD.get(self.cmd)
  message = AdbMessage(command, self.arg0, self.arg1, data)
  # `and` short-circuits, so the CRC is only computed when the length is
  # already known to match - same as the original `or` of mismatches.
  valid = (len(data) == self.data_length and
           message.data_crc32 == self.data_checksum)
  if not valid:
    raise usb_exceptions.AdbDataIntegrityError(
        '%s (%s) received invalid data: %s', message, self, repr(data))
  return message
def write_message(self, message, timeout):
  """Send one AdbMessage (header, then payload) over this transport.

  Args:
    message: The AdbMessage to send.
    timeout: timeouts.PolledTimeout covering the whole write operation.
  """
  with self._writer_lock:
    transport = self._transport
    transport.write(message.header, timeout.remaining_ms)
    # Having sent the header we must also send the data, or the remote end
    # falls out of sync - so if the timeout lapsed mid-message, push the
    # data through anyway on a short fresh timeout.
    if timeout.has_expired():
      _LOG.warning('Timed out between AdbMessage header and data, sending '
                   'data anyway with 10ms timeout')
      timeout = timeouts.PolledTimeout.from_millis(10)
    transport.write(message.data, timeout.remaining_ms)
def read_message(self, timeout):
  """Read a single AdbMessage from this transport.

  Args:
    timeout: timeouts.PolledTimeout for the entire operation; packets that
      carry a payload require two USB reads.

  Returns:
    The ADB message read from the device.

  Raises:
    UsbReadFailedError: There's an error during read, including timeout.
    AdbProtocolError: A message is incorrectly formatted.
    AdbTimeoutError: timeout is already expired, or expires before we read
      the entire message, specifically between reading header and data
      packets.
  """
  with self._reader_lock:
    header_size = struct.calcsize(AdbMessage.HEADER_STRUCT_FORMAT)
    raw_header = self._transport.read(header_size, timeout.remaining_ms)
    if not raw_header:
      raise usb_exceptions.AdbProtocolError('Adb connection lost')

    try:
      unpacked = struct.unpack(AdbMessage.HEADER_STRUCT_FORMAT, raw_header)
    except struct.error as exception:
      raise usb_exceptions.AdbProtocolError(
          'Unable to unpack ADB command (%s): %s (%s)',
          AdbMessage.HEADER_STRUCT_FORMAT, raw_header, exception)
    raw_message = RawAdbMessage(*unpacked)

    # A header that promises data requires a second read; as with writes,
    # do it even if the timeout lapsed so the stream stays in sync.
    if raw_message.data_length > 0:
      if timeout.has_expired():
        _LOG.warning('Timed out between AdbMessage header and data, reading '
                     'data anyway with 10ms timeout')
        timeout = timeouts.PolledTimeout.from_millis(10)
      data = self._transport.read(raw_message.data_length,
                                  timeout.remaining_ms)
    else:
      data = ''
    return raw_message.to_adb_message(data)
def read_until(self, expected_commands, timeout):
  """Read messages until one matching an expected command arrives.

  The ADB protocol specifies that before a successful CNXN handshake, any
  other packets must be ignored, so this method discards unwanted
  commands. It is primarily used during the initial connection to the
  device; see read_message() for other exceptions that may be raised.

  Args:
    expected_commands: Iterable of acceptable command strings, like
      ('CNXN', 'AUTH').
    timeout: timeouts.PolledTimeout object to use for timeout.

  Returns:
    The first ADB message whose command is in expected_commands.

  Raises:
    AdbProtocolError: If timeout expires between reads, this can happen
      if we are getting spammed with unexpected commands.
  """
  read_next = lambda: self.read_message(timeout)
  is_expected = lambda m: m.command in expected_commands
  msg = timeouts.loop_until_timeout_or_valid(timeout, read_next,
                                             is_expected, 0)
  if not is_expected(msg):
    raise usb_exceptions.AdbTimeoutError(
        'Timed out establishing connection, waiting for: %s',
        expected_commands)
  return msg
def header(self):
  """Serialized wire header for this message.

  Packs the command, both args, payload length, payload CRC32 and the
  magic value into the fixed-size binary header struct.
  """
  fields = (self._command, self.arg0, self.arg1, len(self.data),
            self.data_crc32, self.magic)
  return struct.pack(self.HEADER_STRUCT_FORMAT, *fields)
google/openhtf | openhtf/plugs/usb/filesync_service.py | _make_message_type | def _make_message_type(name, attributes, has_data=True):
"""Make a message type for the AdbTransport subclasses."""
def assert_command_is(self, command): # pylint: disable=invalid-name
"""Assert that a message's command matches the given command."""
if self.command != command:
raise usb_exceptions.AdbProtocolError(
'Expected %s command, received %s', command, self)
return type(name, (collections.namedtuple(name, attributes),),
{
'assert_command_is': assert_command_is,
'has_data': has_data,
# Struct format on the wire has an unsigned int for each attr.
'struct_format': '<%sI' % len(attributes.split()),
}) | python | def _make_message_type(name, attributes, has_data=True):
"""Make a message type for the AdbTransport subclasses."""
def assert_command_is(self, command): # pylint: disable=invalid-name
"""Assert that a message's command matches the given command."""
if self.command != command:
raise usb_exceptions.AdbProtocolError(
'Expected %s command, received %s', command, self)
return type(name, (collections.namedtuple(name, attributes),),
{
'assert_command_is': assert_command_is,
'has_data': has_data,
# Struct format on the wire has an unsigned int for each attr.
'struct_format': '<%sI' % len(attributes.split()),
}) | [
"def",
"_make_message_type",
"(",
"name",
",",
"attributes",
",",
"has_data",
"=",
"True",
")",
":",
"def",
"assert_command_is",
"(",
"self",
",",
"command",
")",
":",
"# pylint: disable=invalid-name",
"\"\"\"Assert that a message's command matches the given command.\"\"\""... | Make a message type for the AdbTransport subclasses. | [
"Make",
"a",
"message",
"type",
"for",
"the",
"AdbTransport",
"subclasses",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/filesync_service.py#L130-L145 | train | 221,775 |
def stat(self, filename, timeout=None):
  """Return a DeviceFileStat describing the given file on the device."""
  transport = StatFilesyncTransport(self.stream)
  transport.write_data('STAT', filename, timeout)
  response = transport.read_message(timeout)
  response.assert_command_is('STAT')
  return DeviceFileStat(filename, response.mode, response.size,
                        response.time)
def list(self, path, timeout=None):
  """List the contents of a directory on the device.

  Args:
    path: Directory whose contents should be listed.
    timeout: Timeout to use for this operation.

  Returns:
    Generator yielding DeviceFileStat tuples, one per directory entry.
  """
  transport = DentFilesyncTransport(self.stream)
  # The LIST request is written eagerly; only the replies stream lazily.
  transport.write_data('LIST', path, timeout)
  dent_msgs = transport.read_until_done('DENT', timeout)
  return (DeviceFileStat(dent.name, dent.mode, dent.size, dent.time)
          for dent in dent_msgs)
def recv(self, filename, dest_file, timeout=None):
  """Copy a device file's contents into the file-like dest_file."""
  transport = DataFilesyncTransport(self.stream)
  transport.write_data('RECV', filename, timeout)
  # Stream the file across in DATA chunks until the remote sends DONE.
  for chunk in transport.read_until_done('DATA', timeout):
    dest_file.write(chunk.data)
def _check_for_fail_message(self, transport, exc_info, timeout):  # pylint: disable=no-self-use
  """Check for a 'FAIL' message from transport.

  If reading surfaces a remote 'FAIL' (an AdbRemoteError), that exception
  propagates; if reading fails any other USB way, the original exception
  described by exc_info is re-raised with its traceback instead.

  Args:
    transport: Transport from which to read for a 'FAIL' message.
    exc_info: Exception info (as from sys.exc_info()) to raise if no
      'FAIL' is read.
    timeout: Timeout to use for the read operation.

  Raises:
    AdbRemoteError: If a 'FAIL' is read, otherwise raises exc_info.
  """
  try:
    transport.read_message(timeout)
  except usb_exceptions.CommonUsbError as usb_error:
    # Exactly an AdbRemoteError (not a sibling USB error): let it through
    # unchanged - same identity check as sys.exc_info()[0] in the handler.
    if type(usb_error) is usb_exceptions.AdbRemoteError:
      raise
    # Otherwise re-raise the caller's original exception with traceback.
    raise_with_traceback(exc_info[0](exc_info[1]), traceback=exc_info[2])
def write_data(self, command, data, timeout=None):
  """Convenience wrapper: send a DataMessage with the given command/data."""
  message = FilesyncMessageTypes.DataMessage(command, data)
  self.write_message(message, timeout)
def read_until_done(self, command, timeout=None):
  """Yield messages of the given command until a 'DONE' arrives.

  Read messages of the given command until a 'DONE' command is received.
  If a command other than the requested one arrives, AdbProtocolError is
  raised.

  Args:
    command: The command to expect, like 'DENT' or 'DATA'.
    timeout: The timeouts.PolledTimeout to use for this operation.

  Yields:
    Messages read, of type self.RECV_MSG_TYPE; see read_message().

  Raises:
    AdbProtocolError: If an unexpected command is read.
    AdbRemoteError: If a 'FAIL' message is read.
  """
  while True:
    message = self.read_message(timeout)
    if message.command == 'DONE':
      return
    # Anything other than DONE must match the requested command.
    message.assert_command_is(command)
    yield message
def read_message(self, timeout=None):
  """Read and return one message from this transport.

  Reads a message of RECV_MSG_TYPE; for message types that carry data the
  trailing length field is transparently replaced with the data itself,
  so callers get the payload along with the header fields.

  Args:
    timeout: timeouts.PolledTimeout to use for the operation.

  Returns:
    An instance of self.RECV_MSG_TYPE that was read from self.stream.

  Raises:
    AdbProtocolError: If an invalid response is received.
    AdbRemoteError: If a FAIL response is received.
  """
  header_size = struct.calcsize(self.RECV_MSG_TYPE.struct_format)
  raw_data = self.stream.read(header_size, timeout)
  try:
    fields = struct.unpack(self.RECV_MSG_TYPE.struct_format, raw_data)
  except struct.error:
    raise usb_exceptions.AdbProtocolError(
        '%s expected format "%s", got data %s', self,
        self.RECV_MSG_TYPE.struct_format, raw_data)

  wire_command = fields[0]
  if wire_command not in self.WIRE_TO_CMD:
    raise usb_exceptions.AdbProtocolError(
        'Unrecognized command id: %s', fields)
  # Replace the numeric wire command with its string name.
  fields = (self.WIRE_TO_CMD[wire_command],) + fields[1:]

  if self.RECV_MSG_TYPE.has_data and fields[-1]:
    # For data-carrying messages the last struct field is the data length;
    # read that many bytes and swap the length out for the payload.
    fields = fields[:-1] + (self.stream.read(fields[-1], timeout),)

  if fields[0] not in self.VALID_RESPONSES:
    raise usb_exceptions.AdbProtocolError(
        '%s not a valid response for %s', fields[0], self)
  if fields[0] == 'FAIL':
    raise usb_exceptions.AdbRemoteError(
        'Remote ADB failure: %s', fields)
  return self.RECV_MSG_TYPE(*fields)
google/openhtf | openhtf/output/proto/mfg_event_converter.py | _lazy_load_units_by_code | def _lazy_load_units_by_code():
"""Populate dict of units by code iff UNITS_BY_CODE is empty."""
if UNITS_BY_CODE:
# already populated
return
for unit in units.UNITS_BY_NAME.values():
UNITS_BY_CODE[unit.code] = unit | python | def _lazy_load_units_by_code():
"""Populate dict of units by code iff UNITS_BY_CODE is empty."""
if UNITS_BY_CODE:
# already populated
return
for unit in units.UNITS_BY_NAME.values():
UNITS_BY_CODE[unit.code] = unit | [
"def",
"_lazy_load_units_by_code",
"(",
")",
":",
"if",
"UNITS_BY_CODE",
":",
"# already populated",
"return",
"for",
"unit",
"in",
"units",
".",
"UNITS_BY_NAME",
".",
"values",
"(",
")",
":",
"UNITS_BY_CODE",
"[",
"unit",
".",
"code",
"]",
"=",
"unit"
] | Populate dict of units by code iff UNITS_BY_CODE is empty. | [
"Populate",
"dict",
"of",
"units",
"by",
"code",
"iff",
"UNITS_BY_CODE",
"is",
"empty",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/output/proto/mfg_event_converter.py#L52-L59 | train | 221,783 |
google/openhtf | openhtf/output/proto/mfg_event_converter.py | _populate_basic_data | def _populate_basic_data(mfg_event, record):
"""Copies data from the OpenHTF TestRecord to the MfgEvent proto."""
# TODO:
# * Missing in proto: set run name from metadata.
# * `part_tags` field on proto is unused
# * `timings` field on proto is unused.
# * Handle arbitrary units as uom_code/uom_suffix.
# Populate non-repeated fields.
mfg_event.dut_serial = record.dut_id
mfg_event.start_time_ms = record.start_time_millis
mfg_event.end_time_ms = record.end_time_millis
mfg_event.tester_name = record.station_id
mfg_event.test_name = record.metadata.get('test_name') or record.station_id
mfg_event.test_status = test_runs_converter.OUTCOME_MAP[record.outcome]
mfg_event.operator_name = record.metadata.get('operator_name', '')
mfg_event.test_version = str(record.metadata.get('test_version', ''))
mfg_event.test_description = record.metadata.get('test_description', '')
# Populate part_tags.
mfg_event.part_tags.extend(record.metadata.get('part_tags', []))
# Populate phases.
for phase in record.phases:
mfg_phase = mfg_event.phases.add()
mfg_phase.name = phase.name
mfg_phase.description = phase.codeinfo.sourcecode
mfg_phase.timing.start_time_millis = phase.start_time_millis
mfg_phase.timing.end_time_millis = phase.end_time_millis
# Populate failure codes.
for details in record.outcome_details:
failure_code = mfg_event.failure_codes.add()
failure_code.code = details.code
failure_code.details = details.description
# Populate test logs.
for log_record in record.log_records:
test_log = mfg_event.test_logs.add()
test_log.timestamp_millis = log_record.timestamp_millis
test_log.log_message = log_record.message
test_log.logger_name = log_record.logger_name
test_log.levelno = log_record.level
if log_record.level <= logging.DEBUG:
test_log.level = test_runs_pb2.TestRunLogMessage.DEBUG
elif log_record.level <= logging.INFO:
test_log.level = test_runs_pb2.TestRunLogMessage.INFO
elif log_record.level <= logging.WARNING:
test_log.level = test_runs_pb2.TestRunLogMessage.WARNING
elif log_record.level <= logging.ERROR:
test_log.level = test_runs_pb2.TestRunLogMessage.ERROR
elif log_record.level <= logging.CRITICAL:
test_log.level = test_runs_pb2.TestRunLogMessage.CRITICAL
test_log.log_source = log_record.source
test_log.lineno = log_record.lineno | python | def _populate_basic_data(mfg_event, record):
"""Copies data from the OpenHTF TestRecord to the MfgEvent proto."""
# TODO:
# * Missing in proto: set run name from metadata.
# * `part_tags` field on proto is unused
# * `timings` field on proto is unused.
# * Handle arbitrary units as uom_code/uom_suffix.
# Populate non-repeated fields.
mfg_event.dut_serial = record.dut_id
mfg_event.start_time_ms = record.start_time_millis
mfg_event.end_time_ms = record.end_time_millis
mfg_event.tester_name = record.station_id
mfg_event.test_name = record.metadata.get('test_name') or record.station_id
mfg_event.test_status = test_runs_converter.OUTCOME_MAP[record.outcome]
mfg_event.operator_name = record.metadata.get('operator_name', '')
mfg_event.test_version = str(record.metadata.get('test_version', ''))
mfg_event.test_description = record.metadata.get('test_description', '')
# Populate part_tags.
mfg_event.part_tags.extend(record.metadata.get('part_tags', []))
# Populate phases.
for phase in record.phases:
mfg_phase = mfg_event.phases.add()
mfg_phase.name = phase.name
mfg_phase.description = phase.codeinfo.sourcecode
mfg_phase.timing.start_time_millis = phase.start_time_millis
mfg_phase.timing.end_time_millis = phase.end_time_millis
# Populate failure codes.
for details in record.outcome_details:
failure_code = mfg_event.failure_codes.add()
failure_code.code = details.code
failure_code.details = details.description
# Populate test logs.
for log_record in record.log_records:
test_log = mfg_event.test_logs.add()
test_log.timestamp_millis = log_record.timestamp_millis
test_log.log_message = log_record.message
test_log.logger_name = log_record.logger_name
test_log.levelno = log_record.level
if log_record.level <= logging.DEBUG:
test_log.level = test_runs_pb2.TestRunLogMessage.DEBUG
elif log_record.level <= logging.INFO:
test_log.level = test_runs_pb2.TestRunLogMessage.INFO
elif log_record.level <= logging.WARNING:
test_log.level = test_runs_pb2.TestRunLogMessage.WARNING
elif log_record.level <= logging.ERROR:
test_log.level = test_runs_pb2.TestRunLogMessage.ERROR
elif log_record.level <= logging.CRITICAL:
test_log.level = test_runs_pb2.TestRunLogMessage.CRITICAL
test_log.log_source = log_record.source
test_log.lineno = log_record.lineno | [
"def",
"_populate_basic_data",
"(",
"mfg_event",
",",
"record",
")",
":",
"# TODO:",
"# * Missing in proto: set run name from metadata.",
"# * `part_tags` field on proto is unused",
"# * `timings` field on proto is unused.",
"# * Handle arbitrary units as uom_code/uom_suffix.",
"# ... | Copies data from the OpenHTF TestRecord to the MfgEvent proto. | [
"Copies",
"data",
"from",
"the",
"OpenHTF",
"TestRecord",
"to",
"the",
"MfgEvent",
"proto",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/output/proto/mfg_event_converter.py#L105-L159 | train | 221,784 |
google/openhtf | openhtf/output/proto/mfg_event_converter.py | _attach_record_as_json | def _attach_record_as_json(mfg_event, record):
"""Attach a copy of the record as JSON so we have an un-mangled copy."""
attachment = mfg_event.attachment.add()
attachment.name = TEST_RECORD_ATTACHMENT_NAME
test_record_dict = htf_data.convert_to_base_types(record)
attachment.value_binary = _convert_object_to_json(test_record_dict)
attachment.type = test_runs_pb2.TEXT_UTF8 | python | def _attach_record_as_json(mfg_event, record):
"""Attach a copy of the record as JSON so we have an un-mangled copy."""
attachment = mfg_event.attachment.add()
attachment.name = TEST_RECORD_ATTACHMENT_NAME
test_record_dict = htf_data.convert_to_base_types(record)
attachment.value_binary = _convert_object_to_json(test_record_dict)
attachment.type = test_runs_pb2.TEXT_UTF8 | [
"def",
"_attach_record_as_json",
"(",
"mfg_event",
",",
"record",
")",
":",
"attachment",
"=",
"mfg_event",
".",
"attachment",
".",
"add",
"(",
")",
"attachment",
".",
"name",
"=",
"TEST_RECORD_ATTACHMENT_NAME",
"test_record_dict",
"=",
"htf_data",
".",
"convert_t... | Attach a copy of the record as JSON so we have an un-mangled copy. | [
"Attach",
"a",
"copy",
"of",
"the",
"record",
"as",
"JSON",
"so",
"we",
"have",
"an",
"un",
"-",
"mangled",
"copy",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/output/proto/mfg_event_converter.py#L162-L168 | train | 221,785 |
google/openhtf | openhtf/output/proto/mfg_event_converter.py | _attach_config | def _attach_config(mfg_event, record):
"""Attaches the OpenHTF config file as JSON."""
if 'config' not in record.metadata:
return
attachment = mfg_event.attachment.add()
attachment.name = 'config'
attachment.value_binary = _convert_object_to_json(record.metadata['config'])
attachment.type = test_runs_pb2.TEXT_UTF8 | python | def _attach_config(mfg_event, record):
"""Attaches the OpenHTF config file as JSON."""
if 'config' not in record.metadata:
return
attachment = mfg_event.attachment.add()
attachment.name = 'config'
attachment.value_binary = _convert_object_to_json(record.metadata['config'])
attachment.type = test_runs_pb2.TEXT_UTF8 | [
"def",
"_attach_config",
"(",
"mfg_event",
",",
"record",
")",
":",
"if",
"'config'",
"not",
"in",
"record",
".",
"metadata",
":",
"return",
"attachment",
"=",
"mfg_event",
".",
"attachment",
".",
"add",
"(",
")",
"attachment",
".",
"name",
"=",
"'config'"... | Attaches the OpenHTF config file as JSON. | [
"Attaches",
"the",
"OpenHTF",
"config",
"file",
"as",
"JSON",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/output/proto/mfg_event_converter.py#L186-L193 | train | 221,786 |
google/openhtf | openhtf/output/proto/mfg_event_converter.py | phase_uniquizer | def phase_uniquizer(all_phases):
"""Makes the names of phase measurement and attachments unique.
This function will make the names of measurements and attachments unique.
It modifies the input all_phases.
Args:
all_phases: the phases to make unique
Returns:
the phases now modified.
"""
measurement_name_maker = UniqueNameMaker(
itertools.chain.from_iterable(
phase.measurements.keys() for phase in all_phases
if phase.measurements))
attachment_names = list(itertools.chain.from_iterable(
phase.attachments.keys() for phase in all_phases))
attachment_names.extend(itertools.chain.from_iterable([
'multidim_' + name for name, meas in phase.measurements.items()
if meas.dimensions is not None
] for phase in all_phases if phase.measurements))
attachment_name_maker = UniqueNameMaker(attachment_names)
for phase in all_phases:
# Make measurements unique.
for name, _ in sorted(phase.measurements.items()):
old_name = name
name = measurement_name_maker.make_unique(name)
phase.measurements[old_name].name = name
phase.measurements[name] = phase.measurements.pop(old_name)
# Make attachments unique.
for name, _ in sorted(phase.attachments.items()):
old_name = name
name = attachment_name_maker.make_unique(name)
phase.attachments[old_name].name = name
phase.attachments[name] = phase.attachments.pop(old_name)
return all_phases | python | def phase_uniquizer(all_phases):
"""Makes the names of phase measurement and attachments unique.
This function will make the names of measurements and attachments unique.
It modifies the input all_phases.
Args:
all_phases: the phases to make unique
Returns:
the phases now modified.
"""
measurement_name_maker = UniqueNameMaker(
itertools.chain.from_iterable(
phase.measurements.keys() for phase in all_phases
if phase.measurements))
attachment_names = list(itertools.chain.from_iterable(
phase.attachments.keys() for phase in all_phases))
attachment_names.extend(itertools.chain.from_iterable([
'multidim_' + name for name, meas in phase.measurements.items()
if meas.dimensions is not None
] for phase in all_phases if phase.measurements))
attachment_name_maker = UniqueNameMaker(attachment_names)
for phase in all_phases:
# Make measurements unique.
for name, _ in sorted(phase.measurements.items()):
old_name = name
name = measurement_name_maker.make_unique(name)
phase.measurements[old_name].name = name
phase.measurements[name] = phase.measurements.pop(old_name)
# Make attachments unique.
for name, _ in sorted(phase.attachments.items()):
old_name = name
name = attachment_name_maker.make_unique(name)
phase.attachments[old_name].name = name
phase.attachments[name] = phase.attachments.pop(old_name)
return all_phases | [
"def",
"phase_uniquizer",
"(",
"all_phases",
")",
":",
"measurement_name_maker",
"=",
"UniqueNameMaker",
"(",
"itertools",
".",
"chain",
".",
"from_iterable",
"(",
"phase",
".",
"measurements",
".",
"keys",
"(",
")",
"for",
"phase",
"in",
"all_phases",
"if",
"... | Makes the names of phase measurement and attachments unique.
This function will make the names of measurements and attachments unique.
It modifies the input all_phases.
Args:
all_phases: the phases to make unique
Returns:
the phases now modified. | [
"Makes",
"the",
"names",
"of",
"phase",
"measurement",
"and",
"attachments",
"unique",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/output/proto/mfg_event_converter.py#L224-L261 | train | 221,787 |
google/openhtf | openhtf/output/proto/mfg_event_converter.py | multidim_measurement_to_attachment | def multidim_measurement_to_attachment(name, measurement):
"""Convert a multi-dim measurement to an `openhtf.test_record.Attachment`."""
dimensions = list(measurement.dimensions)
if measurement.units:
dimensions.append(
measurements.Dimension.from_unit_descriptor(measurement.units))
dims = []
for d in dimensions:
if d.suffix is None:
suffix = u''
# Ensure that the suffix is unicode. It's typically str/bytes because
# units.py looks them up against str/bytes.
elif isinstance(d.suffix, unicode):
suffix = d.suffix
else:
suffix = d.suffix.decode('utf8')
dims.append({
'uom_suffix': suffix,
'uom_code': d.code,
'name': d.name,
})
# Refer to the module docstring for the expected schema.
dimensioned_measured_value = measurement.measured_value
value = (sorted(dimensioned_measured_value.value, key=lambda x: x[0])
if dimensioned_measured_value.is_value_set else None)
outcome_str = MEASUREMENT_OUTCOME_TO_TEST_RUN_STATUS_NAME[measurement.outcome]
data = _convert_object_to_json({
'outcome': outcome_str,
'name': name,
'dimensions': dims,
'value': value,
})
attachment = htf_test_record.Attachment(data, test_runs_pb2.MULTIDIM_JSON)
return attachment | python | def multidim_measurement_to_attachment(name, measurement):
"""Convert a multi-dim measurement to an `openhtf.test_record.Attachment`."""
dimensions = list(measurement.dimensions)
if measurement.units:
dimensions.append(
measurements.Dimension.from_unit_descriptor(measurement.units))
dims = []
for d in dimensions:
if d.suffix is None:
suffix = u''
# Ensure that the suffix is unicode. It's typically str/bytes because
# units.py looks them up against str/bytes.
elif isinstance(d.suffix, unicode):
suffix = d.suffix
else:
suffix = d.suffix.decode('utf8')
dims.append({
'uom_suffix': suffix,
'uom_code': d.code,
'name': d.name,
})
# Refer to the module docstring for the expected schema.
dimensioned_measured_value = measurement.measured_value
value = (sorted(dimensioned_measured_value.value, key=lambda x: x[0])
if dimensioned_measured_value.is_value_set else None)
outcome_str = MEASUREMENT_OUTCOME_TO_TEST_RUN_STATUS_NAME[measurement.outcome]
data = _convert_object_to_json({
'outcome': outcome_str,
'name': name,
'dimensions': dims,
'value': value,
})
attachment = htf_test_record.Attachment(data, test_runs_pb2.MULTIDIM_JSON)
return attachment | [
"def",
"multidim_measurement_to_attachment",
"(",
"name",
",",
"measurement",
")",
":",
"dimensions",
"=",
"list",
"(",
"measurement",
".",
"dimensions",
")",
"if",
"measurement",
".",
"units",
":",
"dimensions",
".",
"append",
"(",
"measurements",
".",
"Dimensi... | Convert a multi-dim measurement to an `openhtf.test_record.Attachment`. | [
"Convert",
"a",
"multi",
"-",
"dim",
"measurement",
"to",
"an",
"openhtf",
".",
"test_record",
".",
"Attachment",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/output/proto/mfg_event_converter.py#L264-L300 | train | 221,788 |
google/openhtf | openhtf/output/proto/mfg_event_converter.py | convert_multidim_measurements | def convert_multidim_measurements(all_phases):
"""Converts each multidim measurements into attachments for all phases.."""
# Combine actual attachments with attachments we make from multi-dim
# measurements.
attachment_names = list(itertools.chain.from_iterable(
phase.attachments.keys() for phase in all_phases))
attachment_names.extend(itertools.chain.from_iterable([
'multidim_' + name for name, meas in phase.measurements.items()
if meas.dimensions is not None
] for phase in all_phases if phase.measurements))
attachment_name_maker = UniqueNameMaker(attachment_names)
for phase in all_phases:
# Process multi-dim measurements into unique attachments.
for name, measurement in sorted(phase.measurements.items()):
if measurement.dimensions:
old_name = name
name = attachment_name_maker.make_unique('multidim_%s' % name)
attachment = multidim_measurement_to_attachment(name, measurement)
phase.attachments[name] = attachment
phase.measurements.pop(old_name)
return all_phases | python | def convert_multidim_measurements(all_phases):
"""Converts each multidim measurements into attachments for all phases.."""
# Combine actual attachments with attachments we make from multi-dim
# measurements.
attachment_names = list(itertools.chain.from_iterable(
phase.attachments.keys() for phase in all_phases))
attachment_names.extend(itertools.chain.from_iterable([
'multidim_' + name for name, meas in phase.measurements.items()
if meas.dimensions is not None
] for phase in all_phases if phase.measurements))
attachment_name_maker = UniqueNameMaker(attachment_names)
for phase in all_phases:
# Process multi-dim measurements into unique attachments.
for name, measurement in sorted(phase.measurements.items()):
if measurement.dimensions:
old_name = name
name = attachment_name_maker.make_unique('multidim_%s' % name)
attachment = multidim_measurement_to_attachment(name, measurement)
phase.attachments[name] = attachment
phase.measurements.pop(old_name)
return all_phases | [
"def",
"convert_multidim_measurements",
"(",
"all_phases",
")",
":",
"# Combine actual attachments with attachments we make from multi-dim",
"# measurements.",
"attachment_names",
"=",
"list",
"(",
"itertools",
".",
"chain",
".",
"from_iterable",
"(",
"phase",
".",
"attachmen... | Converts each multidim measurements into attachments for all phases.. | [
"Converts",
"each",
"multidim",
"measurements",
"into",
"attachments",
"for",
"all",
"phases",
".."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/output/proto/mfg_event_converter.py#L303-L324 | train | 221,789 |
google/openhtf | openhtf/output/proto/mfg_event_converter.py | attachment_to_multidim_measurement | def attachment_to_multidim_measurement(attachment, name=None):
"""Convert an OpenHTF test record attachment to a multi-dim measurement.
This is a best effort attempt to reverse, as some data is lost in converting
from a multidim to an attachment.
Args:
attachment: an `openhtf.test_record.Attachment` from a multi-dim.
name: an optional name for the measurement. If not provided will use the
name included in the attachment.
Returns:
An multi-dim `openhtf.Measurement`.
"""
data = json.loads(attachment.data)
name = name or data.get('name')
# attachment_dimn are a list of dicts with keys 'uom_suffix' and 'uom_code'
attachment_dims = data.get('dimensions', [])
# attachment_value is a list of lists [[t1, x1, y1, f1], [t2, x2, y2, f2]]
attachment_values = data.get('value')
attachment_outcome_str = data.get('outcome')
if attachment_outcome_str not in TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME:
# Fpr backward compatibility with saved data we'll convert integers to str
try:
attachment_outcome_str = test_runs_pb2.Status.Name(
int(attachment_outcome_str))
except ValueError:
attachment_outcome_str = None
# Convert test status outcome str to measurement outcome
outcome = TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME.get(
attachment_outcome_str)
# convert dimensions into htf.Dimensions
_lazy_load_units_by_code()
dims = []
for d in attachment_dims:
# Try to convert into htf.Dimension including backwards compatibility.
unit = UNITS_BY_CODE.get(d.get('uom_code'), units.NONE)
description = d.get('name', '')
dims.append(measurements.Dimension(description=description, unit=unit))
# Attempt to determine if units are included.
if attachment_values and len(dims) == len(attachment_values[0]):
# units provided
units_ = dims[-1].unit
dimensions = dims[:-1]
else:
units_ = None
dimensions = dims
# created dimensioned_measured_value and populate with values.
measured_value = measurements.DimensionedMeasuredValue(
name=name,
num_dimensions=len(dimensions)
)
for row in attachment_values:
coordinates = tuple(row[:-1])
val = row[-1]
measured_value[coordinates] = val
measurement = measurements.Measurement(
name=name,
units=units_,
dimensions=tuple(dimensions),
measured_value=measured_value,
outcome=outcome
)
return measurement | python | def attachment_to_multidim_measurement(attachment, name=None):
"""Convert an OpenHTF test record attachment to a multi-dim measurement.
This is a best effort attempt to reverse, as some data is lost in converting
from a multidim to an attachment.
Args:
attachment: an `openhtf.test_record.Attachment` from a multi-dim.
name: an optional name for the measurement. If not provided will use the
name included in the attachment.
Returns:
An multi-dim `openhtf.Measurement`.
"""
data = json.loads(attachment.data)
name = name or data.get('name')
# attachment_dimn are a list of dicts with keys 'uom_suffix' and 'uom_code'
attachment_dims = data.get('dimensions', [])
# attachment_value is a list of lists [[t1, x1, y1, f1], [t2, x2, y2, f2]]
attachment_values = data.get('value')
attachment_outcome_str = data.get('outcome')
if attachment_outcome_str not in TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME:
# Fpr backward compatibility with saved data we'll convert integers to str
try:
attachment_outcome_str = test_runs_pb2.Status.Name(
int(attachment_outcome_str))
except ValueError:
attachment_outcome_str = None
# Convert test status outcome str to measurement outcome
outcome = TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME.get(
attachment_outcome_str)
# convert dimensions into htf.Dimensions
_lazy_load_units_by_code()
dims = []
for d in attachment_dims:
# Try to convert into htf.Dimension including backwards compatibility.
unit = UNITS_BY_CODE.get(d.get('uom_code'), units.NONE)
description = d.get('name', '')
dims.append(measurements.Dimension(description=description, unit=unit))
# Attempt to determine if units are included.
if attachment_values and len(dims) == len(attachment_values[0]):
# units provided
units_ = dims[-1].unit
dimensions = dims[:-1]
else:
units_ = None
dimensions = dims
# created dimensioned_measured_value and populate with values.
measured_value = measurements.DimensionedMeasuredValue(
name=name,
num_dimensions=len(dimensions)
)
for row in attachment_values:
coordinates = tuple(row[:-1])
val = row[-1]
measured_value[coordinates] = val
measurement = measurements.Measurement(
name=name,
units=units_,
dimensions=tuple(dimensions),
measured_value=measured_value,
outcome=outcome
)
return measurement | [
"def",
"attachment_to_multidim_measurement",
"(",
"attachment",
",",
"name",
"=",
"None",
")",
":",
"data",
"=",
"json",
".",
"loads",
"(",
"attachment",
".",
"data",
")",
"name",
"=",
"name",
"or",
"data",
".",
"get",
"(",
"'name'",
")",
"# attachment_dim... | Convert an OpenHTF test record attachment to a multi-dim measurement.
This is a best effort attempt to reverse, as some data is lost in converting
from a multidim to an attachment.
Args:
attachment: an `openhtf.test_record.Attachment` from a multi-dim.
name: an optional name for the measurement. If not provided will use the
name included in the attachment.
Returns:
An multi-dim `openhtf.Measurement`. | [
"Convert",
"an",
"OpenHTF",
"test",
"record",
"attachment",
"to",
"a",
"multi",
"-",
"dim",
"measurement",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/output/proto/mfg_event_converter.py#L427-L497 | train | 221,790 |
google/openhtf | openhtf/output/proto/mfg_event_converter.py | PhaseCopier._copy_unidimensional_measurement | def _copy_unidimensional_measurement(
self, phase, name, measurement, mfg_event):
"""Copy uni-dimensional measurements to the MfgEvent."""
mfg_measurement = mfg_event.measurement.add()
# Copy basic measurement fields.
mfg_measurement.name = name
if measurement.docstring:
mfg_measurement.description = measurement.docstring
mfg_measurement.parameter_tag.append(phase.name)
if (measurement.units and
measurement.units.code in test_runs_converter.UOM_CODE_MAP):
mfg_measurement.unit_code = (
test_runs_converter.UOM_CODE_MAP[measurement.units.code])
# Copy failed measurements as failure_codes. This happens early to include
# unset measurements.
if (measurement.outcome != measurements.Outcome.PASS and
phase.outcome != htf_test_record.PhaseOutcome.SKIP):
failure_code = mfg_event.failure_codes.add()
failure_code.code = name
failure_code.details = '\n'.join(str(v) for v in measurement.validators)
# Copy measurement value.
measured_value = measurement.measured_value
status_str = MEASUREMENT_OUTCOME_TO_TEST_RUN_STATUS_NAME[
measurement.outcome]
mfg_measurement.status = test_runs_pb2.Status.Value(status_str)
if not measured_value.is_value_set:
return
value = measured_value.value
if isinstance(value, numbers.Number):
mfg_measurement.numeric_value = float(value)
elif isinstance(value, bytes):
# text_value expects unicode or ascii-compatible strings, so we must
# 'decode' it, even if it's actually just garbage bytestring data.
mfg_measurement.text_value = unicode(value, errors='replace')
elif isinstance(value, unicode):
# Don't waste time and potential errors decoding unicode.
mfg_measurement.text_value = value
else:
# Coercing to string.
mfg_measurement.text_value = str(value)
# Copy measurement validators.
for validator in measurement.validators:
if isinstance(validator, validators.RangeValidatorBase):
if validator.minimum is not None:
mfg_measurement.numeric_minimum = float(validator.minimum)
if validator.maximum is not None:
mfg_measurement.numeric_maximum = float(validator.maximum)
elif isinstance(validator, validators.RegexMatcher):
mfg_measurement.expected_text = validator.regex
else:
mfg_measurement.description += '\nValidator: ' + str(validator) | python | def _copy_unidimensional_measurement(
self, phase, name, measurement, mfg_event):
"""Copy uni-dimensional measurements to the MfgEvent."""
mfg_measurement = mfg_event.measurement.add()
# Copy basic measurement fields.
mfg_measurement.name = name
if measurement.docstring:
mfg_measurement.description = measurement.docstring
mfg_measurement.parameter_tag.append(phase.name)
if (measurement.units and
measurement.units.code in test_runs_converter.UOM_CODE_MAP):
mfg_measurement.unit_code = (
test_runs_converter.UOM_CODE_MAP[measurement.units.code])
# Copy failed measurements as failure_codes. This happens early to include
# unset measurements.
if (measurement.outcome != measurements.Outcome.PASS and
phase.outcome != htf_test_record.PhaseOutcome.SKIP):
failure_code = mfg_event.failure_codes.add()
failure_code.code = name
failure_code.details = '\n'.join(str(v) for v in measurement.validators)
# Copy measurement value.
measured_value = measurement.measured_value
status_str = MEASUREMENT_OUTCOME_TO_TEST_RUN_STATUS_NAME[
measurement.outcome]
mfg_measurement.status = test_runs_pb2.Status.Value(status_str)
if not measured_value.is_value_set:
return
value = measured_value.value
if isinstance(value, numbers.Number):
mfg_measurement.numeric_value = float(value)
elif isinstance(value, bytes):
# text_value expects unicode or ascii-compatible strings, so we must
# 'decode' it, even if it's actually just garbage bytestring data.
mfg_measurement.text_value = unicode(value, errors='replace')
elif isinstance(value, unicode):
# Don't waste time and potential errors decoding unicode.
mfg_measurement.text_value = value
else:
# Coercing to string.
mfg_measurement.text_value = str(value)
# Copy measurement validators.
for validator in measurement.validators:
if isinstance(validator, validators.RangeValidatorBase):
if validator.minimum is not None:
mfg_measurement.numeric_minimum = float(validator.minimum)
if validator.maximum is not None:
mfg_measurement.numeric_maximum = float(validator.maximum)
elif isinstance(validator, validators.RegexMatcher):
mfg_measurement.expected_text = validator.regex
else:
mfg_measurement.description += '\nValidator: ' + str(validator) | [
"def",
"_copy_unidimensional_measurement",
"(",
"self",
",",
"phase",
",",
"name",
",",
"measurement",
",",
"mfg_event",
")",
":",
"mfg_measurement",
"=",
"mfg_event",
".",
"measurement",
".",
"add",
"(",
")",
"# Copy basic measurement fields.",
"mfg_measurement",
"... | Copy uni-dimensional measurements to the MfgEvent. | [
"Copy",
"uni",
"-",
"dimensional",
"measurements",
"to",
"the",
"MfgEvent",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/output/proto/mfg_event_converter.py#L341-L396 | train | 221,791 |
google/openhtf | openhtf/output/proto/mfg_event_converter.py | PhaseCopier._copy_attachment | def _copy_attachment(self, name, data, mimetype, mfg_event):
"""Copies an attachment to mfg_event."""
attachment = mfg_event.attachment.add()
attachment.name = name
if isinstance(data, unicode):
data = data.encode('utf8')
attachment.value_binary = data
if mimetype in test_runs_converter.MIMETYPE_MAP:
attachment.type = test_runs_converter.MIMETYPE_MAP[mimetype]
elif mimetype == test_runs_pb2.MULTIDIM_JSON:
attachment.type = mimetype
else:
attachment.type = test_runs_pb2.BINARY | python | def _copy_attachment(self, name, data, mimetype, mfg_event):
"""Copies an attachment to mfg_event."""
attachment = mfg_event.attachment.add()
attachment.name = name
if isinstance(data, unicode):
data = data.encode('utf8')
attachment.value_binary = data
if mimetype in test_runs_converter.MIMETYPE_MAP:
attachment.type = test_runs_converter.MIMETYPE_MAP[mimetype]
elif mimetype == test_runs_pb2.MULTIDIM_JSON:
attachment.type = mimetype
else:
attachment.type = test_runs_pb2.BINARY | [
"def",
"_copy_attachment",
"(",
"self",
",",
"name",
",",
"data",
",",
"mimetype",
",",
"mfg_event",
")",
":",
"attachment",
"=",
"mfg_event",
".",
"attachment",
".",
"add",
"(",
")",
"attachment",
".",
"name",
"=",
"name",
"if",
"isinstance",
"(",
"data... | Copies an attachment to mfg_event. | [
"Copies",
"an",
"attachment",
"to",
"mfg_event",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/output/proto/mfg_event_converter.py#L403-L415 | train | 221,792 |
google/openhtf | openhtf/plugs/usb/adb_protocol.py | AdbStream.write | def write(self, data, timeout_ms=None):
"""Write data to this stream.
Args:
data: Data to write.
timeout_ms: Timeout to use for the write/Ack transaction, in
milliseconds (or as a PolledTimeout object).
Raises:
AdbProtocolError: If an ACK is not received.
AdbStreamClosedError: If the stream is already closed, or gets closed
before the write completes.
"""
timeout = timeouts.PolledTimeout.from_millis(timeout_ms)
# Break the data up into our transport's maxdata sized WRTE messages.
while data:
self._transport.write(
data[:self._transport.adb_connection.maxdata], timeout)
data = data[self._transport.adb_connection.maxdata:] | python | def write(self, data, timeout_ms=None):
"""Write data to this stream.
Args:
data: Data to write.
timeout_ms: Timeout to use for the write/Ack transaction, in
milliseconds (or as a PolledTimeout object).
Raises:
AdbProtocolError: If an ACK is not received.
AdbStreamClosedError: If the stream is already closed, or gets closed
before the write completes.
"""
timeout = timeouts.PolledTimeout.from_millis(timeout_ms)
# Break the data up into our transport's maxdata sized WRTE messages.
while data:
self._transport.write(
data[:self._transport.adb_connection.maxdata], timeout)
data = data[self._transport.adb_connection.maxdata:] | [
"def",
"write",
"(",
"self",
",",
"data",
",",
"timeout_ms",
"=",
"None",
")",
":",
"timeout",
"=",
"timeouts",
".",
"PolledTimeout",
".",
"from_millis",
"(",
"timeout_ms",
")",
"# Break the data up into our transport's maxdata sized WRTE messages.",
"while",
"data",
... | Write data to this stream.
Args:
data: Data to write.
timeout_ms: Timeout to use for the write/Ack transaction, in
milliseconds (or as a PolledTimeout object).
Raises:
AdbProtocolError: If an ACK is not received.
AdbStreamClosedError: If the stream is already closed, or gets closed
before the write completes. | [
"Write",
"data",
"to",
"this",
"stream",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/adb_protocol.py#L167-L185 | train | 221,793 |
google/openhtf | openhtf/plugs/usb/adb_protocol.py | AdbStream.read | def read(self, length=0, timeout_ms=None):
"""Reads data from the remote end of this stream.
Internally, this data will have been contained in AdbMessages, but
users of streams shouldn't need to care about the transport mechanism.
Args:
length: If provided, the number of bytes to read, otherwise all available
data will be returned (at least one byte).
timeout_ms: Time to wait for a message to come in for this stream, in
milliseconds (or as a PolledTimeout object).
Returns:
Data that was read, or None if the end of the stream was reached.
Raises:
AdbProtocolError: Received an unexpected wonky non-stream packet (like a
CNXN ADB message).
AdbStreamClosedError: The stream is already closed.
AdbTimeoutError: Timed out waiting for a message.
"""
return self._transport.read(
length, timeouts.PolledTimeout.from_millis(timeout_ms)) | python | def read(self, length=0, timeout_ms=None):
"""Reads data from the remote end of this stream.
Internally, this data will have been contained in AdbMessages, but
users of streams shouldn't need to care about the transport mechanism.
Args:
length: If provided, the number of bytes to read, otherwise all available
data will be returned (at least one byte).
timeout_ms: Time to wait for a message to come in for this stream, in
milliseconds (or as a PolledTimeout object).
Returns:
Data that was read, or None if the end of the stream was reached.
Raises:
AdbProtocolError: Received an unexpected wonky non-stream packet (like a
CNXN ADB message).
AdbStreamClosedError: The stream is already closed.
AdbTimeoutError: Timed out waiting for a message.
"""
return self._transport.read(
length, timeouts.PolledTimeout.from_millis(timeout_ms)) | [
"def",
"read",
"(",
"self",
",",
"length",
"=",
"0",
",",
"timeout_ms",
"=",
"None",
")",
":",
"return",
"self",
".",
"_transport",
".",
"read",
"(",
"length",
",",
"timeouts",
".",
"PolledTimeout",
".",
"from_millis",
"(",
"timeout_ms",
")",
")"
] | Reads data from the remote end of this stream.
Internally, this data will have been contained in AdbMessages, but
users of streams shouldn't need to care about the transport mechanism.
Args:
length: If provided, the number of bytes to read, otherwise all available
data will be returned (at least one byte).
timeout_ms: Time to wait for a message to come in for this stream, in
milliseconds (or as a PolledTimeout object).
Returns:
Data that was read, or None if the end of the stream was reached.
Raises:
AdbProtocolError: Received an unexpected wonky non-stream packet (like a
CNXN ADB message).
AdbStreamClosedError: The stream is already closed.
AdbTimeoutError: Timed out waiting for a message. | [
"Reads",
"data",
"from",
"the",
"remote",
"end",
"of",
"this",
"stream",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/adb_protocol.py#L187-L209 | train | 221,794 |
google/openhtf | openhtf/plugs/usb/adb_protocol.py | AdbStream.read_until_close | def read_until_close(self, timeout_ms=None):
"""Yield data until this stream is closed.
Args:
timeout_ms: Timeout in milliseconds to keep reading (or a PolledTimeout
object).
Yields:
Data read from a single call to self.read(), until the stream is closed
or timeout is reached.
Raises:
AdbTimeoutError: On timeout.
"""
while True:
try:
yield self.read(timeout_ms=timeout_ms)
except usb_exceptions.AdbStreamClosedError:
break | python | def read_until_close(self, timeout_ms=None):
"""Yield data until this stream is closed.
Args:
timeout_ms: Timeout in milliseconds to keep reading (or a PolledTimeout
object).
Yields:
Data read from a single call to self.read(), until the stream is closed
or timeout is reached.
Raises:
AdbTimeoutError: On timeout.
"""
while True:
try:
yield self.read(timeout_ms=timeout_ms)
except usb_exceptions.AdbStreamClosedError:
break | [
"def",
"read_until_close",
"(",
"self",
",",
"timeout_ms",
"=",
"None",
")",
":",
"while",
"True",
":",
"try",
":",
"yield",
"self",
".",
"read",
"(",
"timeout_ms",
"=",
"timeout_ms",
")",
"except",
"usb_exceptions",
".",
"AdbStreamClosedError",
":",
"break"... | Yield data until this stream is closed.
Args:
timeout_ms: Timeout in milliseconds to keep reading (or a PolledTimeout
object).
Yields:
Data read from a single call to self.read(), until the stream is closed
or timeout is reached.
Raises:
AdbTimeoutError: On timeout. | [
"Yield",
"data",
"until",
"this",
"stream",
"is",
"closed",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/adb_protocol.py#L211-L229 | train | 221,795 |
google/openhtf | openhtf/plugs/usb/adb_protocol.py | AdbStreamTransport._set_or_check_remote_id | def _set_or_check_remote_id(self, remote_id):
"""Set or check the remote id."""
if not self.remote_id:
assert self.closed_state == self.ClosedState.PENDING, 'Bad ClosedState!'
self.remote_id = remote_id
self.closed_state = self.ClosedState.OPEN
elif self.remote_id != remote_id:
raise usb_exceptions.AdbProtocolError(
'%s remote-id change to %s', self, remote_id) | python | def _set_or_check_remote_id(self, remote_id):
"""Set or check the remote id."""
if not self.remote_id:
assert self.closed_state == self.ClosedState.PENDING, 'Bad ClosedState!'
self.remote_id = remote_id
self.closed_state = self.ClosedState.OPEN
elif self.remote_id != remote_id:
raise usb_exceptions.AdbProtocolError(
'%s remote-id change to %s', self, remote_id) | [
"def",
"_set_or_check_remote_id",
"(",
"self",
",",
"remote_id",
")",
":",
"if",
"not",
"self",
".",
"remote_id",
":",
"assert",
"self",
".",
"closed_state",
"==",
"self",
".",
"ClosedState",
".",
"PENDING",
",",
"'Bad ClosedState!'",
"self",
".",
"remote_id",... | Set or check the remote id. | [
"Set",
"or",
"check",
"the",
"remote",
"id",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/adb_protocol.py#L277-L285 | train | 221,796 |
google/openhtf | openhtf/plugs/usb/adb_protocol.py | AdbStreamTransport._handle_message | def _handle_message(self, message, handle_wrte=True):
"""Handle a message that was read for this stream.
For each message type, this means:
OKAY: Check id's and make sure we are expecting an OKAY. Clear the
self._expecting_okay flag so any pending write()'s know.
CLSE: Set our internal state to closed.
WRTE: Add the data read to our internal read buffer. Note we don't
return the actual data because it may not be this thread that needs it.
Args:
message: Message that was read.
handle_wrte: If True, we can handle WRTE messages, otherwise raise.
Raises:
AdbProtocolError: If we get a WRTE message but handle_wrte is False.
"""
if message.command == 'OKAY':
self._set_or_check_remote_id(message.arg0)
if not self._expecting_okay:
raise usb_exceptions.AdbProtocolError(
'%s received unexpected OKAY: %s', self, message)
self._expecting_okay = False
elif message.command == 'CLSE':
self.closed_state = self.ClosedState.CLOSED
elif not handle_wrte:
raise usb_exceptions.AdbProtocolError(
'%s received WRTE before OKAY/CLSE: %s', self, message)
else:
with self._read_buffer_lock:
self._read_buffer.append(message.data)
self._buffer_size += len(message.data) | python | def _handle_message(self, message, handle_wrte=True):
"""Handle a message that was read for this stream.
For each message type, this means:
OKAY: Check id's and make sure we are expecting an OKAY. Clear the
self._expecting_okay flag so any pending write()'s know.
CLSE: Set our internal state to closed.
WRTE: Add the data read to our internal read buffer. Note we don't
return the actual data because it may not be this thread that needs it.
Args:
message: Message that was read.
handle_wrte: If True, we can handle WRTE messages, otherwise raise.
Raises:
AdbProtocolError: If we get a WRTE message but handle_wrte is False.
"""
if message.command == 'OKAY':
self._set_or_check_remote_id(message.arg0)
if not self._expecting_okay:
raise usb_exceptions.AdbProtocolError(
'%s received unexpected OKAY: %s', self, message)
self._expecting_okay = False
elif message.command == 'CLSE':
self.closed_state = self.ClosedState.CLOSED
elif not handle_wrte:
raise usb_exceptions.AdbProtocolError(
'%s received WRTE before OKAY/CLSE: %s', self, message)
else:
with self._read_buffer_lock:
self._read_buffer.append(message.data)
self._buffer_size += len(message.data) | [
"def",
"_handle_message",
"(",
"self",
",",
"message",
",",
"handle_wrte",
"=",
"True",
")",
":",
"if",
"message",
".",
"command",
"==",
"'OKAY'",
":",
"self",
".",
"_set_or_check_remote_id",
"(",
"message",
".",
"arg0",
")",
"if",
"not",
"self",
".",
"_... | Handle a message that was read for this stream.
For each message type, this means:
OKAY: Check id's and make sure we are expecting an OKAY. Clear the
self._expecting_okay flag so any pending write()'s know.
CLSE: Set our internal state to closed.
WRTE: Add the data read to our internal read buffer. Note we don't
return the actual data because it may not be this thread that needs it.
Args:
message: Message that was read.
handle_wrte: If True, we can handle WRTE messages, otherwise raise.
Raises:
AdbProtocolError: If we get a WRTE message but handle_wrte is False. | [
"Handle",
"a",
"message",
"that",
"was",
"read",
"for",
"this",
"stream",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/adb_protocol.py#L314-L345 | train | 221,797 |
google/openhtf | openhtf/plugs/usb/adb_protocol.py | AdbStreamTransport._read_messages_until_true | def _read_messages_until_true(self, predicate, timeout):
"""Read a message from this stream and handle it.
This method tries to read a message from this stream, blocking until a
message is read. Once read, it will handle it accordingly by calling
self._handle_message().
This is repeated as long as predicate() returns False. There is some
locking used internally here so that we don't end up with multiple threads
blocked on a call to read_for_stream when another thread has read the
message that caused predicate() to become True.
Args:
predicate: Callable, keep reading messages until it returns true. Note
that predicate() should not block, as doing so may cause this method to
hang beyond its timeout.
timeout: Timeout to use for this call.
Raises:
AdbStreamClosedError: If this stream is already closed.
"""
while not predicate():
# Hold the message_received Lock while we try to acquire the reader_lock
# and waiting on the message_received condition, to prevent another reader
# thread from notifying the condition between us failing to acquire the
# reader_lock and waiting on the condition.
self._message_received.acquire()
if self._reader_lock.acquire(False):
try:
# Release the message_received Lock while we do the read so other
# threads can wait() on the condition without having to block on
# acquiring the message_received Lock (we may have a longer timeout
# than them, so that would be bad).
self._message_received.release()
# We are now the thread responsible for reading a message. Check
# predicate() to make sure nobody else read a message between our last
# check and acquiring the reader Lock.
if predicate():
return
# Read and handle a message, using our timeout.
self._handle_message(
self.adb_connection.read_for_stream(self, timeout))
# Notify anyone interested that we handled a message, causing them to
# check their predicate again.
with self._message_received:
self._message_received.notify_all()
finally:
self._reader_lock.release()
else:
# There is some other thread reading a message. Since we are already
# holding the message_received Lock, we can immediately do the wait.
try:
self._message_received.wait(timeout.remaining)
if timeout.has_expired():
raise usb_exceptions.AdbTimeoutError(
'%s timed out reading messages.', self)
finally:
# Make sure we release this even if an exception occurred.
self._message_received.release() | python | def _read_messages_until_true(self, predicate, timeout):
"""Read a message from this stream and handle it.
This method tries to read a message from this stream, blocking until a
message is read. Once read, it will handle it accordingly by calling
self._handle_message().
This is repeated as long as predicate() returns False. There is some
locking used internally here so that we don't end up with multiple threads
blocked on a call to read_for_stream when another thread has read the
message that caused predicate() to become True.
Args:
predicate: Callable, keep reading messages until it returns true. Note
that predicate() should not block, as doing so may cause this method to
hang beyond its timeout.
timeout: Timeout to use for this call.
Raises:
AdbStreamClosedError: If this stream is already closed.
"""
while not predicate():
# Hold the message_received Lock while we try to acquire the reader_lock
# and waiting on the message_received condition, to prevent another reader
# thread from notifying the condition between us failing to acquire the
# reader_lock and waiting on the condition.
self._message_received.acquire()
if self._reader_lock.acquire(False):
try:
# Release the message_received Lock while we do the read so other
# threads can wait() on the condition without having to block on
# acquiring the message_received Lock (we may have a longer timeout
# than them, so that would be bad).
self._message_received.release()
# We are now the thread responsible for reading a message. Check
# predicate() to make sure nobody else read a message between our last
# check and acquiring the reader Lock.
if predicate():
return
# Read and handle a message, using our timeout.
self._handle_message(
self.adb_connection.read_for_stream(self, timeout))
# Notify anyone interested that we handled a message, causing them to
# check their predicate again.
with self._message_received:
self._message_received.notify_all()
finally:
self._reader_lock.release()
else:
# There is some other thread reading a message. Since we are already
# holding the message_received Lock, we can immediately do the wait.
try:
self._message_received.wait(timeout.remaining)
if timeout.has_expired():
raise usb_exceptions.AdbTimeoutError(
'%s timed out reading messages.', self)
finally:
# Make sure we release this even if an exception occurred.
self._message_received.release() | [
"def",
"_read_messages_until_true",
"(",
"self",
",",
"predicate",
",",
"timeout",
")",
":",
"while",
"not",
"predicate",
"(",
")",
":",
"# Hold the message_received Lock while we try to acquire the reader_lock",
"# and waiting on the message_received condition, to prevent another ... | Read a message from this stream and handle it.
This method tries to read a message from this stream, blocking until a
message is read. Once read, it will handle it accordingly by calling
self._handle_message().
This is repeated as long as predicate() returns False. There is some
locking used internally here so that we don't end up with multiple threads
blocked on a call to read_for_stream when another thread has read the
message that caused predicate() to become True.
Args:
predicate: Callable, keep reading messages until it returns true. Note
that predicate() should not block, as doing so may cause this method to
hang beyond its timeout.
timeout: Timeout to use for this call.
Raises:
AdbStreamClosedError: If this stream is already closed. | [
"Read",
"a",
"message",
"from",
"this",
"stream",
"and",
"handle",
"it",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/adb_protocol.py#L347-L408 | train | 221,798 |
google/openhtf | openhtf/plugs/usb/adb_protocol.py | AdbStreamTransport.ensure_opened | def ensure_opened(self, timeout):
"""Ensure this stream transport was successfully opened.
Checks to make sure we receive our initial OKAY message. This must be
called after creating this AdbStreamTransport and before calling read() or
write().
Args:
timeout: timeouts.PolledTimeout to use for this operation.
Returns:
True if this stream was successfully opened, False if the service was
not recognized by the remote endpoint. If False is returned, then this
AdbStreamTransport will be already closed.
Raises:
AdbProtocolError: If we receive a WRTE message instead of OKAY/CLSE.
"""
self._handle_message(self.adb_connection.read_for_stream(self, timeout),
handle_wrte=False)
return self.is_open() | python | def ensure_opened(self, timeout):
"""Ensure this stream transport was successfully opened.
Checks to make sure we receive our initial OKAY message. This must be
called after creating this AdbStreamTransport and before calling read() or
write().
Args:
timeout: timeouts.PolledTimeout to use for this operation.
Returns:
True if this stream was successfully opened, False if the service was
not recognized by the remote endpoint. If False is returned, then this
AdbStreamTransport will be already closed.
Raises:
AdbProtocolError: If we receive a WRTE message instead of OKAY/CLSE.
"""
self._handle_message(self.adb_connection.read_for_stream(self, timeout),
handle_wrte=False)
return self.is_open() | [
"def",
"ensure_opened",
"(",
"self",
",",
"timeout",
")",
":",
"self",
".",
"_handle_message",
"(",
"self",
".",
"adb_connection",
".",
"read_for_stream",
"(",
"self",
",",
"timeout",
")",
",",
"handle_wrte",
"=",
"False",
")",
"return",
"self",
".",
"is_o... | Ensure this stream transport was successfully opened.
Checks to make sure we receive our initial OKAY message. This must be
called after creating this AdbStreamTransport and before calling read() or
write().
Args:
timeout: timeouts.PolledTimeout to use for this operation.
Returns:
True if this stream was successfully opened, False if the service was
not recognized by the remote endpoint. If False is returned, then this
AdbStreamTransport will be already closed.
Raises:
AdbProtocolError: If we receive a WRTE message instead of OKAY/CLSE. | [
"Ensure",
"this",
"stream",
"transport",
"was",
"successfully",
"opened",
"."
] | 655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09 | https://github.com/google/openhtf/blob/655e85df7134db7bdf8f8fdd6ff9a6bf932e7b09/openhtf/plugs/usb/adb_protocol.py#L410-L430 | train | 221,799 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.