| repo | path | func_name | original_string | language | sha | url | partition | idx |
|---|---|---|---|---|---|---|---|---|
adamreeve/npTDMS | nptdms/tdms.py | fromfile | def fromfile(file, dtype, count, *args, **kwargs):
"""Wrapper around np.fromfile to support any file-like object"""
try:
return np.fromfile(file, dtype=dtype, count=count, *args, **kwargs)
except (TypeError, IOError, UnsupportedOperation):
return np.frombuffer(
file.read(count * np.dtype(dtype).itemsize),
            dtype=dtype, count=count, *args, **kwargs) | python |
"""Wrapper around np.fromfile to support any file-like object"""
try:
return np.fromfile(file, dtype=dtype, count=count, *args, **kwargs)
except (TypeError, IOError, UnsupportedOperation):
return np.frombuffer(
file.read(count * np.dtype(dtype).itemsize),
dtype=dtype, count=count, *args, **kwargs) | [
"def",
"fromfile",
"(",
"file",
",",
"dtype",
",",
"count",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"np",
".",
"fromfile",
"(",
"file",
",",
"dtype",
"=",
"dtype",
",",
"count",
"=",
"count",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"(",
"TypeError",
",",
"IOError",
",",
"UnsupportedOperation",
")",
":",
"return",
"np",
".",
"frombuffer",
"(",
"file",
".",
"read",
"(",
"count",
"*",
"np",
".",
"dtype",
"(",
"dtype",
")",
".",
"itemsize",
")",
",",
"dtype",
"=",
"dtype",
",",
"count",
"=",
"count",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")"
] | Wrapper around np.fromfile to support any file-like object | [
"Wrapper",
"around",
"np",
".",
"fromfile",
"to",
"support",
"any",
"file",
"-",
"like",
"object"
| d7d6632d4ebc2e78ed941477c2f1c56bd7493d74 | https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L43-L51 | train | 238,400 |
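A minimal sketch of the fallback path, assuming the `fromfile` wrapper above is in scope: `np.fromfile` needs a real file descriptor, so an in-memory stream triggers the `np.frombuffer` branch.

```python
import io
import numpy as np

# BytesIO has no fileno(), so np.fromfile raises and the wrapper
# falls back to reading raw bytes into np.frombuffer.
buffer = io.BytesIO(np.arange(4, dtype=np.int32).tobytes())
values = fromfile(buffer, dtype=np.int32, count=4)
print(values)  # [0 1 2 3]
```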
adamreeve/npTDMS | nptdms/tdms.py | read_property | def read_property(f, endianness="<"):
""" Read a property from a segment's metadata """
prop_name = types.String.read(f, endianness)
prop_data_type = types.tds_data_types[types.Uint32.read(f, endianness)]
value = prop_data_type.read(f, endianness)
log.debug("Property %s: %r", prop_name, value)
    return prop_name, value | python |
""" Read a property from a segment's metadata """
prop_name = types.String.read(f, endianness)
prop_data_type = types.tds_data_types[types.Uint32.read(f, endianness)]
value = prop_data_type.read(f, endianness)
log.debug("Property %s: %r", prop_name, value)
return prop_name, value | [
"def",
"read_property",
"(",
"f",
",",
"endianness",
"=",
"\"<\"",
")",
":",
"prop_name",
"=",
"types",
".",
"String",
".",
"read",
"(",
"f",
",",
"endianness",
")",
"prop_data_type",
"=",
"types",
".",
"tds_data_types",
"[",
"types",
".",
"Uint32",
".",
"read",
"(",
"f",
",",
"endianness",
")",
"]",
"value",
"=",
"prop_data_type",
".",
"read",
"(",
"f",
",",
"endianness",
")",
"log",
".",
"debug",
"(",
"\"Property %s: %r\"",
",",
"prop_name",
",",
"value",
")",
"return",
"prop_name",
",",
"value"
] | Read a property from a segment's metadata | [
"Read",
"a",
"property",
"from",
"a",
"segment",
"s",
"metadata"
| d7d6632d4ebc2e78ed941477c2f1c56bd7493d74 | https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L54-L61 | train | 238,401 |
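For context, each property on disk is a length-prefixed UTF-8 name, a uint32 data type id, then the raw value. Below is a self-contained sketch decoding a little-endian string property with `struct` instead of the library's `types` module; the 0x20 type id for strings matches the TDMS format, but treat the sketch as illustrative only.

```python
import struct

def read_string_property(buf, pos=0):
    # Property name: uint32 length followed by UTF-8 bytes
    (name_len,) = struct.unpack_from("<I", buf, pos)
    pos += 4
    name = buf[pos:pos + name_len].decode("utf-8")
    pos += name_len
    # Data type id: 0x20 is the TDMS string type
    (type_id,) = struct.unpack_from("<I", buf, pos)
    pos += 4
    assert type_id == 0x20, "sketch only handles string properties"
    # String value: uint32 length followed by UTF-8 bytes
    (value_len,) = struct.unpack_from("<I", buf, pos)
    pos += 4
    value = buf[pos:pos + value_len].decode("utf-8")
    return name, value
```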
adamreeve/npTDMS | nptdms/tdms.py | read_string_data | def read_string_data(file, number_values, endianness):
""" Read string raw data
This is stored as an array of offsets
followed by the contiguous string data.
"""
offsets = [0]
for i in range(number_values):
offsets.append(types.Uint32.read(file, endianness))
strings = []
for i in range(number_values):
s = file.read(offsets[i + 1] - offsets[i])
strings.append(s.decode('utf-8'))
    return strings | python |
""" Read string raw data
This is stored as an array of offsets
followed by the contiguous string data.
"""
offsets = [0]
for i in range(number_values):
offsets.append(types.Uint32.read(file, endianness))
strings = []
for i in range(number_values):
s = file.read(offsets[i + 1] - offsets[i])
strings.append(s.decode('utf-8'))
return strings | [
"def",
"read_string_data",
"(",
"file",
",",
"number_values",
",",
"endianness",
")",
":",
"offsets",
"=",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"number_values",
")",
":",
"offsets",
".",
"append",
"(",
"types",
".",
"Uint32",
".",
"read",
"(",
"file",
",",
"endianness",
")",
")",
"strings",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"number_values",
")",
":",
"s",
"=",
"file",
".",
"read",
"(",
"offsets",
"[",
"i",
"+",
"1",
"]",
"-",
"offsets",
"[",
"i",
"]",
")",
"strings",
".",
"append",
"(",
"s",
".",
"decode",
"(",
"'utf-8'",
")",
")",
"return",
"strings"
] | Read string raw data
This is stored as an array of offsets
followed by the contiguous string data. | [
"Read",
"string",
"raw",
"data"
| d7d6632d4ebc2e78ed941477c2f1c56bd7493d74 | https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L1049-L1062 | train | 238,402 |
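A round-trip sketch of the layout this function expects, with illustrative strings: one cumulative end offset per value, then the concatenated UTF-8 bytes.

```python
import io
import struct

strings = ["abc", "de", ""]
payload = b"".join(s.encode("utf-8") for s in strings)
offsets = b""
end = 0
for s in strings:
    end += len(s.encode("utf-8"))
    offsets += struct.pack("<I", end)  # cumulative end offsets: 3, 5, 5

buffer = io.BytesIO(offsets + payload)
# With nptdms' types module in scope:
# read_string_data(buffer, 3, "<")  ->  ['abc', 'de', '']
```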
adamreeve/npTDMS | nptdms/tdms.py | path_components | def path_components(path):
"""Convert a path into group and channel name components"""
def yield_components(path):
# Iterate over each character and the next character
chars = zip_longest(path, path[1:])
try:
# Iterate over components
while True:
c, n = next(chars)
if c != '/':
raise ValueError("Invalid path, expected \"/\"")
elif (n is not None and n != "'"):
raise ValueError("Invalid path, expected \"'\"")
else:
# Consume "'" or raise StopIteration if at the end
next(chars)
component = []
# Iterate over characters in component name
while True:
c, n = next(chars)
if c == "'" and n == "'":
component += "'"
# Consume second "'"
next(chars)
elif c == "'":
yield "".join(component)
break
else:
component += c
except StopIteration:
return
    return list(yield_components(path)) | python |
"""Convert a path into group and channel name components"""
def yield_components(path):
# Iterate over each character and the next character
chars = zip_longest(path, path[1:])
try:
# Iterate over components
while True:
c, n = next(chars)
if c != '/':
raise ValueError("Invalid path, expected \"/\"")
elif (n is not None and n != "'"):
raise ValueError("Invalid path, expected \"'\"")
else:
# Consume "'" or raise StopIteration if at the end
next(chars)
component = []
# Iterate over characters in component name
while True:
c, n = next(chars)
if c == "'" and n == "'":
component += "'"
# Consume second "'"
next(chars)
elif c == "'":
yield "".join(component)
break
else:
component += c
except StopIteration:
return
return list(yield_components(path)) | [
"def",
"path_components",
"(",
"path",
")",
":",
"def",
"yield_components",
"(",
"path",
")",
":",
"# Iterate over each character and the next character",
"chars",
"=",
"zip_longest",
"(",
"path",
",",
"path",
"[",
"1",
":",
"]",
")",
"try",
":",
"# Iterate over components",
"while",
"True",
":",
"c",
",",
"n",
"=",
"next",
"(",
"chars",
")",
"if",
"c",
"!=",
"'/'",
":",
"raise",
"ValueError",
"(",
"\"Invalid path, expected \\\"/\\\"\"",
")",
"elif",
"(",
"n",
"is",
"not",
"None",
"and",
"n",
"!=",
"\"'\"",
")",
":",
"raise",
"ValueError",
"(",
"\"Invalid path, expected \\\"'\\\"\"",
")",
"else",
":",
"# Consume \"'\" or raise StopIteration if at the end",
"next",
"(",
"chars",
")",
"component",
"=",
"[",
"]",
"# Iterate over characters in component name",
"while",
"True",
":",
"c",
",",
"n",
"=",
"next",
"(",
"chars",
")",
"if",
"c",
"==",
"\"'\"",
"and",
"n",
"==",
"\"'\"",
":",
"component",
"+=",
"\"'\"",
"# Consume second \"'\"",
"next",
"(",
"chars",
")",
"elif",
"c",
"==",
"\"'\"",
":",
"yield",
"\"\"",
".",
"join",
"(",
"component",
")",
"break",
"else",
":",
"component",
"+=",
"c",
"except",
"StopIteration",
":",
"return",
"return",
"list",
"(",
"yield_components",
"(",
"path",
")",
")"
] | Convert a path into group and channel name components | [
"Convert",
"a",
"path",
"into",
"group",
"and",
"channel",
"name",
"components"
| d7d6632d4ebc2e78ed941477c2f1c56bd7493d74 | https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L1065-L1098 | train | 238,403 |
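Example inputs and the components they decode to; a doubled single quote escapes a quote inside a name:

```python
path_components("/")                        # []  (the root object)
path_components("/'Measured Data'")         # ['Measured Data']
path_components("/'Group'/'Voltage'")       # ['Group', 'Voltage']
path_components("/'Group'/'O''Brien'")      # ['Group', "O'Brien"]
```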
adamreeve/npTDMS | nptdms/tdms.py | TdmsFile.object | def object(self, *path):
"""Get a TDMS object from the file
:param path: The object group and channel. Providing no channel
returns a group object, and providing no channel or group
will return the root object.
:rtype: :class:`TdmsObject`
For example, to get the root object::
object()
To get a group::
object("group_name")
To get a channel::
object("group_name", "channel_name")
"""
object_path = self._path(*path)
try:
return self.objects[object_path]
except KeyError:
raise KeyError("Invalid object path: %s" % object_path) | python | def object(self, *path):
"""Get a TDMS object from the file
:param path: The object group and channel. Providing no channel
returns a group object, and providing no channel or group
will return the root object.
:rtype: :class:`TdmsObject`
For example, to get the root object::
object()
To get a group::
object("group_name")
To get a channel::
object("group_name", "channel_name")
"""
object_path = self._path(*path)
try:
return self.objects[object_path]
except KeyError:
raise KeyError("Invalid object path: %s" % object_path) | [
"def",
"object",
"(",
"self",
",",
"*",
"path",
")",
":",
"object_path",
"=",
"self",
".",
"_path",
"(",
"*",
"path",
")",
"try",
":",
"return",
"self",
".",
"objects",
"[",
"object_path",
"]",
"except",
"KeyError",
":",
"raise",
"KeyError",
"(",
"\"Invalid object path: %s\"",
"%",
"object_path",
")"
] | Get a TDMS object from the file
:param path: The object group and channel. Providing no channel
returns a group object, and providing no channel or group
will return the root object.
:rtype: :class:`TdmsObject`
For example, to get the root object::
object()
To get a group::
object("group_name")
To get a channel::
object("group_name", "channel_name") | [
"Get",
"a",
"TDMS",
"object",
"from",
"the",
"file"
| d7d6632d4ebc2e78ed941477c2f1c56bd7493d74 | https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L132-L157 | train | 238,404 |
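Typical usage; the file and object names here are hypothetical:

```python
from nptdms import TdmsFile

tdms_file = TdmsFile("example.tdms")
root = tdms_file.object()
group = tdms_file.object("Group")
channel = tdms_file.object("Group", "Channel1")
print(channel.data)
```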
adamreeve/npTDMS | nptdms/tdms.py | TdmsFile.groups | def groups(self):
"""Return the names of groups in the file
Note that there is not necessarily a TDMS object associated with
each group name.
:rtype: List of strings.
"""
# Split paths into components and take the first (group) component.
object_paths = (
path_components(path)
for path in self.objects)
group_names = (path[0] for path in object_paths if len(path) > 0)
# Use an ordered dict as an ordered set to find unique
# groups in order.
groups_set = OrderedDict()
for group in group_names:
groups_set[group] = None
        return list(groups_set) | python |
"""Return the names of groups in the file
Note that there is not necessarily a TDMS object associated with
each group name.
:rtype: List of strings.
"""
# Split paths into components and take the first (group) component.
object_paths = (
path_components(path)
for path in self.objects)
group_names = (path[0] for path in object_paths if len(path) > 0)
# Use an ordered dict as an ordered set to find unique
# groups in order.
groups_set = OrderedDict()
for group in group_names:
groups_set[group] = None
return list(groups_set) | [
"def",
"groups",
"(",
"self",
")",
":",
"# Split paths into components and take the first (group) component.",
"object_paths",
"=",
"(",
"path_components",
"(",
"path",
")",
"for",
"path",
"in",
"self",
".",
"objects",
")",
"group_names",
"=",
"(",
"path",
"[",
"0",
"]",
"for",
"path",
"in",
"object_paths",
"if",
"len",
"(",
"path",
")",
">",
"0",
")",
"# Use an ordered dict as an ordered set to find unique",
"# groups in order.",
"groups_set",
"=",
"OrderedDict",
"(",
")",
"for",
"group",
"in",
"group_names",
":",
"groups_set",
"[",
"group",
"]",
"=",
"None",
"return",
"list",
"(",
"groups_set",
")"
] | Return the names of groups in the file
Note that there is not necessarily a TDMS object associated with
each group name.
:rtype: List of strings. | [
"Return",
"the",
"names",
"of",
"groups",
"in",
"the",
"file"
| d7d6632d4ebc2e78ed941477c2f1c56bd7493d74 | https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L159-L180 | train | 238,405 |
adamreeve/npTDMS | nptdms/tdms.py | TdmsFile.group_channels | def group_channels(self, group):
"""Returns a list of channel objects for the given group
:param group: Name of the group to get channels for.
:rtype: List of :class:`TdmsObject` objects.
"""
path = self._path(group)
return [
self.objects[p]
for p in self.objects
            if p.startswith(path + '/')] | python |
"""Returns a list of channel objects for the given group
:param group: Name of the group to get channels for.
:rtype: List of :class:`TdmsObject` objects.
"""
path = self._path(group)
return [
self.objects[p]
for p in self.objects
if p.startswith(path + '/')] | [
"def",
"group_channels",
"(",
"self",
",",
"group",
")",
":",
"path",
"=",
"self",
".",
"_path",
"(",
"group",
")",
"return",
"[",
"self",
".",
"objects",
"[",
"p",
"]",
"for",
"p",
"in",
"self",
".",
"objects",
"if",
"p",
".",
"startswith",
"(",
"path",
"+",
"'/'",
")",
"]"
] | Returns a list of channel objects for the given group
:param group: Name of the group to get channels for.
:rtype: List of :class:`TdmsObject` objects. | [
"Returns",
"a",
"list",
"of",
"channel",
"objects",
"for",
"the",
"given",
"group"
| d7d6632d4ebc2e78ed941477c2f1c56bd7493d74 | https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L182-L194 | train | 238,406 |
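Combining `groups` and `group_channels` to walk the whole file; names are hypothetical:

```python
tdms_file = TdmsFile("example.tdms")
for group_name in tdms_file.groups():
    for channel in tdms_file.group_channels(group_name):
        print(group_name, channel.channel, len(channel.data))
```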
adamreeve/npTDMS | nptdms/tdms.py | TdmsFile.as_dataframe | def as_dataframe(self, time_index=False, absolute_time=False):
"""
Converts the TDMS file to a DataFrame
:param time_index: Whether to include a time index for the dataframe.
:param absolute_time: If time_index is true, whether the time index
values are absolute times or relative to the start time.
:return: The full TDMS file data.
:rtype: pandas.DataFrame
"""
import pandas as pd
dataframe_dict = OrderedDict()
for key, value in self.objects.items():
if value.has_data:
index = value.time_track(absolute_time) if time_index else None
dataframe_dict[key] = pd.Series(data=value.data, index=index)
        return pd.DataFrame.from_dict(dataframe_dict) | python |
"""
Converts the TDMS file to a DataFrame
:param time_index: Whether to include a time index for the dataframe.
:param absolute_time: If time_index is true, whether the time index
values are absolute times or relative to the start time.
:return: The full TDMS file data.
:rtype: pandas.DataFrame
"""
import pandas as pd
dataframe_dict = OrderedDict()
for key, value in self.objects.items():
if value.has_data:
index = value.time_track(absolute_time) if time_index else None
dataframe_dict[key] = pd.Series(data=value.data, index=index)
return pd.DataFrame.from_dict(dataframe_dict) | [
"def",
"as_dataframe",
"(",
"self",
",",
"time_index",
"=",
"False",
",",
"absolute_time",
"=",
"False",
")",
":",
"import",
"pandas",
"as",
"pd",
"dataframe_dict",
"=",
"OrderedDict",
"(",
")",
"for",
"key",
",",
"value",
"in",
"self",
".",
"objects",
".",
"items",
"(",
")",
":",
"if",
"value",
".",
"has_data",
":",
"index",
"=",
"value",
".",
"time_track",
"(",
"absolute_time",
")",
"if",
"time_index",
"else",
"None",
"dataframe_dict",
"[",
"key",
"]",
"=",
"pd",
".",
"Series",
"(",
"data",
"=",
"value",
".",
"data",
",",
"index",
"=",
"index",
")",
"return",
"pd",
".",
"DataFrame",
".",
"from_dict",
"(",
"dataframe_dict",
")"
] | Converts the TDMS file to a DataFrame
:param time_index: Whether to include a time index for the dataframe.
:param absolute_time: If time_index is true, whether the time index
values are absolute times or relative to the start time.
:return: The full TDMS file data.
:rtype: pandas.DataFrame | [
"Converts",
"the",
"TDMS",
"file",
"to",
"a",
"DataFrame"
| d7d6632d4ebc2e78ed941477c2f1c56bd7493d74 | https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L208-L226 | train | 238,407 |
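A usage sketch; pandas is imported lazily above, so it only needs to be installed when this method is called:

```python
tdms_file = TdmsFile("example.tdms")  # hypothetical file
df = tdms_file.as_dataframe(time_index=True)
print(df.head())  # columns are object paths such as "/'Group'/'Channel1'"
```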
adamreeve/npTDMS | nptdms/tdms.py | TdmsFile.as_hdf | def as_hdf(self, filepath, mode='w', group='/'):
"""
Converts the TDMS file into an HDF5 file
:param filepath: The path of the HDF5 file you want to write to.
:param mode: The write mode of the HDF5 file. This can be w, a ...
:param group: A group in the HDF5 file that will contain the TDMS data.
"""
import h5py
# Groups in TDMS are mapped to the first level of the HDF5 hierarchy
# Channels in TDMS are then mapped to the second level of the HDF5
# hierarchy, under the appropriate groups.
# Properties in TDMS are mapped to attributes in HDF5.
# These all exist under the appropriate, channel group etc.
h5file = h5py.File(filepath, mode)
container_group = None
if group in h5file:
container_group = h5file[group]
else:
container_group = h5file.create_group(group)
# First write the properties at the root level
try:
root = self.object()
for property_name, property_value in root.properties.items():
container_group.attrs[property_name] = property_value
except KeyError:
# No root object present
pass
# Now iterate through groups and channels,
# writing the properties and data
for group_name in self.groups():
try:
group = self.object(group_name)
# Write the group's properties
for prop_name, prop_value in group.properties.items():
container_group[group_name].attrs[prop_name] = prop_value
except KeyError:
# No group object present
pass
# Write properties and data for each channel
        for channel in self.group_channels(group_name):
            channel_key = group_name + '/' + channel.channel
            container_group[channel_key] = channel.data
            # Attach channel properties to the channel dataset itself;
            # writing them to container_group.attrs would make each
            # channel overwrite the previous channel's properties.
            for prop_name, prop_value in channel.properties.items():
                container_group[channel_key].attrs[prop_name] = prop_value
    return h5file | python |
"""
Converts the TDMS file into an HDF5 file
:param filepath: The path of the HDF5 file you want to write to.
:param mode: The write mode of the HDF5 file. This can be w, a ...
:param group: A group in the HDF5 file that will contain the TDMS data.
"""
import h5py
# Groups in TDMS are mapped to the first level of the HDF5 hierarchy
# Channels in TDMS are then mapped to the second level of the HDF5
# hierarchy, under the appropriate groups.
# Properties in TDMS are mapped to attributes in HDF5.
# These all exist under the appropriate, channel group etc.
h5file = h5py.File(filepath, mode)
container_group = None
if group in h5file:
container_group = h5file[group]
else:
container_group = h5file.create_group(group)
# First write the properties at the root level
try:
root = self.object()
for property_name, property_value in root.properties.items():
container_group.attrs[property_name] = property_value
except KeyError:
# No root object present
pass
# Now iterate through groups and channels,
# writing the properties and data
for group_name in self.groups():
try:
group = self.object(group_name)
# Write the group's properties
for prop_name, prop_value in group.properties.items():
container_group[group_name].attrs[prop_name] = prop_value
except KeyError:
# No group object present
pass
# Write properties and data for each channel
for channel in self.group_channels(group_name):
for prop_name, prop_value in channel.properties.items():
container_group.attrs[prop_name] = prop_value
container_group[group_name+'/'+channel.channel] = channel.data
return h5file | [
"def",
"as_hdf",
"(",
"self",
",",
"filepath",
",",
"mode",
"=",
"'w'",
",",
"group",
"=",
"'/'",
")",
":",
"import",
"h5py",
"# Groups in TDMS are mapped to the first level of the HDF5 hierarchy",
"# Channels in TDMS are then mapped to the second level of the HDF5",
"# hierarchy, under the appropriate groups.",
"# Properties in TDMS are mapped to attributes in HDF5.",
"# These all exist under the appropriate, channel group etc.",
"h5file",
"=",
"h5py",
".",
"File",
"(",
"filepath",
",",
"mode",
")",
"container_group",
"=",
"None",
"if",
"group",
"in",
"h5file",
":",
"container_group",
"=",
"h5file",
"[",
"group",
"]",
"else",
":",
"container_group",
"=",
"h5file",
".",
"create_group",
"(",
"group",
")",
"# First write the properties at the root level",
"try",
":",
"root",
"=",
"self",
".",
"object",
"(",
")",
"for",
"property_name",
",",
"property_value",
"in",
"root",
".",
"properties",
".",
"items",
"(",
")",
":",
"container_group",
".",
"attrs",
"[",
"property_name",
"]",
"=",
"property_value",
"except",
"KeyError",
":",
"# No root object present",
"pass",
"# Now iterate through groups and channels,",
"# writing the properties and data",
"for",
"group_name",
"in",
"self",
".",
"groups",
"(",
")",
":",
"try",
":",
"group",
"=",
"self",
".",
"object",
"(",
"group_name",
")",
"# Write the group's properties",
"for",
"prop_name",
",",
"prop_value",
"in",
"group",
".",
"properties",
".",
"items",
"(",
")",
":",
"container_group",
"[",
"group_name",
"]",
".",
"attrs",
"[",
"prop_name",
"]",
"=",
"prop_value",
"except",
"KeyError",
":",
"# No group object present",
"pass",
"# Write properties and data for each channel",
"for",
"channel",
"in",
"self",
".",
"group_channels",
"(",
"group_name",
")",
":",
"for",
"prop_name",
",",
"prop_value",
"in",
"channel",
".",
"properties",
".",
"items",
"(",
")",
":",
"container_group",
".",
"attrs",
"[",
"prop_name",
"]",
"=",
"prop_value",
"container_group",
"[",
"group_name",
"+",
"'/'",
"+",
"channel",
".",
"channel",
"]",
"=",
"channel",
".",
"data",
"return",
"h5file"
] | Converts the TDMS file into an HDF5 file
:param filepath: The path of the HDF5 file you want to write to.
:param mode: The write mode of the HDF5 file. This can be w, a ...
:param group: A group in the HDF5 file that will contain the TDMS data. | [
"Converts",
"the",
"TDMS",
"file",
"into",
"an",
"HDF5",
"file"
| d7d6632d4ebc2e78ed941477c2f1c56bd7493d74 | https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L228-L284 | train | 238,408 |
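A usage sketch; note the design choice that the open `h5py.File` is returned, so the caller is responsible for closing it:

```python
tdms_file = TdmsFile("example.tdms")              # hypothetical file
h5file = tdms_file.as_hdf("example.h5", group="/tdms")
try:
    data = h5file["/tdms/Group/Channel1"][...]    # illustrative path
finally:
    h5file.close()
```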
adamreeve/npTDMS | nptdms/tdms.py | _TdmsSegment.read_metadata | def read_metadata(self, f, objects, previous_segment=None):
"""Read segment metadata section and update object information"""
if not self.toc["kTocMetaData"]:
try:
self.ordered_objects = previous_segment.ordered_objects
except AttributeError:
raise ValueError(
"kTocMetaData is not set for segment but "
"there is no previous segment")
self.calculate_chunks()
return
if not self.toc["kTocNewObjList"]:
# In this case, there can be a list of new objects that
# are appended, or previous objects can also be repeated
# if their properties change
self.ordered_objects = [
copy(o) for o in previous_segment.ordered_objects]
log.debug("Reading metadata at %d", f.tell())
# First four bytes have number of objects in metadata
num_objects = types.Int32.read(f, self.endianness)
for obj in range(num_objects):
# Read the object path
object_path = types.String.read(f, self.endianness)
# If this is a new segment for an existing object,
# reuse the existing object, otherwise,
# create a new object and add it to the object dictionary
if object_path in objects:
obj = objects[object_path]
else:
obj = TdmsObject(object_path, self.tdms_file)
objects[object_path] = obj
# Add this segment object to the list of segment objects,
# re-using any properties from previous segments.
updating_existing = False
if not self.toc["kTocNewObjList"]:
# Search for the same object from the previous segment
# object list.
obj_index = [
i for i, o in enumerate(self.ordered_objects)
if o.tdms_object is obj]
if len(obj_index) > 0:
updating_existing = True
log.debug("Updating object in segment list")
obj_index = obj_index[0]
segment_obj = self.ordered_objects[obj_index]
if not updating_existing:
if obj._previous_segment_object is not None:
log.debug("Copying previous segment object")
segment_obj = copy(obj._previous_segment_object)
else:
log.debug("Creating a new segment object")
segment_obj = _TdmsSegmentObject(obj, self.endianness)
self.ordered_objects.append(segment_obj)
# Read the metadata for this object, updating any
# data structure information and properties.
segment_obj._read_metadata(f)
obj._previous_segment_object = segment_obj
    self.calculate_chunks() | python |
"""Read segment metadata section and update object information"""
if not self.toc["kTocMetaData"]:
try:
self.ordered_objects = previous_segment.ordered_objects
except AttributeError:
raise ValueError(
"kTocMetaData is not set for segment but "
"there is no previous segment")
self.calculate_chunks()
return
if not self.toc["kTocNewObjList"]:
# In this case, there can be a list of new objects that
# are appended, or previous objects can also be repeated
# if their properties change
self.ordered_objects = [
copy(o) for o in previous_segment.ordered_objects]
log.debug("Reading metadata at %d", f.tell())
# First four bytes have number of objects in metadata
num_objects = types.Int32.read(f, self.endianness)
for obj in range(num_objects):
# Read the object path
object_path = types.String.read(f, self.endianness)
# If this is a new segment for an existing object,
# reuse the existing object, otherwise,
# create a new object and add it to the object dictionary
if object_path in objects:
obj = objects[object_path]
else:
obj = TdmsObject(object_path, self.tdms_file)
objects[object_path] = obj
# Add this segment object to the list of segment objects,
# re-using any properties from previous segments.
updating_existing = False
if not self.toc["kTocNewObjList"]:
# Search for the same object from the previous segment
# object list.
obj_index = [
i for i, o in enumerate(self.ordered_objects)
if o.tdms_object is obj]
if len(obj_index) > 0:
updating_existing = True
log.debug("Updating object in segment list")
obj_index = obj_index[0]
segment_obj = self.ordered_objects[obj_index]
if not updating_existing:
if obj._previous_segment_object is not None:
log.debug("Copying previous segment object")
segment_obj = copy(obj._previous_segment_object)
else:
log.debug("Creating a new segment object")
segment_obj = _TdmsSegmentObject(obj, self.endianness)
self.ordered_objects.append(segment_obj)
# Read the metadata for this object, updating any
# data structure information and properties.
segment_obj._read_metadata(f)
obj._previous_segment_object = segment_obj
self.calculate_chunks() | [
"def",
"read_metadata",
"(",
"self",
",",
"f",
",",
"objects",
",",
"previous_segment",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"toc",
"[",
"\"kTocMetaData\"",
"]",
":",
"try",
":",
"self",
".",
"ordered_objects",
"=",
"previous_segment",
".",
"ordered_objects",
"except",
"AttributeError",
":",
"raise",
"ValueError",
"(",
"\"kTocMetaData is not set for segment but \"",
"\"there is no previous segment\"",
")",
"self",
".",
"calculate_chunks",
"(",
")",
"return",
"if",
"not",
"self",
".",
"toc",
"[",
"\"kTocNewObjList\"",
"]",
":",
"# In this case, there can be a list of new objects that",
"# are appended, or previous objects can also be repeated",
"# if their properties change",
"self",
".",
"ordered_objects",
"=",
"[",
"copy",
"(",
"o",
")",
"for",
"o",
"in",
"previous_segment",
".",
"ordered_objects",
"]",
"log",
".",
"debug",
"(",
"\"Reading metadata at %d\"",
",",
"f",
".",
"tell",
"(",
")",
")",
"# First four bytes have number of objects in metadata",
"num_objects",
"=",
"types",
".",
"Int32",
".",
"read",
"(",
"f",
",",
"self",
".",
"endianness",
")",
"for",
"obj",
"in",
"range",
"(",
"num_objects",
")",
":",
"# Read the object path",
"object_path",
"=",
"types",
".",
"String",
".",
"read",
"(",
"f",
",",
"self",
".",
"endianness",
")",
"# If this is a new segment for an existing object,",
"# reuse the existing object, otherwise,",
"# create a new object and add it to the object dictionary",
"if",
"object_path",
"in",
"objects",
":",
"obj",
"=",
"objects",
"[",
"object_path",
"]",
"else",
":",
"obj",
"=",
"TdmsObject",
"(",
"object_path",
",",
"self",
".",
"tdms_file",
")",
"objects",
"[",
"object_path",
"]",
"=",
"obj",
"# Add this segment object to the list of segment objects,",
"# re-using any properties from previous segments.",
"updating_existing",
"=",
"False",
"if",
"not",
"self",
".",
"toc",
"[",
"\"kTocNewObjList\"",
"]",
":",
"# Search for the same object from the previous segment",
"# object list.",
"obj_index",
"=",
"[",
"i",
"for",
"i",
",",
"o",
"in",
"enumerate",
"(",
"self",
".",
"ordered_objects",
")",
"if",
"o",
".",
"tdms_object",
"is",
"obj",
"]",
"if",
"len",
"(",
"obj_index",
")",
">",
"0",
":",
"updating_existing",
"=",
"True",
"log",
".",
"debug",
"(",
"\"Updating object in segment list\"",
")",
"obj_index",
"=",
"obj_index",
"[",
"0",
"]",
"segment_obj",
"=",
"self",
".",
"ordered_objects",
"[",
"obj_index",
"]",
"if",
"not",
"updating_existing",
":",
"if",
"obj",
".",
"_previous_segment_object",
"is",
"not",
"None",
":",
"log",
".",
"debug",
"(",
"\"Copying previous segment object\"",
")",
"segment_obj",
"=",
"copy",
"(",
"obj",
".",
"_previous_segment_object",
")",
"else",
":",
"log",
".",
"debug",
"(",
"\"Creating a new segment object\"",
")",
"segment_obj",
"=",
"_TdmsSegmentObject",
"(",
"obj",
",",
"self",
".",
"endianness",
")",
"self",
".",
"ordered_objects",
".",
"append",
"(",
"segment_obj",
")",
"# Read the metadata for this object, updating any",
"# data structure information and properties.",
"segment_obj",
".",
"_read_metadata",
"(",
"f",
")",
"obj",
".",
"_previous_segment_object",
"=",
"segment_obj",
"self",
".",
"calculate_chunks",
"(",
")"
] | Read segment metadata section and update object information | [
"Read",
"segment",
"metadata",
"section",
"and",
"update",
"object",
"information"
| d7d6632d4ebc2e78ed941477c2f1c56bd7493d74 | https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L359-L423 | train | 238,409 |
adamreeve/npTDMS | nptdms/tdms.py | _TdmsSegment.calculate_chunks | def calculate_chunks(self):
"""
Work out the number of chunks the data is in, for cases
where the meta data doesn't change at all so there is no
lead in.
Also increments the number of values for objects in this
segment, based on the number of chunks.
"""
if self.toc['kTocDAQmxRawData']:
# chunks defined differently for DAQmxRawData format
try:
data_size = next(
o.number_values * o.raw_data_width
for o in self.ordered_objects
if o.has_data and o.number_values * o.raw_data_width > 0)
except StopIteration:
data_size = 0
else:
data_size = sum([
o.data_size
for o in self.ordered_objects if o.has_data])
total_data_size = self.next_segment_offset - self.raw_data_offset
if data_size < 0 or total_data_size < 0:
raise ValueError("Negative data size")
elif data_size == 0:
# Sometimes kTocRawData is set, but there isn't actually any data
if total_data_size != data_size:
raise ValueError(
"Zero channel data size but data length based on "
"segment offset is %d." % total_data_size)
self.num_chunks = 0
return
chunk_remainder = total_data_size % data_size
if chunk_remainder == 0:
self.num_chunks = int(total_data_size // data_size)
# Update data count for the overall tdms object
# using the data count for this segment.
for obj in self.ordered_objects:
if obj.has_data:
obj.tdms_object.number_values += (
obj.number_values * self.num_chunks)
else:
log.warning(
"Data size %d is not a multiple of the "
"chunk size %d. Will attempt to read last chunk" %
(total_data_size, data_size))
self.num_chunks = 1 + int(total_data_size // data_size)
self.final_chunk_proportion = (
float(chunk_remainder) / float(data_size))
for obj in self.ordered_objects:
if obj.has_data:
obj.tdms_object.number_values += (
obj.number_values * (self.num_chunks - 1) + int(
                    obj.number_values * self.final_chunk_proportion)) | python |
"""
Work out the number of chunks the data is in, for cases
where the meta data doesn't change at all so there is no
lead in.
Also increments the number of values for objects in this
segment, based on the number of chunks.
"""
if self.toc['kTocDAQmxRawData']:
# chunks defined differently for DAQmxRawData format
try:
data_size = next(
o.number_values * o.raw_data_width
for o in self.ordered_objects
if o.has_data and o.number_values * o.raw_data_width > 0)
except StopIteration:
data_size = 0
else:
data_size = sum([
o.data_size
for o in self.ordered_objects if o.has_data])
total_data_size = self.next_segment_offset - self.raw_data_offset
if data_size < 0 or total_data_size < 0:
raise ValueError("Negative data size")
elif data_size == 0:
# Sometimes kTocRawData is set, but there isn't actually any data
if total_data_size != data_size:
raise ValueError(
"Zero channel data size but data length based on "
"segment offset is %d." % total_data_size)
self.num_chunks = 0
return
chunk_remainder = total_data_size % data_size
if chunk_remainder == 0:
self.num_chunks = int(total_data_size // data_size)
# Update data count for the overall tdms object
# using the data count for this segment.
for obj in self.ordered_objects:
if obj.has_data:
obj.tdms_object.number_values += (
obj.number_values * self.num_chunks)
else:
log.warning(
"Data size %d is not a multiple of the "
"chunk size %d. Will attempt to read last chunk" %
(total_data_size, data_size))
self.num_chunks = 1 + int(total_data_size // data_size)
self.final_chunk_proportion = (
float(chunk_remainder) / float(data_size))
for obj in self.ordered_objects:
if obj.has_data:
obj.tdms_object.number_values += (
obj.number_values * (self.num_chunks - 1) + int(
obj.number_values * self.final_chunk_proportion)) | [
"def",
"calculate_chunks",
"(",
"self",
")",
":",
"if",
"self",
".",
"toc",
"[",
"'kTocDAQmxRawData'",
"]",
":",
"# chunks defined differently for DAQmxRawData format",
"try",
":",
"data_size",
"=",
"next",
"(",
"o",
".",
"number_values",
"*",
"o",
".",
"raw_data_width",
"for",
"o",
"in",
"self",
".",
"ordered_objects",
"if",
"o",
".",
"has_data",
"and",
"o",
".",
"number_values",
"*",
"o",
".",
"raw_data_width",
">",
"0",
")",
"except",
"StopIteration",
":",
"data_size",
"=",
"0",
"else",
":",
"data_size",
"=",
"sum",
"(",
"[",
"o",
".",
"data_size",
"for",
"o",
"in",
"self",
".",
"ordered_objects",
"if",
"o",
".",
"has_data",
"]",
")",
"total_data_size",
"=",
"self",
".",
"next_segment_offset",
"-",
"self",
".",
"raw_data_offset",
"if",
"data_size",
"<",
"0",
"or",
"total_data_size",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"Negative data size\"",
")",
"elif",
"data_size",
"==",
"0",
":",
"# Sometimes kTocRawData is set, but there isn't actually any data",
"if",
"total_data_size",
"!=",
"data_size",
":",
"raise",
"ValueError",
"(",
"\"Zero channel data size but data length based on \"",
"\"segment offset is %d.\"",
"%",
"total_data_size",
")",
"self",
".",
"num_chunks",
"=",
"0",
"return",
"chunk_remainder",
"=",
"total_data_size",
"%",
"data_size",
"if",
"chunk_remainder",
"==",
"0",
":",
"self",
".",
"num_chunks",
"=",
"int",
"(",
"total_data_size",
"//",
"data_size",
")",
"# Update data count for the overall tdms object",
"# using the data count for this segment.",
"for",
"obj",
"in",
"self",
".",
"ordered_objects",
":",
"if",
"obj",
".",
"has_data",
":",
"obj",
".",
"tdms_object",
".",
"number_values",
"+=",
"(",
"obj",
".",
"number_values",
"*",
"self",
".",
"num_chunks",
")",
"else",
":",
"log",
".",
"warning",
"(",
"\"Data size %d is not a multiple of the \"",
"\"chunk size %d. Will attempt to read last chunk\"",
"%",
"(",
"total_data_size",
",",
"data_size",
")",
")",
"self",
".",
"num_chunks",
"=",
"1",
"+",
"int",
"(",
"total_data_size",
"//",
"data_size",
")",
"self",
".",
"final_chunk_proportion",
"=",
"(",
"float",
"(",
"chunk_remainder",
")",
"/",
"float",
"(",
"data_size",
")",
")",
"for",
"obj",
"in",
"self",
".",
"ordered_objects",
":",
"if",
"obj",
".",
"has_data",
":",
"obj",
".",
"tdms_object",
".",
"number_values",
"+=",
"(",
"obj",
".",
"number_values",
"*",
"(",
"self",
".",
"num_chunks",
"-",
"1",
")",
"+",
"int",
"(",
"obj",
".",
"number_values",
"*",
"self",
".",
"final_chunk_proportion",
")",
")"
] | Work out the number of chunks the data is in, for cases
where the meta data doesn't change at all so there is no
lead in.
Also increments the number of values for objects in this
segment, based on the number of chunks. | [
"Work",
"out",
"the",
"number",
"of",
"chunks",
"the",
"data",
"is",
"in",
"for",
"cases",
"where",
"the",
"meta",
"data",
"doesn",
"t",
"change",
"at",
"all",
"so",
"there",
"is",
"no",
"lead",
"in",
"."
| d7d6632d4ebc2e78ed941477c2f1c56bd7493d74 | https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L425-L485 | train | 238,410 |
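The truncated-chunk arithmetic as a worked example with illustrative sizes: 250 bytes of raw data at 100 bytes per full chunk gives two full chunks plus half a chunk.

```python
total_data_size = 250    # next_segment_offset - raw_data_offset
data_size = 100          # bytes per chunk summed over all channels

chunk_remainder = total_data_size % data_size          # 50
num_chunks = 1 + total_data_size // data_size          # 3
final_chunk_proportion = chunk_remainder / data_size   # 0.5
# A channel with 10 values per chunk contributes
# 10 * (3 - 1) + int(10 * 0.5) = 25 values to this segment.
```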
adamreeve/npTDMS | nptdms/tdms.py | _TdmsSegment.read_raw_data | def read_raw_data(self, f):
"""Read signal data from file"""
if not self.toc["kTocRawData"]:
return
f.seek(self.data_position)
total_data_size = self.next_segment_offset - self.raw_data_offset
log.debug(
"Reading %d bytes of data at %d in %d chunks" %
(total_data_size, f.tell(), self.num_chunks))
for chunk in range(self.num_chunks):
if self.toc["kTocInterleavedData"]:
log.debug("Data is interleaved")
data_objects = [o for o in self.ordered_objects if o.has_data]
# If all data types have numpy types and all the lengths are
# the same, then we can read all data at once with numpy,
# which is much faster
all_numpy = all(
(o.data_type.nptype is not None for o in data_objects))
same_length = (len(
set((o.number_values for o in data_objects))) == 1)
if (all_numpy and same_length):
self._read_interleaved_numpy(f, data_objects)
else:
self._read_interleaved(f, data_objects)
else:
object_data = {}
log.debug("Data is contiguous")
for obj in self.ordered_objects:
if obj.has_data:
if (chunk == (self.num_chunks - 1) and
self.final_chunk_proportion != 1.0):
number_values = int(
obj.number_values *
self.final_chunk_proportion)
else:
number_values = obj.number_values
object_data[obj.path] = (
obj._read_values(f, number_values))
for obj in self.ordered_objects:
if obj.has_data:
                    obj.tdms_object._update_data(object_data[obj.path]) | python |
"""Read signal data from file"""
if not self.toc["kTocRawData"]:
return
f.seek(self.data_position)
total_data_size = self.next_segment_offset - self.raw_data_offset
log.debug(
"Reading %d bytes of data at %d in %d chunks" %
(total_data_size, f.tell(), self.num_chunks))
for chunk in range(self.num_chunks):
if self.toc["kTocInterleavedData"]:
log.debug("Data is interleaved")
data_objects = [o for o in self.ordered_objects if o.has_data]
# If all data types have numpy types and all the lengths are
# the same, then we can read all data at once with numpy,
# which is much faster
all_numpy = all(
(o.data_type.nptype is not None for o in data_objects))
same_length = (len(
set((o.number_values for o in data_objects))) == 1)
if (all_numpy and same_length):
self._read_interleaved_numpy(f, data_objects)
else:
self._read_interleaved(f, data_objects)
else:
object_data = {}
log.debug("Data is contiguous")
for obj in self.ordered_objects:
if obj.has_data:
if (chunk == (self.num_chunks - 1) and
self.final_chunk_proportion != 1.0):
number_values = int(
obj.number_values *
self.final_chunk_proportion)
else:
number_values = obj.number_values
object_data[obj.path] = (
obj._read_values(f, number_values))
for obj in self.ordered_objects:
if obj.has_data:
obj.tdms_object._update_data(object_data[obj.path]) | [
"def",
"read_raw_data",
"(",
"self",
",",
"f",
")",
":",
"if",
"not",
"self",
".",
"toc",
"[",
"\"kTocRawData\"",
"]",
":",
"return",
"f",
".",
"seek",
"(",
"self",
".",
"data_position",
")",
"total_data_size",
"=",
"self",
".",
"next_segment_offset",
"-",
"self",
".",
"raw_data_offset",
"log",
".",
"debug",
"(",
"\"Reading %d bytes of data at %d in %d chunks\"",
"%",
"(",
"total_data_size",
",",
"f",
".",
"tell",
"(",
")",
",",
"self",
".",
"num_chunks",
")",
")",
"for",
"chunk",
"in",
"range",
"(",
"self",
".",
"num_chunks",
")",
":",
"if",
"self",
".",
"toc",
"[",
"\"kTocInterleavedData\"",
"]",
":",
"log",
".",
"debug",
"(",
"\"Data is interleaved\"",
")",
"data_objects",
"=",
"[",
"o",
"for",
"o",
"in",
"self",
".",
"ordered_objects",
"if",
"o",
".",
"has_data",
"]",
"# If all data types have numpy types and all the lengths are",
"# the same, then we can read all data at once with numpy,",
"# which is much faster",
"all_numpy",
"=",
"all",
"(",
"(",
"o",
".",
"data_type",
".",
"nptype",
"is",
"not",
"None",
"for",
"o",
"in",
"data_objects",
")",
")",
"same_length",
"=",
"(",
"len",
"(",
"set",
"(",
"(",
"o",
".",
"number_values",
"for",
"o",
"in",
"data_objects",
")",
")",
")",
"==",
"1",
")",
"if",
"(",
"all_numpy",
"and",
"same_length",
")",
":",
"self",
".",
"_read_interleaved_numpy",
"(",
"f",
",",
"data_objects",
")",
"else",
":",
"self",
".",
"_read_interleaved",
"(",
"f",
",",
"data_objects",
")",
"else",
":",
"object_data",
"=",
"{",
"}",
"log",
".",
"debug",
"(",
"\"Data is contiguous\"",
")",
"for",
"obj",
"in",
"self",
".",
"ordered_objects",
":",
"if",
"obj",
".",
"has_data",
":",
"if",
"(",
"chunk",
"==",
"(",
"self",
".",
"num_chunks",
"-",
"1",
")",
"and",
"self",
".",
"final_chunk_proportion",
"!=",
"1.0",
")",
":",
"number_values",
"=",
"int",
"(",
"obj",
".",
"number_values",
"*",
"self",
".",
"final_chunk_proportion",
")",
"else",
":",
"number_values",
"=",
"obj",
".",
"number_values",
"object_data",
"[",
"obj",
".",
"path",
"]",
"=",
"(",
"obj",
".",
"_read_values",
"(",
"f",
",",
"number_values",
")",
")",
"for",
"obj",
"in",
"self",
".",
"ordered_objects",
":",
"if",
"obj",
".",
"has_data",
":",
"obj",
".",
"tdms_object",
".",
"_update_data",
"(",
"object_data",
"[",
"obj",
".",
"path",
"]",
")"
] | Read signal data from file | [
"Read",
"signal",
"data",
"from",
"file"
| d7d6632d4ebc2e78ed941477c2f1c56bd7493d74 | https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L487-L532 | train | 238,411 |
adamreeve/npTDMS | nptdms/tdms.py | _TdmsSegment._read_interleaved_numpy | def _read_interleaved_numpy(self, f, data_objects):
"""Read interleaved data where all channels have a numpy type"""
log.debug("Reading interleaved data all at once")
# Read all data into 1 byte unsigned ints first
all_channel_bytes = data_objects[0].raw_data_width
if all_channel_bytes == 0:
all_channel_bytes = sum((o.data_type.size for o in data_objects))
log.debug("all_channel_bytes: %d", all_channel_bytes)
number_bytes = int(all_channel_bytes * data_objects[0].number_values)
combined_data = fromfile(f, dtype=np.uint8, count=number_bytes)
# Reshape, so that one row is all bytes for all objects
combined_data = combined_data.reshape(-1, all_channel_bytes)
# Now set arrays for each channel
data_pos = 0
for (i, obj) in enumerate(data_objects):
byte_columns = tuple(
range(data_pos, obj.data_type.size + data_pos))
log.debug("Byte columns for channel %d: %s", i, byte_columns)
# Select columns for this channel, so that number of values will
# be number of bytes per point * number of data points.
# Then use ravel to flatten the results into a vector.
object_data = combined_data[:, byte_columns].ravel()
# Now set correct data type, so that the array length should
# be correct
object_data.dtype = (
np.dtype(obj.data_type.nptype).newbyteorder(self.endianness))
obj.tdms_object._update_data(object_data)
            data_pos += obj.data_type.size | python |
"""Read interleaved data where all channels have a numpy type"""
log.debug("Reading interleaved data all at once")
# Read all data into 1 byte unsigned ints first
all_channel_bytes = data_objects[0].raw_data_width
if all_channel_bytes == 0:
all_channel_bytes = sum((o.data_type.size for o in data_objects))
log.debug("all_channel_bytes: %d", all_channel_bytes)
number_bytes = int(all_channel_bytes * data_objects[0].number_values)
combined_data = fromfile(f, dtype=np.uint8, count=number_bytes)
# Reshape, so that one row is all bytes for all objects
combined_data = combined_data.reshape(-1, all_channel_bytes)
# Now set arrays for each channel
data_pos = 0
for (i, obj) in enumerate(data_objects):
byte_columns = tuple(
range(data_pos, obj.data_type.size + data_pos))
log.debug("Byte columns for channel %d: %s", i, byte_columns)
# Select columns for this channel, so that number of values will
# be number of bytes per point * number of data points.
# Then use ravel to flatten the results into a vector.
object_data = combined_data[:, byte_columns].ravel()
# Now set correct data type, so that the array length should
# be correct
object_data.dtype = (
np.dtype(obj.data_type.nptype).newbyteorder(self.endianness))
obj.tdms_object._update_data(object_data)
data_pos += obj.data_type.size | [
"def",
"_read_interleaved_numpy",
"(",
"self",
",",
"f",
",",
"data_objects",
")",
":",
"log",
".",
"debug",
"(",
"\"Reading interleaved data all at once\"",
")",
"# Read all data into 1 byte unsigned ints first",
"all_channel_bytes",
"=",
"data_objects",
"[",
"0",
"]",
".",
"raw_data_width",
"if",
"all_channel_bytes",
"==",
"0",
":",
"all_channel_bytes",
"=",
"sum",
"(",
"(",
"o",
".",
"data_type",
".",
"size",
"for",
"o",
"in",
"data_objects",
")",
")",
"log",
".",
"debug",
"(",
"\"all_channel_bytes: %d\"",
",",
"all_channel_bytes",
")",
"number_bytes",
"=",
"int",
"(",
"all_channel_bytes",
"*",
"data_objects",
"[",
"0",
"]",
".",
"number_values",
")",
"combined_data",
"=",
"fromfile",
"(",
"f",
",",
"dtype",
"=",
"np",
".",
"uint8",
",",
"count",
"=",
"number_bytes",
")",
"# Reshape, so that one row is all bytes for all objects",
"combined_data",
"=",
"combined_data",
".",
"reshape",
"(",
"-",
"1",
",",
"all_channel_bytes",
")",
"# Now set arrays for each channel",
"data_pos",
"=",
"0",
"for",
"(",
"i",
",",
"obj",
")",
"in",
"enumerate",
"(",
"data_objects",
")",
":",
"byte_columns",
"=",
"tuple",
"(",
"range",
"(",
"data_pos",
",",
"obj",
".",
"data_type",
".",
"size",
"+",
"data_pos",
")",
")",
"log",
".",
"debug",
"(",
"\"Byte columns for channel %d: %s\"",
",",
"i",
",",
"byte_columns",
")",
"# Select columns for this channel, so that number of values will",
"# be number of bytes per point * number of data points.",
"# Then use ravel to flatten the results into a vector.",
"object_data",
"=",
"combined_data",
"[",
":",
",",
"byte_columns",
"]",
".",
"ravel",
"(",
")",
"# Now set correct data type, so that the array length should",
"# be correct",
"object_data",
".",
"dtype",
"=",
"(",
"np",
".",
"dtype",
"(",
"obj",
".",
"data_type",
".",
"nptype",
")",
".",
"newbyteorder",
"(",
"self",
".",
"endianness",
")",
")",
"obj",
".",
"tdms_object",
".",
"_update_data",
"(",
"object_data",
")",
"data_pos",
"+=",
"obj",
".",
"data_type",
".",
"size"
] | Read interleaved data where all channels have a numpy type | [
"Read",
"interleaved",
"data",
"where",
"all",
"channels",
"have",
"a",
"numpy",
"type"
| d7d6632d4ebc2e78ed941477c2f1c56bd7493d74 | https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L534-L562 | train | 238,412 |
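The byte-column trick in isolation, as a self-contained sketch with two hypothetical channels (a little-endian int16 and float32, 6 bytes per record): read the interleaved stream as uint8, reshape to one record per row, slice out each channel's byte columns, then reinterpret the flattened copy as the channel's dtype.

```python
import numpy as np

records = np.zeros(3, dtype=[("a", "<i2"), ("b", "<f4")])  # packed, 6 bytes
records["a"] = [1, 2, 3]
records["b"] = [0.5, 1.5, 2.5]
raw = np.frombuffer(records.tobytes(), dtype=np.uint8)

combined = raw.reshape(-1, 6)       # one interleaved record per row
a = combined[:, 0:2].ravel()        # ravel copies the non-contiguous columns
a.dtype = np.dtype("<i2")           # reinterpret the bytes in place
b = combined[:, 2:6].ravel()
b.dtype = np.dtype("<f4")
print(a, b)                         # [1 2 3] [0.5 1.5 2.5]
```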
adamreeve/npTDMS | nptdms/tdms.py | _TdmsSegment._read_interleaved | def _read_interleaved(self, f, data_objects):
"""Read interleaved data that doesn't have a numpy type"""
log.debug("Reading interleaved data point by point")
object_data = {}
points_added = {}
for obj in data_objects:
object_data[obj.path] = obj._new_segment_data()
points_added[obj.path] = 0
while any([points_added[o.path] < o.number_values
for o in data_objects]):
for obj in data_objects:
if points_added[obj.path] < obj.number_values:
object_data[obj.path][points_added[obj.path]] = (
obj._read_value(f))
points_added[obj.path] += 1
for obj in data_objects:
            obj.tdms_object._update_data(object_data[obj.path]) | python |
"""Read interleaved data that doesn't have a numpy type"""
log.debug("Reading interleaved data point by point")
object_data = {}
points_added = {}
for obj in data_objects:
object_data[obj.path] = obj._new_segment_data()
points_added[obj.path] = 0
while any([points_added[o.path] < o.number_values
for o in data_objects]):
for obj in data_objects:
if points_added[obj.path] < obj.number_values:
object_data[obj.path][points_added[obj.path]] = (
obj._read_value(f))
points_added[obj.path] += 1
for obj in data_objects:
obj.tdms_object._update_data(object_data[obj.path]) | [
"def",
"_read_interleaved",
"(",
"self",
",",
"f",
",",
"data_objects",
")",
":",
"log",
".",
"debug",
"(",
"\"Reading interleaved data point by point\"",
")",
"object_data",
"=",
"{",
"}",
"points_added",
"=",
"{",
"}",
"for",
"obj",
"in",
"data_objects",
":",
"object_data",
"[",
"obj",
".",
"path",
"]",
"=",
"obj",
".",
"_new_segment_data",
"(",
")",
"points_added",
"[",
"obj",
".",
"path",
"]",
"=",
"0",
"while",
"any",
"(",
"[",
"points_added",
"[",
"o",
".",
"path",
"]",
"<",
"o",
".",
"number_values",
"for",
"o",
"in",
"data_objects",
"]",
")",
":",
"for",
"obj",
"in",
"data_objects",
":",
"if",
"points_added",
"[",
"obj",
".",
"path",
"]",
"<",
"obj",
".",
"number_values",
":",
"object_data",
"[",
"obj",
".",
"path",
"]",
"[",
"points_added",
"[",
"obj",
".",
"path",
"]",
"]",
"=",
"(",
"obj",
".",
"_read_value",
"(",
"f",
")",
")",
"points_added",
"[",
"obj",
".",
"path",
"]",
"+=",
"1",
"for",
"obj",
"in",
"data_objects",
":",
"obj",
".",
"tdms_object",
".",
"_update_data",
"(",
"object_data",
"[",
"obj",
".",
"path",
"]",
")"
] | Read interleaved data that doesn't have a numpy type | [
"Read",
"interleaved",
"data",
"that",
"doesn",
"t",
"have",
"a",
"numpy",
"type"
| d7d6632d4ebc2e78ed941477c2f1c56bd7493d74 | https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L564-L581 | train | 238,413 |
adamreeve/npTDMS | nptdms/tdms.py | TdmsObject.time_track | def time_track(self, absolute_time=False, accuracy='ns'):
"""Return an array of time or the independent variable for this channel
This depends on the object having the wf_increment
and wf_start_offset properties defined.
Note that wf_start_offset is usually zero for time-series data.
If you have time-series data channels with different start times,
you should use the absolute time or calculate the time offsets using
the wf_start_time property.
For larger timespans, the accuracy setting should be set lower.
The default setting is 'ns', which has a timespan of
[1678 AD, 2262 AD]. For the exact ranges, refer to
http://docs.scipy.org/doc/numpy/reference/arrays.datetime.html
section "Datetime Units".
:param absolute_time: Whether the returned time values are absolute
times rather than relative to the start time. If true, the
wf_start_time property must be set.
:param accuracy: The accuracy of the returned datetime64 array.
:rtype: NumPy array.
:raises: KeyError if required properties aren't found
"""
try:
increment = self.property('wf_increment')
offset = self.property('wf_start_offset')
except KeyError:
raise KeyError("Object does not have time properties available.")
periods = len(self._data)
relative_time = np.linspace(
offset,
offset + (periods - 1) * increment,
periods)
if not absolute_time:
return relative_time
try:
start_time = self.property('wf_start_time')
except KeyError:
raise KeyError(
"Object does not have start time property available.")
try:
unit_correction = {
's': 1e0,
'ms': 1e3,
'us': 1e6,
'ns': 1e9,
}[accuracy]
except KeyError:
raise KeyError("Invalid accuracy: {0}".format(accuracy))
# Because numpy only knows ints as its date datatype,
# convert to accuracy.
time_type = "timedelta64[{0}]".format(accuracy)
return (np.datetime64(start_time) +
                (relative_time * unit_correction).astype(time_type)) | python |
"""Return an array of time or the independent variable for this channel
This depends on the object having the wf_increment
and wf_start_offset properties defined.
Note that wf_start_offset is usually zero for time-series data.
If you have time-series data channels with different start times,
you should use the absolute time or calculate the time offsets using
the wf_start_time property.
For larger timespans, the accuracy setting should be set lower.
The default setting is 'ns', which has a timespan of
[1678 AD, 2262 AD]. For the exact ranges, refer to
http://docs.scipy.org/doc/numpy/reference/arrays.datetime.html
section "Datetime Units".
:param absolute_time: Whether the returned time values are absolute
times rather than relative to the start time. If true, the
wf_start_time property must be set.
:param accuracy: The accuracy of the returned datetime64 array.
:rtype: NumPy array.
:raises: KeyError if required properties aren't found
"""
try:
increment = self.property('wf_increment')
offset = self.property('wf_start_offset')
except KeyError:
raise KeyError("Object does not have time properties available.")
periods = len(self._data)
relative_time = np.linspace(
offset,
offset + (periods - 1) * increment,
periods)
if not absolute_time:
return relative_time
try:
start_time = self.property('wf_start_time')
except KeyError:
raise KeyError(
"Object does not have start time property available.")
try:
unit_correction = {
's': 1e0,
'ms': 1e3,
'us': 1e6,
'ns': 1e9,
}[accuracy]
except KeyError:
raise KeyError("Invalid accuracy: {0}".format(accuracy))
# Because numpy only knows ints as its date datatype,
# convert to accuracy.
time_type = "timedelta64[{0}]".format(accuracy)
return (np.datetime64(start_time) +
(relative_time * unit_correction).astype(time_type)) | [
"def",
"time_track",
"(",
"self",
",",
"absolute_time",
"=",
"False",
",",
"accuracy",
"=",
"'ns'",
")",
":",
"try",
":",
"increment",
"=",
"self",
".",
"property",
"(",
"'wf_increment'",
")",
"offset",
"=",
"self",
".",
"property",
"(",
"'wf_start_offset'",
")",
"except",
"KeyError",
":",
"raise",
"KeyError",
"(",
"\"Object does not have time properties available.\"",
")",
"periods",
"=",
"len",
"(",
"self",
".",
"_data",
")",
"relative_time",
"=",
"np",
".",
"linspace",
"(",
"offset",
",",
"offset",
"+",
"(",
"periods",
"-",
"1",
")",
"*",
"increment",
",",
"periods",
")",
"if",
"not",
"absolute_time",
":",
"return",
"relative_time",
"try",
":",
"start_time",
"=",
"self",
".",
"property",
"(",
"'wf_start_time'",
")",
"except",
"KeyError",
":",
"raise",
"KeyError",
"(",
"\"Object does not have start time property available.\"",
")",
"try",
":",
"unit_correction",
"=",
"{",
"'s'",
":",
"1e0",
",",
"'ms'",
":",
"1e3",
",",
"'us'",
":",
"1e6",
",",
"'ns'",
":",
"1e9",
",",
"}",
"[",
"accuracy",
"]",
"except",
"KeyError",
":",
"raise",
"KeyError",
"(",
"\"Invalid accuracy: {0}\"",
".",
"format",
"(",
"accuracy",
")",
")",
"# Because numpy only knows ints as its date datatype,",
"# convert to accuracy.",
"time_type",
"=",
"\"timedelta64[{0}]\"",
".",
"format",
"(",
"accuracy",
")",
"return",
"(",
"np",
".",
"datetime64",
"(",
"start_time",
")",
"+",
"(",
"relative_time",
"*",
"unit_correction",
")",
".",
"astype",
"(",
"time_type",
")",
")"
] | Return an array of time or the independent variable for this channel
This depends on the object having the wf_increment
and wf_start_offset properties defined.
Note that wf_start_offset is usually zero for time-series data.
If you have time-series data channels with different start times,
you should use the absolute time or calculate the time offsets using
the wf_start_time property.
For larger timespans, the accuracy setting should be set lower.
The default setting is 'ns', which has a timespan of
[1678 AD, 2262 AD]. For the exact ranges, refer to
http://docs.scipy.org/doc/numpy/reference/arrays.datetime.html
section "Datetime Units".
:param absolute_time: Whether the returned time values are absolute
times rather than relative to the start time. If true, the
wf_start_time property must be set.
:param accuracy: The accuracy of the returned datetime64 array.
:rtype: NumPy array.
:raises: KeyError if required properties aren't found | [
"Return",
"an",
"array",
"of",
"time",
"or",
"the",
"independent",
"variable",
"for",
"this",
"channel"
| d7d6632d4ebc2e78ed941477c2f1c56bd7493d74 | https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L645-L706 | train | 238,414 |
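A usage sketch with hypothetical names; the channel must carry the wf_increment and wf_start_offset properties (and wf_start_time for absolute times):

```python
tdms_file = TdmsFile("example.tdms")
channel = tdms_file.object("Group", "Channel1")

t_rel = channel.time_track()                                   # seconds
t_abs = channel.time_track(absolute_time=True, accuracy="us")
# t_abs is a datetime64 array anchored at wf_start_time
```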
adamreeve/npTDMS | nptdms/tdms.py | TdmsObject._initialise_data | def _initialise_data(self, memmap_dir=None):
"""Initialise data array to zeros"""
if self.number_values == 0:
pass
elif self.data_type.nptype is None:
self._data = []
else:
if memmap_dir:
memmap_file = tempfile.NamedTemporaryFile(
mode='w+b', prefix="nptdms_", dir=memmap_dir)
self._data = np.memmap(
memmap_file.file,
mode='w+',
shape=(self.number_values,),
dtype=self.data_type.nptype)
else:
self._data = np.zeros(
self.number_values, dtype=self.data_type.nptype)
self._data_insert_position = 0
if self._data is not None:
log.debug("Allocated %d sample slots for %s", len(self._data),
self.path)
else:
log.debug("Allocated no space for %s", self.path) | python | def _initialise_data(self, memmap_dir=None):
"""Initialise data array to zeros"""
if self.number_values == 0:
pass
elif self.data_type.nptype is None:
self._data = []
else:
if memmap_dir:
memmap_file = tempfile.NamedTemporaryFile(
mode='w+b', prefix="nptdms_", dir=memmap_dir)
self._data = np.memmap(
memmap_file.file,
mode='w+',
shape=(self.number_values,),
dtype=self.data_type.nptype)
else:
self._data = np.zeros(
self.number_values, dtype=self.data_type.nptype)
self._data_insert_position = 0
if self._data is not None:
log.debug("Allocated %d sample slots for %s", len(self._data),
self.path)
else:
log.debug("Allocated no space for %s", self.path) | [
"def",
"_initialise_data",
"(",
"self",
",",
"memmap_dir",
"=",
"None",
")",
":",
"if",
"self",
".",
"number_values",
"==",
"0",
":",
"pass",
"elif",
"self",
".",
"data_type",
".",
"nptype",
"is",
"None",
":",
"self",
".",
"_data",
"=",
"[",
"]",
"else",
":",
"if",
"memmap_dir",
":",
"memmap_file",
"=",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"mode",
"=",
"'w+b'",
",",
"prefix",
"=",
"\"nptdms_\"",
",",
"dir",
"=",
"memmap_dir",
")",
"self",
".",
"_data",
"=",
"np",
".",
"memmap",
"(",
"memmap_file",
".",
"file",
",",
"mode",
"=",
"'w+'",
",",
"shape",
"=",
"(",
"self",
".",
"number_values",
",",
")",
",",
"dtype",
"=",
"self",
".",
"data_type",
".",
"nptype",
")",
"else",
":",
"self",
".",
"_data",
"=",
"np",
".",
"zeros",
"(",
"self",
".",
"number_values",
",",
"dtype",
"=",
"self",
".",
"data_type",
".",
"nptype",
")",
"self",
".",
"_data_insert_position",
"=",
"0",
"if",
"self",
".",
"_data",
"is",
"not",
"None",
":",
"log",
".",
"debug",
"(",
"\"Allocated %d sample slots for %s\"",
",",
"len",
"(",
"self",
".",
"_data",
")",
",",
"self",
".",
"path",
")",
"else",
":",
"log",
".",
"debug",
"(",
"\"Allocated no space for %s\"",
",",
"self",
".",
"path",
")"
] | Initialise data array to zeros | [
"Initialise",
"data",
"array",
"to",
"zeros"
] | d7d6632d4ebc2e78ed941477c2f1c56bd7493d74 | https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L708-L732 | train | 238,415 |
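The allocate-then-fill strategy above can be tried standalone. A minimal sketch, assuming a numeric dtype and an optional writable directory for the memmap branch (`memmap_dir` is an illustrative parameter):

```python
import tempfile
import numpy as np

number_values, nptype = 1000, np.float64
memmap_dir = None  # e.g. "/tmp" to back the array with a disk file instead

if memmap_dir:
    # The NamedTemporaryFile keeps the backing file alive for the memmap.
    memmap_file = tempfile.NamedTemporaryFile(
        mode='w+b', prefix="nptdms_", dir=memmap_dir)
    data = np.memmap(memmap_file.file, mode='w+',
                     shape=(number_values,), dtype=nptype)
else:
    data = np.zeros(number_values, dtype=nptype)

data_insert_position = 0  # next free slot, advanced as segments arrive
```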
adamreeve/npTDMS | nptdms/tdms.py | TdmsObject._update_data | def _update_data(self, new_data):
"""Update the object data with a new array of data"""
log.debug("Adding %d data points to data for %s" %
(len(new_data), self.path))
if self._data is None:
self._data = new_data
else:
if self.data_type.nptype is not None:
data_pos = (
self._data_insert_position,
self._data_insert_position + len(new_data))
self._data_insert_position += len(new_data)
self._data[data_pos[0]:data_pos[1]] = new_data
else:
self._data.extend(new_data) | python | def _update_data(self, new_data):
"""Update the object data with a new array of data"""
log.debug("Adding %d data points to data for %s" %
(len(new_data), self.path))
if self._data is None:
self._data = new_data
else:
if self.data_type.nptype is not None:
data_pos = (
self._data_insert_position,
self._data_insert_position + len(new_data))
self._data_insert_position += len(new_data)
self._data[data_pos[0]:data_pos[1]] = new_data
else:
self._data.extend(new_data) | [
"def",
"_update_data",
"(",
"self",
",",
"new_data",
")",
":",
"log",
".",
"debug",
"(",
"\"Adding %d data points to data for %s\"",
"%",
"(",
"len",
"(",
"new_data",
")",
",",
"self",
".",
"path",
")",
")",
"if",
"self",
".",
"_data",
"is",
"None",
":",
"self",
".",
"_data",
"=",
"new_data",
"else",
":",
"if",
"self",
".",
"data_type",
".",
"nptype",
"is",
"not",
"None",
":",
"data_pos",
"=",
"(",
"self",
".",
"_data_insert_position",
",",
"self",
".",
"_data_insert_position",
"+",
"len",
"(",
"new_data",
")",
")",
"self",
".",
"_data_insert_position",
"+=",
"len",
"(",
"new_data",
")",
"self",
".",
"_data",
"[",
"data_pos",
"[",
"0",
"]",
":",
"data_pos",
"[",
"1",
"]",
"]",
"=",
"new_data",
"else",
":",
"self",
".",
"_data",
".",
"extend",
"(",
"new_data",
")"
] | Update the object data with a new array of data | [
"Update",
"the",
"object",
"data",
"with",
"a",
"new",
"array",
"of",
"data"
] | d7d6632d4ebc2e78ed941477c2f1c56bd7493d74 | https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L734-L749 | train | 238,416 |
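Writing each segment's chunk into pre-allocated slots avoids repeated reallocation as segments stream in. A self-contained sketch of the same cursor-based insert (names are illustrative):

```python
import numpy as np

data = np.zeros(10, dtype=np.float64)  # pre-allocated as in _initialise_data
insert_position = 0

def append_chunk(new_data):
    """Copy one segment's chunk into the next free slots."""
    global insert_position
    start, stop = insert_position, insert_position + len(new_data)
    insert_position = stop
    data[start:stop] = new_data

append_chunk(np.arange(4.0))  # fills slots 0..3
append_chunk(np.arange(6.0))  # fills slots 4..9
```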
adamreeve/npTDMS | nptdms/tdms.py | TdmsObject.as_dataframe | def as_dataframe(self, absolute_time=False):
"""
Converts the TDMS object to a DataFrame
:param absolute_time: Whether times should be absolute rather than
relative to the start time.
:return: The TDMS object data.
:rtype: pandas.DataFrame
"""
import pandas as pd
# When absolute_time is True,
# use the wf_start_time as offset for the time_track()
try:
time = self.time_track(absolute_time)
except KeyError:
time = None
if self.channel is None:
return pd.DataFrame.from_items(
[(ch.channel, pd.Series(ch.data))
for ch in self.tdms_file.group_channels(self.group)])
else:
return pd.DataFrame(self._data, index=time, columns=[self.path]) | python | def as_dataframe(self, absolute_time=False):
"""
Converts the TDMS object to a DataFrame
:param absolute_time: Whether times should be absolute rather than
relative to the start time.
:return: The TDMS object data.
:rtype: pandas.DataFrame
"""
import pandas as pd
# When absolute_time is True,
# use the wf_start_time as offset for the time_track()
try:
time = self.time_track(absolute_time)
except KeyError:
time = None
if self.channel is None:
return pd.DataFrame.from_items(
[(ch.channel, pd.Series(ch.data))
for ch in self.tdms_file.group_channels(self.group)])
else:
return pd.DataFrame(self._data, index=time, columns=[self.path]) | [
"def",
"as_dataframe",
"(",
"self",
",",
"absolute_time",
"=",
"False",
")",
":",
"import",
"pandas",
"as",
"pd",
"# When absolute_time is True,",
"# use the wf_start_time as offset for the time_track()",
"try",
":",
"time",
"=",
"self",
".",
"time_track",
"(",
"absolute_time",
")",
"except",
"KeyError",
":",
"time",
"=",
"None",
"if",
"self",
".",
"channel",
"is",
"None",
":",
"return",
"pd",
".",
"DataFrame",
".",
"from_items",
"(",
"[",
"(",
"ch",
".",
"channel",
",",
"pd",
".",
"Series",
"(",
"ch",
".",
"data",
")",
")",
"for",
"ch",
"in",
"self",
".",
"tdms_file",
".",
"group_channels",
"(",
"self",
".",
"group",
")",
"]",
")",
"else",
":",
"return",
"pd",
".",
"DataFrame",
"(",
"self",
".",
"_data",
",",
"index",
"=",
"time",
",",
"columns",
"=",
"[",
"self",
".",
"path",
"]",
")"
] | Converts the TDMS object to a DataFrame
:param absolute_time: Whether times should be absolute rather than
relative to the start time.
:return: The TDMS object data.
:rtype: pandas.DataFrame | [
"Converts",
"the",
"TDMS",
"object",
"to",
"a",
"DataFrame"
] | d7d6632d4ebc2e78ed941477c2f1c56bd7493d74 | https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L751-L774 | train | 238,417 |
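Note that `pd.DataFrame.from_items` was deprecated in pandas 0.23 and removed in 1.0, so the group branch above fails on current pandas. A sketch of an equivalent using `from_dict` (the `channels` mapping stands in for `tdms_file.group_channels(group)`):

```python
import pandas as pd

# Stand-in for the channels of one TDMS group.
channels = {"ch1": [1.0, 2.0, 3.0], "ch2": [4.0, 5.0, 6.0]}

df = pd.DataFrame.from_dict(
    {name: pd.Series(values) for name, values in channels.items()})
print(df.columns.tolist())  # ['ch1', 'ch2']
```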
adamreeve/npTDMS | nptdms/tdms.py | TdmsObject.data | def data(self):
"""
NumPy array containing data if there is data for this object,
otherwise None.
"""
if self._data is None:
# self._data is None if data segment is empty
return np.empty((0, 1))
if self._data_scaled is None:
scale = scaling.get_scaling(self)
if scale is None:
self._data_scaled = self._data
else:
self._data_scaled = scale.scale(self._data)
return self._data_scaled | python | def data(self):
"""
NumPy array containing data if there is data for this object,
otherwise None.
"""
if self._data is None:
# self._data is None if data segment is empty
return np.empty((0, 1))
if self._data_scaled is None:
scale = scaling.get_scaling(self)
if scale is None:
self._data_scaled = self._data
else:
self._data_scaled = scale.scale(self._data)
return self._data_scaled | [
"def",
"data",
"(",
"self",
")",
":",
"if",
"self",
".",
"_data",
"is",
"None",
":",
"# self._data is None if data segment is empty",
"return",
"np",
".",
"empty",
"(",
"(",
"0",
",",
"1",
")",
")",
"if",
"self",
".",
"_data_scaled",
"is",
"None",
":",
"scale",
"=",
"scaling",
".",
"get_scaling",
"(",
"self",
")",
"if",
"scale",
"is",
"None",
":",
"self",
".",
"_data_scaled",
"=",
"self",
".",
"_data",
"else",
":",
"self",
".",
"_data_scaled",
"=",
"scale",
".",
"scale",
"(",
"self",
".",
"_data",
")",
"return",
"self",
".",
"_data_scaled"
] | NumPy array containing data if there is data for this object,
otherwise None. | [
"NumPy",
"array",
"containing",
"data",
"if",
"there",
"is",
"data",
"for",
"this",
"object",
"otherwise",
"None",
"."
] | d7d6632d4ebc2e78ed941477c2f1c56bd7493d74 | https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L777-L792 | train | 238,418 |
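The property above computes the scaled data once and caches it for later accesses. A minimal sketch of the same lazy-caching pattern with a toy scale factor (the real code delegates to `scaling.get_scaling`):

```python
class Channel:
    """Toy model of the cached, lazily scaled `data` property."""

    def __init__(self, raw, scale=None):
        self._data = raw
        self._scale = scale          # stand-in for scaling.get_scaling(self)
        self._data_scaled = None

    @property
    def data(self):
        if self._data is None:
            return []                # the real code returns np.empty((0, 1))
        if self._data_scaled is None:    # scale on first access only
            if self._scale is None:
                self._data_scaled = self._data
            else:
                self._data_scaled = [self._scale * x for x in self._data]
        return self._data_scaled

ch = Channel([1, 2, 3], scale=10)
assert ch.data == [10, 20, 30]
assert ch.data is ch.data            # second access returns the cached list
```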
adamreeve/npTDMS | nptdms/tdms.py | _TdmsmxDAQMetadata._read_metadata | def _read_metadata(self, f, endianness):
"""
Read the metadata for a DAQmx raw segment. This is the raw
DAQmx-specific portion of the raw data index.
"""
self.data_type = types.tds_data_types[0xFFFFFFFF]
self.dimension = types.Uint32.read(f, endianness)
# In TDMS format version 2.0, 1 is the only valid value for dimension
if self.dimension != 1:
log.warning("Data dimension is not 1")
self.chunk_size = types.Uint64.read(f, endianness)
# size of vector of format changing scalers
self.scaler_vector_length = types.Uint32.read(f, endianness)
# Size of the vector
log.debug("mxDAQ format scaler vector size '%d'" %
(self.scaler_vector_length,))
if self.scaler_vector_length > 1:
log.error("mxDAQ multiple format changing scalers not implemented")
for idx in range(self.scaler_vector_length):
# WARNING: This code overwrites previous values with new
# values. At this time NI provides no documentation on
# how to use these scalers and sample TDMS files do not
# include more than one of these scalers.
self.scaler_data_type_code = types.Uint32.read(f, endianness)
self.scaler_data_type = (
types.tds_data_types[self.scaler_data_type_code])
# more info for format changing scaler
self.scaler_raw_buffer_index = types.Uint32.read(f, endianness)
self.scaler_raw_byte_offset = types.Uint32.read(f, endianness)
self.scaler_sample_format_bitmap = types.Uint32.read(f, endianness)
self.scale_id = types.Uint32.read(f, endianness)
raw_data_widths_length = types.Uint32.read(f, endianness)
self.raw_data_widths = np.zeros(raw_data_widths_length, dtype=np.int32)
for cnt in range(raw_data_widths_length):
self.raw_data_widths[cnt] = types.Uint32.read(f, endianness) | python | def _read_metadata(self, f, endianness):
"""
Read the metadata for a DAQmx raw segment. This is the raw
DAQmx-specific portion of the raw data index.
"""
self.data_type = types.tds_data_types[0xFFFFFFFF]
self.dimension = types.Uint32.read(f, endianness)
# In TDMS format version 2.0, 1 is the only valid value for dimension
if self.dimension != 1:
log.warning("Data dimension is not 1")
self.chunk_size = types.Uint64.read(f, endianness)
# size of vector of format changing scalers
self.scaler_vector_length = types.Uint32.read(f, endianness)
# Size of the vector
log.debug("mxDAQ format scaler vector size '%d'" %
(self.scaler_vector_length,))
if self.scaler_vector_length > 1:
log.error("mxDAQ multiple format changing scalers not implemented")
for idx in range(self.scaler_vector_length):
# WARNING: This code overwrites previous values with new
# values. At this time NI provides no documentation on
# how to use these scalers and sample TDMS files do not
# include more than one of these scalers.
self.scaler_data_type_code = types.Uint32.read(f, endianness)
self.scaler_data_type = (
types.tds_data_types[self.scaler_data_type_code])
# more info for format changing scaler
self.scaler_raw_buffer_index = types.Uint32.read(f, endianness)
self.scaler_raw_byte_offset = types.Uint32.read(f, endianness)
self.scaler_sample_format_bitmap = types.Uint32.read(f, endianness)
self.scale_id = types.Uint32.read(f, endianness)
raw_data_widths_length = types.Uint32.read(f, endianness)
self.raw_data_widths = np.zeros(raw_data_widths_length, dtype=np.int32)
for cnt in range(raw_data_widths_length):
self.raw_data_widths[cnt] = types.Uint32.read(f, endianness) | [
"def",
"_read_metadata",
"(",
"self",
",",
"f",
",",
"endianness",
")",
":",
"self",
".",
"data_type",
"=",
"types",
".",
"tds_data_types",
"[",
"0xFFFFFFFF",
"]",
"self",
".",
"dimension",
"=",
"types",
".",
"Uint32",
".",
"read",
"(",
"f",
",",
"endianness",
")",
"# In TDMS format version 2.0, 1 is the only valid value for dimension",
"if",
"self",
".",
"dimension",
"!=",
"1",
":",
"log",
".",
"warning",
"(",
"\"Data dimension is not 1\"",
")",
"self",
".",
"chunk_size",
"=",
"types",
".",
"Uint64",
".",
"read",
"(",
"f",
",",
"endianness",
")",
"# size of vector of format changing scalers",
"self",
".",
"scaler_vector_length",
"=",
"types",
".",
"Uint32",
".",
"read",
"(",
"f",
",",
"endianness",
")",
"# Size of the vector",
"log",
".",
"debug",
"(",
"\"mxDAQ format scaler vector size '%d'\"",
"%",
"(",
"self",
".",
"scaler_vector_length",
",",
")",
")",
"if",
"self",
".",
"scaler_vector_length",
">",
"1",
":",
"log",
".",
"error",
"(",
"\"mxDAQ multiple format changing scalers not implemented\"",
")",
"for",
"idx",
"in",
"range",
"(",
"self",
".",
"scaler_vector_length",
")",
":",
"# WARNING: This code overwrites previous values with new",
"# values. At this time NI provides no documentation on",
"# how to use these scalers and sample TDMS files do not",
"# include more than one of these scalers.",
"self",
".",
"scaler_data_type_code",
"=",
"types",
".",
"Uint32",
".",
"read",
"(",
"f",
",",
"endianness",
")",
"self",
".",
"scaler_data_type",
"=",
"(",
"types",
".",
"tds_data_types",
"[",
"self",
".",
"scaler_data_type_code",
"]",
")",
"# more info for format changing scaler",
"self",
".",
"scaler_raw_buffer_index",
"=",
"types",
".",
"Uint32",
".",
"read",
"(",
"f",
",",
"endianness",
")",
"self",
".",
"scaler_raw_byte_offset",
"=",
"types",
".",
"Uint32",
".",
"read",
"(",
"f",
",",
"endianness",
")",
"self",
".",
"scaler_sample_format_bitmap",
"=",
"types",
".",
"Uint32",
".",
"read",
"(",
"f",
",",
"endianness",
")",
"self",
".",
"scale_id",
"=",
"types",
".",
"Uint32",
".",
"read",
"(",
"f",
",",
"endianness",
")",
"raw_data_widths_length",
"=",
"types",
".",
"Uint32",
".",
"read",
"(",
"f",
",",
"endianness",
")",
"self",
".",
"raw_data_widths",
"=",
"np",
".",
"zeros",
"(",
"raw_data_widths_length",
",",
"dtype",
"=",
"np",
".",
"int32",
")",
"for",
"cnt",
"in",
"range",
"(",
"raw_data_widths_length",
")",
":",
"self",
".",
"raw_data_widths",
"[",
"cnt",
"]",
"=",
"types",
".",
"Uint32",
".",
"read",
"(",
"f",
",",
"endianness",
")"
] | Read the metadata for a DAQmx raw segment. This is the raw
DAQmx-specific portion of the raw data index. | [
"Read",
"the",
"metadata",
"for",
"a",
"DAQmx",
"raw",
"segment",
".",
"This",
"is",
"the",
"raw",
"DAQmx",
"-",
"specific",
"portion",
"of",
"the",
"raw",
"data",
"index",
"."
] | d7d6632d4ebc2e78ed941477c2f1c56bd7493d74 | https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L832-L869 | train | 238,419 |
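The field order parsed above (dimension, chunk size, scaler vector, then raw data widths) can be illustrated with `struct`. A hedged sketch using fabricated little-endian values, not a real DAQmx segment:

```python
import io
import struct

# Fabricated payload: dimension=1, chunk_size=100, one scaler
# (type code, buffer index, byte offset, format bitmap, scale id),
# then one raw data width of 2 bytes.
payload = struct.pack("<IQIIIIIIII", 1, 100, 1, 3, 0, 0, 0, 0, 1, 2)

f = io.BytesIO(payload)
dimension, chunk_size, n_scalers = struct.unpack("<IQI", f.read(16))
print(dimension, chunk_size, n_scalers)  # 1 100 1
```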
adamreeve/npTDMS | nptdms/tdms.py | _TdmsSegmentObject._read_metadata | def _read_metadata(self, f):
"""Read object metadata and update object information"""
raw_data_index = types.Uint32.read(f, self.endianness)
log.debug("Reading metadata for object %s", self.tdms_object.path)
# Object has no data in this segment
if raw_data_index == 0xFFFFFFFF:
log.debug("Object has no data in this segment")
self.has_data = False
# Leave number_values and data_size as set previously,
# as these may be re-used by later segments.
# Data has same structure as previously
elif raw_data_index == 0x00000000:
log.debug(
"Object has same data structure as in the previous segment")
self.has_data = True
elif raw_data_index == 0x00001269 or raw_data_index == 0x00001369:
# This is a DAQmx raw data segment.
# 0x00001269 for segment containing Format Changing scaler.
# 0x00001369 for segment containing Digital Line scaler.
if raw_data_index == 0x00001369:
# special scaling for DAQ's digital input lines?
log.warning("DAQmx with Digital Line scaler has not tested")
# DAQmx raw data format metadata has its own class
self.has_data = True
self.tdms_object.has_data = True
info = self._read_metadata_mx(f)
self.dimension = info.dimension
self.data_type = info.data_type
# DAQmx format has special chunking
self.data_size = info.chunk_size
self.number_values = info.chunk_size
# segment reading code relies on a single consistent raw
# data width so assert that there is only one.
assert(len(info.raw_data_widths) == 1)
self.raw_data_width = info.raw_data_widths[0]
# fall through and read properties
else:
# Assume metadata format is legacy TDMS format.
# raw_data_index gives the length of the index information.
self.has_data = True
self.tdms_object.has_data = True
# Read the data type
try:
self.data_type = types.tds_data_types[
types.Uint32.read(f, self.endianness)]
except KeyError:
raise KeyError("Unrecognised data type")
if (self.tdms_object.data_type is not None and
self.data_type != self.tdms_object.data_type):
raise ValueError(
"Segment object doesn't have the same data "
"type as previous segments.")
else:
self.tdms_object.data_type = self.data_type
log.debug("Object data type: %r", self.tdms_object.data_type)
if (self.tdms_object.data_type.size is None and
self.tdms_object.data_type != types.String):
raise ValueError(
"Unsupported data type: %r" % self.tdms_object.data_type)
# Read data dimension
self.dimension = types.Uint32.read(f, self.endianness)
# In TDMS version 2.0, 1 is the only valid value for dimension
if self.dimension != 1:
log.warning("Data dimension is not 1")
# Read number of values
self.number_values = types.Uint64.read(f, self.endianness)
# Variable length data types have total size
if self.data_type in (types.String, ):
self.data_size = types.Uint64.read(f, self.endianness)
else:
self.data_size = (
self.number_values *
self.data_type.size * self.dimension)
log.debug(
"Object number of values in segment: %d", self.number_values)
# Read data properties
num_properties = types.Uint32.read(f, self.endianness)
log.debug("Reading %d properties", num_properties)
for i in range(num_properties):
prop_name, value = read_property(f, self.endianness)
self.tdms_object.properties[prop_name] = value | python | def _read_metadata(self, f):
"""Read object metadata and update object information"""
raw_data_index = types.Uint32.read(f, self.endianness)
log.debug("Reading metadata for object %s", self.tdms_object.path)
# Object has no data in this segment
if raw_data_index == 0xFFFFFFFF:
log.debug("Object has no data in this segment")
self.has_data = False
# Leave number_values and data_size as set previously,
# as these may be re-used by later segments.
# Data has same structure as previously
elif raw_data_index == 0x00000000:
log.debug(
"Object has same data structure as in the previous segment")
self.has_data = True
elif raw_data_index == 0x00001269 or raw_data_index == 0x00001369:
# This is a DAQmx raw data segment.
# 0x00001269 for segment containing Format Changing scaler.
# 0x00001369 for segment containing Digital Line scaler.
if raw_data_index == 0x00001369:
# special scaling for DAQ's digital input lines?
log.warning("DAQmx with Digital Line scaler has not tested")
# DAQmx raw data format metadata has its own class
self.has_data = True
self.tdms_object.has_data = True
info = self._read_metadata_mx(f)
self.dimension = info.dimension
self.data_type = info.data_type
# DAQmx format has special chunking
self.data_size = info.chunk_size
self.number_values = info.chunk_size
# segment reading code relies on a single consistent raw
# data width so assert that there is only one.
assert(len(info.raw_data_widths) == 1)
self.raw_data_width = info.raw_data_widths[0]
# fall through and read properties
else:
# Assume metadata format is legacy TDMS format.
# raw_data_index gives the length of the index information.
self.has_data = True
self.tdms_object.has_data = True
# Read the data type
try:
self.data_type = types.tds_data_types[
types.Uint32.read(f, self.endianness)]
except KeyError:
raise KeyError("Unrecognised data type")
if (self.tdms_object.data_type is not None and
self.data_type != self.tdms_object.data_type):
raise ValueError(
"Segment object doesn't have the same data "
"type as previous segments.")
else:
self.tdms_object.data_type = self.data_type
log.debug("Object data type: %r", self.tdms_object.data_type)
if (self.tdms_object.data_type.size is None and
self.tdms_object.data_type != types.String):
raise ValueError(
"Unsupported data type: %r" % self.tdms_object.data_type)
# Read data dimension
self.dimension = types.Uint32.read(f, self.endianness)
# In TDMS version 2.0, 1 is the only valid value for dimension
if self.dimension != 1:
log.warning("Data dimension is not 1")
# Read number of values
self.number_values = types.Uint64.read(f, self.endianness)
# Variable length data types have total size
if self.data_type in (types.String, ):
self.data_size = types.Uint64.read(f, self.endianness)
else:
self.data_size = (
self.number_values *
self.data_type.size * self.dimension)
log.debug(
"Object number of values in segment: %d", self.number_values)
# Read data properties
num_properties = types.Uint32.read(f, self.endianness)
log.debug("Reading %d properties", num_properties)
for i in range(num_properties):
prop_name, value = read_property(f, self.endianness)
self.tdms_object.properties[prop_name] = value | [
"def",
"_read_metadata",
"(",
"self",
",",
"f",
")",
":",
"raw_data_index",
"=",
"types",
".",
"Uint32",
".",
"read",
"(",
"f",
",",
"self",
".",
"endianness",
")",
"log",
".",
"debug",
"(",
"\"Reading metadata for object %s\"",
",",
"self",
".",
"tdms_object",
".",
"path",
")",
"# Object has no data in this segment",
"if",
"raw_data_index",
"==",
"0xFFFFFFFF",
":",
"log",
".",
"debug",
"(",
"\"Object has no data in this segment\"",
")",
"self",
".",
"has_data",
"=",
"False",
"# Leave number_values and data_size as set previously,",
"# as these may be re-used by later segments.",
"# Data has same structure as previously",
"elif",
"raw_data_index",
"==",
"0x00000000",
":",
"log",
".",
"debug",
"(",
"\"Object has same data structure as in the previous segment\"",
")",
"self",
".",
"has_data",
"=",
"True",
"elif",
"raw_data_index",
"==",
"0x00001269",
"or",
"raw_data_index",
"==",
"0x00001369",
":",
"# This is a DAQmx raw data segment.",
"# 0x00001269 for segment containing Format Changing scaler.",
"# 0x00001369 for segment containing Digital Line scaler.",
"if",
"raw_data_index",
"==",
"0x00001369",
":",
"# special scaling for DAQ's digital input lines?",
"log",
".",
"warning",
"(",
"\"DAQmx with Digital Line scaler has not tested\"",
")",
"# DAQmx raw data format metadata has its own class",
"self",
".",
"has_data",
"=",
"True",
"self",
".",
"tdms_object",
".",
"has_data",
"=",
"True",
"info",
"=",
"self",
".",
"_read_metadata_mx",
"(",
"f",
")",
"self",
".",
"dimension",
"=",
"info",
".",
"dimension",
"self",
".",
"data_type",
"=",
"info",
".",
"data_type",
"# DAQmx format has special chunking",
"self",
".",
"data_size",
"=",
"info",
".",
"chunk_size",
"self",
".",
"number_values",
"=",
"info",
".",
"chunk_size",
"# segment reading code relies on a single consistent raw",
"# data width so assert that there is only one.",
"assert",
"(",
"len",
"(",
"info",
".",
"raw_data_widths",
")",
"==",
"1",
")",
"self",
".",
"raw_data_width",
"=",
"info",
".",
"raw_data_widths",
"[",
"0",
"]",
"# fall through and read properties",
"else",
":",
"# Assume metadata format is legacy TDMS format.",
"# raw_data_index gives the length of the index information.",
"self",
".",
"has_data",
"=",
"True",
"self",
".",
"tdms_object",
".",
"has_data",
"=",
"True",
"# Read the data type",
"try",
":",
"self",
".",
"data_type",
"=",
"types",
".",
"tds_data_types",
"[",
"types",
".",
"Uint32",
".",
"read",
"(",
"f",
",",
"self",
".",
"endianness",
")",
"]",
"except",
"KeyError",
":",
"raise",
"KeyError",
"(",
"\"Unrecognised data type\"",
")",
"if",
"(",
"self",
".",
"tdms_object",
".",
"data_type",
"is",
"not",
"None",
"and",
"self",
".",
"data_type",
"!=",
"self",
".",
"tdms_object",
".",
"data_type",
")",
":",
"raise",
"ValueError",
"(",
"\"Segment object doesn't have the same data \"",
"\"type as previous segments.\"",
")",
"else",
":",
"self",
".",
"tdms_object",
".",
"data_type",
"=",
"self",
".",
"data_type",
"log",
".",
"debug",
"(",
"\"Object data type: %r\"",
",",
"self",
".",
"tdms_object",
".",
"data_type",
")",
"if",
"(",
"self",
".",
"tdms_object",
".",
"data_type",
".",
"size",
"is",
"None",
"and",
"self",
".",
"tdms_object",
".",
"data_type",
"!=",
"types",
".",
"String",
")",
":",
"raise",
"ValueError",
"(",
"\"Unsupported data type: %r\"",
"%",
"self",
".",
"tdms_object",
".",
"data_type",
")",
"# Read data dimension",
"self",
".",
"dimension",
"=",
"types",
".",
"Uint32",
".",
"read",
"(",
"f",
",",
"self",
".",
"endianness",
")",
"# In TDMS version 2.0, 1 is the only valid value for dimension",
"if",
"self",
".",
"dimension",
"!=",
"1",
":",
"log",
".",
"warning",
"(",
"\"Data dimension is not 1\"",
")",
"# Read number of values",
"self",
".",
"number_values",
"=",
"types",
".",
"Uint64",
".",
"read",
"(",
"f",
",",
"self",
".",
"endianness",
")",
"# Variable length data types have total size",
"if",
"self",
".",
"data_type",
"in",
"(",
"types",
".",
"String",
",",
")",
":",
"self",
".",
"data_size",
"=",
"types",
".",
"Uint64",
".",
"read",
"(",
"f",
",",
"self",
".",
"endianness",
")",
"else",
":",
"self",
".",
"data_size",
"=",
"(",
"self",
".",
"number_values",
"*",
"self",
".",
"data_type",
".",
"size",
"*",
"self",
".",
"dimension",
")",
"log",
".",
"debug",
"(",
"\"Object number of values in segment: %d\"",
",",
"self",
".",
"number_values",
")",
"# Read data properties",
"num_properties",
"=",
"types",
".",
"Uint32",
".",
"read",
"(",
"f",
",",
"self",
".",
"endianness",
")",
"log",
".",
"debug",
"(",
"\"Reading %d properties\"",
",",
"num_properties",
")",
"for",
"i",
"in",
"range",
"(",
"num_properties",
")",
":",
"prop_name",
",",
"value",
"=",
"read_property",
"(",
"f",
",",
"self",
".",
"endianness",
")",
"self",
".",
"tdms_object",
".",
"properties",
"[",
"prop_name",
"]",
"=",
"value"
] | Read object metadata and update object information | [
"Read",
"object",
"metadata",
"and",
"update",
"object",
"information"
] | d7d6632d4ebc2e78ed941477c2f1c56bd7493d74 | https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L919-L1011 | train | 238,420 |
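The `raw_data_index` value acts as a four-way discriminator in the method above. A compact sketch of just that dispatch (constant names are illustrative):

```python
NO_DATA = 0xFFFFFFFF            # object has no data in this segment
SAME_AS_PREVIOUS = 0x00000000   # reuse the previous segment's structure
DAQMX_FORMAT_CHANGING = 0x00001269
DAQMX_DIGITAL_LINE = 0x00001369

def classify_raw_data_index(raw_data_index):
    if raw_data_index == NO_DATA:
        return "no data"
    if raw_data_index == SAME_AS_PREVIOUS:
        return "same structure as previous segment"
    if raw_data_index in (DAQMX_FORMAT_CHANGING, DAQMX_DIGITAL_LINE):
        return "DAQmx raw data"
    # Any other value is the byte length of a standard raw data index.
    return "standard index, {0} bytes".format(raw_data_index)

print(classify_raw_data_index(0x00001269))  # DAQmx raw data
```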
adamreeve/npTDMS | nptdms/tdms.py | _TdmsSegmentObject._read_value | def _read_value(self, file):
"""Read a single value from the given file"""
if self.data_type.nptype is not None:
dtype = (np.dtype(self.data_type.nptype).newbyteorder(
self.endianness))
return fromfile(file, dtype=dtype, count=1)
return self.data_type.read(file, self.endianness) | python | def _read_value(self, file):
"""Read a single value from the given file"""
if self.data_type.nptype is not None:
dtype = (np.dtype(self.data_type.nptype).newbyteorder(
self.endianness))
return fromfile(file, dtype=dtype, count=1)
return self.data_type.read(file, self.endianness) | [
"def",
"_read_value",
"(",
"self",
",",
"file",
")",
":",
"if",
"self",
".",
"data_type",
".",
"nptype",
"is",
"not",
"None",
":",
"dtype",
"=",
"(",
"np",
".",
"dtype",
"(",
"self",
".",
"data_type",
".",
"nptype",
")",
".",
"newbyteorder",
"(",
"self",
".",
"endianness",
")",
")",
"return",
"fromfile",
"(",
"file",
",",
"dtype",
"=",
"dtype",
",",
"count",
"=",
"1",
")",
"return",
"self",
".",
"data_type",
".",
"read",
"(",
"file",
",",
"self",
".",
"endianness",
")"
] | Read a single value from the given file | [
"Read",
"a",
"single",
"value",
"from",
"the",
"given",
"file"
] | d7d6632d4ebc2e78ed941477c2f1c56bd7493d74 | https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L1017-L1024 | train | 238,421 |
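The `newbyteorder` call is what lets the same dtype read both little- and big-endian segments. A runnable sketch using an in-memory buffer (note the module's `fromfile` wrapper falls back to `np.frombuffer` for exactly this kind of file-like object):

```python
import io
import numpy as np

# One big-endian float64, as a TDMS segment with endianness '>' would store it.
dtype = np.dtype(np.float64).newbyteorder('>')
buffer = io.BytesIO(np.array([1.5], dtype=dtype).tobytes())

value = np.frombuffer(buffer.read(dtype.itemsize), dtype=dtype, count=1)
print(value)  # [1.5]
```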
adamreeve/npTDMS | nptdms/tdms.py | _TdmsSegmentObject._read_values | def _read_values(self, file, number_values):
"""Read all values for this object from a contiguous segment"""
if self.data_type.nptype is not None:
dtype = (np.dtype(self.data_type.nptype).newbyteorder(
self.endianness))
return fromfile(file, dtype=dtype, count=number_values)
elif self.data_type == types.String:
return read_string_data(file, number_values, self.endianness)
data = self._new_segment_data()
for i in range(number_values):
data[i] = self.data_type.read(file, self.endianness)
return data | python | def _read_values(self, file, number_values):
"""Read all values for this object from a contiguous segment"""
if self.data_type.nptype is not None:
dtype = (np.dtype(self.data_type.nptype).newbyteorder(
self.endianness))
return fromfile(file, dtype=dtype, count=number_values)
elif self.data_type == types.String:
return read_string_data(file, number_values, self.endianness)
data = self._new_segment_data()
for i in range(number_values):
data[i] = self.data_type.read(file, self.endianness)
return data | [
"def",
"_read_values",
"(",
"self",
",",
"file",
",",
"number_values",
")",
":",
"if",
"self",
".",
"data_type",
".",
"nptype",
"is",
"not",
"None",
":",
"dtype",
"=",
"(",
"np",
".",
"dtype",
"(",
"self",
".",
"data_type",
".",
"nptype",
")",
".",
"newbyteorder",
"(",
"self",
".",
"endianness",
")",
")",
"return",
"fromfile",
"(",
"file",
",",
"dtype",
"=",
"dtype",
",",
"count",
"=",
"number_values",
")",
"elif",
"self",
".",
"data_type",
"==",
"types",
".",
"String",
":",
"return",
"read_string_data",
"(",
"file",
",",
"number_values",
",",
"self",
".",
"endianness",
")",
"data",
"=",
"self",
".",
"_new_segment_data",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"number_values",
")",
":",
"data",
"[",
"i",
"]",
"=",
"self",
".",
"data_type",
".",
"read",
"(",
"file",
",",
"self",
".",
"endianness",
")",
"return",
"data"
] | Read all values for this object from a contiguous segment | [
"Read",
"all",
"values",
"for",
"this",
"object",
"from",
"a",
"contiguous",
"segment"
] | d7d6632d4ebc2e78ed941477c2f1c56bd7493d74 | https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L1026-L1038 | train | 238,422 |
adamreeve/npTDMS | nptdms/tdms.py | _TdmsSegmentObject._new_segment_data | def _new_segment_data(self):
"""Return a new array to read the data of the current section into"""
if self.data_type.nptype is not None:
return np.zeros(self.number_values, dtype=self.data_type.nptype)
else:
return [None] * self.number_values | python | def _new_segment_data(self):
"""Return a new array to read the data of the current section into"""
if self.data_type.nptype is not None:
return np.zeros(self.number_values, dtype=self.data_type.nptype)
else:
return [None] * self.number_values | [
"def",
"_new_segment_data",
"(",
"self",
")",
":",
"if",
"self",
".",
"data_type",
".",
"nptype",
"is",
"not",
"None",
":",
"return",
"np",
".",
"zeros",
"(",
"self",
".",
"number_values",
",",
"dtype",
"=",
"self",
".",
"data_type",
".",
"nptype",
")",
"else",
":",
"return",
"[",
"None",
"]",
"*",
"self",
".",
"number_values"
] | Return a new array to read the data of the current section into | [
"Return",
"a",
"new",
"array",
"to",
"read",
"the",
"data",
"of",
"the",
"current",
"section",
"into"
] | d7d6632d4ebc2e78ed941477c2f1c56bd7493d74 | https://github.com/adamreeve/npTDMS/blob/d7d6632d4ebc2e78ed941477c2f1c56bd7493d74/nptdms/tdms.py#L1040-L1046 | train | 238,423 |
apriha/lineage | src/lineage/snps.py | detect_build | def detect_build(snps):
""" Detect build of SNPs.
Use the coordinates of common SNPs to identify the build / assembly of a genotype file
that is being loaded.
Notes
-----
rs3094315 : plus strand in 36, 37, and 38
rs11928389 : plus strand in 36, minus strand in 37 and 38
rs2500347 : plus strand in 36 and 37, minus strand in 38
rs964481 : plus strand in 36, 37, and 38
rs2341354 : plus strand in 36, 37, and 38
Parameters
----------
snps : pandas.DataFrame
SNPs to add
Returns
-------
int
detected build of SNPs, else None
References
----------
..[1] Yates et al. (doi:10.1093/bioinformatics/btu613),
http://europepmc.org/search/?query=DOI:10.1093/bioinformatics/btu613
..[2] Zerbino et al. (doi.org/10.1093/nar/gkx1098), https://doi.org/10.1093/nar/gkx1098
..[3] Sherry ST, Ward MH, Kholodov M, Baker J, Phan L, Smigielski EM, Sirotkin K.
dbSNP: the NCBI database of genetic variation. Nucleic Acids Res. 2001 Jan 1;29(1):308-11.
..[4] Database of Single Nucleotide Polymorphisms (dbSNP). Bethesda (MD): National Center
for Biotechnology Information, National Library of Medicine. dbSNP accession: rs3094315,
rs11928389, rs2500347, rs964481, and rs2341354 (dbSNP Build ID: 151). Available from:
http://www.ncbi.nlm.nih.gov/SNP/
"""
def lookup_build_with_snp_pos(pos, s):
try:
return s.loc[s == pos].index[0]
except:
return None
build = None
rsids = ["rs3094315", "rs11928389", "rs2500347", "rs964481", "rs2341354"]
df = pd.DataFrame(
{
36: [742429, 50908372, 143649677, 27566744, 908436],
37: [752566, 50927009, 144938320, 27656823, 918573],
38: [817186, 50889578, 148946169, 27638706, 983193],
},
index=rsids,
)
for rsid in rsids:
if rsid in snps.index:
build = lookup_build_with_snp_pos(snps.loc[rsid].pos, df.loc[rsid])
if build is not None:
break
return build | python | def detect_build(snps):
""" Detect build of SNPs.
Use the coordinates of common SNPs to identify the build / assembly of a genotype file
that is being loaded.
Notes
-----
rs3094315 : plus strand in 36, 37, and 38
rs11928389 : plus strand in 36, minus strand in 37 and 38
rs2500347 : plus strand in 36 and 37, minus strand in 38
rs964481 : plus strand in 36, 37, and 38
rs2341354 : plus strand in 36, 37, and 38
Parameters
----------
snps : pandas.DataFrame
SNPs to add
Returns
-------
int
detected build of SNPs, else None
References
----------
..[1] Yates et al. (doi:10.1093/bioinformatics/btu613),
http://europepmc.org/search/?query=DOI:10.1093/bioinformatics/btu613
..[2] Zerbino et al. (doi.org/10.1093/nar/gkx1098), https://doi.org/10.1093/nar/gkx1098
..[3] Sherry ST, Ward MH, Kholodov M, Baker J, Phan L, Smigielski EM, Sirotkin K.
dbSNP: the NCBI database of genetic variation. Nucleic Acids Res. 2001 Jan 1;29(1):308-11.
..[4] Database of Single Nucleotide Polymorphisms (dbSNP). Bethesda (MD): National Center
for Biotechnology Information, National Library of Medicine. dbSNP accession: rs3094315,
rs11928389, rs2500347, rs964481, and rs2341354 (dbSNP Build ID: 151). Available from:
http://www.ncbi.nlm.nih.gov/SNP/
"""
def lookup_build_with_snp_pos(pos, s):
try:
return s.loc[s == pos].index[0]
except:
return None
build = None
rsids = ["rs3094315", "rs11928389", "rs2500347", "rs964481", "rs2341354"]
df = pd.DataFrame(
{
36: [742429, 50908372, 143649677, 27566744, 908436],
37: [752566, 50927009, 144938320, 27656823, 918573],
38: [817186, 50889578, 148946169, 27638706, 983193],
},
index=rsids,
)
for rsid in rsids:
if rsid in snps.index:
build = lookup_build_with_snp_pos(snps.loc[rsid].pos, df.loc[rsid])
if build is not None:
break
return build | [
"def",
"detect_build",
"(",
"snps",
")",
":",
"def",
"lookup_build_with_snp_pos",
"(",
"pos",
",",
"s",
")",
":",
"try",
":",
"return",
"s",
".",
"loc",
"[",
"s",
"==",
"pos",
"]",
".",
"index",
"[",
"0",
"]",
"except",
":",
"return",
"None",
"build",
"=",
"None",
"rsids",
"=",
"[",
"\"rs3094315\"",
",",
"\"rs11928389\"",
",",
"\"rs2500347\"",
",",
"\"rs964481\"",
",",
"\"rs2341354\"",
"]",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"{",
"36",
":",
"[",
"742429",
",",
"50908372",
",",
"143649677",
",",
"27566744",
",",
"908436",
"]",
",",
"37",
":",
"[",
"752566",
",",
"50927009",
",",
"144938320",
",",
"27656823",
",",
"918573",
"]",
",",
"38",
":",
"[",
"817186",
",",
"50889578",
",",
"148946169",
",",
"27638706",
",",
"983193",
"]",
",",
"}",
",",
"index",
"=",
"rsids",
",",
")",
"for",
"rsid",
"in",
"rsids",
":",
"if",
"rsid",
"in",
"snps",
".",
"index",
":",
"build",
"=",
"lookup_build_with_snp_pos",
"(",
"snps",
".",
"loc",
"[",
"rsid",
"]",
".",
"pos",
",",
"df",
".",
"loc",
"[",
"rsid",
"]",
")",
"if",
"build",
"is",
"not",
"None",
":",
"break",
"return",
"build"
] | Detect build of SNPs.
Use the coordinates of common SNPs to identify the build / assembly of a genotype file
that is being loaded.
Notes
-----
rs3094315 : plus strand in 36, 37, and 38
rs11928389 : plus strand in 36, minus strand in 37 and 38
rs2500347 : plus strand in 36 and 37, minus strand in 38
rs964481 : plus strand in 36, 37, and 38
rs2341354 : plus strand in 36, 37, and 38
Parameters
----------
snps : pandas.DataFrame
SNPs to add
Returns
-------
int
detected build of SNPs, else None
References
----------
..[1] Yates et al. (doi:10.1093/bioinformatics/btu613),
http://europepmc.org/search/?query=DOI:10.1093/bioinformatics/btu613
..[2] Zerbino et al. (doi.org/10.1093/nar/gkx1098), https://doi.org/10.1093/nar/gkx1098
..[3] Sherry ST, Ward MH, Kholodov M, Baker J, Phan L, Smigielski EM, Sirotkin K.
dbSNP: the NCBI database of genetic variation. Nucleic Acids Res. 2001 Jan 1;29(1):308-11.
..[4] Database of Single Nucleotide Polymorphisms (dbSNP). Bethesda (MD): National Center
for Biotechnology Information, National Library of Medicine. dbSNP accession: rs3094315,
rs11928389, rs2500347, rs964481, and rs2341354 (dbSNP Build ID: 151). Available from:
http://www.ncbi.nlm.nih.gov/SNP/ | [
"Detect",
"build",
"of",
"SNPs",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/snps.py#L491-L553 | train | 238,424 |
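A single reference SNP at a known coordinate is enough for `detect_build` to answer. A usage sketch, assuming the function above is in scope (the genotype value is arbitrary):

```python
import pandas as pd

snps = pd.DataFrame(
    {"chrom": ["1"], "pos": [752566], "genotype": ["AA"]},  # Build 37 coordinate
    index=["rs3094315"],
)
print(detect_build(snps))  # 37
```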
apriha/lineage | src/lineage/snps.py | get_chromosomes | def get_chromosomes(snps):
""" Get the chromosomes of SNPs.
Parameters
----------
snps : pandas.DataFrame
Returns
-------
list
list of str chromosomes (e.g., ['1', '2', '3', 'MT']); empty list if no chromosomes
"""
if isinstance(snps, pd.DataFrame):
return list(pd.unique(snps["chrom"]))
else:
return [] | python | def get_chromosomes(snps):
""" Get the chromosomes of SNPs.
Parameters
----------
snps : pandas.DataFrame
Returns
-------
list
list of str chromosomes (e.g., ['1', '2', '3', 'MT']); empty list if no chromosomes
"""
if isinstance(snps, pd.DataFrame):
return list(pd.unique(snps["chrom"]))
else:
return [] | [
"def",
"get_chromosomes",
"(",
"snps",
")",
":",
"if",
"isinstance",
"(",
"snps",
",",
"pd",
".",
"DataFrame",
")",
":",
"return",
"list",
"(",
"pd",
".",
"unique",
"(",
"snps",
"[",
"\"chrom\"",
"]",
")",
")",
"else",
":",
"return",
"[",
"]"
] | Get the chromosomes of SNPs.
Parameters
----------
snps : pandas.DataFrame
Returns
-------
list
list of str chromosomes (e.g., ['1', '2', '3', 'MT']); empty list if no chromosomes | [
"Get",
"the",
"chromosomes",
"of",
"SNPs",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/snps.py#L597-L613 | train | 238,425 |
apriha/lineage | src/lineage/snps.py | get_chromosomes_summary | def get_chromosomes_summary(snps):
""" Summary of the chromosomes of SNPs.
Parameters
----------
snps : pandas.DataFrame
Returns
-------
str
human-readable listing of chromosomes (e.g., '1-3, MT'), empty str if no chromosomes
"""
if isinstance(snps, pd.DataFrame):
chroms = list(pd.unique(snps["chrom"]))
int_chroms = [int(chrom) for chrom in chroms if chrom.isdigit()]
str_chroms = [chrom for chrom in chroms if not chrom.isdigit()]
# https://codereview.stackexchange.com/a/5202
def as_range(iterable):
l = list(iterable)
if len(l) > 1:
return "{0}-{1}".format(l[0], l[-1])
else:
return "{0}".format(l[0])
# create str representations
int_chroms = ", ".join(
as_range(g)
for _, g in groupby(int_chroms, key=lambda n, c=count(): n - next(c))
)
str_chroms = ", ".join(str_chroms)
if int_chroms != "" and str_chroms != "":
int_chroms += ", "
return int_chroms + str_chroms
else:
return "" | python | def get_chromosomes_summary(snps):
""" Summary of the chromosomes of SNPs.
Parameters
----------
snps : pandas.DataFrame
Returns
-------
str
human-readable listing of chromosomes (e.g., '1-3, MT'), empty str if no chromosomes
"""
if isinstance(snps, pd.DataFrame):
chroms = list(pd.unique(snps["chrom"]))
int_chroms = [int(chrom) for chrom in chroms if chrom.isdigit()]
str_chroms = [chrom for chrom in chroms if not chrom.isdigit()]
# https://codereview.stackexchange.com/a/5202
def as_range(iterable):
l = list(iterable)
if len(l) > 1:
return "{0}-{1}".format(l[0], l[-1])
else:
return "{0}".format(l[0])
# create str representations
int_chroms = ", ".join(
as_range(g)
for _, g in groupby(int_chroms, key=lambda n, c=count(): n - next(c))
)
str_chroms = ", ".join(str_chroms)
if int_chroms != "" and str_chroms != "":
int_chroms += ", "
return int_chroms + str_chroms
else:
return "" | [
"def",
"get_chromosomes_summary",
"(",
"snps",
")",
":",
"if",
"isinstance",
"(",
"snps",
",",
"pd",
".",
"DataFrame",
")",
":",
"chroms",
"=",
"list",
"(",
"pd",
".",
"unique",
"(",
"snps",
"[",
"\"chrom\"",
"]",
")",
")",
"int_chroms",
"=",
"[",
"int",
"(",
"chrom",
")",
"for",
"chrom",
"in",
"chroms",
"if",
"chrom",
".",
"isdigit",
"(",
")",
"]",
"str_chroms",
"=",
"[",
"chrom",
"for",
"chrom",
"in",
"chroms",
"if",
"not",
"chrom",
".",
"isdigit",
"(",
")",
"]",
"# https://codereview.stackexchange.com/a/5202",
"def",
"as_range",
"(",
"iterable",
")",
":",
"l",
"=",
"list",
"(",
"iterable",
")",
"if",
"len",
"(",
"l",
")",
">",
"1",
":",
"return",
"\"{0}-{1}\"",
".",
"format",
"(",
"l",
"[",
"0",
"]",
",",
"l",
"[",
"-",
"1",
"]",
")",
"else",
":",
"return",
"\"{0}\"",
".",
"format",
"(",
"l",
"[",
"0",
"]",
")",
"# create str representations",
"int_chroms",
"=",
"\", \"",
".",
"join",
"(",
"as_range",
"(",
"g",
")",
"for",
"_",
",",
"g",
"in",
"groupby",
"(",
"int_chroms",
",",
"key",
"=",
"lambda",
"n",
",",
"c",
"=",
"count",
"(",
")",
":",
"n",
"-",
"next",
"(",
"c",
")",
")",
")",
"str_chroms",
"=",
"\", \"",
".",
"join",
"(",
"str_chroms",
")",
"if",
"int_chroms",
"!=",
"\"\"",
"and",
"str_chroms",
"!=",
"\"\"",
":",
"int_chroms",
"+=",
"\", \"",
"return",
"int_chroms",
"+",
"str_chroms",
"else",
":",
"return",
"\"\""
] | Summary of the chromosomes of SNPs.
Parameters
----------
snps : pandas.DataFrame
Returns
-------
str
human-readable listing of chromosomes (e.g., '1-3, MT'), empty str if no chromosomes | [
"Summary",
"of",
"the",
"chromosomes",
"of",
"SNPs",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/snps.py#L616-L655 | train | 238,426 |
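The `as_range`/`groupby` trick works because consecutive integers minus a running counter share a constant key. A standalone sketch of just that idiom:

```python
from itertools import count, groupby

chroms = [1, 2, 3, 5, 6, 10]

def as_range(iterable):
    run = list(iterable)
    return ("{0}-{1}".format(run[0], run[-1]) if len(run) > 1
            else "{0}".format(run[0]))

# n - next(c) is constant within each consecutive run, so groupby splits
# [1, 2, 3, 5, 6, 10] into [1, 2, 3], [5, 6], [10].
print(", ".join(
    as_range(g)
    for _, g in groupby(chroms, key=lambda n, c=count(): n - next(c))))
# -> 1-3, 5-6, 10
```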
apriha/lineage | src/lineage/snps.py | determine_sex | def determine_sex(
snps, y_snps_not_null_threshold=0.1, heterozygous_x_snps_threshold=0.01
):
""" Determine sex from SNPs using thresholds.
Parameters
----------
snps : pandas.DataFrame
y_snps_not_null_threshold : float
percentage Y SNPs that are not null; above this threshold, Male is determined
heterozygous_x_snps_threshold : float
percentage heterozygous X SNPs; above this threshold, Female is determined
Returns
-------
str
'Male' or 'Female' if detected, else empty str
"""
if isinstance(snps, pd.DataFrame):
y_snps = len(snps.loc[(snps["chrom"] == "Y")])
if y_snps > 0:
y_snps_not_null = len(
snps.loc[(snps["chrom"] == "Y") & (snps["genotype"].notnull())]
)
if y_snps_not_null / y_snps > y_snps_not_null_threshold:
return "Male"
else:
return "Female"
x_snps = len(snps.loc[snps["chrom"] == "X"])
if x_snps == 0:
return ""
heterozygous_x_snps = len(
snps.loc[
(snps["chrom"] == "X")
& (snps["genotype"].notnull())
& (snps["genotype"].str[0] != snps["genotype"].str[1])
]
)
if heterozygous_x_snps / x_snps > heterozygous_x_snps_threshold:
return "Female"
else:
return "Male"
else:
return "" | python | def determine_sex(
snps, y_snps_not_null_threshold=0.1, heterozygous_x_snps_threshold=0.01
):
""" Determine sex from SNPs using thresholds.
Parameters
----------
snps : pandas.DataFrame
y_snps_not_null_threshold : float
percentage Y SNPs that are not null; above this threshold, Male is determined
heterozygous_x_snps_threshold : float
percentage heterozygous X SNPs; above this threshold, Female is determined
Returns
-------
str
'Male' or 'Female' if detected, else empty str
"""
if isinstance(snps, pd.DataFrame):
y_snps = len(snps.loc[(snps["chrom"] == "Y")])
if y_snps > 0:
y_snps_not_null = len(
snps.loc[(snps["chrom"] == "Y") & (snps["genotype"].notnull())]
)
if y_snps_not_null / y_snps > y_snps_not_null_threshold:
return "Male"
else:
return "Female"
x_snps = len(snps.loc[snps["chrom"] == "X"])
if x_snps == 0:
return ""
heterozygous_x_snps = len(
snps.loc[
(snps["chrom"] == "X")
& (snps["genotype"].notnull())
& (snps["genotype"].str[0] != snps["genotype"].str[1])
]
)
if heterozygous_x_snps / x_snps > heterozygous_x_snps_threshold:
return "Female"
else:
return "Male"
else:
return "" | [
"def",
"determine_sex",
"(",
"snps",
",",
"y_snps_not_null_threshold",
"=",
"0.1",
",",
"heterozygous_x_snps_threshold",
"=",
"0.01",
")",
":",
"if",
"isinstance",
"(",
"snps",
",",
"pd",
".",
"DataFrame",
")",
":",
"y_snps",
"=",
"len",
"(",
"snps",
".",
"loc",
"[",
"(",
"snps",
"[",
"\"chrom\"",
"]",
"==",
"\"Y\"",
")",
"]",
")",
"if",
"y_snps",
">",
"0",
":",
"y_snps_not_null",
"=",
"len",
"(",
"snps",
".",
"loc",
"[",
"(",
"snps",
"[",
"\"chrom\"",
"]",
"==",
"\"Y\"",
")",
"&",
"(",
"snps",
"[",
"\"genotype\"",
"]",
".",
"notnull",
"(",
")",
")",
"]",
")",
"if",
"y_snps_not_null",
"/",
"y_snps",
">",
"y_snps_not_null_threshold",
":",
"return",
"\"Male\"",
"else",
":",
"return",
"\"Female\"",
"x_snps",
"=",
"len",
"(",
"snps",
".",
"loc",
"[",
"snps",
"[",
"\"chrom\"",
"]",
"==",
"\"X\"",
"]",
")",
"if",
"x_snps",
"==",
"0",
":",
"return",
"\"\"",
"heterozygous_x_snps",
"=",
"len",
"(",
"snps",
".",
"loc",
"[",
"(",
"snps",
"[",
"\"chrom\"",
"]",
"==",
"\"X\"",
")",
"&",
"(",
"snps",
"[",
"\"genotype\"",
"]",
".",
"notnull",
"(",
")",
")",
"&",
"(",
"snps",
"[",
"\"genotype\"",
"]",
".",
"str",
"[",
"0",
"]",
"!=",
"snps",
"[",
"\"genotype\"",
"]",
".",
"str",
"[",
"1",
"]",
")",
"]",
")",
"if",
"heterozygous_x_snps",
"/",
"x_snps",
">",
"heterozygous_x_snps_threshold",
":",
"return",
"\"Female\"",
"else",
":",
"return",
"\"Male\"",
"else",
":",
"return",
"\"\""
] | Determine sex from SNPs using thresholds.
Parameters
----------
snps : pandas.DataFrame
y_snps_not_null_threshold : float
percentage Y SNPs that are not null; above this threshold, Male is determined
heterozygous_x_snps_threshold : float
percentage heterozygous X SNPs; above this threshold, Female is determined
Returns
-------
str
'Male' or 'Female' if detected, else empty str | [
"Determine",
"sex",
"from",
"SNPs",
"using",
"thresholds",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/snps.py#L658-L708 | train | 238,427 |
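A usage sketch with synthetic genotypes, assuming `determine_sex` above is in scope: with no Y calls at all and 20% heterozygous X calls (above the 1% threshold), the heterozygous-X branch reports Female.

```python
import pandas as pd

snps = pd.DataFrame({
    "chrom": ["X"] * 10,
    "pos": list(range(1, 11)),
    "genotype": ["AC", "AG"] + ["AA"] * 8,  # 2 of 10 X SNPs heterozygous
})
print(determine_sex(snps))  # Female
```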
apriha/lineage | src/lineage/snps.py | sort_snps | def sort_snps(snps):
""" Sort SNPs based on ordered chromosome list and position. """
sorted_list = sorted(snps["chrom"].unique(), key=_natural_sort_key)
# move PAR and MT to the end of the dataframe
if "PAR" in sorted_list:
sorted_list.remove("PAR")
sorted_list.append("PAR")
if "MT" in sorted_list:
sorted_list.remove("MT")
sorted_list.append("MT")
# convert chrom column to category for sorting
# https://stackoverflow.com/a/26707444
snps["chrom"] = snps["chrom"].astype(
CategoricalDtype(categories=sorted_list, ordered=True)
)
# sort based on ordered chromosome list and position
snps = snps.sort_values(["chrom", "pos"])
# convert chromosome back to object
snps["chrom"] = snps["chrom"].astype(object)
return snps | python | def sort_snps(snps):
""" Sort SNPs based on ordered chromosome list and position. """
sorted_list = sorted(snps["chrom"].unique(), key=_natural_sort_key)
# move PAR and MT to the end of the dataframe
if "PAR" in sorted_list:
sorted_list.remove("PAR")
sorted_list.append("PAR")
if "MT" in sorted_list:
sorted_list.remove("MT")
sorted_list.append("MT")
# convert chrom column to category for sorting
# https://stackoverflow.com/a/26707444
snps["chrom"] = snps["chrom"].astype(
CategoricalDtype(categories=sorted_list, ordered=True)
)
# sort based on ordered chromosome list and position
snps = snps.sort_values(["chrom", "pos"])
# convert chromosome back to object
snps["chrom"] = snps["chrom"].astype(object)
return snps | [
"def",
"sort_snps",
"(",
"snps",
")",
":",
"sorted_list",
"=",
"sorted",
"(",
"snps",
"[",
"\"chrom\"",
"]",
".",
"unique",
"(",
")",
",",
"key",
"=",
"_natural_sort_key",
")",
"# move PAR and MT to the end of the dataframe",
"if",
"\"PAR\"",
"in",
"sorted_list",
":",
"sorted_list",
".",
"remove",
"(",
"\"PAR\"",
")",
"sorted_list",
".",
"append",
"(",
"\"PAR\"",
")",
"if",
"\"MT\"",
"in",
"sorted_list",
":",
"sorted_list",
".",
"remove",
"(",
"\"MT\"",
")",
"sorted_list",
".",
"append",
"(",
"\"MT\"",
")",
"# convert chrom column to category for sorting",
"# https://stackoverflow.com/a/26707444",
"snps",
"[",
"\"chrom\"",
"]",
"=",
"snps",
"[",
"\"chrom\"",
"]",
".",
"astype",
"(",
"CategoricalDtype",
"(",
"categories",
"=",
"sorted_list",
",",
"ordered",
"=",
"True",
")",
")",
"# sort based on ordered chromosome list and position",
"snps",
"=",
"snps",
".",
"sort_values",
"(",
"[",
"\"chrom\"",
",",
"\"pos\"",
"]",
")",
"# convert chromosome back to object",
"snps",
"[",
"\"chrom\"",
"]",
"=",
"snps",
"[",
"\"chrom\"",
"]",
".",
"astype",
"(",
"object",
")",
"return",
"snps"
] | Sort SNPs based on ordered chromosome list and position. | [
"Sort",
"SNPs",
"based",
"on",
"ordered",
"chromosome",
"list",
"and",
"position",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/snps.py#L711-L737 | train | 238,428 |
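The categorical round-trip is the key trick above: impose a custom chromosome order, sort, then cast back to plain strings. A standalone sketch with a fixed order instead of `_natural_sort_key`:

```python
import pandas as pd
from pandas.api.types import CategoricalDtype

df = pd.DataFrame({"chrom": ["MT", "2", "X", "1"], "pos": [1, 5, 3, 7]})

order = ["1", "2", "X", "MT"]  # stand-in for the natural-sorted chromosome list
df["chrom"] = df["chrom"].astype(CategoricalDtype(categories=order, ordered=True))
df = df.sort_values(["chrom", "pos"])
df["chrom"] = df["chrom"].astype(object)  # back to plain strings

print(df["chrom"].tolist())  # ['1', '2', 'X', 'MT']
```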
apriha/lineage | src/lineage/snps.py | SNPs.get_summary | def get_summary(self):
""" Get summary of ``SNPs``.
Returns
-------
dict
summary info, else None if ``SNPs`` is not valid
"""
if not self.is_valid():
return None
else:
return {
"source": self.source,
"assembly": self.assembly,
"build": self.build,
"build_detected": self.build_detected,
"snp_count": self.snp_count,
"chromosomes": self.chromosomes_summary,
"sex": self.sex,
} | python | def get_summary(self):
""" Get summary of ``SNPs``.
Returns
-------
dict
summary info, else None if ``SNPs`` is not valid
"""
if not self.is_valid():
return None
else:
return {
"source": self.source,
"assembly": self.assembly,
"build": self.build,
"build_detected": self.build_detected,
"snp_count": self.snp_count,
"chromosomes": self.chromosomes_summary,
"sex": self.sex,
} | [
"def",
"get_summary",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"is_valid",
"(",
")",
":",
"return",
"None",
"else",
":",
"return",
"{",
"\"source\"",
":",
"self",
".",
"source",
",",
"\"assembly\"",
":",
"self",
".",
"assembly",
",",
"\"build\"",
":",
"self",
".",
"build",
",",
"\"build_detected\"",
":",
"self",
".",
"build_detected",
",",
"\"snp_count\"",
":",
"self",
".",
"snp_count",
",",
"\"chromosomes\"",
":",
"self",
".",
"chromosomes_summary",
",",
"\"sex\"",
":",
"self",
".",
"sex",
",",
"}"
] | Get summary of ``SNPs``.
Returns
-------
dict
summary info, else None if ``SNPs`` is not valid | [
"Get",
"summary",
"of",
"SNPs",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/snps.py#L113-L132 | train | 238,429 |
apriha/lineage | src/lineage/snps.py | SNPs._read_23andme | def _read_23andme(file):
""" Read and parse 23andMe file.
https://www.23andme.com
Parameters
----------
file : str
path to file
Returns
-------
pandas.DataFrame
individual's genetic data normalized for use with `lineage`
str
name of data source
"""
df = pd.read_csv(
file,
comment="#",
sep="\t",
na_values="--",
names=["rsid", "chrom", "pos", "genotype"],
index_col=0,
dtype={"chrom": object},
)
return sort_snps(df), "23andMe" | python | def _read_23andme(file):
""" Read and parse 23andMe file.
https://www.23andme.com
Parameters
----------
file : str
path to file
Returns
-------
pandas.DataFrame
individual's genetic data normalized for use with `lineage`
str
name of data source
"""
df = pd.read_csv(
file,
comment="#",
sep="\t",
na_values="--",
names=["rsid", "chrom", "pos", "genotype"],
index_col=0,
dtype={"chrom": object},
)
return sort_snps(df), "23andMe" | [
"def",
"_read_23andme",
"(",
"file",
")",
":",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"file",
",",
"comment",
"=",
"\"#\"",
",",
"sep",
"=",
"\"\\t\"",
",",
"na_values",
"=",
"\"--\"",
",",
"names",
"=",
"[",
"\"rsid\"",
",",
"\"chrom\"",
",",
"\"pos\"",
",",
"\"genotype\"",
"]",
",",
"index_col",
"=",
"0",
",",
"dtype",
"=",
"{",
"\"chrom\"",
":",
"object",
"}",
",",
")",
"return",
"sort_snps",
"(",
"df",
")",
",",
"\"23andMe\""
] | Read and parse 23andMe file.
https://www.23andme.com
Parameters
----------
file : str
path to file
Returns
-------
pandas.DataFrame
individual's genetic data normalized for use with `lineage`
str
name of data source | [
"Read",
"and",
"parse",
"23andMe",
"file",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/snps.py#L204-L231 | train | 238,430 |
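The same `read_csv` call can be exercised on an in-memory snippet. A sketch with a fabricated two-row file (the header comment is a plausible stand-in, not a real 23andMe export):

```python
import io
import pandas as pd

raw = ("# This data file generated by 23andMe\n"
       "rs1\t1\t101\tAA\n"
       "rs2\t1\t102\t--\n")

df = pd.read_csv(
    io.StringIO(raw), comment="#", sep="\t", na_values="--",
    names=["rsid", "chrom", "pos", "genotype"],
    index_col=0, dtype={"chrom": object},
)
print(df.loc["rs2", "genotype"])  # nan, because '--' is mapped to missing
```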
apriha/lineage | src/lineage/snps.py | SNPs._read_lineage_csv | def _read_lineage_csv(file, comments):
""" Read and parse CSV file generated by lineage.
Parameters
----------
file : str
path to file
comments : str
comments at beginning of file
Returns
-------
pandas.DataFrame
individual's genetic data normalized for use with `lineage`
str
name of data source(s)
"""
source = ""
for comment in comments.split("\n"):
if "Source(s):" in comment:
source = comment.split("Source(s):")[1].strip()
break
df = pd.read_csv(
file,
comment="#",
header=0,
na_values="--",
names=["rsid", "chrom", "pos", "genotype"],
index_col=0,
dtype={"chrom": object, "pos": np.int64},
)
return sort_snps(df), source | python | def _read_lineage_csv(file, comments):
""" Read and parse CSV file generated by lineage.
Parameters
----------
file : str
path to file
comments : str
comments at beginning of file
Returns
-------
pandas.DataFrame
individual's genetic data normalized for use with `lineage`
str
name of data source(s)
"""
source = ""
for comment in comments.split("\n"):
if "Source(s):" in comment:
source = comment.split("Source(s):")[1].strip()
break
df = pd.read_csv(
file,
comment="#",
header=0,
na_values="--",
names=["rsid", "chrom", "pos", "genotype"],
index_col=0,
dtype={"chrom": object, "pos": np.int64},
)
return sort_snps(df), source | [
"def",
"_read_lineage_csv",
"(",
"file",
",",
"comments",
")",
":",
"source",
"=",
"\"\"",
"for",
"comment",
"in",
"comments",
".",
"split",
"(",
"\"\\n\"",
")",
":",
"if",
"\"Source(s):\"",
"in",
"comment",
":",
"source",
"=",
"comment",
".",
"split",
"(",
"\"Source(s):\"",
")",
"[",
"1",
"]",
".",
"strip",
"(",
")",
"break",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"file",
",",
"comment",
"=",
"\"#\"",
",",
"header",
"=",
"0",
",",
"na_values",
"=",
"\"--\"",
",",
"names",
"=",
"[",
"\"rsid\"",
",",
"\"chrom\"",
",",
"\"pos\"",
",",
"\"genotype\"",
"]",
",",
"index_col",
"=",
"0",
",",
"dtype",
"=",
"{",
"\"chrom\"",
":",
"object",
",",
"\"pos\"",
":",
"np",
".",
"int64",
"}",
",",
")",
"return",
"sort_snps",
"(",
"df",
")",
",",
"source"
] | Read and parse CSV file generated by lineage.
Parameters
----------
file : str
path to file
comments : str
comments at beginning of file
Returns
-------
pandas.DataFrame
individual's genetic data normalized for use with `lineage`
str
name of data source(s) | [
"Read",
"and",
"parse",
"CSV",
"file",
"generated",
"by",
"lineage",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/snps.py#L354-L387 | train | 238,431 |
apriha/lineage | src/lineage/snps.py | SNPs._read_generic_csv | def _read_generic_csv(file):
""" Read and parse generic CSV file.
Notes
-----
Assumes columns are 'rsid', 'chrom' / 'chromosome', 'pos' / 'position', and 'genotype';
values are comma separated; unreported genotypes are indicated by '--'; and one header row
precedes data. For example:
rsid,chromosome,position,genotype
rs1,1,1,AA
rs2,1,2,CC
rs3,1,3,--
Parameters
----------
file : str
path to file
Returns
-------
pandas.DataFrame
individual's genetic data normalized for use with `lineage`
str
name of data source
"""
df = pd.read_csv(
file,
skiprows=1,
na_values="--",
names=["rsid", "chrom", "pos", "genotype"],
index_col=0,
dtype={"chrom": object, "pos": np.int64},
)
return sort_snps(df), "generic" | python | def _read_generic_csv(file):
""" Read and parse generic CSV file.
Notes
-----
Assumes columns are 'rsid', 'chrom' / 'chromosome', 'pos' / 'position', and 'genotype';
values are comma separated; unreported genotypes are indicated by '--'; and one header row
precedes data. For example:
rsid,chromosome,position,genotype
rs1,1,1,AA
rs2,1,2,CC
rs3,1,3,--
Parameters
----------
file : str
path to file
Returns
-------
pandas.DataFrame
individual's genetic data normalized for use with `lineage`
str
name of data source
"""
df = pd.read_csv(
file,
skiprows=1,
na_values="--",
names=["rsid", "chrom", "pos", "genotype"],
index_col=0,
dtype={"chrom": object, "pos": np.int64},
)
return sort_snps(df), "generic" | [
"def",
"_read_generic_csv",
"(",
"file",
")",
":",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"file",
",",
"skiprows",
"=",
"1",
",",
"na_values",
"=",
"\"--\"",
",",
"names",
"=",
"[",
"\"rsid\"",
",",
"\"chrom\"",
",",
"\"pos\"",
",",
"\"genotype\"",
"]",
",",
"index_col",
"=",
"0",
",",
"dtype",
"=",
"{",
"\"chrom\"",
":",
"object",
",",
"\"pos\"",
":",
"np",
".",
"int64",
"}",
",",
")",
"return",
"sort_snps",
"(",
"df",
")",
",",
"\"generic\""
] | Read and parse generic CSV file.
Notes
-----
Assumes columns are 'rsid', 'chrom' / 'chromosome', 'pos' / 'position', and 'genotype';
values are comma separated; unreported genotypes are indicated by '--'; and one header row
precedes data. For example:
rsid,chromosome,position,genotype
rs1,1,1,AA
rs2,1,2,CC
rs3,1,3,--
Parameters
----------
file : str
path to file
Returns
-------
pandas.DataFrame
individual's genetic data normalized for use with `lineage`
str
name of data source | [
"Read",
"and",
"parse",
"generic",
"CSV",
"file",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/snps.py#L390-L425 | train | 238,432 |
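The documented generic format can be exercised directly; a minimal runnable sketch using the example rows from the docstring:

import io
import numpy as np
import pandas as pd

generic = (
    "rsid,chromosome,position,genotype\n"
    "rs1,1,1,AA\n"
    "rs2,1,2,CC\n"
    "rs3,1,3,--\n"
)

df = pd.read_csv(
    io.StringIO(generic),
    skiprows=1,      # one header row precedes the data
    na_values="--",  # unreported genotypes become NaN
    names=["rsid", "chrom", "pos", "genotype"],
    index_col=0,
    dtype={"chrom": object, "pos": np.int64},
)
print(df.loc["rs3", "genotype"])  # nan -- unreported genotype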
apriha/lineage | src/lineage/snps.py | SNPs._assign_par_snps | def _assign_par_snps(self):
""" Assign PAR SNPs to the X or Y chromosome using SNP position.
References
-----
..[1] National Center for Biotechnology Information, Variation Services, RefSNP,
https://api.ncbi.nlm.nih.gov/variation/v0/
..[2] Yates et. al. (doi:10.1093/bioinformatics/btu613),
http://europepmc.org/search/?query=DOI:10.1093/bioinformatics/btu613
..[3] Zerbino et. al. (doi.org/10.1093/nar/gkx1098), https://doi.org/10.1093/nar/gkx1098
..[4] Sherry ST, Ward MH, Kholodov M, Baker J, Phan L, Smigielski EM, Sirotkin K.
dbSNP: the NCBI database of genetic variation. Nucleic Acids Res. 2001 Jan 1;
29(1):308-11.
..[5] Database of Single Nucleotide Polymorphisms (dbSNP). Bethesda (MD): National Center
for Biotechnology Information, National Library of Medicine. dbSNP accession: rs28736870,
rs113313554, and rs758419898 (dbSNP Build ID: 151). Available from:
http://www.ncbi.nlm.nih.gov/SNP/
"""
rest_client = EnsemblRestClient(server="https://api.ncbi.nlm.nih.gov")
for rsid in self.snps.loc[self.snps["chrom"] == "PAR"].index.values:
if "rs" in rsid:
try:
id = rsid.split("rs")[1]
response = rest_client.perform_rest_action(
"/variation/v0/beta/refsnp/" + id
)
if response is not None:
for item in response["primary_snapshot_data"][
"placements_with_allele"
]:
if "NC_000023" in item["seq_id"]:
assigned = self._assign_snp(rsid, item["alleles"], "X")
elif "NC_000024" in item["seq_id"]:
assigned = self._assign_snp(rsid, item["alleles"], "Y")
else:
assigned = False
if assigned:
if not self.build_detected:
self.build = self._extract_build(item)
self.build_detected = True
continue
except Exception as err:
print(err) | python | def _assign_par_snps(self):
""" Assign PAR SNPs to the X or Y chromosome using SNP position.
References
-----
..[1] National Center for Biotechnology Information, Variation Services, RefSNP,
https://api.ncbi.nlm.nih.gov/variation/v0/
..[2] Yates et. al. (doi:10.1093/bioinformatics/btu613),
http://europepmc.org/search/?query=DOI:10.1093/bioinformatics/btu613
..[3] Zerbino et. al. (doi.org/10.1093/nar/gkx1098), https://doi.org/10.1093/nar/gkx1098
..[4] Sherry ST, Ward MH, Kholodov M, Baker J, Phan L, Smigielski EM, Sirotkin K.
dbSNP: the NCBI database of genetic variation. Nucleic Acids Res. 2001 Jan 1;
29(1):308-11.
..[5] Database of Single Nucleotide Polymorphisms (dbSNP). Bethesda (MD): National Center
for Biotechnology Information, National Library of Medicine. dbSNP accession: rs28736870,
rs113313554, and rs758419898 (dbSNP Build ID: 151). Available from:
http://www.ncbi.nlm.nih.gov/SNP/
"""
rest_client = EnsemblRestClient(server="https://api.ncbi.nlm.nih.gov")
for rsid in self.snps.loc[self.snps["chrom"] == "PAR"].index.values:
if "rs" in rsid:
try:
id = rsid.split("rs")[1]
response = rest_client.perform_rest_action(
"/variation/v0/beta/refsnp/" + id
)
if response is not None:
for item in response["primary_snapshot_data"][
"placements_with_allele"
]:
if "NC_000023" in item["seq_id"]:
assigned = self._assign_snp(rsid, item["alleles"], "X")
elif "NC_000024" in item["seq_id"]:
assigned = self._assign_snp(rsid, item["alleles"], "Y")
else:
assigned = False
if assigned:
if not self.build_detected:
self.build = self._extract_build(item)
self.build_detected = True
continue
except Exception as err:
print(err) | [
"def",
"_assign_par_snps",
"(",
"self",
")",
":",
"rest_client",
"=",
"EnsemblRestClient",
"(",
"server",
"=",
"\"https://api.ncbi.nlm.nih.gov\"",
")",
"for",
"rsid",
"in",
"self",
".",
"snps",
".",
"loc",
"[",
"self",
".",
"snps",
"[",
"\"chrom\"",
"]",
"==",
"\"PAR\"",
"]",
".",
"index",
".",
"values",
":",
"if",
"\"rs\"",
"in",
"rsid",
":",
"try",
":",
"id",
"=",
"rsid",
".",
"split",
"(",
"\"rs\"",
")",
"[",
"1",
"]",
"response",
"=",
"rest_client",
".",
"perform_rest_action",
"(",
"\"/variation/v0/beta/refsnp/\"",
"+",
"id",
")",
"if",
"response",
"is",
"not",
"None",
":",
"for",
"item",
"in",
"response",
"[",
"\"primary_snapshot_data\"",
"]",
"[",
"\"placements_with_allele\"",
"]",
":",
"if",
"\"NC_000023\"",
"in",
"item",
"[",
"\"seq_id\"",
"]",
":",
"assigned",
"=",
"self",
".",
"_assign_snp",
"(",
"rsid",
",",
"item",
"[",
"\"alleles\"",
"]",
",",
"\"X\"",
")",
"elif",
"\"NC_000024\"",
"in",
"item",
"[",
"\"seq_id\"",
"]",
":",
"assigned",
"=",
"self",
".",
"_assign_snp",
"(",
"rsid",
",",
"item",
"[",
"\"alleles\"",
"]",
",",
"\"Y\"",
")",
"else",
":",
"assigned",
"=",
"False",
"if",
"assigned",
":",
"if",
"not",
"self",
".",
"build_detected",
":",
"self",
".",
"build",
"=",
"self",
".",
"_extract_build",
"(",
"item",
")",
"self",
".",
"build_detected",
"=",
"True",
"continue",
"except",
"Exception",
"as",
"err",
":",
"print",
"(",
"err",
")"
] | Assign PAR SNPs to the X or Y chromosome using SNP position.
References
-----
..[1] National Center for Biotechnology Information, Variation Services, RefSNP,
https://api.ncbi.nlm.nih.gov/variation/v0/
..[2] Yates et. al. (doi:10.1093/bioinformatics/btu613),
http://europepmc.org/search/?query=DOI:10.1093/bioinformatics/btu613
..[3] Zerbino et. al. (doi.org/10.1093/nar/gkx1098), https://doi.org/10.1093/nar/gkx1098
..[4] Sherry ST, Ward MH, Kholodov M, Baker J, Phan L, Smigielski EM, Sirotkin K.
dbSNP: the NCBI database of genetic variation. Nucleic Acids Res. 2001 Jan 1;
29(1):308-11.
..[5] Database of Single Nucleotide Polymorphisms (dbSNP). Bethesda (MD): National Center
for Biotechnology Information, National Library of Medicine. dbSNP accession: rs28736870,
rs113313554, and rs758419898 (dbSNP Build ID: 151). Available from:
http://www.ncbi.nlm.nih.gov/SNP/ | [
"Assign",
"PAR",
"SNPs",
"to",
"the",
"X",
"or",
"Y",
"chromosome",
"using",
"SNP",
"position",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/snps.py#L427-L472 | train | 238,433 |
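A minimal sketch of the RefSNP lookup this method performs, using only the standard library; the endpoint URL and response keys are taken from the code above, the rsid from the docstring's dbSNP accessions, and network access is assumed:

import json
import urllib.request

rsid = "rs28736870"  # a PAR SNP cited in the docstring's dbSNP accessions
url = "https://api.ncbi.nlm.nih.gov/variation/v0/beta/refsnp/" + rsid.split("rs")[1]

with urllib.request.urlopen(url, timeout=30) as resp:
    data = json.loads(resp.read().decode("utf-8"))

for item in data["primary_snapshot_data"]["placements_with_allele"]:
    if "NC_000023" in item["seq_id"]:    # X chromosome RefSeq accession
        print(rsid, "has a placement on X")
    elif "NC_000024" in item["seq_id"]:  # Y chromosome RefSeq accession
        print(rsid, "has a placement on Y")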
apriha/lineage | src/lineage/visualization.py | plot_chromosomes | def plot_chromosomes(one_chrom_match, two_chrom_match, cytobands, path, title, build):
""" Plots chromosomes with designated markers.
Parameters
----------
one_chrom_match : list of dicts
segments to highlight on the chromosomes representing one shared chromosome
two_chrom_match : list of dicts
segments to highlight on the chromosomes representing two shared chromosomes
cytobands : pandas.DataFrame
cytobands table loaded with Resources
path : str
path to destination `.png` file
title : str
title for plot
build : {37}
human genome build
"""
# Height of each chromosome
chrom_height = 1.25
# Spacing between consecutive chromosomes
chrom_spacing = 1
# Decide which chromosomes to use
chromosome_list = ["chr%s" % i for i in range(1, 23)]
chromosome_list.append("chrY")
chromosome_list.append("chrX")
# Keep track of the y positions for chromosomes, and the center of each chromosome
# (which is where we'll put the ytick labels)
ybase = 0
chrom_ybase = {}
chrom_centers = {}
# Iterate in reverse so that items in the beginning of `chromosome_list` will
# appear at the top of the plot
for chrom in chromosome_list[::-1]:
chrom_ybase[chrom] = ybase
chrom_centers[chrom] = ybase + chrom_height / 2.0
ybase += chrom_height + chrom_spacing
# Colors for different chromosome stains
color_lookup = {
"gneg": (202 / 255, 202 / 255, 202 / 255), # background
"one_chrom": (0 / 255, 176 / 255, 240 / 255),
"two_chrom": (66 / 255, 69 / 255, 121 / 255),
"centromere": (1, 1, 1, 0.6),
}
df = _patch_chromosomal_features(cytobands, one_chrom_match, two_chrom_match)
# Add a new column for colors
df["colors"] = df["gie_stain"].apply(lambda x: color_lookup[x])
# Width, height (in inches)
figsize = (6.5, 9)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
# Now all we have to do is call our function for the chromosome data...
for collection in _chromosome_collections(df, chrom_ybase, chrom_height):
ax.add_collection(collection)
# Axes tweaking
ax.set_yticks([chrom_centers[i] for i in chromosome_list])
ax.set_yticklabels(chromosome_list)
ax.margins(0.01)
ax.axis("tight")
handles = []
# setup legend
if len(one_chrom_match) > 0:
one_chrom_patch = patches.Patch(
color=color_lookup["one_chrom"], label="One chromosome shared"
)
handles.append(one_chrom_patch)
if len(two_chrom_match) > 0:
two_chrom_patch = patches.Patch(
color=color_lookup["two_chrom"], label="Two chromosomes shared"
)
handles.append(two_chrom_patch)
no_match_patch = patches.Patch(color=color_lookup["gneg"], label="No shared DNA")
handles.append(no_match_patch)
centromere_patch = patches.Patch(
color=(234 / 255, 234 / 255, 234 / 255), label="Centromere"
)
handles.append(centromere_patch)
plt.legend(handles=handles, loc="lower right", bbox_to_anchor=(0.95, 0.05))
ax.set_title(title, fontsize=14, fontweight="bold")
plt.xlabel("Build " + str(build) + " Chromosome Position", fontsize=10)
print("Saving " + os.path.relpath(path))
plt.tight_layout()
plt.savefig(path) | python | def plot_chromosomes(one_chrom_match, two_chrom_match, cytobands, path, title, build):
""" Plots chromosomes with designated markers.
Parameters
----------
one_chrom_match : list of dicts
segments to highlight on the chromosomes representing one shared chromosome
two_chrom_match : list of dicts
segments to highlight on the chromosomes representing two shared chromosomes
cytobands : pandas.DataFrame
cytobands table loaded with Resources
path : str
path to destination `.png` file
title : str
title for plot
build : {37}
human genome build
"""
# Height of each chromosome
chrom_height = 1.25
# Spacing between consecutive chromosomes
chrom_spacing = 1
# Decide which chromosomes to use
chromosome_list = ["chr%s" % i for i in range(1, 23)]
chromosome_list.append("chrY")
chromosome_list.append("chrX")
# Keep track of the y positions for chromosomes, and the center of each chromosome
# (which is where we'll put the ytick labels)
ybase = 0
chrom_ybase = {}
chrom_centers = {}
# Iterate in reverse so that items in the beginning of `chromosome_list` will
# appear at the top of the plot
for chrom in chromosome_list[::-1]:
chrom_ybase[chrom] = ybase
chrom_centers[chrom] = ybase + chrom_height / 2.0
ybase += chrom_height + chrom_spacing
# Colors for different chromosome stains
color_lookup = {
"gneg": (202 / 255, 202 / 255, 202 / 255), # background
"one_chrom": (0 / 255, 176 / 255, 240 / 255),
"two_chrom": (66 / 255, 69 / 255, 121 / 255),
"centromere": (1, 1, 1, 0.6),
}
df = _patch_chromosomal_features(cytobands, one_chrom_match, two_chrom_match)
# Add a new column for colors
df["colors"] = df["gie_stain"].apply(lambda x: color_lookup[x])
# Width, height (in inches)
figsize = (6.5, 9)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
# Now all we have to do is call our function for the chromosome data...
for collection in _chromosome_collections(df, chrom_ybase, chrom_height):
ax.add_collection(collection)
# Axes tweaking
ax.set_yticks([chrom_centers[i] for i in chromosome_list])
ax.set_yticklabels(chromosome_list)
ax.margins(0.01)
ax.axis("tight")
handles = []
# setup legend
if len(one_chrom_match) > 0:
one_chrom_patch = patches.Patch(
color=color_lookup["one_chrom"], label="One chromosome shared"
)
handles.append(one_chrom_patch)
if len(two_chrom_match) > 0:
two_chrom_patch = patches.Patch(
color=color_lookup["two_chrom"], label="Two chromosomes shared"
)
handles.append(two_chrom_patch)
no_match_patch = patches.Patch(color=color_lookup["gneg"], label="No shared DNA")
handles.append(no_match_patch)
centromere_patch = patches.Patch(
color=(234 / 255, 234 / 255, 234 / 255), label="Centromere"
)
handles.append(centromere_patch)
plt.legend(handles=handles, loc="lower right", bbox_to_anchor=(0.95, 0.05))
ax.set_title(title, fontsize=14, fontweight="bold")
plt.xlabel("Build " + str(build) + " Chromosome Position", fontsize=10)
print("Saving " + os.path.relpath(path))
plt.tight_layout()
plt.savefig(path) | [
"def",
"plot_chromosomes",
"(",
"one_chrom_match",
",",
"two_chrom_match",
",",
"cytobands",
",",
"path",
",",
"title",
",",
"build",
")",
":",
"# Height of each chromosome",
"chrom_height",
"=",
"1.25",
"# Spacing between consecutive chromosomes",
"chrom_spacing",
"=",
"1",
"# Decide which chromosomes to use",
"chromosome_list",
"=",
"[",
"\"chr%s\"",
"%",
"i",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"23",
")",
"]",
"chromosome_list",
".",
"append",
"(",
"\"chrY\"",
")",
"chromosome_list",
".",
"append",
"(",
"\"chrX\"",
")",
"# Keep track of the y positions for chromosomes, and the center of each chromosome",
"# (which is where we'll put the ytick labels)",
"ybase",
"=",
"0",
"chrom_ybase",
"=",
"{",
"}",
"chrom_centers",
"=",
"{",
"}",
"# Iterate in reverse so that items in the beginning of `chromosome_list` will",
"# appear at the top of the plot",
"for",
"chrom",
"in",
"chromosome_list",
"[",
":",
":",
"-",
"1",
"]",
":",
"chrom_ybase",
"[",
"chrom",
"]",
"=",
"ybase",
"chrom_centers",
"[",
"chrom",
"]",
"=",
"ybase",
"+",
"chrom_height",
"/",
"2.0",
"ybase",
"+=",
"chrom_height",
"+",
"chrom_spacing",
"# Colors for different chromosome stains",
"color_lookup",
"=",
"{",
"\"gneg\"",
":",
"(",
"202",
"/",
"255",
",",
"202",
"/",
"255",
",",
"202",
"/",
"255",
")",
",",
"# background",
"\"one_chrom\"",
":",
"(",
"0",
"/",
"255",
",",
"176",
"/",
"255",
",",
"240",
"/",
"255",
")",
",",
"\"two_chrom\"",
":",
"(",
"66",
"/",
"255",
",",
"69",
"/",
"255",
",",
"121",
"/",
"255",
")",
",",
"\"centromere\"",
":",
"(",
"1",
",",
"1",
",",
"1",
",",
"0.6",
")",
",",
"}",
"df",
"=",
"_patch_chromosomal_features",
"(",
"cytobands",
",",
"one_chrom_match",
",",
"two_chrom_match",
")",
"# Add a new column for colors",
"df",
"[",
"\"colors\"",
"]",
"=",
"df",
"[",
"\"gie_stain\"",
"]",
".",
"apply",
"(",
"lambda",
"x",
":",
"color_lookup",
"[",
"x",
"]",
")",
"# Width, height (in inches)",
"figsize",
"=",
"(",
"6.5",
",",
"9",
")",
"fig",
"=",
"plt",
".",
"figure",
"(",
"figsize",
"=",
"figsize",
")",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"111",
")",
"# Now all we have to do is call our function for the chromosome data...",
"for",
"collection",
"in",
"_chromosome_collections",
"(",
"df",
",",
"chrom_ybase",
",",
"chrom_height",
")",
":",
"ax",
".",
"add_collection",
"(",
"collection",
")",
"# Axes tweaking",
"ax",
".",
"set_yticks",
"(",
"[",
"chrom_centers",
"[",
"i",
"]",
"for",
"i",
"in",
"chromosome_list",
"]",
")",
"ax",
".",
"set_yticklabels",
"(",
"chromosome_list",
")",
"ax",
".",
"margins",
"(",
"0.01",
")",
"ax",
".",
"axis",
"(",
"\"tight\"",
")",
"handles",
"=",
"[",
"]",
"# setup legend",
"if",
"len",
"(",
"one_chrom_match",
")",
">",
"0",
":",
"one_chrom_patch",
"=",
"patches",
".",
"Patch",
"(",
"color",
"=",
"color_lookup",
"[",
"\"one_chrom\"",
"]",
",",
"label",
"=",
"\"One chromosome shared\"",
")",
"handles",
".",
"append",
"(",
"one_chrom_patch",
")",
"if",
"len",
"(",
"two_chrom_match",
")",
">",
"0",
":",
"two_chrom_patch",
"=",
"patches",
".",
"Patch",
"(",
"color",
"=",
"color_lookup",
"[",
"\"two_chrom\"",
"]",
",",
"label",
"=",
"\"Two chromosomes shared\"",
")",
"handles",
".",
"append",
"(",
"two_chrom_patch",
")",
"no_match_patch",
"=",
"patches",
".",
"Patch",
"(",
"color",
"=",
"color_lookup",
"[",
"\"gneg\"",
"]",
",",
"label",
"=",
"\"No shared DNA\"",
")",
"handles",
".",
"append",
"(",
"no_match_patch",
")",
"centromere_patch",
"=",
"patches",
".",
"Patch",
"(",
"color",
"=",
"(",
"234",
"/",
"255",
",",
"234",
"/",
"255",
",",
"234",
"/",
"255",
")",
",",
"label",
"=",
"\"Centromere\"",
")",
"handles",
".",
"append",
"(",
"centromere_patch",
")",
"plt",
".",
"legend",
"(",
"handles",
"=",
"handles",
",",
"loc",
"=",
"\"lower right\"",
",",
"bbox_to_anchor",
"=",
"(",
"0.95",
",",
"0.05",
")",
")",
"ax",
".",
"set_title",
"(",
"title",
",",
"fontsize",
"=",
"14",
",",
"fontweight",
"=",
"\"bold\"",
")",
"plt",
".",
"xlabel",
"(",
"\"Build \"",
"+",
"str",
"(",
"build",
")",
"+",
"\" Chromosome Position\"",
",",
"fontsize",
"=",
"10",
")",
"print",
"(",
"\"Saving \"",
"+",
"os",
".",
"path",
".",
"relpath",
"(",
"path",
")",
")",
"plt",
".",
"tight_layout",
"(",
")",
"plt",
".",
"savefig",
"(",
"path",
")"
] | Plots chromosomes with designated markers.
Parameters
----------
one_chrom_match : list of dicts
segments to highlight on the chromosomes representing one shared chromosome
two_chrom_match : list of dicts
segments to highlight on the chromosomes representing two shared chromosomes
cytobands : pandas.DataFrame
cytobands table loaded with Resources
path : str
path to destination `.png` file
title : str
title for plot
build : {37}
human genome build | [
"Plots",
"chromosomes",
"with",
"designated",
"markers",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/visualization.py#L69-L169 | train | 238,434 |
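A hypothetical invocation sketch; the segment-dict keys ("chrom", "start", "end") are assumptions inferred from the cytoband columns, the Resources constructor arguments are not shown in these records, and running it requires the lineage package plus its downloaded resources:

from lineage.resources import Resources
from lineage.visualization import plot_chromosomes

resources = Resources()  # constructor arguments assumed
cytobands = resources.get_cytoBand_hg19()

one_chrom = [{"chrom": "1", "start": 100000, "end": 5000000}]  # keys assumed
two_chrom = []

plot_chromosomes(
    one_chrom, two_chrom, cytobands,
    path="output/shared_dna.png", title="Shared DNA", build=37,
)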
apriha/lineage | src/lineage/__init__.py | create_dir | def create_dir(path):
""" Create directory specified by `path` if it doesn't already exist.
Parameters
----------
path : str
path to directory
Returns
-------
bool
True if `path` exists
"""
# https://stackoverflow.com/a/5032238
try:
os.makedirs(path, exist_ok=True)
except Exception as err:
print(err)
return False
if os.path.exists(path):
return True
else:
return False | python | def create_dir(path):
""" Create directory specified by `path` if it doesn't already exist.
Parameters
----------
path : str
path to directory
Returns
-------
bool
True if `path` exists
"""
# https://stackoverflow.com/a/5032238
try:
os.makedirs(path, exist_ok=True)
except Exception as err:
print(err)
return False
if os.path.exists(path):
return True
else:
return False | [
"def",
"create_dir",
"(",
"path",
")",
":",
"# https://stackoverflow.com/a/5032238",
"try",
":",
"os",
".",
"makedirs",
"(",
"path",
",",
"exist_ok",
"=",
"True",
")",
"except",
"Exception",
"as",
"err",
":",
"print",
"(",
"err",
")",
"return",
"False",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"path",
")",
":",
"return",
"True",
"else",
":",
"return",
"False"
] | Create directory specified by `path` if it doesn't already exist.
Parameters
----------
path : str
path to directory
Returns
-------
bool
True if `path` exists | [
"Create",
"directory",
"specified",
"by",
"path",
"if",
"it",
"doesn",
"t",
"already",
"exist",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/__init__.py#L859-L882 | train | 238,435 |
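A usage sketch, assuming the function is importable as lineage.create_dir (it lives in src/lineage/__init__.py per the record path):

import os
import tempfile
from lineage import create_dir

nested = os.path.join(tempfile.mkdtemp(), "a", "b", "c")
assert create_dir(nested)  # creates intermediate directories (exist_ok=True)
assert create_dir(nested)  # idempotent: True again since the path now exists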
apriha/lineage | src/lineage/__init__.py | save_df_as_csv | def save_df_as_csv(df, path, filename, comment=None, **kwargs):
""" Save dataframe to a CSV file.
Parameters
----------
df : pandas.DataFrame
dataframe to save
path : str
path to directory where to save CSV file
filename : str
filename of CSV file
comment : str
header comment(s); one or more lines starting with '#'
**kwargs
additional parameters to `pandas.DataFrame.to_csv`
Returns
-------
str
path to saved file, else empty str
"""
if isinstance(df, pd.DataFrame) and len(df) > 0:
try:
if not create_dir(path):
return ""
destination = os.path.join(path, filename)
print("Saving " + os.path.relpath(destination))
s = (
"# Generated by lineage v{}, https://github.com/apriha/lineage\n"
"# Generated at {} UTC\n"
)
s = s.format(
__version__, datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
)
if isinstance(comment, str):
s += comment
with open(destination, "w") as f:
f.write(s)
# https://stackoverflow.com/a/29233924/4727627
with open(destination, "a") as f:
df.to_csv(f, na_rep="--", **kwargs)
return destination
except Exception as err:
print(err)
return ""
else:
print("no data to save...")
return "" | python | def save_df_as_csv(df, path, filename, comment=None, **kwargs):
""" Save dataframe to a CSV file.
Parameters
----------
df : pandas.DataFrame
dataframe to save
path : str
path to directory where to save CSV file
filename : str
filename of CSV file
comment : str
header comment(s); one or more lines starting with '#'
**kwargs
additional parameters to `pandas.DataFrame.to_csv`
Returns
-------
str
path to saved file, else empty str
"""
if isinstance(df, pd.DataFrame) and len(df) > 0:
try:
if not create_dir(path):
return ""
destination = os.path.join(path, filename)
print("Saving " + os.path.relpath(destination))
s = (
"# Generated by lineage v{}, https://github.com/apriha/lineage\n"
"# Generated at {} UTC\n"
)
s = s.format(
__version__, datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
)
if isinstance(comment, str):
s += comment
with open(destination, "w") as f:
f.write(s)
# https://stackoverflow.com/a/29233924/4727627
with open(destination, "a") as f:
df.to_csv(f, na_rep="--", **kwargs)
return destination
except Exception as err:
print(err)
return ""
else:
print("no data to save...")
return "" | [
"def",
"save_df_as_csv",
"(",
"df",
",",
"path",
",",
"filename",
",",
"comment",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"df",
",",
"pd",
".",
"DataFrame",
")",
"and",
"len",
"(",
"df",
")",
">",
"0",
":",
"try",
":",
"if",
"not",
"create_dir",
"(",
"path",
")",
":",
"return",
"\"\"",
"destination",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"filename",
")",
"print",
"(",
"\"Saving \"",
"+",
"os",
".",
"path",
".",
"relpath",
"(",
"destination",
")",
")",
"s",
"=",
"(",
"\"# Generated by lineage v{}, https://github.com/apriha/lineage\\n\"",
"\"# Generated at {} UTC\\n\"",
")",
"s",
"=",
"s",
".",
"format",
"(",
"__version__",
",",
"datetime",
".",
"datetime",
".",
"utcnow",
"(",
")",
".",
"strftime",
"(",
"\"%Y-%m-%d %H:%M:%S\"",
")",
")",
"if",
"isinstance",
"(",
"comment",
",",
"str",
")",
":",
"s",
"+=",
"comment",
"with",
"open",
"(",
"destination",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"s",
")",
"# https://stackoverflow.com/a/29233924/4727627",
"with",
"open",
"(",
"destination",
",",
"\"a\"",
")",
"as",
"f",
":",
"df",
".",
"to_csv",
"(",
"f",
",",
"na_rep",
"=",
"\"--\"",
",",
"*",
"*",
"kwargs",
")",
"return",
"destination",
"except",
"Exception",
"as",
"err",
":",
"print",
"(",
"err",
")",
"return",
"\"\"",
"else",
":",
"print",
"(",
"\"no data to save...\"",
")",
"return",
"\"\""
] | Save dataframe to a CSV file.
Parameters
----------
df : pandas.DataFrame
dataframe to save
path : str
path to directory where to save CSV file
filename : str
filename of CSV file
comment : str
header comment(s); one or more lines starting with '#'
**kwargs
additional parameters to `pandas.DataFrame.to_csv`
Returns
-------
str
path to saved file, else empty str | [
"Save",
"dataframe",
"to",
"a",
"CSV",
"file",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/__init__.py#L885-L940 | train | 238,436 |
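A usage sketch, assuming the lineage package is installed; the DataFrame contents are illustrative, and header= is forwarded to pandas.DataFrame.to_csv via the docstring's **kwargs:

import pandas as pd
import lineage

df = pd.DataFrame(
    {"chrom": ["1", "1"], "pos": [101, 102], "genotype": ["AA", None]},
    index=pd.Index(["rs1", "rs2"], name="rsid"),
)
dest = lineage.save_df_as_csv(
    df, "output", "snps.csv",
    comment="# Source(s): example\n",   # must start with '#' per the docstring
    header=["chromosome", "position", "genotype"],
)
print(dest)  # 'output/snps.csv' on success, '' otherwise; None written as '--'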
apriha/lineage | src/lineage/resources.py | Resources.get_genetic_map_HapMapII_GRCh37 | def get_genetic_map_HapMapII_GRCh37(self):
""" Get International HapMap Consortium HapMap Phase II genetic map for Build 37.
Returns
-------
dict
dict of pandas.DataFrame HapMapII genetic maps if loading was successful, else None
"""
if self._genetic_map_HapMapII_GRCh37 is None:
self._genetic_map_HapMapII_GRCh37 = self._load_genetic_map(
self._get_path_genetic_map_HapMapII_GRCh37()
)
return self._genetic_map_HapMapII_GRCh37 | python | def get_genetic_map_HapMapII_GRCh37(self):
""" Get International HapMap Consortium HapMap Phase II genetic map for Build 37.
Returns
-------
dict
dict of pandas.DataFrame HapMapII genetic maps if loading was successful, else None
"""
if self._genetic_map_HapMapII_GRCh37 is None:
self._genetic_map_HapMapII_GRCh37 = self._load_genetic_map(
self._get_path_genetic_map_HapMapII_GRCh37()
)
return self._genetic_map_HapMapII_GRCh37 | [
"def",
"get_genetic_map_HapMapII_GRCh37",
"(",
"self",
")",
":",
"if",
"self",
".",
"_genetic_map_HapMapII_GRCh37",
"is",
"None",
":",
"self",
".",
"_genetic_map_HapMapII_GRCh37",
"=",
"self",
".",
"_load_genetic_map",
"(",
"self",
".",
"_get_path_genetic_map_HapMapII_GRCh37",
"(",
")",
")",
"return",
"self",
".",
"_genetic_map_HapMapII_GRCh37"
] | Get International HapMap Consortium HapMap Phase II genetic map for Build 37.
Returns
-------
dict
dict of pandas.DataFrame HapMapII genetic maps if loading was successful, else None | [
"Get",
"International",
"HapMap",
"Consortium",
"HapMap",
"Phase",
"II",
"genetic",
"map",
"for",
"Build",
"37",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/resources.py#L83-L96 | train | 238,437 |
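The getters in this class all share one lazy-cache shape; a generic, runnable sketch of that pattern (note that a failed load returns None, so the next call tries again):

class CachedResource:
    def __init__(self):
        self._table = None

    def get_table(self):
        # Load on first access; subsequent calls reuse the cached object.
        # A failed load (None) is retried on the next call.
        if self._table is None:
            self._table = self._load_table()
        return self._table

    def _load_table(self):
        return {"loaded": True}  # stand-in for the file-parsing _load_* helpers

r = CachedResource()
assert r.get_table() is r.get_table()  # loaded once, then cached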
apriha/lineage | src/lineage/resources.py | Resources.get_cytoBand_hg19 | def get_cytoBand_hg19(self):
""" Get UCSC cytoBand table for Build 37.
Returns
-------
pandas.DataFrame
cytoBand table if loading was successful, else None
"""
if self._cytoBand_hg19 is None:
self._cytoBand_hg19 = self._load_cytoBand(self._get_path_cytoBand_hg19())
return self._cytoBand_hg19 | python | def get_cytoBand_hg19(self):
""" Get UCSC cytoBand table for Build 37.
Returns
-------
pandas.DataFrame
cytoBand table if loading was successful, else None
"""
if self._cytoBand_hg19 is None:
self._cytoBand_hg19 = self._load_cytoBand(self._get_path_cytoBand_hg19())
return self._cytoBand_hg19 | [
"def",
"get_cytoBand_hg19",
"(",
"self",
")",
":",
"if",
"self",
".",
"_cytoBand_hg19",
"is",
"None",
":",
"self",
".",
"_cytoBand_hg19",
"=",
"self",
".",
"_load_cytoBand",
"(",
"self",
".",
"_get_path_cytoBand_hg19",
"(",
")",
")",
"return",
"self",
".",
"_cytoBand_hg19"
] | Get UCSC cytoBand table for Build 37.
Returns
-------
pandas.DataFrame
cytoBand table if loading was successful, else None | [
"Get",
"UCSC",
"cytoBand",
"table",
"for",
"Build",
"37",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/resources.py#L98-L109 | train | 238,438 |
apriha/lineage | src/lineage/resources.py | Resources.get_knownGene_hg19 | def get_knownGene_hg19(self):
""" Get UCSC knownGene table for Build 37.
Returns
-------
pandas.DataFrame
knownGene table if loading was successful, else None
"""
if self._knownGene_hg19 is None:
self._knownGene_hg19 = self._load_knownGene(self._get_path_knownGene_hg19())
return self._knownGene_hg19 | python | def get_knownGene_hg19(self):
""" Get UCSC knownGene table for Build 37.
Returns
-------
pandas.DataFrame
knownGene table if loading was successful, else None
"""
if self._knownGene_hg19 is None:
self._knownGene_hg19 = self._load_knownGene(self._get_path_knownGene_hg19())
return self._knownGene_hg19 | [
"def",
"get_knownGene_hg19",
"(",
"self",
")",
":",
"if",
"self",
".",
"_knownGene_hg19",
"is",
"None",
":",
"self",
".",
"_knownGene_hg19",
"=",
"self",
".",
"_load_knownGene",
"(",
"self",
".",
"_get_path_knownGene_hg19",
"(",
")",
")",
"return",
"self",
".",
"_knownGene_hg19"
] | Get UCSC knownGene table for Build 37.
Returns
-------
pandas.DataFrame
knownGene table if loading was successful, else None | [
"Get",
"UCSC",
"knownGene",
"table",
"for",
"Build",
"37",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/resources.py#L111-L122 | train | 238,439 |
apriha/lineage | src/lineage/resources.py | Resources.get_kgXref_hg19 | def get_kgXref_hg19(self):
""" Get UCSC kgXref table for Build 37.
Returns
-------
pandas.DataFrame
kgXref table if loading was successful, else None
"""
if self._kgXref_hg19 is None:
self._kgXref_hg19 = self._load_kgXref(self._get_path_kgXref_hg19())
return self._kgXref_hg19 | python | def get_kgXref_hg19(self):
""" Get UCSC kgXref table for Build 37.
Returns
-------
pandas.DataFrame
kgXref table if loading was successful, else None
"""
if self._kgXref_hg19 is None:
self._kgXref_hg19 = self._load_kgXref(self._get_path_kgXref_hg19())
return self._kgXref_hg19 | [
"def",
"get_kgXref_hg19",
"(",
"self",
")",
":",
"if",
"self",
".",
"_kgXref_hg19",
"is",
"None",
":",
"self",
".",
"_kgXref_hg19",
"=",
"self",
".",
"_load_kgXref",
"(",
"self",
".",
"_get_path_kgXref_hg19",
"(",
")",
")",
"return",
"self",
".",
"_kgXref_hg19"
] | Get UCSC kgXref table for Build 37.
Returns
-------
pandas.DataFrame
kgXref table if loading was successful, else None | [
"Get",
"UCSC",
"kgXref",
"table",
"for",
"Build",
"37",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/resources.py#L124-L135 | train | 238,440 |
apriha/lineage | src/lineage/resources.py | Resources.get_assembly_mapping_data | def get_assembly_mapping_data(self, source_assembly, target_assembly):
""" Get assembly mapping data.
Parameters
----------
source_assembly : {'NCBI36', 'GRCh37', 'GRCh38'}
assembly to remap from
target_assembly : {'NCBI36', 'GRCh37', 'GRCh38'}
assembly to remap to
Returns
-------
dict
dict of json assembly mapping data if loading was successful, else None
"""
return self._load_assembly_mapping_data(
self._get_path_assembly_mapping_data(source_assembly, target_assembly)
) | python | def get_assembly_mapping_data(self, source_assembly, target_assembly):
""" Get assembly mapping data.
Parameters
----------
source_assembly : {'NCBI36', 'GRCh37', 'GRCh38'}
assembly to remap from
target_assembly : {'NCBI36', 'GRCh37', 'GRCh38'}
assembly to remap to
Returns
-------
dict
dict of json assembly mapping data if loading was successful, else None
"""
return self._load_assembly_mapping_data(
self._get_path_assembly_mapping_data(source_assembly, target_assembly)
) | [
"def",
"get_assembly_mapping_data",
"(",
"self",
",",
"source_assembly",
",",
"target_assembly",
")",
":",
"return",
"self",
".",
"_load_assembly_mapping_data",
"(",
"self",
".",
"_get_path_assembly_mapping_data",
"(",
"source_assembly",
",",
"target_assembly",
")",
")"
] | Get assembly mapping data.
Parameters
----------
source_assembly : {'NCBI36', 'GRCh37', 'GRCh38'}
assembly to remap from
target_assembly : {'NCBI36', 'GRCh37', 'GRCh38'}
assembly to remap to
Returns
-------
dict
dict of json assembly mapping data if loading was successful, else None | [
"Get",
"assembly",
"mapping",
"data",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/resources.py#L137-L154 | train | 238,441 |
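A hypothetical call sketch; constructor arguments for Resources are not shown in these records, and the first call downloads mapping data over the network:

from lineage.resources import Resources

resources = Resources()  # constructor arguments assumed
mapping = resources.get_assembly_mapping_data("NCBI36", "GRCh37")
if mapping is not None:
    print(sorted(mapping))  # chromosome keys: '1'..'22', 'MT', 'X', 'Y'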
apriha/lineage | src/lineage/resources.py | Resources._load_assembly_mapping_data | def _load_assembly_mapping_data(filename):
""" Load assembly mapping data.
Parameters
----------
filename : str
path to compressed archive with assembly mapping data
Returns
-------
assembly_mapping_data : dict
dict of assembly maps if loading was successful, else None
Notes
-----
Keys of returned dict are chromosomes and values are the corresponding assembly map.
"""
try:
assembly_mapping_data = {}
with tarfile.open(filename, "r") as tar:
# http://stackoverflow.com/a/2018576
for member in tar.getmembers():
if ".json" in member.name:
with tar.extractfile(member) as tar_file:
tar_bytes = tar_file.read()
# https://stackoverflow.com/a/42683509/4727627
assembly_mapping_data[member.name.split(".")[0]] = json.loads(
tar_bytes.decode("utf-8")
)
return assembly_mapping_data
except Exception as err:
print(err)
return None | python | def _load_assembly_mapping_data(filename):
""" Load assembly mapping data.
Parameters
----------
filename : str
path to compressed archive with assembly mapping data
Returns
-------
assembly_mapping_data : dict
dict of assembly maps if loading was successful, else None
Notes
-----
Keys of returned dict are chromosomes and values are the corresponding assembly map.
"""
try:
assembly_mapping_data = {}
with tarfile.open(filename, "r") as tar:
# http://stackoverflow.com/a/2018576
for member in tar.getmembers():
if ".json" in member.name:
with tar.extractfile(member) as tar_file:
tar_bytes = tar_file.read()
# https://stackoverflow.com/a/42683509/4727627
assembly_mapping_data[member.name.split(".")[0]] = json.loads(
tar_bytes.decode("utf-8")
)
return assembly_mapping_data
except Exception as err:
print(err)
return None | [
"def",
"_load_assembly_mapping_data",
"(",
"filename",
")",
":",
"try",
":",
"assembly_mapping_data",
"=",
"{",
"}",
"with",
"tarfile",
".",
"open",
"(",
"filename",
",",
"\"r\"",
")",
"as",
"tar",
":",
"# http://stackoverflow.com/a/2018576",
"for",
"member",
"in",
"tar",
".",
"getmembers",
"(",
")",
":",
"if",
"\".json\"",
"in",
"member",
".",
"name",
":",
"with",
"tar",
".",
"extractfile",
"(",
"member",
")",
"as",
"tar_file",
":",
"tar_bytes",
"=",
"tar_file",
".",
"read",
"(",
")",
"# https://stackoverflow.com/a/42683509/4727627",
"assembly_mapping_data",
"[",
"member",
".",
"name",
".",
"split",
"(",
"\".\"",
")",
"[",
"0",
"]",
"]",
"=",
"json",
".",
"loads",
"(",
"tar_bytes",
".",
"decode",
"(",
"\"utf-8\"",
")",
")",
"return",
"assembly_mapping_data",
"except",
"Exception",
"as",
"err",
":",
"print",
"(",
"err",
")",
"return",
"None"
] | Load assembly mapping data.
Parameters
----------
filename : str
path to compressed archive with assembly mapping data
Returns
-------
assembly_mapping_data : dict
dict of assembly maps if loading was successful, else None
Notes
-----
Keys of returned dict are chromosomes and values are the corresponding assembly map. | [
"Load",
"assembly",
"mapping",
"data",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/resources.py#L312-L346 | train | 238,442 |
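A standalone round-trip of the tar.gz-of-JSON scheme, runnable without network; the writer half mirrors _get_path_assembly_mapping_data shown later in these records:

import json
import os
import tarfile
import tempfile

archive = os.path.join(tempfile.mkdtemp(), "maps.tar.gz")

# Write one JSON member per chromosome.
with tarfile.open(archive, "w:gz") as tar:
    for chrom in ("1", "X"):
        with tempfile.NamedTemporaryFile(delete=False, mode="w") as f:
            json.dump({"mappings": []}, f)
        tar.add(f.name, arcname=chrom + ".json")
        os.remove(f.name)

# Read it back exactly as _load_assembly_mapping_data does, keyed by
# member name minus '.json'.
data = {}
with tarfile.open(archive, "r") as tar:
    for member in tar.getmembers():
        if ".json" in member.name:
            with tar.extractfile(member) as fh:
                data[member.name.split(".")[0]] = json.loads(fh.read().decode("utf-8"))

print(sorted(data))  # ['1', 'X']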
apriha/lineage | src/lineage/resources.py | Resources._load_cytoBand | def _load_cytoBand(filename):
""" Load UCSC cytoBand table.
Parameters
----------
filename : str
path to cytoBand file
Returns
-------
df : pandas.DataFrame
cytoBand table if loading was successful, else None
References
----------
..[1] Ryan Dale, GitHub Gist,
https://gist.github.com/daler/c98fc410282d7570efc3#file-ideograms-py
"""
try:
# adapted from chromosome plotting code (see [1]_)
df = pd.read_table(
filename, names=["chrom", "start", "end", "name", "gie_stain"]
)
df["chrom"] = df["chrom"].str[3:]
return df
except Exception as err:
print(err)
return None | python | def _load_cytoBand(filename):
""" Load UCSC cytoBand table.
Parameters
----------
filename : str
path to cytoBand file
Returns
-------
df : pandas.DataFrame
cytoBand table if loading was successful, else None
References
----------
..[1] Ryan Dale, GitHub Gist,
https://gist.github.com/daler/c98fc410282d7570efc3#file-ideograms-py
"""
try:
# adapted from chromosome plotting code (see [1]_)
df = pd.read_table(
filename, names=["chrom", "start", "end", "name", "gie_stain"]
)
df["chrom"] = df["chrom"].str[3:]
return df
except Exception as err:
print(err)
return None | [
"def",
"_load_cytoBand",
"(",
"filename",
")",
":",
"try",
":",
"# adapted from chromosome plotting code (see [1]_)",
"df",
"=",
"pd",
".",
"read_table",
"(",
"filename",
",",
"names",
"=",
"[",
"\"chrom\"",
",",
"\"start\"",
",",
"\"end\"",
",",
"\"name\"",
",",
"\"gie_stain\"",
"]",
")",
"df",
"[",
"\"chrom\"",
"]",
"=",
"df",
"[",
"\"chrom\"",
"]",
".",
"str",
"[",
"3",
":",
"]",
"return",
"df",
"except",
"Exception",
"as",
"err",
":",
"print",
"(",
"err",
")",
"return",
"None"
] | Load UCSC cytoBand table.
Parameters
----------
filename : str
path to cytoBand file
Returns
-------
df : pandas.DataFrame
cytoBand table if loading was successful, else None
References
----------
..[1] Ryan Dale, GitHub Gist,
https://gist.github.com/daler/c98fc410282d7570efc3#file-ideograms-py | [
"Load",
"UCSC",
"cytoBand",
"table",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/resources.py#L349-L376 | train | 238,443 |
apriha/lineage | src/lineage/resources.py | Resources._load_knownGene | def _load_knownGene(filename):
""" Load UCSC knownGene table.
Parameters
----------
filename : str
path to knownGene file
Returns
-------
df : pandas.DataFrame
knownGene table if loading was successful, else None
"""
try:
df = pd.read_table(
filename,
names=[
"name",
"chrom",
"strand",
"txStart",
"txEnd",
"cdsStart",
"cdsEnd",
"exonCount",
"exonStarts",
"exonEnds",
"proteinID",
"alignID",
],
index_col=0,
)
df["chrom"] = df["chrom"].str[3:]
return df
except Exception as err:
print(err)
return None | python | def _load_knownGene(filename):
""" Load UCSC knownGene table.
Parameters
----------
filename : str
path to knownGene file
Returns
-------
df : pandas.DataFrame
knownGene table if loading was successful, else None
"""
try:
df = pd.read_table(
filename,
names=[
"name",
"chrom",
"strand",
"txStart",
"txEnd",
"cdsStart",
"cdsEnd",
"exonCount",
"exonStarts",
"exonEnds",
"proteinID",
"alignID",
],
index_col=0,
)
df["chrom"] = df["chrom"].str[3:]
return df
except Exception as err:
print(err)
return None | [
"def",
"_load_knownGene",
"(",
"filename",
")",
":",
"try",
":",
"df",
"=",
"pd",
".",
"read_table",
"(",
"filename",
",",
"names",
"=",
"[",
"\"name\"",
",",
"\"chrom\"",
",",
"\"strand\"",
",",
"\"txStart\"",
",",
"\"txEnd\"",
",",
"\"cdsStart\"",
",",
"\"cdsEnd\"",
",",
"\"exonCount\"",
",",
"\"exonStarts\"",
",",
"\"exonEnds\"",
",",
"\"proteinID\"",
",",
"\"alignID\"",
",",
"]",
",",
"index_col",
"=",
"0",
",",
")",
"df",
"[",
"\"chrom\"",
"]",
"=",
"df",
"[",
"\"chrom\"",
"]",
".",
"str",
"[",
"3",
":",
"]",
"return",
"df",
"except",
"Exception",
"as",
"err",
":",
"print",
"(",
"err",
")",
"return",
"None"
] | Load UCSC knownGene table.
Parameters
----------
filename : str
path to knownGene file
Returns
-------
df : pandas.DataFrame
knownGene table if loading was successful, else None | [
"Load",
"UCSC",
"knownGene",
"table",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/resources.py#L379-L415 | train | 238,444 |
apriha/lineage | src/lineage/resources.py | Resources._load_kgXref | def _load_kgXref(filename):
""" Load UCSC kgXref table.
Parameters
----------
filename : str
path to kgXref file
Returns
-------
df : pandas.DataFrame
kgXref table if loading was successful, else None
"""
try:
df = pd.read_table(
filename,
names=[
"kgID",
"mRNA",
"spID",
"spDisplayID",
"geneSymbol",
"refseq",
"protAcc",
"description",
"rfamAcc",
"tRnaName",
],
index_col=0,
dtype=object,
)
return df
except Exception as err:
print(err)
return None | python | def _load_kgXref(filename):
""" Load UCSC kgXref table.
Parameters
----------
filename : str
path to kgXref file
Returns
-------
df : pandas.DataFrame
kgXref table if loading was successful, else None
"""
try:
df = pd.read_table(
filename,
names=[
"kgID",
"mRNA",
"spID",
"spDisplayID",
"geneSymbol",
"refseq",
"protAcc",
"description",
"rfamAcc",
"tRnaName",
],
index_col=0,
dtype=object,
)
return df
except Exception as err:
print(err)
return None | [
"def",
"_load_kgXref",
"(",
"filename",
")",
":",
"try",
":",
"df",
"=",
"pd",
".",
"read_table",
"(",
"filename",
",",
"names",
"=",
"[",
"\"kgID\"",
",",
"\"mRNA\"",
",",
"\"spID\"",
",",
"\"spDisplayID\"",
",",
"\"geneSymbol\"",
",",
"\"refseq\"",
",",
"\"protAcc\"",
",",
"\"description\"",
",",
"\"rfamAcc\"",
",",
"\"tRnaName\"",
",",
"]",
",",
"index_col",
"=",
"0",
",",
"dtype",
"=",
"object",
",",
")",
"return",
"df",
"except",
"Exception",
"as",
"err",
":",
"print",
"(",
"err",
")",
"return",
"None"
] | Load UCSC kgXref table.
Parameters
----------
filename : str
path to kgXref file
Returns
-------
df : pandas.DataFrame
kgXref table if loading was successful, else None | [
"Load",
"UCSC",
"kgXref",
"table",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/resources.py#L418-L452 | train | 238,445 |
apriha/lineage | src/lineage/resources.py | Resources._get_path_assembly_mapping_data | def _get_path_assembly_mapping_data(
self, source_assembly, target_assembly, retries=10
):
""" Get local path to assembly mapping data, downloading if necessary.
Parameters
----------
source_assembly : {'NCBI36', 'GRCh37', 'GRCh38'}
assembly to remap from
target_assembly : {'NCBI36', 'GRCh37', 'GRCh38'}
assembly to remap to
retries : int
number of retries per chromosome to download assembly mapping data
Returns
-------
str
path to <source_assembly>_<target_assembly>.tar.gz
References
----------
..[1] Ensembl, Assembly Information Endpoint,
https://rest.ensembl.org/documentation/info/assembly_info
..[2] Ensembl, Assembly Map Endpoint,
http://rest.ensembl.org/documentation/info/assembly_map
"""
if not lineage.create_dir(self._resources_dir):
return None
chroms = [
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10",
"11",
"12",
"13",
"14",
"15",
"16",
"17",
"18",
"19",
"20",
"21",
"22",
"X",
"Y",
"MT",
]
assembly_mapping_data = source_assembly + "_" + target_assembly
destination = os.path.join(
self._resources_dir, assembly_mapping_data + ".tar.gz"
)
if not os.path.exists(destination) or not self._all_chroms_in_tar(
chroms, destination
):
print("Downloading {}".format(os.path.relpath(destination)))
try:
with tarfile.open(destination, "w:gz") as out_tar:
for chrom in chroms:
file = chrom + ".json"
map_endpoint = (
"/map/human/"
+ source_assembly
+ "/"
+ chrom
+ "/"
+ target_assembly
+ "?"
)
# get assembly mapping data
response = None
retry = 0
while response is None and retry < retries:
response = self._ensembl_rest_client.perform_rest_action(
map_endpoint
)
retry += 1
if response is not None:
# open temp file, save json response to file, close temp file
with tempfile.NamedTemporaryFile(
delete=False, mode="w"
) as f:
json.dump(response, f)
# add temp file to archive
out_tar.add(f.name, arcname=file)
# remove temp file
os.remove(f.name)
except Exception as err:
print(err)
return None
return destination | python | def _get_path_assembly_mapping_data(
self, source_assembly, target_assembly, retries=10
):
""" Get local path to assembly mapping data, downloading if necessary.
Parameters
----------
source_assembly : {'NCBI36', 'GRCh37', 'GRCh38'}
assembly to remap from
target_assembly : {'NCBI36', 'GRCh37', 'GRCh38'}
assembly to remap to
retries : int
number of retries per chromosome to download assembly mapping data
Returns
-------
str
path to <source_assembly>_<target_assembly>.tar.gz
References
----------
..[1] Ensembl, Assembly Information Endpoint,
https://rest.ensembl.org/documentation/info/assembly_info
..[2] Ensembl, Assembly Map Endpoint,
http://rest.ensembl.org/documentation/info/assembly_map
"""
if not lineage.create_dir(self._resources_dir):
return None
chroms = [
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10",
"11",
"12",
"13",
"14",
"15",
"16",
"17",
"18",
"19",
"20",
"21",
"22",
"X",
"Y",
"MT",
]
assembly_mapping_data = source_assembly + "_" + target_assembly
destination = os.path.join(
self._resources_dir, assembly_mapping_data + ".tar.gz"
)
if not os.path.exists(destination) or not self._all_chroms_in_tar(
chroms, destination
):
print("Downloading {}".format(os.path.relpath(destination)))
try:
with tarfile.open(destination, "w:gz") as out_tar:
for chrom in chroms:
file = chrom + ".json"
map_endpoint = (
"/map/human/"
+ source_assembly
+ "/"
+ chrom
+ "/"
+ target_assembly
+ "?"
)
# get assembly mapping data
response = None
retry = 0
while response is None and retry < retries:
response = self._ensembl_rest_client.perform_rest_action(
map_endpoint
)
retry += 1
if response is not None:
# open temp file, save json response to file, close temp file
with tempfile.NamedTemporaryFile(
delete=False, mode="w"
) as f:
json.dump(response, f)
# add temp file to archive
out_tar.add(f.name, arcname=file)
# remove temp file
os.remove(f.name)
except Exception as err:
print(err)
return None
return destination | [
"def",
"_get_path_assembly_mapping_data",
"(",
"self",
",",
"source_assembly",
",",
"target_assembly",
",",
"retries",
"=",
"10",
")",
":",
"if",
"not",
"lineage",
".",
"create_dir",
"(",
"self",
".",
"_resources_dir",
")",
":",
"return",
"None",
"chroms",
"=",
"[",
"\"1\"",
",",
"\"2\"",
",",
"\"3\"",
",",
"\"4\"",
",",
"\"5\"",
",",
"\"6\"",
",",
"\"7\"",
",",
"\"8\"",
",",
"\"9\"",
",",
"\"10\"",
",",
"\"11\"",
",",
"\"12\"",
",",
"\"13\"",
",",
"\"14\"",
",",
"\"15\"",
",",
"\"16\"",
",",
"\"17\"",
",",
"\"18\"",
",",
"\"19\"",
",",
"\"20\"",
",",
"\"21\"",
",",
"\"22\"",
",",
"\"X\"",
",",
"\"Y\"",
",",
"\"MT\"",
",",
"]",
"assembly_mapping_data",
"=",
"source_assembly",
"+",
"\"_\"",
"+",
"target_assembly",
"destination",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_resources_dir",
",",
"assembly_mapping_data",
"+",
"\".tar.gz\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"destination",
")",
"or",
"not",
"self",
".",
"_all_chroms_in_tar",
"(",
"chroms",
",",
"destination",
")",
":",
"print",
"(",
"\"Downloading {}\"",
".",
"format",
"(",
"os",
".",
"path",
".",
"relpath",
"(",
"destination",
")",
")",
")",
"try",
":",
"with",
"tarfile",
".",
"open",
"(",
"destination",
",",
"\"w:gz\"",
")",
"as",
"out_tar",
":",
"for",
"chrom",
"in",
"chroms",
":",
"file",
"=",
"chrom",
"+",
"\".json\"",
"map_endpoint",
"=",
"(",
"\"/map/human/\"",
"+",
"source_assembly",
"+",
"\"/\"",
"+",
"chrom",
"+",
"\"/\"",
"+",
"target_assembly",
"+",
"\"?\"",
")",
"# get assembly mapping data",
"response",
"=",
"None",
"retry",
"=",
"0",
"while",
"response",
"is",
"None",
"and",
"retry",
"<",
"retries",
":",
"response",
"=",
"self",
".",
"_ensembl_rest_client",
".",
"perform_rest_action",
"(",
"map_endpoint",
")",
"retry",
"+=",
"1",
"if",
"response",
"is",
"not",
"None",
":",
"# open temp file, save json response to file, close temp file",
"with",
"tempfile",
".",
"NamedTemporaryFile",
"(",
"delete",
"=",
"False",
",",
"mode",
"=",
"\"w\"",
")",
"as",
"f",
":",
"json",
".",
"dump",
"(",
"response",
",",
"f",
")",
"# add temp file to archive",
"out_tar",
".",
"add",
"(",
"f",
".",
"name",
",",
"arcname",
"=",
"file",
")",
"# remove temp file",
"os",
".",
"remove",
"(",
"f",
".",
"name",
")",
"except",
"Exception",
"as",
"err",
":",
"print",
"(",
"err",
")",
"return",
"None",
"return",
"destination"
] | Get local path to assembly mapping data, downloading if necessary.
Parameters
----------
source_assembly : {'NCBI36', 'GRCh37', 'GRCh38'}
assembly to remap from
target_assembly : {'NCBI36', 'GRCh37', 'GRCh38'}
assembly to remap to
retries : int
number of retries per chromosome to download assembly mapping data
Returns
-------
str
path to <source_assembly>_<target_assembly>.tar.gz
References
----------
..[1] Ensembl, Assembly Information Endpoint,
https://rest.ensembl.org/documentation/info/assembly_info
..[2] Ensembl, Assembly Map Endpoint,
http://rest.ensembl.org/documentation/info/assembly_map | [
"Get",
"local",
"path",
"to",
"assembly",
"mapping",
"data",
"downloading",
"if",
"necessary",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/resources.py#L517-L626 | train | 238,446 |
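The per-chromosome retry loop, isolated as a runnable sketch; perform_request stands in for the Ensembl REST client and is hypothetical:

def fetch_with_retries(perform_request, endpoint, retries=10):
    # Mirrors the inner loop above: stop at the first non-None response.
    response = None
    retry = 0
    while response is None and retry < retries:
        response = perform_request(endpoint)  # returns None on failure
        retry += 1
    return response

calls = {"n": 0}
def flaky(endpoint):
    calls["n"] += 1
    return {"mappings": []} if calls["n"] >= 3 else None

endpoint = "/map/human/NCBI36/1/GRCh37?"  # format taken from the code above
print(fetch_with_retries(flaky, endpoint))  # succeeds on the third attempt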
apriha/lineage | src/lineage/resources.py | Resources._download_file | def _download_file(self, url, filename, compress=False, timeout=30):
""" Download a file to the resources folder.
Download data from `url`, save as `filename`, and optionally compress with gzip.
Parameters
----------
url : str
URL to download data from
filename : str
name of file to save; if compress, ensure '.gz' is appended
compress : bool
compress with gzip
timeout : int
seconds for timeout of download request
Returns
-------
str
path to downloaded file, None if error
"""
if not lineage.create_dir(self._resources_dir):
return None
if compress and filename[-3:] != ".gz":
filename += ".gz"
destination = os.path.join(self._resources_dir, filename)
if not os.path.exists(destination):
try:
if compress:
open_func = gzip.open
else:
open_func = open
# get file if it hasn't already been downloaded
# http://stackoverflow.com/a/7244263
with urllib.request.urlopen(
url, timeout=timeout
) as response, open_func(destination, "wb") as f:
self._print_download_msg(destination)
data = response.read() # a `bytes` object
f.write(data)
except urllib.error.URLError as err:
print(err)
destination = None
# try HTTP if an FTP error occurred
if "ftp://" in url:
destination = self._download_file(
url.replace("ftp://", "http://"),
filename,
compress=compress,
timeout=timeout,
)
except Exception as err:
print(err)
return None
return destination | python | def _download_file(self, url, filename, compress=False, timeout=30):
""" Download a file to the resources folder.
Download data from `url`, save as `filename`, and optionally compress with gzip.
Parameters
----------
url : str
URL to download data from
filename : str
name of file to save; if compress, ensure '.gz' is appended
compress : bool
compress with gzip
timeout : int
seconds for timeout of download request
Returns
-------
str
path to downloaded file, None if error
"""
if not lineage.create_dir(self._resources_dir):
return None
if compress and filename[-3:] != ".gz":
filename += ".gz"
destination = os.path.join(self._resources_dir, filename)
if not os.path.exists(destination):
try:
if compress:
open_func = gzip.open
else:
open_func = open
# get file if it hasn't already been downloaded
# http://stackoverflow.com/a/7244263
with urllib.request.urlopen(
url, timeout=timeout
) as response, open_func(destination, "wb") as f:
self._print_download_msg(destination)
data = response.read() # a `bytes` object
f.write(data)
except urllib.error.URLError as err:
print(err)
destination = None
# try HTTP if an FTP error occurred
if "ftp://" in url:
destination = self._download_file(
url.replace("ftp://", "http://"),
filename,
compress=compress,
timeout=timeout,
)
except Exception as err:
print(err)
return None
return destination | [
"def",
"_download_file",
"(",
"self",
",",
"url",
",",
"filename",
",",
"compress",
"=",
"False",
",",
"timeout",
"=",
"30",
")",
":",
"if",
"not",
"lineage",
".",
"create_dir",
"(",
"self",
".",
"_resources_dir",
")",
":",
"return",
"None",
"if",
"compress",
"and",
"filename",
"[",
"-",
"3",
":",
"]",
"!=",
"\".gz\"",
":",
"filename",
"+=",
"\".gz\"",
"destination",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_resources_dir",
",",
"filename",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"destination",
")",
":",
"try",
":",
"if",
"compress",
":",
"open_func",
"=",
"gzip",
".",
"open",
"else",
":",
"open_func",
"=",
"open",
"# get file if it hasn't already been downloaded",
"# http://stackoverflow.com/a/7244263",
"with",
"urllib",
".",
"request",
".",
"urlopen",
"(",
"url",
",",
"timeout",
"=",
"timeout",
")",
"as",
"response",
",",
"open_func",
"(",
"destination",
",",
"\"wb\"",
")",
"as",
"f",
":",
"self",
".",
"_print_download_msg",
"(",
"destination",
")",
"data",
"=",
"response",
".",
"read",
"(",
")",
"# a `bytes` object",
"f",
".",
"write",
"(",
"data",
")",
"except",
"urllib",
".",
"error",
".",
"URLError",
"as",
"err",
":",
"print",
"(",
"err",
")",
"destination",
"=",
"None",
"# try HTTP if an FTP error occurred",
"if",
"\"ftp://\"",
"in",
"url",
":",
"destination",
"=",
"self",
".",
"_download_file",
"(",
"url",
".",
"replace",
"(",
"\"ftp://\"",
",",
"\"http://\"",
")",
",",
"filename",
",",
"compress",
"=",
"compress",
",",
"timeout",
"=",
"timeout",
",",
")",
"except",
"Exception",
"as",
"err",
":",
"print",
"(",
"err",
")",
"return",
"None",
"return",
"destination"
] | Download a file to the resources folder.
Download data from `url`, save as `filename`, and optionally compress with gzip.
Parameters
----------
url : str
URL to download data from
filename : str
name of file to save; if compress, ensure '.gz' is appended
compress : bool
compress with gzip
timeout : int
seconds for timeout of download request
Returns
-------
str
path to downloaded file, None if error | [
"Download",
"a",
"file",
"to",
"the",
"resources",
"folder",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/resources.py#L642-L701 | train | 238,447 |
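A standalone sketch of the download-then-gzip idea, assuming network access; example.com is a stand-in URL:

import gzip
import urllib.request

url = "http://example.com/"
destination = "page.html.gz"

open_func = gzip.open  # compress on the way down, as the code above does
with urllib.request.urlopen(url, timeout=30) as response, \
        open_func(destination, "wb") as f:
    f.write(response.read())  # response.read() returns bytes

with gzip.open(destination, "rb") as f:
    print(f.read(30))  # the start of the page, stored gzip-compressed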
apriha/lineage | src/lineage/individual.py | Individual.load_snps | def load_snps(
self,
raw_data,
discrepant_snp_positions_threshold=100,
discrepant_genotypes_threshold=500,
save_output=False,
):
""" Load raw genotype data.
Parameters
----------
raw_data : list or str
path(s) to file(s) with raw genotype data
discrepant_snp_positions_threshold : int
threshold for discrepant SNP positions between existing data and data to be loaded,
a large value could indicate mismatched genome assemblies
discrepant_genotypes_threshold : int
threshold for discrepant genotype data between existing data and data to be loaded,
a large value could indicate mismatched individuals
save_output : bool
specifies whether to save discrepant SNP output to CSV files in the output directory
"""
if type(raw_data) is list:
for file in raw_data:
self._load_snps_helper(
file,
discrepant_snp_positions_threshold,
discrepant_genotypes_threshold,
save_output,
)
elif type(raw_data) is str:
self._load_snps_helper(
raw_data,
discrepant_snp_positions_threshold,
discrepant_genotypes_threshold,
save_output,
)
else:
raise TypeError("invalid filetype") | python | def load_snps(
self,
raw_data,
discrepant_snp_positions_threshold=100,
discrepant_genotypes_threshold=500,
save_output=False,
):
""" Load raw genotype data.
Parameters
----------
raw_data : list or str
path(s) to file(s) with raw genotype data
discrepant_snp_positions_threshold : int
threshold for discrepant SNP positions between existing data and data to be loaded,
a large value could indicate mismatched genome assemblies
discrepant_genotypes_threshold : int
threshold for discrepant genotype data between existing data and data to be loaded,
a large value could indicate mismatched individuals
save_output : bool
specifies whether to save discrepant SNP output to CSV files in the output directory
"""
if type(raw_data) is list:
for file in raw_data:
self._load_snps_helper(
file,
discrepant_snp_positions_threshold,
discrepant_genotypes_threshold,
save_output,
)
elif type(raw_data) is str:
self._load_snps_helper(
raw_data,
discrepant_snp_positions_threshold,
discrepant_genotypes_threshold,
save_output,
)
else:
raise TypeError("invalid filetype") | [
"def",
"load_snps",
"(",
"self",
",",
"raw_data",
",",
"discrepant_snp_positions_threshold",
"=",
"100",
",",
"discrepant_genotypes_threshold",
"=",
"500",
",",
"save_output",
"=",
"False",
",",
")",
":",
"if",
"type",
"(",
"raw_data",
")",
"is",
"list",
":",
"for",
"file",
"in",
"raw_data",
":",
"self",
".",
"_load_snps_helper",
"(",
"file",
",",
"discrepant_snp_positions_threshold",
",",
"discrepant_genotypes_threshold",
",",
"save_output",
",",
")",
"elif",
"type",
"(",
"raw_data",
")",
"is",
"str",
":",
"self",
".",
"_load_snps_helper",
"(",
"raw_data",
",",
"discrepant_snp_positions_threshold",
",",
"discrepant_genotypes_threshold",
",",
"save_output",
",",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"invalid filetype\"",
")"
] | Load raw genotype data.
Parameters
----------
raw_data : list or str
path(s) to file(s) with raw genotype data
discrepant_snp_positions_threshold : int
threshold for discrepant SNP positions between existing data and data to be loaded,
a large value could indicate mismatched genome assemblies
discrepant_genotypes_threshold : int
threshold for discrepant genotype data between existing data and data to be loaded,
a large value could indicate mismatched individuals
save_output : bool
specifies whether to save discrepant SNP output to CSV files in the output directory | [
"Load",
"raw",
"genotype",
"data",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/individual.py#L205-L243 | train | 238,448 |
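A minimal usage sketch for the record above; the create_individual entry point and file paths are assumptions (hypothetical paths, entry point as in the project's README):

from lineage import Lineage

l = Lineage()
ind = l.create_individual('User1')  # hypothetical individual name
# Merge two hypothetical raw-data files; the thresholds guard against
# mismatched assemblies / mismatched individuals as documented above.
ind.load_snps(
    ['resources/user1_23andme.txt', 'resources/user1_ftdna.csv'],
    discrepant_snp_positions_threshold=100,
    discrepant_genotypes_threshold=500,
    save_output=True,
)
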
apriha/lineage | src/lineage/individual.py | Individual.save_snps | def save_snps(self, filename=None):
""" Save SNPs to file.
Parameters
----------
filename : str
filename for file to save
Returns
-------
str
path to file in output directory if SNPs were saved, else empty str
"""
comment = (
"# Source(s): {}\n"
"# Assembly: {}\n"
"# SNPs: {}\n"
"# Chromosomes: {}\n".format(
self.source, self.assembly, self.snp_count, self.chromosomes_summary
)
)
if filename is None:
filename = self.get_var_name() + "_lineage_" + self.assembly + ".csv"
return lineage.save_df_as_csv(
self._snps,
self._output_dir,
filename,
comment=comment,
header=["chromosome", "position", "genotype"],
) | python | def save_snps(self, filename=None):
""" Save SNPs to file.
Parameters
----------
filename : str
filename for file to save
Returns
-------
str
path to file in output directory if SNPs were saved, else empty str
"""
comment = (
"# Source(s): {}\n"
"# Assembly: {}\n"
"# SNPs: {}\n"
"# Chromosomes: {}\n".format(
self.source, self.assembly, self.snp_count, self.chromosomes_summary
)
)
if filename is None:
filename = self.get_var_name() + "_lineage_" + self.assembly + ".csv"
return lineage.save_df_as_csv(
self._snps,
self._output_dir,
filename,
comment=comment,
header=["chromosome", "position", "genotype"],
) | [
"def",
"save_snps",
"(",
"self",
",",
"filename",
"=",
"None",
")",
":",
"comment",
"=",
"(",
"\"# Source(s): {}\\n\"",
"\"# Assembly: {}\\n\"",
"\"# SNPs: {}\\n\"",
"\"# Chromosomes: {}\\n\"",
".",
"format",
"(",
"self",
".",
"source",
",",
"self",
".",
"assembly",
",",
"self",
".",
"snp_count",
",",
"self",
".",
"chromosomes_summary",
")",
")",
"if",
"filename",
"is",
"None",
":",
"filename",
"=",
"self",
".",
"get_var_name",
"(",
")",
"+",
"\"_lineage_\"",
"+",
"self",
".",
"assembly",
"+",
"\".csv\"",
"return",
"lineage",
".",
"save_df_as_csv",
"(",
"self",
".",
"_snps",
",",
"self",
".",
"_output_dir",
",",
"filename",
",",
"comment",
"=",
"comment",
",",
"header",
"=",
"[",
"\"chromosome\"",
",",
"\"position\"",
",",
"\"genotype\"",
"]",
",",
")"
] | Save SNPs to file.
Parameters
----------
filename : str
filename for file to save
Returns
-------
str
path to file in output directory if SNPs were saved, else empty str | [
"Save",
"SNPs",
"to",
"file",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/individual.py#L267-L298 | train | 238,449 |
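A short sketch of the default-filename behaviour read off the code above; the path and create_individual entry point are assumptions:

from lineage import Lineage

l = Lineage()
ind = l.create_individual('User1', 'resources/user1_23andme.txt')  # hypothetical path
# Default filename combines the name, 'lineage', and the assembly,
# e.g. output/User1_lineage_GRCh37.csv; returns '' if nothing was saved.
saved_path = ind.save_snps()
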
apriha/lineage | src/lineage/individual.py | Individual.remap_snps | def remap_snps(self, target_assembly, complement_bases=True):
""" Remap the SNP coordinates of this ``Individual`` from one assembly to another.
This method is a wrapper for `remap_snps` in the ``Lineage`` class.
This method uses the assembly map endpoint of the Ensembl REST API service to convert SNP
coordinates / positions from one assembly to another. After remapping, the coordinates /
positions for the ``Individual``'s SNPs will be that of the target assembly.
If the SNPs are already mapped relative to the target assembly, remapping will not be
performed.
Parameters
----------
target_assembly : {'NCBI36', 'GRCh37', 'GRCh38', 36, 37, 38}
assembly to remap to
complement_bases : bool
complement bases when remapping SNPs to the minus strand
Returns
-------
chromosomes_remapped : list of str
chromosomes remapped; empty if None
chromosomes_not_remapped : list of str
chromosomes not remapped; empty if None
Notes
-----
An assembly is also known as a "build." For example:
Assembly NCBI36 = Build 36
Assembly GRCh37 = Build 37
Assembly GRCh38 = Build 38
See https://www.ncbi.nlm.nih.gov/assembly for more information about assemblies and
remapping.
References
----------
..[1] Ensembl, Assembly Map Endpoint,
http://rest.ensembl.org/documentation/info/assembly_map
"""
from lineage import Lineage
l = Lineage()
return l.remap_snps(self, target_assembly, complement_bases) | python | def remap_snps(self, target_assembly, complement_bases=True):
""" Remap the SNP coordinates of this ``Individual`` from one assembly to another.
This method is a wrapper for `remap_snps` in the ``Lineage`` class.
This method uses the assembly map endpoint of the Ensembl REST API service to convert SNP
coordinates / positions from one assembly to another. After remapping, the coordinates /
positions for the ``Individual``'s SNPs will be that of the target assembly.
If the SNPs are already mapped relative to the target assembly, remapping will not be
performed.
Parameters
----------
target_assembly : {'NCBI36', 'GRCh37', 'GRCh38', 36, 37, 38}
assembly to remap to
complement_bases : bool
complement bases when remapping SNPs to the minus strand
Returns
-------
chromosomes_remapped : list of str
chromosomes remapped; empty if None
chromosomes_not_remapped : list of str
chromosomes not remapped; empty if None
Notes
-----
An assembly is also known as a "build." For example:
Assembly NCBI36 = Build 36
Assembly GRCh37 = Build 37
Assembly GRCh38 = Build 38
See https://www.ncbi.nlm.nih.gov/assembly for more information about assemblies and
remapping.
References
----------
..[1] Ensembl, Assembly Map Endpoint,
http://rest.ensembl.org/documentation/info/assembly_map
"""
from lineage import Lineage
l = Lineage()
return l.remap_snps(self, target_assembly, complement_bases) | [
"def",
"remap_snps",
"(",
"self",
",",
"target_assembly",
",",
"complement_bases",
"=",
"True",
")",
":",
"from",
"lineage",
"import",
"Lineage",
"l",
"=",
"Lineage",
"(",
")",
"return",
"l",
".",
"remap_snps",
"(",
"self",
",",
"target_assembly",
",",
"complement_bases",
")"
] | Remap the SNP coordinates of this ``Individual`` from one assembly to another.
This method is a wrapper for `remap_snps` in the ``Lineage`` class.
This method uses the assembly map endpoint of the Ensembl REST API service to convert SNP
coordinates / positions from one assembly to another. After remapping, the coordinates /
positions for the ``Individual``'s SNPs will be that of the target assembly.
If the SNPs are already mapped relative to the target assembly, remapping will not be
performed.
Parameters
----------
target_assembly : {'NCBI36', 'GRCh37', 'GRCh38', 36, 37, 38}
assembly to remap to
complement_bases : bool
complement bases when remapping SNPs to the minus strand
Returns
-------
chromosomes_remapped : list of str
chromosomes remapped; empty if None
chromosomes_not_remapped : list of str
chromosomes not remapped; empty if None
Notes
-----
An assembly is also known as a "build." For example:
Assembly NCBI36 = Build 36
Assembly GRCh37 = Build 37
Assembly GRCh38 = Build 38
See https://www.ncbi.nlm.nih.gov/assembly for more information about assemblies and
remapping.
References
----------
..[1] Ensembl, Assembly Map Endpoint,
http://rest.ensembl.org/documentation/info/assembly_map | [
"Remap",
"the",
"SNP",
"coordinates",
"of",
"this",
"Individual",
"from",
"one",
"assembly",
"to",
"another",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/individual.py#L382-L427 | train | 238,450 |
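A minimal sketch, assuming network access to the Ensembl REST service; the input path and create_individual entry point are assumptions:

from lineage import Lineage

l = Lineage()
ind = l.create_individual('User1', 'resources/user1_23andme.txt')  # hypothetical path
# Remap Build 37 coordinates to Build 38 via the Ensembl assembly map endpoint.
remapped, not_remapped = ind.remap_snps('GRCh38')
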
apriha/lineage | src/lineage/individual.py | Individual._set_snps | def _set_snps(self, snps, build=37):
""" Set `_snps` and `_build` properties of this ``Individual``.
Notes
-----
Intended to be used internally to `lineage`.
Parameters
----------
snps : pandas.DataFrame
individual's genetic data normalized for use with `lineage`
build : int
build of this ``Individual``'s SNPs
"""
self._snps = snps
self._build = build | python | def _set_snps(self, snps, build=37):
""" Set `_snps` and `_build` properties of this ``Individual``.
Notes
-----
Intended to be used internally to `lineage`.
Parameters
----------
snps : pandas.DataFrame
individual's genetic data normalized for use with `lineage`
build : int
build of this ``Individual``'s SNPs
"""
self._snps = snps
self._build = build | [
"def",
"_set_snps",
"(",
"self",
",",
"snps",
",",
"build",
"=",
"37",
")",
":",
"self",
".",
"_snps",
"=",
"snps",
"self",
".",
"_build",
"=",
"build"
] | Set `_snps` and `_build` properties of this ``Individual``.
Notes
-----
Intended to be used internally to `lineage`.
Parameters
----------
snps : pandas.DataFrame
individual's genetic data normalized for use with `lineage`
build : int
build of this ``Individual``'s SNPs | [
"Set",
"_snps",
"and",
"_build",
"properties",
"of",
"this",
"Individual",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/individual.py#L429-L444 | train | 238,451 |
apriha/lineage | src/lineage/individual.py | Individual._double_single_alleles | def _double_single_alleles(df, chrom):
""" Double any single alleles in the specified chromosome.
Parameters
----------
df : pandas.DataFrame
SNPs
chrom : str
chromosome of alleles to double
Returns
-------
df : pandas.DataFrame
SNPs with specified chromosome's single alleles doubled
"""
# find all single alleles of the specified chromosome
single_alleles = np.where(
(df["chrom"] == chrom) & (df["genotype"].str.len() == 1)
)[0]
# double those alleles
df.ix[single_alleles, "genotype"] = df.ix[single_alleles, "genotype"] * 2
return df | python | def _double_single_alleles(df, chrom):
""" Double any single alleles in the specified chromosome.
Parameters
----------
df : pandas.DataFrame
SNPs
chrom : str
chromosome of alleles to double
Returns
-------
df : pandas.DataFrame
SNPs with specified chromosome's single alleles doubled
"""
# find all single alleles of the specified chromosome
single_alleles = np.where(
(df["chrom"] == chrom) & (df["genotype"].str.len() == 1)
)[0]
# double those alleles
df.ix[single_alleles, "genotype"] = df.ix[single_alleles, "genotype"] * 2
return df | [
"def",
"_double_single_alleles",
"(",
"df",
",",
"chrom",
")",
":",
"# find all single alleles of the specified chromosome",
"single_alleles",
"=",
"np",
".",
"where",
"(",
"(",
"df",
"[",
"\"chrom\"",
"]",
"==",
"chrom",
")",
"&",
"(",
"df",
"[",
"\"genotype\"",
"]",
".",
"str",
".",
"len",
"(",
")",
"==",
"1",
")",
")",
"[",
"0",
"]",
"# double those alleles",
"df",
".",
"ix",
"[",
"single_alleles",
",",
"\"genotype\"",
"]",
"=",
"df",
".",
"ix",
"[",
"single_alleles",
",",
"\"genotype\"",
"]",
"*",
"2",
"return",
"df"
] | Double any single alleles in the specified chromosome.
Parameters
----------
df : pandas.DataFrame
SNPs
chrom : str
chromosome of alleles to double
Returns
-------
df : pandas.DataFrame
SNPs with specified chromosome's single alleles doubled | [
"Double",
"any",
"single",
"alleles",
"in",
"the",
"specified",
"chromosome",
"."
] | 13106a62a959a80ac26c68d1566422de08aa877b | https://github.com/apriha/lineage/blob/13106a62a959a80ac26c68d1566422de08aa877b/src/lineage/individual.py#L605-L628 | train | 238,452 |
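The record's code leans on the long-deprecated DataFrame.ix accessor; a standalone sketch of the same doubling idea with current pandas indexing:

import pandas as pd

df = pd.DataFrame({'chrom': ['X', 'X', '1'],
                   'genotype': ['A', 'AA', 'G']})
# A boolean mask replaces the np.where + .ix combination above.
mask = (df['chrom'] == 'X') & (df['genotype'].str.len() == 1)
df.loc[mask, 'genotype'] = df.loc[mask, 'genotype'] * 2  # 'A' -> 'AA'
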
tBuLi/symfit | symfit/core/support.py | seperate_symbols | def seperate_symbols(func):
"""
Separate the symbols in symbolic function func. Return them in alphabetical
order.
:param func: sympy symbolic function.
:return: (vars, params), a tuple of all variables and parameters, each
sorted in alphabetical order.
:raises TypeError: only symfit Variable and Parameter are allowed, not sympy
Symbols.
"""
params = []
vars = []
for symbol in func.free_symbols:
if not isidentifier(str(symbol)):
continue # E.g. Indexed objects might print to A[i, j]
if isinstance(symbol, Parameter):
params.append(symbol)
elif isinstance(symbol, Idx):
# Idx objects are not seen as parameters or vars.
pass
elif isinstance(symbol, (MatrixExpr, Expr)):
vars.append(symbol)
else:
raise TypeError('model contains an unknown symbol type, {}'.format(type(symbol)))
for der in func.atoms(sympy.Derivative):
# Used by jacobians and hessians, where derivatives are treated as
# Variables. This way of writing it is purposefully discriminatory
# against derivatives wrt variables, since such derivatives should be
# performed explicitly in the case of jacs/hess, and are treated
# differently in the case of ODEModels.
if der.expr in vars and all(isinstance(s, Parameter) for s in der.variables):
vars.append(der)
params.sort(key=lambda symbol: symbol.name)
vars.sort(key=lambda symbol: symbol.name)
return vars, params | python | def seperate_symbols(func):
"""
Separate the symbols in symbolic function func. Return them in alphabetical
order.
:param func: sympy symbolic function.
:return: (vars, params), a tuple of all variables and parameters, each
sorted in alphabetical order.
:raises TypeError: only symfit Variable and Parameter are allowed, not sympy
Symbols.
"""
params = []
vars = []
for symbol in func.free_symbols:
if not isidentifier(str(symbol)):
continue # E.g. Indexed objects might print to A[i, j]
if isinstance(symbol, Parameter):
params.append(symbol)
elif isinstance(symbol, Idx):
# Idx objects are not seen as parameters or vars.
pass
elif isinstance(symbol, (MatrixExpr, Expr)):
vars.append(symbol)
else:
raise TypeError('model contains an unknown symbol type, {}'.format(type(symbol)))
for der in func.atoms(sympy.Derivative):
# Used by jacobians and hessians, where derivatives are treated as
# Variables. This way of writing it is purposefully discriminatory
# against derivatives wrt variables, since such derivatives should be
# performed explicitly in the case of jacs/hess, and are treated
# differently in the case of ODEModels.
if der.expr in vars and all(isinstance(s, Parameter) for s in der.variables):
vars.append(der)
params.sort(key=lambda symbol: symbol.name)
vars.sort(key=lambda symbol: symbol.name)
return vars, params | [
"def",
"seperate_symbols",
"(",
"func",
")",
":",
"params",
"=",
"[",
"]",
"vars",
"=",
"[",
"]",
"for",
"symbol",
"in",
"func",
".",
"free_symbols",
":",
"if",
"not",
"isidentifier",
"(",
"str",
"(",
"symbol",
")",
")",
":",
"continue",
"# E.g. Indexed objects might print to A[i, j]",
"if",
"isinstance",
"(",
"symbol",
",",
"Parameter",
")",
":",
"params",
".",
"append",
"(",
"symbol",
")",
"elif",
"isinstance",
"(",
"symbol",
",",
"Idx",
")",
":",
"# Idx objects are not seen as parameters or vars.",
"pass",
"elif",
"isinstance",
"(",
"symbol",
",",
"(",
"MatrixExpr",
",",
"Expr",
")",
")",
":",
"vars",
".",
"append",
"(",
"symbol",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'model contains an unknown symbol type, {}'",
".",
"format",
"(",
"type",
"(",
"symbol",
")",
")",
")",
"for",
"der",
"in",
"func",
".",
"atoms",
"(",
"sympy",
".",
"Derivative",
")",
":",
"# Used by jacobians and hessians, where derivatives are treated as",
"# Variables. This way of writing it is purposefully discriminatory",
"# against derivatives wrt variables, since such derivatives should be",
"# performed explicitly in the case of jacs/hess, and are treated",
"# differently in the case of ODEModels.",
"if",
"der",
".",
"expr",
"in",
"vars",
"and",
"all",
"(",
"isinstance",
"(",
"s",
",",
"Parameter",
")",
"for",
"s",
"in",
"der",
".",
"variables",
")",
":",
"vars",
".",
"append",
"(",
"der",
")",
"params",
".",
"sort",
"(",
"key",
"=",
"lambda",
"symbol",
":",
"symbol",
".",
"name",
")",
"vars",
".",
"sort",
"(",
"key",
"=",
"lambda",
"symbol",
":",
"symbol",
".",
"name",
")",
"return",
"vars",
",",
"params"
Separate the symbols in symbolic function func. Return them in alphabetical
order.
:param func: sympy symbolic function.
:return: (vars, params), a tuple of all variables and parameters, each
sorted in alphabetical order.
:raises TypeError: only symfit Variable and Parameter are allowed, not sympy
Symbols. | [
"Seperate",
"the",
"symbols",
"in",
"symbolic",
"function",
"func",
".",
"Return",
"them",
"in",
"alphabetical",
"order",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/support.py#L69-L106 | train | 238,453 |
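A minimal sketch of the sorting behaviour, assuming the import path shown in this record's url:

from symfit import parameters, variables
from symfit.core.support import seperate_symbols

x, y = variables('x, y')
a, b = parameters('a, b')
vars_, params = seperate_symbols(a * x + b * y)
# vars_ == [x, y] and params == [a, b], each sorted alphabetically by name
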
tBuLi/symfit | symfit/core/support.py | sympy_to_py | def sympy_to_py(func, args):
"""
Turn a symbolic expression into a Python lambda function,
which has the names of the variables and parameters as its argument names.
:param func: sympy expression
:param args: variables and parameters in this model
:return: lambda function to be used for numerical evaluation of the model.
"""
# replace the derivatives with printable variables.
derivatives = {var: Variable(var.name) for var in args
if isinstance(var, sympy.Derivative)}
func = func.xreplace(derivatives)
args = [derivatives[var] if isinstance(var, sympy.Derivative) else var
for var in args]
lambdafunc = lambdify(args, func, printer=SymfitNumPyPrinter,
dummify=False)
# Check if the names of the lambda function are what we expect
signature = inspect_sig.signature(lambdafunc)
sig_parameters = OrderedDict(signature.parameters)
for arg, lambda_arg in zip(args, sig_parameters):
if arg.name != lambda_arg:
break
else: # Lambdifying successful!
return lambdafunc
# If we are here (very rare), then one of the lambda arg is still a Dummy.
# In this case we will manually handle the naming.
lambda_names = sig_parameters.keys()
arg_names = [arg.name for arg in args]
conversion = dict(zip(arg_names, lambda_names))
# Wrap the lambda such that arg names are translated into the correct dummy
# symbol names
@wraps(lambdafunc)
def wrapped_lambdafunc(*ordered_args, **kwargs):
converted_kwargs = {conversion[k]: v for k, v in kwargs.items()}
return lambdafunc(*ordered_args, **converted_kwargs)
# Update the signature of wrapped_lambdafunc to match our args
new_sig_parameters = OrderedDict()
for arg_name, dummy_name in conversion.items():
if arg_name == dummy_name: # Already has the correct name
new_sig_parameters[arg_name] = sig_parameters[arg_name]
else: # Change the dummy inspect.Parameter to the correct name
param = sig_parameters[dummy_name]
param = param.replace(name=arg_name)
new_sig_parameters[arg_name] = param
wrapped_lambdafunc.__signature__ = signature.replace(
parameters=new_sig_parameters.values()
)
return wrapped_lambdafunc | python | def sympy_to_py(func, args):
"""
Turn a symbolic expression into a Python lambda function,
which has the names of the variables and parameters as its argument names.
:param func: sympy expression
:param args: variables and parameters in this model
:return: lambda function to be used for numerical evaluation of the model.
"""
# replace the derivatives with printable variables.
derivatives = {var: Variable(var.name) for var in args
if isinstance(var, sympy.Derivative)}
func = func.xreplace(derivatives)
args = [derivatives[var] if isinstance(var, sympy.Derivative) else var
for var in args]
lambdafunc = lambdify(args, func, printer=SymfitNumPyPrinter,
dummify=False)
# Check if the names of the lambda function are what we expect
signature = inspect_sig.signature(lambdafunc)
sig_parameters = OrderedDict(signature.parameters)
for arg, lambda_arg in zip(args, sig_parameters):
if arg.name != lambda_arg:
break
else: # Lambdifying successful!
return lambdafunc
# If we are here (very rare), then one of the lambda arg is still a Dummy.
# In this case we will manually handle the naming.
lambda_names = sig_parameters.keys()
arg_names = [arg.name for arg in args]
conversion = dict(zip(arg_names, lambda_names))
# Wrap the lambda such that arg names are translated into the correct dummy
# symbol names
@wraps(lambdafunc)
def wrapped_lambdafunc(*ordered_args, **kwargs):
converted_kwargs = {conversion[k]: v for k, v in kwargs.items()}
return lambdafunc(*ordered_args, **converted_kwargs)
# Update the signature of wrapped_lambdafunc to match our args
new_sig_parameters = OrderedDict()
for arg_name, dummy_name in conversion.items():
if arg_name == dummy_name: # Already has the correct name
new_sig_parameters[arg_name] = sig_parameters[arg_name]
else: # Change the dummy inspect.Parameter to the correct name
param = sig_parameters[dummy_name]
param = param.replace(name=arg_name)
new_sig_parameters[arg_name] = param
wrapped_lambdafunc.__signature__ = signature.replace(
parameters=new_sig_parameters.values()
)
return wrapped_lambdafunc | [
"def",
"sympy_to_py",
"(",
"func",
",",
"args",
")",
":",
"# replace the derivatives with printable variables.",
"derivatives",
"=",
"{",
"var",
":",
"Variable",
"(",
"var",
".",
"name",
")",
"for",
"var",
"in",
"args",
"if",
"isinstance",
"(",
"var",
",",
"sympy",
".",
"Derivative",
")",
"}",
"func",
"=",
"func",
".",
"xreplace",
"(",
"derivatives",
")",
"args",
"=",
"[",
"derivatives",
"[",
"var",
"]",
"if",
"isinstance",
"(",
"var",
",",
"sympy",
".",
"Derivative",
")",
"else",
"var",
"for",
"var",
"in",
"args",
"]",
"lambdafunc",
"=",
"lambdify",
"(",
"args",
",",
"func",
",",
"printer",
"=",
"SymfitNumPyPrinter",
",",
"dummify",
"=",
"False",
")",
"# Check if the names of the lambda function are what we expect",
"signature",
"=",
"inspect_sig",
".",
"signature",
"(",
"lambdafunc",
")",
"sig_parameters",
"=",
"OrderedDict",
"(",
"signature",
".",
"parameters",
")",
"for",
"arg",
",",
"lambda_arg",
"in",
"zip",
"(",
"args",
",",
"sig_parameters",
")",
":",
"if",
"arg",
".",
"name",
"!=",
"lambda_arg",
":",
"break",
"else",
":",
"# Lambdifying succesful!",
"return",
"lambdafunc",
"# If we are here (very rare), then one of the lambda arg is still a Dummy.",
"# In this case we will manually handle the naming.",
"lambda_names",
"=",
"sig_parameters",
".",
"keys",
"(",
")",
"arg_names",
"=",
"[",
"arg",
".",
"name",
"for",
"arg",
"in",
"args",
"]",
"conversion",
"=",
"dict",
"(",
"zip",
"(",
"arg_names",
",",
"lambda_names",
")",
")",
"# Wrap the lambda such that arg names are translated into the correct dummy",
"# symbol names",
"@",
"wraps",
"(",
"lambdafunc",
")",
"def",
"wrapped_lambdafunc",
"(",
"*",
"ordered_args",
",",
"*",
"*",
"kwargs",
")",
":",
"converted_kwargs",
"=",
"{",
"conversion",
"[",
"k",
"]",
":",
"v",
"for",
"k",
",",
"v",
"in",
"kwargs",
".",
"items",
"(",
")",
"}",
"return",
"lambdafunc",
"(",
"*",
"ordered_args",
",",
"*",
"*",
"converted_kwargs",
")",
"# Update the signature of wrapped_lambdafunc to math our args",
"new_sig_parameters",
"=",
"OrderedDict",
"(",
")",
"for",
"arg_name",
",",
"dummy_name",
"in",
"conversion",
".",
"items",
"(",
")",
":",
"if",
"arg_name",
"==",
"dummy_name",
":",
"# Already has the correct name",
"new_sig_parameters",
"[",
"arg_name",
"]",
"=",
"sig_parameters",
"[",
"arg_name",
"]",
"else",
":",
"# Change the dummy inspect.Parameter to the correct name",
"param",
"=",
"sig_parameters",
"[",
"dummy_name",
"]",
"param",
"=",
"param",
".",
"replace",
"(",
"name",
"=",
"arg_name",
")",
"new_sig_parameters",
"[",
"arg_name",
"]",
"=",
"param",
"wrapped_lambdafunc",
".",
"__signature__",
"=",
"signature",
".",
"replace",
"(",
"parameters",
"=",
"new_sig_parameters",
".",
"values",
"(",
")",
")",
"return",
"wrapped_lambdafunc"
] | Turn a symbolic expression into a Python lambda function,
which has the names of the variables and parameters as its argument names.
:param func: sympy expression
:param args: variables and parameters in this model
:return: lambda function to be used for numerical evaluation of the model. | [
"Turn",
"a",
"symbolic",
"expression",
"into",
"a",
"Python",
"lambda",
"function",
"which",
"has",
"the",
"names",
"of",
"the",
"variables",
"and",
"parameters",
"as",
"it",
"s",
"argument",
"names",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/support.py#L108-L160 | train | 238,454 |
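A minimal sketch, assuming the import path shown in this record's url:

from symfit import parameters, variables
from symfit.core.support import sympy_to_py

x, = variables('x')
a, b = parameters('a, b')
func = sympy_to_py(a * x + b, [x, a, b])
func(2, 3, 1)  # evaluates a*x + b at x=2, a=3, b=1 -> 7
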
tBuLi/symfit | symfit/core/support.py | sympy_to_scipy | def sympy_to_scipy(func, vars, params):
"""
Convert a symbolic expression to one scipy digs. Not used by ``symfit`` any more.
:param func: sympy expression
:param vars: variables
:param params: parameters
:return: Scipy-style function to be used for numerical evaluation of the model.
"""
lambda_func = sympy_to_py(func, vars, params)
def f(x, p):
"""
Scipy style function.
:param x: list of arrays, NxM
:param p: tuple of parameter values.
"""
x = np.atleast_2d(x)
y = [x[i] for i in range(len(x))] if len(x[0]) else []
try:
ans = lambda_func(*(y + list(p)))
except TypeError:
# Possibly this is a constant function in which case it only has Parameters.
ans = lambda_func(*list(p))# * np.ones(x_shape)
return ans
return f | python | def sympy_to_scipy(func, vars, params):
"""
Convert a symbolic expression to one scipy digs. Not used by ``symfit`` any more.
:param func: sympy expression
:param vars: variables
:param params: parameters
:return: Scipy-style function to be used for numerical evaluation of the model.
"""
lambda_func = sympy_to_py(func, vars, params)
def f(x, p):
"""
Scipy style function.
:param x: list of arrays, NxM
:param p: tuple of parameter values.
"""
x = np.atleast_2d(x)
y = [x[i] for i in range(len(x))] if len(x[0]) else []
try:
ans = lambda_func(*(y + list(p)))
except TypeError:
# Possibly this is a constant function in which case it only has Parameters.
ans = lambda_func(*list(p))# * np.ones(x_shape)
return ans
return f | [
"def",
"sympy_to_scipy",
"(",
"func",
",",
"vars",
",",
"params",
")",
":",
"lambda_func",
"=",
"sympy_to_py",
"(",
"func",
",",
"vars",
",",
"params",
")",
"def",
"f",
"(",
"x",
",",
"p",
")",
":",
"\"\"\"\n Scipy style function.\n\n :param x: list of arrays, NxM\n :param p: tuple of parameter values.\n \"\"\"",
"x",
"=",
"np",
".",
"atleast_2d",
"(",
"x",
")",
"y",
"=",
"[",
"x",
"[",
"i",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"x",
")",
")",
"]",
"if",
"len",
"(",
"x",
"[",
"0",
"]",
")",
"else",
"[",
"]",
"try",
":",
"ans",
"=",
"lambda_func",
"(",
"*",
"(",
"y",
"+",
"list",
"(",
"p",
")",
")",
")",
"except",
"TypeError",
":",
"# Possibly this is a constant function in which case it only has Parameters.",
"ans",
"=",
"lambda_func",
"(",
"*",
"list",
"(",
"p",
")",
")",
"# * np.ones(x_shape)",
"return",
"ans",
"return",
"f"
] | Convert a symbolic expression to one scipy digs. Not used by ``symfit`` any more.
:param func: sympy expression
:param vars: variables
:param params: parameters
:return: Scipy-style function to be used for numerical evaluation of the model. | [
"Convert",
"a",
"symbolic",
"expression",
"to",
"one",
"scipy",
"digs",
".",
"Not",
"used",
"by",
"symfit",
"any",
"more",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/support.py#L162-L188 | train | 238,455 |
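One internal detail worth flagging: as listed here, this helper still calls sympy_to_py(func, vars, params) with three arguments, while the sympy_to_py record above takes (func, args). A sketch of that call adapted to the two-argument signature, should anyone revive this function:

lambda_func = sympy_to_py(func, list(vars) + list(params))
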
tBuLi/symfit | symfit/core/support.py | jacobian | def jacobian(expr, symbols):
"""
Derive a symbolic expr w.r.t. each symbol in symbols. This returns a symbolic jacobian vector.
:param expr: A sympy Expr.
:param symbols: The symbols w.r.t. which to derive.
"""
jac = []
for symbol in symbols:
# Differentiate to every param
f = sympy.diff(expr, symbol)
jac.append(f)
return jac | python | def jacobian(expr, symbols):
"""
Derive a symbolic expr w.r.t. each symbol in symbols. This returns a symbolic jacobian vector.
:param expr: A sympy Expr.
:param symbols: The symbols w.r.t. which to derive.
"""
jac = []
for symbol in symbols:
# Differentiate to every param
f = sympy.diff(expr, symbol)
jac.append(f)
return jac | [
"def",
"jacobian",
"(",
"expr",
",",
"symbols",
")",
":",
"jac",
"=",
"[",
"]",
"for",
"symbol",
"in",
"symbols",
":",
"# Differentiate to every param",
"f",
"=",
"sympy",
".",
"diff",
"(",
"expr",
",",
"symbol",
")",
"jac",
".",
"append",
"(",
"f",
")",
"return",
"jac"
] | Derive a symbolic expr w.r.t. each symbol in symbols. This returns a symbolic jacobian vector.
:param expr: A sympy Expr.
:param symbols: The symbols w.r.t. which to derive. | [
"Derive",
"a",
"symbolic",
"expr",
"w",
".",
"r",
".",
"t",
".",
"each",
"symbol",
"in",
"symbols",
".",
"This",
"returns",
"a",
"symbolic",
"jacobian",
"vector",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/support.py#L300-L312 | train | 238,456 |
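A minimal sketch, assuming the same symbol helpers as in the records above:

from symfit import parameters, variables
from symfit.core.support import jacobian

x, = variables('x')
a, b = parameters('a, b')
jac = jacobian(a * x**2 + b, [a, b])
# jac == [x**2, 1]: one derivative per symbol, in the order given
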
tBuLi/symfit | symfit/core/support.py | name | def name(self):
"""
Safe name which can be used for alphabetic sorting and can be turned
into a kwarg.
"""
base_str = 'd{}{}_'.format(self.derivative_count if
self.derivative_count > 1 else '', self.expr)
for var, count in self.variable_count:
base_str += 'd{}{}'.format(var, count if count > 1 else '')
return base_str | python | def name(self):
"""
Safe name which can be used for alphabetic sorting and can be turned
into a kwarg.
"""
base_str = 'd{}{}_'.format(self.derivative_count if
self.derivative_count > 1 else '', self.expr)
for var, count in self.variable_count:
base_str += 'd{}{}'.format(var, count if count > 1 else '')
return base_str | [
"def",
"name",
"(",
"self",
")",
":",
"base_str",
"=",
"'d{}{}_'",
".",
"format",
"(",
"self",
".",
"derivative_count",
"if",
"self",
".",
"derivative_count",
">",
"1",
"else",
"''",
",",
"self",
".",
"expr",
")",
"for",
"var",
",",
"count",
"in",
"self",
".",
"variable_count",
":",
"base_str",
"+=",
"'d{}{}'",
".",
"format",
"(",
"var",
",",
"count",
"if",
"count",
">",
"1",
"else",
"''",
")",
"return",
"base_str"
Safe name which can be used for alphabetic sorting and can be turned
into a kwarg. | [
"Save",
"name",
"which",
"can",
"be",
"used",
"for",
"alphabetic",
"sorting",
"and",
"can",
"be",
"turned",
"into",
"a",
"kwarg",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/support.py#L433-L442 | train | 238,457 |
tBuLi/symfit | symfit/core/minimizers.py | BaseMinimizer._baseobjective_from_callable | def _baseobjective_from_callable(self, func, objective_type=MinimizeModel):
"""
symfit works with BaseObjective subclasses internally. If a custom
objective is provided, we wrap it into a BaseObjective, MinimizeModel by
default.
:param func: Callable. If already an instance of BaseObjective, it is
returned immediately. If not, it is turned into a BaseObjective of
type ``objective_type``.
:param objective_type:
:return:
"""
if isinstance(func, BaseObjective) or (hasattr(func, '__self__') and
isinstance(func.__self__, BaseObjective)):
# The latter condition is added to make sure .eval_jacobian methods
# are still considered correct, and not doubly wrapped.
return func
else:
from .fit import CallableNumericalModel, BaseModel
if isinstance(func, BaseModel):
model = func
else:
# Minimize the provided custom objective instead. We therefore
# wrap it into a CallableNumericalModel, that's what they are for
y = sympy.Dummy()
model = CallableNumericalModel(
{y: func},
connectivity_mapping={y: set(self.parameters)}
)
return objective_type(model,
data={y: None for y in model.dependent_vars}) | python | def _baseobjective_from_callable(self, func, objective_type=MinimizeModel):
"""
symfit works with BaseObjective subclasses internally. If a custom
objective is provided, we wrap it into a BaseObjective, MinimizeModel by
default.
:param func: Callable. If already an instance of BaseObjective, it is
returned immediately. If not, it is turned into a BaseObjective of
type ``objective_type``.
:param objective_type:
:return:
"""
if isinstance(func, BaseObjective) or (hasattr(func, '__self__') and
isinstance(func.__self__, BaseObjective)):
# The latter condition is added to make sure .eval_jacobian methods
# are still considered correct, and not doubly wrapped.
return func
else:
from .fit import CallableNumericalModel, BaseModel
if isinstance(func, BaseModel):
model = func
else:
# Minimize the provided custom objective instead. We therefore
# wrap it into a CallableNumericalModel, that's what they are for
y = sympy.Dummy()
model = CallableNumericalModel(
{y: func},
connectivity_mapping={y: set(self.parameters)}
)
return objective_type(model,
data={y: None for y in model.dependent_vars}) | [
"def",
"_baseobjective_from_callable",
"(",
"self",
",",
"func",
",",
"objective_type",
"=",
"MinimizeModel",
")",
":",
"if",
"isinstance",
"(",
"func",
",",
"BaseObjective",
")",
"or",
"(",
"hasattr",
"(",
"func",
",",
"'__self__'",
")",
"and",
"isinstance",
"(",
"func",
".",
"__self__",
",",
"BaseObjective",
")",
")",
":",
"# The latter condition is added to make sure .eval_jacobian methods",
"# are still considered correct, and not doubly wrapped.",
"return",
"func",
"else",
":",
"from",
".",
"fit",
"import",
"CallableNumericalModel",
",",
"BaseModel",
"if",
"isinstance",
"(",
"func",
",",
"BaseModel",
")",
":",
"model",
"=",
"func",
"else",
":",
"# Minimize the provided custom objective instead. We therefore",
"# wrap it into a CallableNumericalModel, thats what they are for",
"y",
"=",
"sympy",
".",
"Dummy",
"(",
")",
"model",
"=",
"CallableNumericalModel",
"(",
"{",
"y",
":",
"func",
"}",
",",
"connectivity_mapping",
"=",
"{",
"y",
":",
"set",
"(",
"self",
".",
"parameters",
")",
"}",
")",
"return",
"objective_type",
"(",
"model",
",",
"data",
"=",
"{",
"y",
":",
"None",
"for",
"y",
"in",
"model",
".",
"dependent_vars",
"}",
")"
] | symfit works with BaseObjective subclasses internally. If a custom
objective is provided, we wrap it into a BaseObjective, MinimizeModel by
default.
:param func: Callable. If already an instance of BaseObjective, it is
returned immediately. If not, it is turned into a BaseObjective of
type ``objective_type``.
:param objective_type:
:return: | [
"symfit",
"works",
"with",
"BaseObjective",
"subclasses",
"internally",
".",
"If",
"a",
"custom",
"objective",
"is",
"provided",
"we",
"wrap",
"it",
"into",
"a",
"BaseObjective",
"MinimizeModel",
"by",
"default",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/minimizers.py#L44-L74 | train | 238,458 |
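The practical upshot is that a plain Python callable can be handed straight to a minimizer. A minimal sketch, assuming symfit's BFGS minimizer wraps it as described above:

from symfit import parameters
from symfit.core.minimizers import BFGS

a, = parameters('a')

def func(a):
    # custom scalar objective; wrapped into a CallableNumericalModel internally
    return (a - 2) ** 2

fit = BFGS(func, [a])
result = fit.execute()  # result.value(a) should be close to 2
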
tBuLi/symfit | symfit/core/minimizers.py | GradientMinimizer.resize_jac | def resize_jac(self, func):
"""
Removes values with identical indices to fixed parameters from the
output of func. func has to return the jacobian of a scalar function.
:param func: Jacobian function to be wrapped. Is assumed to be the
jacobian of a scalar function.
:return: Jacobian corresponding to non-fixed parameters only.
"""
if func is None:
return None
@wraps(func)
def resized(*args, **kwargs):
out = func(*args, **kwargs)
# Make one dimensional, corresponding to a scalar function.
out = np.atleast_1d(np.squeeze(out))
mask = [p not in self._fixed_params for p in self.parameters]
return out[mask]
return resized | python | def resize_jac(self, func):
"""
Removes values with identical indices to fixed parameters from the
output of func. func has to return the jacobian of a scalar function.
:param func: Jacobian function to be wrapped. Is assumed to be the
jacobian of a scalar function.
:return: Jacobian corresponding to non-fixed parameters only.
"""
if func is None:
return None
@wraps(func)
def resized(*args, **kwargs):
out = func(*args, **kwargs)
# Make one dimensional, corresponding to a scalar function.
out = np.atleast_1d(np.squeeze(out))
mask = [p not in self._fixed_params for p in self.parameters]
return out[mask]
return resized | [
"def",
"resize_jac",
"(",
"self",
",",
"func",
")",
":",
"if",
"func",
"is",
"None",
":",
"return",
"None",
"@",
"wraps",
"(",
"func",
")",
"def",
"resized",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"out",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# Make one dimensional, corresponding to a scalar function.",
"out",
"=",
"np",
".",
"atleast_1d",
"(",
"np",
".",
"squeeze",
"(",
"out",
")",
")",
"mask",
"=",
"[",
"p",
"not",
"in",
"self",
".",
"_fixed_params",
"for",
"p",
"in",
"self",
".",
"parameters",
"]",
"return",
"out",
"[",
"mask",
"]",
"return",
"resized"
] | Removes values with identical indices to fixed parameters from the
output of func. func has to return the jacobian of a scalar function.
:param func: Jacobian function to be wrapped. Is assumed to be the
jacobian of a scalar function.
:return: Jacobian corresponding to non-fixed parameters only. | [
"Removes",
"values",
"with",
"identical",
"indices",
"to",
"fixed",
"parameters",
"from",
"the",
"output",
"of",
"func",
".",
"func",
"has",
"to",
"return",
"the",
"jacobian",
"of",
"a",
"scalar",
"function",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/minimizers.py#L143-L161 | train | 238,459 |
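A standalone sketch of the masking idea, independent of the class machinery:

import numpy as np

grad = np.array([0.5, -1.2, 3.0])  # d(objective)/dp for p1, p2, p3
fixed = {1}                        # hypothetical: p2 is fixed
mask = [i not in fixed for i in range(len(grad))]
grad[mask]                         # array([0.5, 3. ]) - free parameters only
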
tBuLi/symfit | symfit/core/minimizers.py | HessianMinimizer.resize_hess | def resize_hess(self, func):
"""
Removes values with identical indices to fixed parameters from the
output of func. func has to return the Hessian of a scalar function.
:param func: Hessian function to be wrapped. Is assumed to be the
Hessian of a scalar function.
:return: Hessian corresponding to free parameters only.
"""
if func is None:
return None
@wraps(func)
def resized(*args, **kwargs):
out = func(*args, **kwargs)
# Make two dimensional, corresponding to a scalar function.
out = np.atleast_2d(np.squeeze(out))
mask = [p not in self._fixed_params for p in self.parameters]
return np.atleast_2d(out[mask, mask])
return resized | python | def resize_hess(self, func):
"""
Removes values with identical indices to fixed parameters from the
output of func. func has to return the Hessian of a scalar function.
:param func: Hessian function to be wrapped. Is assumed to be the
Hessian of a scalar function.
:return: Hessian corresponding to free parameters only.
"""
if func is None:
return None
@wraps(func)
def resized(*args, **kwargs):
out = func(*args, **kwargs)
# Make two dimensional, corresponding to a scalar function.
out = np.atleast_2d(np.squeeze(out))
mask = [p not in self._fixed_params for p in self.parameters]
return np.atleast_2d(out[mask, mask])
return resized | [
"def",
"resize_hess",
"(",
"self",
",",
"func",
")",
":",
"if",
"func",
"is",
"None",
":",
"return",
"None",
"@",
"wraps",
"(",
"func",
")",
"def",
"resized",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"out",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# Make two dimensional, corresponding to a scalar function.",
"out",
"=",
"np",
".",
"atleast_2d",
"(",
"np",
".",
"squeeze",
"(",
"out",
")",
")",
"mask",
"=",
"[",
"p",
"not",
"in",
"self",
".",
"_fixed_params",
"for",
"p",
"in",
"self",
".",
"parameters",
"]",
"return",
"np",
".",
"atleast_2d",
"(",
"out",
"[",
"mask",
",",
"mask",
"]",
")",
"return",
"resized"
] | Removes values with identical indices to fixed parameters from the
output of func. func has to return the Hessian of a scalar function.
:param func: Hessian function to be wrapped. Is assumed to be the
Hessian of a scalar function.
:return: Hessian corresponding to free parameters only. | [
"Removes",
"values",
"with",
"identical",
"indices",
"to",
"fixed",
"parameters",
"from",
"the",
"output",
"of",
"func",
".",
"func",
"has",
"to",
"return",
"the",
"Hessian",
"of",
"a",
"scalar",
"function",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/minimizers.py#L179-L197 | train | 238,460 |
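One indexing subtlety in the record above: with a boolean mask, out[mask, mask] selects the diagonal entries at the free positions rather than the full submatrix. A standalone sketch of both selections, in case the submatrix is what a reader expects:

import numpy as np

hess = np.arange(9.0).reshape(3, 3)
mask = np.array([True, False, True])
hess[mask, mask]          # array([0., 8.]) - diagonal picks, as in the code above
hess[np.ix_(mask, mask)]  # [[0., 2.], [6., 8.]] - the full free-parameter block
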
tBuLi/symfit | symfit/core/minimizers.py | ScipyMinimize.execute | def execute(self, bounds=None, jacobian=None, hessian=None, constraints=None, **minimize_options):
"""
Calls the wrapped algorithm.
:param bounds: The bounds for the parameters. Usually filled by
:class:`~symfit.core.minimizers.BoundedMinimizer`.
:param jacobian: The Jacobian. Usually filled by
:class:`~symfit.core.minimizers.ScipyGradientMinimize`.
:param \*\*minimize_options: Further keywords to pass to
:func:`scipy.optimize.minimize`. Note that your `method` will
usually be filled by a specific subclass.
"""
ans = minimize(
self.objective,
self.initial_guesses,
method=self.method_name(),
bounds=bounds,
constraints=constraints,
jac=jacobian,
hess=hessian,
**minimize_options
)
return self._pack_output(ans) | python | def execute(self, bounds=None, jacobian=None, hessian=None, constraints=None, **minimize_options):
"""
Calls the wrapped algorithm.
:param bounds: The bounds for the parameters. Usually filled by
:class:`~symfit.core.minimizers.BoundedMinimizer`.
:param jacobian: The Jacobian. Usually filled by
:class:`~symfit.core.minimizers.ScipyGradientMinimize`.
:param \*\*minimize_options: Further keywords to pass to
:func:`scipy.optimize.minimize`. Note that your `method` will
usually be filled by a specific subclass.
"""
ans = minimize(
self.objective,
self.initial_guesses,
method=self.method_name(),
bounds=bounds,
constraints=constraints,
jac=jacobian,
hess=hessian,
**minimize_options
)
return self._pack_output(ans) | [
"def",
"execute",
"(",
"self",
",",
"bounds",
"=",
"None",
",",
"jacobian",
"=",
"None",
",",
"hessian",
"=",
"None",
",",
"constraints",
"=",
"None",
",",
"*",
"*",
"minimize_options",
")",
":",
"ans",
"=",
"minimize",
"(",
"self",
".",
"objective",
",",
"self",
".",
"initial_guesses",
",",
"method",
"=",
"self",
".",
"method_name",
"(",
")",
",",
"bounds",
"=",
"bounds",
",",
"constraints",
"=",
"constraints",
",",
"jac",
"=",
"jacobian",
",",
"hess",
"=",
"hessian",
",",
"*",
"*",
"minimize_options",
")",
"return",
"self",
".",
"_pack_output",
"(",
"ans",
")"
] | Calls the wrapped algorithm.
:param bounds: The bounds for the parameters. Usually filled by
:class:`~symfit.core.minimizers.BoundedMinimizer`.
:param jacobian: The Jacobian. Usually filled by
:class:`~symfit.core.minimizers.ScipyGradientMinimize`.
:param \*\*minimize_options: Further keywords to pass to
:func:`scipy.optimize.minimize`. Note that your `method` will
usually be filled by a specific subclass. | [
"Calls",
"the",
"wrapped",
"algorithm",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/minimizers.py#L331-L353 | train | 238,461 |
tBuLi/symfit | symfit/core/minimizers.py | ScipyConstrainedMinimize.scipy_constraints | def scipy_constraints(self, constraints):
"""
Returns all constraints in a scipy compatible format.
:param constraints: List of either MinimizeModel instances (this is what
is provided by :class:`~symfit.core.fit.Fit`),
:class:`~symfit.core.fit.BaseModel`, or
:class:`sympy.core.relational.Relational`.
:return: tuple of scipy compatible constraint dicts.
"""
cons = []
types = { # scipy only distinguishes two types of constraint.
sympy.Eq: 'eq', sympy.Ge: 'ineq',
}
for constraint in constraints:
if isinstance(constraint, MinimizeModel):
# Typically the case when called by `Fit`
constraint_type = constraint.model.constraint_type
elif hasattr(constraint, 'constraint_type'):
# Model object, not provided by `Fit`. Do the best we can.
if self.parameters != constraint.params:
raise AssertionError('The constraint should accept the same'
' parameters as used for the fit.')
constraint_type = constraint.constraint_type
constraint = MinimizeModel(constraint, data=self.objective.data)
elif isinstance(constraint, sympy.Rel):
constraint_type = constraint.__class__
constraint = self.objective.model.__class__.as_constraint(
constraint, self.objective.model
)
constraint = MinimizeModel(constraint, data=self.objective.data)
else:
raise TypeError('Unknown type for a constraint.')
con = {
'type': types[constraint_type],
'fun': constraint,
}
cons.append(con)
cons = tuple(cons)
return cons | python | def scipy_constraints(self, constraints):
"""
Returns all constraints in a scipy compatible format.
:param constraints: List of either MinimizeModel instances (this is what
is provided by :class:`~symfit.core.fit.Fit`),
:class:`~symfit.core.fit.BaseModel`, or
:class:`sympy.core.relational.Relational`.
:return: tuple of scipy compatible constraint dicts.
"""
cons = []
types = { # scipy only distinguishes two types of constraint.
sympy.Eq: 'eq', sympy.Ge: 'ineq',
}
for constraint in constraints:
if isinstance(constraint, MinimizeModel):
# Typically the case when called by `Fit`
constraint_type = constraint.model.constraint_type
elif hasattr(constraint, 'constraint_type'):
# Model object, not provided by `Fit`. Do the best we can.
if self.parameters != constraint.params:
raise AssertionError('The constraint should accept the same'
' parameters as used for the fit.')
constraint_type = constraint.constraint_type
constraint = MinimizeModel(constraint, data=self.objective.data)
elif isinstance(constraint, sympy.Rel):
constraint_type = constraint.__class__
constraint = self.objective.model.__class__.as_constraint(
constraint, self.objective.model
)
constraint = MinimizeModel(constraint, data=self.objective.data)
else:
raise TypeError('Unknown type for a constraint.')
con = {
'type': types[constraint_type],
'fun': constraint,
}
cons.append(con)
cons = tuple(cons)
return cons | [
"def",
"scipy_constraints",
"(",
"self",
",",
"constraints",
")",
":",
"cons",
"=",
"[",
"]",
"types",
"=",
"{",
"# scipy only distinguishes two types of constraint.",
"sympy",
".",
"Eq",
":",
"'eq'",
",",
"sympy",
".",
"Ge",
":",
"'ineq'",
",",
"}",
"for",
"constraint",
"in",
"constraints",
":",
"if",
"isinstance",
"(",
"constraint",
",",
"MinimizeModel",
")",
":",
"# Typically the case when called by `Fit",
"constraint_type",
"=",
"constraint",
".",
"model",
".",
"constraint_type",
"elif",
"hasattr",
"(",
"constraint",
",",
"'constraint_type'",
")",
":",
"# Model object, not provided by `Fit`. Do the best we can.",
"if",
"self",
".",
"parameters",
"!=",
"constraint",
".",
"params",
":",
"raise",
"AssertionError",
"(",
"'The constraint should accept the same'",
"' parameters as used for the fit.'",
")",
"constraint_type",
"=",
"constraint",
".",
"constraint_type",
"constraint",
"=",
"MinimizeModel",
"(",
"constraint",
",",
"data",
"=",
"self",
".",
"objective",
".",
"data",
")",
"elif",
"isinstance",
"(",
"constraint",
",",
"sympy",
".",
"Rel",
")",
":",
"constraint_type",
"=",
"constraint",
".",
"__class__",
"constraint",
"=",
"self",
".",
"objective",
".",
"model",
".",
"__class__",
".",
"as_constraint",
"(",
"constraint",
",",
"self",
".",
"objective",
".",
"model",
")",
"constraint",
"=",
"MinimizeModel",
"(",
"constraint",
",",
"data",
"=",
"self",
".",
"objective",
".",
"data",
")",
"else",
":",
"raise",
"TypeError",
"(",
"'Unknown type for a constraint.'",
")",
"con",
"=",
"{",
"'type'",
":",
"types",
"[",
"constraint_type",
"]",
",",
"'fun'",
":",
"constraint",
",",
"}",
"cons",
".",
"append",
"(",
"con",
")",
"cons",
"=",
"tuple",
"(",
"cons",
")",
"return",
"cons"
] | Returns all constraints in a scipy compatible format.
:param constraints: List of either MinimizeModel instances (this is what
is provided by :class:`~symfit.core.fit.Fit`),
:class:`~symfit.core.fit.BaseModel`, or
:class:`sympy.core.relational.Relational`.
:return: tuple of scipy compatible constraint dicts.
"Returns",
"all",
"constraints",
"in",
"a",
"scipy",
"compatible",
"format",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/minimizers.py#L477-L517 | train | 238,462 |
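A minimal sketch of the accepted relational inputs and the resulting scipy format; assumes symfit re-exports sympy's Eq and Ge, as its docs show:

from symfit import Eq, Ge, parameters

a, b = parameters('a, b')
constraints = [Eq(a + b, 2), Ge(b, 0)]
# scipy_constraints(...) would turn these into a tuple like
# ({'type': 'eq', 'fun': <MinimizeModel>}, {'type': 'ineq', 'fun': <MinimizeModel>})
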
tBuLi/symfit | symfit/core/minimizers.py | TrustConstr._get_jacobian_hessian_strategy | def _get_jacobian_hessian_strategy(self):
"""
Figure out how to calculate the jacobian and hessian. Will return a
tuple describing how best to calculate the jacobian and hessian,
repectively. If None, it should be calculated using the available
analytical method.
:return: tuple of jacobian_method, hessian_method
"""
if self.jacobian is not None and self.hessian is None:
jacobian = None
hessian = 'cs'
elif self.jacobian is None and self.hessian is None:
jacobian = 'cs'
hessian = soBFGS(exception_strategy='damp_update')
else:
jacobian = None
hessian = None
return jacobian, hessian | python | def _get_jacobian_hessian_strategy(self):
"""
Figure out how to calculate the jacobian and hessian. Will return a
tuple describing how best to calculate the jacobian and hessian,
respectively. If None, it should be calculated using the available
analytical method.
:return: tuple of jacobian_method, hessian_method
"""
if self.jacobian is not None and self.hessian is None:
jacobian = None
hessian = 'cs'
elif self.jacobian is None and self.hessian is None:
jacobian = 'cs'
hessian = soBFGS(exception_strategy='damp_update')
else:
jacobian = None
hessian = None
return jacobian, hessian | [
"def",
"_get_jacobian_hessian_strategy",
"(",
"self",
")",
":",
"if",
"self",
".",
"jacobian",
"is",
"not",
"None",
"and",
"self",
".",
"hessian",
"is",
"None",
":",
"jacobian",
"=",
"None",
"hessian",
"=",
"'cs'",
"elif",
"self",
".",
"jacobian",
"is",
"None",
"and",
"self",
".",
"hessian",
"is",
"None",
":",
"jacobian",
"=",
"'cs'",
"hessian",
"=",
"soBFGS",
"(",
"exception_strategy",
"=",
"'damp_update'",
")",
"else",
":",
"jacobian",
"=",
"None",
"hessian",
"=",
"None",
"return",
"jacobian",
",",
"hessian"
] | Figure out how to calculate the jacobian and hessian. Will return a
tuple describing how best to calculate the jacobian and hessian,
repectively. If None, it should be calculated using the available
analytical method.
:return: tuple of jacobian_method, hessian_method | [
"Figure",
"out",
"how",
"to",
"calculate",
"the",
"jacobian",
"and",
"hessian",
".",
"Will",
"return",
"a",
"tuple",
"describing",
"how",
"best",
"to",
"calculate",
"the",
"jacobian",
"and",
"hessian",
"repectively",
".",
"If",
"None",
"it",
"should",
"be",
"calculated",
"using",
"the",
"available",
"analytical",
"method",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/minimizers.py#L566-L584 | train | 238,463 |
tBuLi/symfit | symfit/core/minimizers.py | BasinHopping.execute | def execute(self, **minimize_options):
"""
Execute the basin-hopping minimization.
:param minimize_options: options to be passed on to
:func:`scipy.optimize.basinhopping`.
:return: :class:`symfit.core.fit_results.FitResults`
"""
if 'minimizer_kwargs' not in minimize_options:
minimize_options['minimizer_kwargs'] = {}
if 'method' not in minimize_options['minimizer_kwargs']:
# If no minimizer was set by the user upon execute, use local_minimizer
minimize_options['minimizer_kwargs']['method'] = self.local_minimizer.method_name()
if 'jac' not in minimize_options['minimizer_kwargs'] and isinstance(self.local_minimizer, GradientMinimizer):
# Assign the jacobian
minimize_options['minimizer_kwargs']['jac'] = self.local_minimizer.wrapped_jacobian
if 'constraints' not in minimize_options['minimizer_kwargs'] and isinstance(self.local_minimizer, ConstrainedMinimizer):
# Assign constraints
minimize_options['minimizer_kwargs']['constraints'] = self.local_minimizer.wrapped_constraints
if 'bounds' not in minimize_options['minimizer_kwargs'] and isinstance(self.local_minimizer, BoundedMinimizer):
# Assign bounds
minimize_options['minimizer_kwargs']['bounds'] = self.local_minimizer.bounds
ans = basinhopping(
self.objective,
self.initial_guesses,
**minimize_options
)
return self._pack_output(ans) | python | def execute(self, **minimize_options):
"""
Execute the basin-hopping minimization.
:param minimize_options: options to be passed on to
:func:`scipy.optimize.basinhopping`.
:return: :class:`symfit.core.fit_results.FitResults`
"""
if 'minimizer_kwargs' not in minimize_options:
minimize_options['minimizer_kwargs'] = {}
if 'method' not in minimize_options['minimizer_kwargs']:
# If no minimizer was set by the user upon execute, use local_minimizer
minimize_options['minimizer_kwargs']['method'] = self.local_minimizer.method_name()
if 'jac' not in minimize_options['minimizer_kwargs'] and isinstance(self.local_minimizer, GradientMinimizer):
# Assign the jacobian
minimize_options['minimizer_kwargs']['jac'] = self.local_minimizer.wrapped_jacobian
if 'constraints' not in minimize_options['minimizer_kwargs'] and isinstance(self.local_minimizer, ConstrainedMinimizer):
# Assign constraints
minimize_options['minimizer_kwargs']['constraints'] = self.local_minimizer.wrapped_constraints
if 'bounds' not in minimize_options['minimizer_kwargs'] and isinstance(self.local_minimizer, BoundedMinimizer):
# Assign bounds
minimize_options['minimizer_kwargs']['bounds'] = self.local_minimizer.bounds
ans = basinhopping(
self.objective,
self.initial_guesses,
**minimize_options
)
return self._pack_output(ans) | [
"def",
"execute",
"(",
"self",
",",
"*",
"*",
"minimize_options",
")",
":",
"if",
"'minimizer_kwargs'",
"not",
"in",
"minimize_options",
":",
"minimize_options",
"[",
"'minimizer_kwargs'",
"]",
"=",
"{",
"}",
"if",
"'method'",
"not",
"in",
"minimize_options",
"[",
"'minimizer_kwargs'",
"]",
":",
"# If no minimizer was set by the user upon execute, use local_minimizer",
"minimize_options",
"[",
"'minimizer_kwargs'",
"]",
"[",
"'method'",
"]",
"=",
"self",
".",
"local_minimizer",
".",
"method_name",
"(",
")",
"if",
"'jac'",
"not",
"in",
"minimize_options",
"[",
"'minimizer_kwargs'",
"]",
"and",
"isinstance",
"(",
"self",
".",
"local_minimizer",
",",
"GradientMinimizer",
")",
":",
"# Assign the jacobian",
"minimize_options",
"[",
"'minimizer_kwargs'",
"]",
"[",
"'jac'",
"]",
"=",
"self",
".",
"local_minimizer",
".",
"wrapped_jacobian",
"if",
"'constraints'",
"not",
"in",
"minimize_options",
"[",
"'minimizer_kwargs'",
"]",
"and",
"isinstance",
"(",
"self",
".",
"local_minimizer",
",",
"ConstrainedMinimizer",
")",
":",
"# Assign constraints",
"minimize_options",
"[",
"'minimizer_kwargs'",
"]",
"[",
"'constraints'",
"]",
"=",
"self",
".",
"local_minimizer",
".",
"wrapped_constraints",
"if",
"'bounds'",
"not",
"in",
"minimize_options",
"[",
"'minimizer_kwargs'",
"]",
"and",
"isinstance",
"(",
"self",
".",
"local_minimizer",
",",
"BoundedMinimizer",
")",
":",
"# Assign bounds",
"minimize_options",
"[",
"'minimizer_kwargs'",
"]",
"[",
"'bounds'",
"]",
"=",
"self",
".",
"local_minimizer",
".",
"bounds",
"ans",
"=",
"basinhopping",
"(",
"self",
".",
"objective",
",",
"self",
".",
"initial_guesses",
",",
"*",
"*",
"minimize_options",
")",
"return",
"self",
".",
"_pack_output",
"(",
"ans",
")"
] | Execute the basin-hopping minimization.
:param minimize_options: options to be passed on to
:func:`scipy.optimize.basinhopping`.
:return: :class:`symfit.core.fit_results.FitResults` | [
"Execute",
"the",
"basin",
"-",
"hopping",
"minimization",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/minimizers.py#L719-L748 | train | 238,464 |
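A minimal sketch, close to the scipy basinhopping docs example; assumes a plain callable is accepted, as the _baseobjective_from_callable record above describes:

import numpy as np
from symfit import parameters
from symfit.core.minimizers import BasinHopping

x0, = parameters('x0')

def func(x0):
    return np.cos(14.5 * x0 - 0.3) + (x0 + 0.2) * x0  # many local minima

fit = BasinHopping(func, [x0])
result = fit.execute(niter=100)  # niter lands in scipy.optimize.basinhopping
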
tBuLi/symfit | symfit/core/printing.py | SymfitNumPyPrinter._print_MatMul | def _print_MatMul(self, expr):
"""
Matrix multiplication printer. The sympy one turns everything into a
dot product without type-checking.
"""
from sympy import MatrixExpr
links = []
for i, j in zip(expr.args[1:], expr.args[:-1]):
if isinstance(i, MatrixExpr) and isinstance(j, MatrixExpr):
links.append(').dot(')
else:
links.append('*')
printouts = [self._print(i) for i in expr.args]
result = [printouts[0]]
for link, printout in zip(links, printouts[1:]):
result.extend([link, printout])
return '({0})'.format(''.join(result)) | python | def _print_MatMul(self, expr):
"""
Matrix multiplication printer. The sympy one turns everything into a
dot product without type-checking.
"""
from sympy import MatrixExpr
links = []
for i, j in zip(expr.args[1:], expr.args[:-1]):
if isinstance(i, MatrixExpr) and isinstance(j, MatrixExpr):
links.append(').dot(')
else:
links.append('*')
printouts = [self._print(i) for i in expr.args]
result = [printouts[0]]
for link, printout in zip(links, printouts[1:]):
result.extend([link, printout])
return '({0})'.format(''.join(result)) | [
"def",
"_print_MatMul",
"(",
"self",
",",
"expr",
")",
":",
"from",
"sympy",
"import",
"MatrixExpr",
"links",
"=",
"[",
"]",
"for",
"i",
",",
"j",
"in",
"zip",
"(",
"expr",
".",
"args",
"[",
"1",
":",
"]",
",",
"expr",
".",
"args",
"[",
":",
"-",
"1",
"]",
")",
":",
"if",
"isinstance",
"(",
"i",
",",
"MatrixExpr",
")",
"and",
"isinstance",
"(",
"j",
",",
"MatrixExpr",
")",
":",
"links",
".",
"append",
"(",
"').dot('",
")",
"else",
":",
"links",
".",
"append",
"(",
"'*'",
")",
"printouts",
"=",
"[",
"self",
".",
"_print",
"(",
"i",
")",
"for",
"i",
"in",
"expr",
".",
"args",
"]",
"result",
"=",
"[",
"printouts",
"[",
"0",
"]",
"]",
"for",
"link",
",",
"printout",
"in",
"zip",
"(",
"links",
",",
"printouts",
"[",
"1",
":",
"]",
")",
":",
"result",
".",
"extend",
"(",
"[",
"link",
",",
"printout",
"]",
")",
"return",
"'({0})'",
".",
"format",
"(",
"''",
".",
"join",
"(",
"result",
")",
")"
] | Matrix multiplication printer. The sympy one turns everything into a
dot product without type-checking. | [
"Matrix",
"multiplication",
"printer",
".",
"The",
"sympy",
"one",
"turns",
"everything",
"into",
"a",
"dot",
"product",
"without",
"type",
"-",
"checking",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/printing.py#L34-L50 | train | 238,465 |
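A worked trace of the dispatch rule for a mixed scalar/matrix product:

from sympy import MatrixSymbol, Symbol

c = Symbol('c')
A = MatrixSymbol('A', 2, 2)
B = MatrixSymbol('B', 2, 2)
expr = c * A * B
# expr.args == (c, A, B); the pairwise zip yields (A, c) -> '*' and
# (B, A) -> ').dot(', so the printer emits '(c*A).dot(B)'
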
tBuLi/symfit | symfit/contrib/interactive_guess/interactive_guess.py | InteractiveGuess.execute | def execute(self, **kwargs):
"""
Execute the interactive guessing procedure.
:param show: Whether or not to show the figure. Useful for testing.
:type show: bool
:param block: Blocking call to matplotlib
:type block: bool
Any additional keyword arguments are passed to
matplotlib.pyplot.show().
"""
show = kwargs.pop('show')
if show:
# self.fig.show() # Apparently this does something else,
# see https://github.com/matplotlib/matplotlib/issues/6138
plt.show(**kwargs) | python | def execute(self, **kwargs):
"""
Execute the interactive guessing procedure.
:param show: Whether or not to show the figure. Useful for testing.
:type show: bool
:param block: Blocking call to matplotlib
:type block: bool
Any additional keyword arguments are passed to
matplotlib.pyplot.show().
"""
show = kwargs.pop('show')
if show:
# self.fig.show() # Apparently this does something else,
# see https://github.com/matplotlib/matplotlib/issues/6138
plt.show(**kwargs) | [
"def",
"execute",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"show",
"=",
"kwargs",
".",
"pop",
"(",
"'show'",
")",
"if",
"show",
":",
"# self.fig.show() # Apparently this does something else,",
"# see https://github.com/matplotlib/matplotlib/issues/6138",
"plt",
".",
"show",
"(",
"*",
"*",
"kwargs",
")"
] | Execute the interactive guessing procedure.
:param show: Whether or not to show the figure. Useful for testing.
:type show: bool
:param block: Blocking call to matplotlib
:type block: bool
Any additional keyword arguments are passed to
matplotlib.pyplot.show(). | [
"Execute",
"the",
"interactive",
"guessing",
"procedure",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/contrib/interactive_guess/interactive_guess.py#L99-L115 | train | 238,466 |
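A usage sketch with made-up data, assuming InteractiveGuess accepts a model dict plus named data the way Fit does. Note that execute pops 'show' without a default, so that keyword must always be supplied.

import numpy as np
from symfit import variables, parameters
from symfit.contrib.interactive_guess import InteractiveGuess

x, y = variables('x, y')
a, b = parameters('a, b')
a.value, b.value = 1.0, 1.0  # initial slider positions

xdata = np.linspace(0, 10, 50)
ydata = 3.0 * xdata + 2.0 + np.random.normal(0, 0.5, xdata.shape)

guess = InteractiveGuess({y: a * x + b}, x=xdata, y=ydata)
guess.execute(show=True, block=True)  # blocking call, opens the slider figure
print(a.value, b.value)  # whatever the sliders were left at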
tBuLi/symfit | symfit/contrib/interactive_guess/interactive_guess.py | InteractiveGuess._set_up_sliders | def _set_up_sliders(self):
"""
Creates a slider for every parameter.
"""
i = 0.05
self._sliders = {}
for param in self.model.params:
if not param.fixed:
axbg = 'lightgoldenrodyellow'
else:
axbg = 'red'
# start-x, start-y, width, height
ax = self.fig.add_axes((0.162, i, 0.68, 0.03),
facecolor=axbg, label=param)
val = param.value
if not hasattr(param, 'min') or param.min is None:
minimum = 0
else:
minimum = param.min
if not hasattr(param, 'max') or param.max is None:
maximum = 2 * val
else:
maximum = param.max
slid = plt.Slider(ax, param, minimum, maximum,
valinit=val, valfmt='% 5.4g')
self._sliders[param] = slid
slid.on_changed(self._update_plot)
i += 0.05 | python | def _set_up_sliders(self):
"""
Creates a slider for every parameter.
"""
i = 0.05
self._sliders = {}
for param in self.model.params:
if not param.fixed:
axbg = 'lightgoldenrodyellow'
else:
axbg = 'red'
# start-x, start-y, width, height
ax = self.fig.add_axes((0.162, i, 0.68, 0.03),
facecolor=axbg, label=param)
val = param.value
if not hasattr(param, 'min') or param.min is None:
minimum = 0
else:
minimum = param.min
if not hasattr(param, 'max') or param.max is None:
maximum = 2 * val
else:
maximum = param.max
slid = plt.Slider(ax, param, minimum, maximum,
valinit=val, valfmt='% 5.4g')
self._sliders[param] = slid
slid.on_changed(self._update_plot)
i += 0.05 | [
"def",
"_set_up_sliders",
"(",
"self",
")",
":",
"i",
"=",
"0.05",
"self",
".",
"_sliders",
"=",
"{",
"}",
"for",
"param",
"in",
"self",
".",
"model",
".",
"params",
":",
"if",
"not",
"param",
".",
"fixed",
":",
"axbg",
"=",
"'lightgoldenrodyellow'",
"else",
":",
"axbg",
"=",
"'red'",
"# start-x, start-y, width, height",
"ax",
"=",
"self",
".",
"fig",
".",
"add_axes",
"(",
"(",
"0.162",
",",
"i",
",",
"0.68",
",",
"0.03",
")",
",",
"facecolor",
"=",
"axbg",
",",
"label",
"=",
"param",
")",
"val",
"=",
"param",
".",
"value",
"if",
"not",
"hasattr",
"(",
"param",
",",
"'min'",
")",
"or",
"param",
".",
"min",
"is",
"None",
":",
"minimum",
"=",
"0",
"else",
":",
"minimum",
"=",
"param",
".",
"min",
"if",
"not",
"hasattr",
"(",
"param",
",",
"'max'",
")",
"or",
"param",
".",
"max",
"is",
"None",
":",
"maximum",
"=",
"2",
"*",
"val",
"else",
":",
"maximum",
"=",
"param",
".",
"max",
"slid",
"=",
"plt",
".",
"Slider",
"(",
"ax",
",",
"param",
",",
"minimum",
",",
"maximum",
",",
"valinit",
"=",
"val",
",",
"valfmt",
"=",
"'% 5.4g'",
")",
"self",
".",
"_sliders",
"[",
"param",
"]",
"=",
"slid",
"slid",
".",
"on_changed",
"(",
"self",
".",
"_update_plot",
")",
"i",
"+=",
"0.05"
] | Creates a slider for every parameter. | [
"Creates",
"a",
"slider",
"for",
"every",
"parameter",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/contrib/interactive_guess/interactive_guess.py#L158-L186 | train | 238,467 |
tBuLi/symfit | symfit/contrib/interactive_guess/interactive_guess.py | InteractiveGuess._update_plot | def _update_plot(self, _):
"""Callback to redraw the plot to reflect the new parameter values."""
# Since all sliders call this same callback without saying who they are
# I need to update the values for all parameters. This can be
# circumvented by creating a separate callback function for each
# parameter.
for param in self.model.params:
param.value = self._sliders[param].val
for indep_var, dep_var in self._projections:
self._update_specific_plot(indep_var, dep_var) | python | def _update_plot(self, _):
"""Callback to redraw the plot to reflect the new parameter values."""
# Since all sliders call this same callback without saying who they are
# I need to update the values for all parameters. This can be
# circumvented by creating a separate callback function for each
# parameter.
for param in self.model.params:
param.value = self._sliders[param].val
for indep_var, dep_var in self._projections:
self._update_specific_plot(indep_var, dep_var) | [
"def",
"_update_plot",
"(",
"self",
",",
"_",
")",
":",
"# Since all sliders call this same callback without saying who they are",
"# I need to update the values for all parameters. This can be",
"# circumvented by creating a separate callback function for each",
"# parameter.",
"for",
"param",
"in",
"self",
".",
"model",
".",
"params",
":",
"param",
".",
"value",
"=",
"self",
".",
"_sliders",
"[",
"param",
"]",
".",
"val",
"for",
"indep_var",
",",
"dep_var",
"in",
"self",
".",
"_projections",
":",
"self",
".",
"_update_specific_plot",
"(",
"indep_var",
",",
"dep_var",
")"
] | Callback to redraw the plot to reflect the new parameter values. | [
"Callback",
"to",
"redraw",
"the",
"plot",
"to",
"reflect",
"the",
"new",
"parameter",
"values",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/contrib/interactive_guess/interactive_guess.py#L200-L209 | train | 238,468 |
tBuLi/symfit | symfit/contrib/interactive_guess/interactive_guess.py | InteractiveGuess._eval_model | def _eval_model(self):
"""
Convenience method for evaluating the model with the current parameters
:return: named tuple with results
"""
arguments = self._x_grid.copy()
arguments.update({param: param.value for param in self.model.params})
return self.model(**key2str(arguments)) | python | def _eval_model(self):
"""
Convenience method for evaluating the model with the current parameters
:return: named tuple with results
"""
arguments = self._x_grid.copy()
arguments.update({param: param.value for param in self.model.params})
return self.model(**key2str(arguments)) | [
"def",
"_eval_model",
"(",
"self",
")",
":",
"arguments",
"=",
"self",
".",
"_x_grid",
".",
"copy",
"(",
")",
"arguments",
".",
"update",
"(",
"{",
"param",
":",
"param",
".",
"value",
"for",
"param",
"in",
"self",
".",
"model",
".",
"params",
"}",
")",
"return",
"self",
".",
"model",
"(",
"*",
"*",
"key2str",
"(",
"arguments",
")",
")"
] | Convenience method for evaluating the model with the current parameters
:return: named tuple with results | [
"Convenience",
"method",
"for",
"evaluating",
"the",
"model",
"with",
"the",
"current",
"parameters"
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/contrib/interactive_guess/interactive_guess.py#L211-L219 | train | 238,469 |
tBuLi/symfit | symfit/contrib/interactive_guess/interactive_guess.py | Strategy2D.plot_data | def plot_data(self, proj, ax):
"""
Creates and plots a scatter plot of the original data.
"""
x, y = proj
ax.scatter(self.ig.independent_data[x],
self.ig.dependent_data[y], c='b') | python | def plot_data(self, proj, ax):
"""
Creates and plots a scatter plot of the original data.
"""
x, y = proj
ax.scatter(self.ig.independent_data[x],
self.ig.dependent_data[y], c='b') | [
"def",
"plot_data",
"(",
"self",
",",
"proj",
",",
"ax",
")",
":",
"x",
",",
"y",
"=",
"proj",
"ax",
".",
"scatter",
"(",
"self",
".",
"ig",
".",
"independent_data",
"[",
"x",
"]",
",",
"self",
".",
"ig",
".",
"dependent_data",
"[",
"y",
"]",
",",
"c",
"=",
"'b'",
")"
] | Creates and plots a scatter plot of the original data. | [
"Creates",
"and",
"plots",
"a",
"scatter",
"plot",
"of",
"the",
"original",
"data",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/contrib/interactive_guess/interactive_guess.py#L241-L247 | train | 238,470 |
tBuLi/symfit | symfit/contrib/interactive_guess/interactive_guess.py | StrategynD.plot_data | def plot_data(self, proj, ax):
"""
Creates and plots the contourplot of the original data. This is done
by evaluating the density of projected datapoints on a grid.
"""
x, y = proj
x_data = self.ig.independent_data[x]
y_data = self.ig.dependent_data[y]
projected_data = np.column_stack((x_data, y_data)).T
kde = gaussian_kde(projected_data)
xx, yy = np.meshgrid(self.ig._x_points[x], self.ig._y_points[y])
x_grid = xx.flatten()
y_grid = yy.flatten()
contour_grid = kde.pdf(np.column_stack((x_grid, y_grid)).T)
# This is an ugly kludge, but it seems necessary to make low density
# areas show up.
if self.ig.log_contour:
contour_grid = np.log(contour_grid)
vmin = -7
else:
vmin = None
ax.contourf(xx, yy, contour_grid.reshape(xx.shape),
50, vmin=vmin, cmap='Blues') | python | def plot_data(self, proj, ax):
"""
Creates and plots the contourplot of the original data. This is done
by evaluating the density of projected datapoints on a grid.
"""
x, y = proj
x_data = self.ig.independent_data[x]
y_data = self.ig.dependent_data[y]
projected_data = np.column_stack((x_data, y_data)).T
kde = gaussian_kde(projected_data)
xx, yy = np.meshgrid(self.ig._x_points[x], self.ig._y_points[y])
x_grid = xx.flatten()
y_grid = yy.flatten()
contour_grid = kde.pdf(np.column_stack((x_grid, y_grid)).T)
# This is an ugly kludge, but it seems necessary to make low density
# areas show up.
if self.ig.log_contour:
contour_grid = np.log(contour_grid)
vmin = -7
else:
vmin = None
ax.contourf(xx, yy, contour_grid.reshape(xx.shape),
50, vmin=vmin, cmap='Blues') | [
"def",
"plot_data",
"(",
"self",
",",
"proj",
",",
"ax",
")",
":",
"x",
",",
"y",
"=",
"proj",
"x_data",
"=",
"self",
".",
"ig",
".",
"independent_data",
"[",
"x",
"]",
"y_data",
"=",
"self",
".",
"ig",
".",
"dependent_data",
"[",
"y",
"]",
"projected_data",
"=",
"np",
".",
"column_stack",
"(",
"(",
"x_data",
",",
"y_data",
")",
")",
".",
"T",
"kde",
"=",
"gaussian_kde",
"(",
"projected_data",
")",
"xx",
",",
"yy",
"=",
"np",
".",
"meshgrid",
"(",
"self",
".",
"ig",
".",
"_x_points",
"[",
"x",
"]",
",",
"self",
".",
"ig",
".",
"_y_points",
"[",
"y",
"]",
")",
"x_grid",
"=",
"xx",
".",
"flatten",
"(",
")",
"y_grid",
"=",
"yy",
".",
"flatten",
"(",
")",
"contour_grid",
"=",
"kde",
".",
"pdf",
"(",
"np",
".",
"column_stack",
"(",
"(",
"x_grid",
",",
"y_grid",
")",
")",
".",
"T",
")",
"# This is an fugly kludge, but it seems nescessary to make low density",
"# areas show up.",
"if",
"self",
".",
"ig",
".",
"log_contour",
":",
"contour_grid",
"=",
"np",
".",
"log",
"(",
"contour_grid",
")",
"vmin",
"=",
"-",
"7",
"else",
":",
"vmin",
"=",
"None",
"ax",
".",
"contourf",
"(",
"xx",
",",
"yy",
",",
"contour_grid",
".",
"reshape",
"(",
"xx",
".",
"shape",
")",
",",
"50",
",",
"vmin",
"=",
"vmin",
",",
"cmap",
"=",
"'Blues'",
")"
] | Creates and plots the contourplot of the original data. This is done
by evaluating the density of projected datapoints on a grid. | [
"Creates",
"and",
"plots",
"the",
"contourplot",
"of",
"the",
"original",
"data",
".",
"This",
"is",
"done",
"by",
"evaluating",
"the",
"density",
"of",
"projected",
"datapoints",
"on",
"a",
"grid",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/contrib/interactive_guess/interactive_guess.py#L278-L302 | train | 238,471 |
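The density-estimation step above can be reproduced standalone with plain scipy and matplotlib; the data here is invented.

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde

x_data = np.random.normal(0, 1, 500)
y_data = 2 * x_data + np.random.normal(0, 0.3, 500)

# gaussian_kde expects a (n_dims, n_points) array:
kde = gaussian_kde(np.column_stack((x_data, y_data)).T)
xx, yy = np.meshgrid(np.linspace(-3, 3, 50), np.linspace(-7, 7, 50))
density = kde.pdf(np.vstack((xx.ravel(), yy.ravel()))).reshape(xx.shape)

# Same log trick as above to make low-density regions visible:
plt.contourf(xx, yy, np.log(density), 50, vmin=-7, cmap='Blues')
plt.show()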
tBuLi/symfit | symfit/distributions.py | BivariateGaussian | def BivariateGaussian(x, y, mu_x, mu_y, sig_x, sig_y, rho):
"""
Bivariate Gaussian pdf.
:param x: :class:`symfit.core.argument.Variable`
:param y: :class:`symfit.core.argument.Variable`
:param mu_x: :class:`symfit.core.argument.Parameter` for the mean of `x`
:param mu_y: :class:`symfit.core.argument.Parameter` for the mean of `y`
:param sig_x: :class:`symfit.core.argument.Parameter` for the standard
deviation of `x`
:param sig_y: :class:`symfit.core.argument.Parameter` for the standard
deviation of `y`
:param rho: :class:`symfit.core.argument.Parameter` for the correlation
between `x` and `y`.
:return: sympy expression for a Bivariate Gaussian pdf.
"""
exponent = - 1 / (2 * (1 - rho**2))
exponent *= (x - mu_x)**2 / sig_x**2 + (y - mu_y)**2 / sig_y**2 \
- 2 * rho * (x - mu_x) * (y - mu_y) / (sig_x * sig_y)
return sympy.exp(exponent) / (2 * sympy.pi * sig_x * sig_y * sympy.sqrt(1 - rho**2)) | python | def BivariateGaussian(x, y, mu_x, mu_y, sig_x, sig_y, rho):
"""
Bivariate Gaussian pdf.
:param x: :class:`symfit.core.argument.Variable`
:param y: :class:`symfit.core.argument.Variable`
:param mu_x: :class:`symfit.core.argument.Parameter` for the mean of `x`
:param mu_y: :class:`symfit.core.argument.Parameter` for the mean of `y`
:param sig_x: :class:`symfit.core.argument.Parameter` for the standard
deviation of `x`
:param sig_y: :class:`symfit.core.argument.Parameter` for the standard
deviation of `y`
:param rho: :class:`symfit.core.argument.Parameter` for the correlation
between `x` and `y`.
:return: sympy expression for a Bivariate Gaussian pdf.
"""
exponent = - 1 / (2 * (1 - rho**2))
exponent *= (x - mu_x)**2 / sig_x**2 + (y - mu_y)**2 / sig_y**2 \
- 2 * rho * (x - mu_x) * (y - mu_y) / (sig_x * sig_y)
return sympy.exp(exponent) / (2 * sympy.pi * sig_x * sig_y * sympy.sqrt(1 - rho**2)) | [
"def",
"BivariateGaussian",
"(",
"x",
",",
"y",
",",
"mu_x",
",",
"mu_y",
",",
"sig_x",
",",
"sig_y",
",",
"rho",
")",
":",
"exponent",
"=",
"-",
"1",
"/",
"(",
"2",
"*",
"(",
"1",
"-",
"rho",
"**",
"2",
")",
")",
"exponent",
"*=",
"(",
"x",
"-",
"mu_x",
")",
"**",
"2",
"/",
"sig_x",
"**",
"2",
"+",
"(",
"y",
"-",
"mu_y",
")",
"**",
"2",
"/",
"sig_y",
"**",
"2",
"-",
"2",
"*",
"rho",
"*",
"(",
"x",
"-",
"mu_x",
")",
"*",
"(",
"y",
"-",
"mu_y",
")",
"/",
"(",
"sig_x",
"*",
"sig_y",
")",
"return",
"sympy",
".",
"exp",
"(",
"exponent",
")",
"/",
"(",
"2",
"*",
"sympy",
".",
"pi",
"*",
"sig_x",
"*",
"sig_y",
"*",
"sympy",
".",
"sqrt",
"(",
"1",
"-",
"rho",
"**",
"2",
")",
")"
] | Bivariate Gaussian pdf.
:param x: :class:`symfit.core.argument.Variable`
:param y: :class:`symfit.core.argument.Variable`
:param mu_x: :class:`symfit.core.argument.Parameter` for the mean of `x`
:param mu_y: :class:`symfit.core.argument.Parameter` for the mean of `y`
:param sig_x: :class:`symfit.core.argument.Parameter` for the standard
deviation of `x`
:param sig_y: :class:`symfit.core.argument.Parameter` for the standard
deviation of `y`
:param rho: :class:`symfit.core.argument.Parameter` for the correlation
between `x` and `y`.
:return: sympy expression for a Bivariate Gaussian pdf. | [
"Bivariate",
"Gaussian",
"pdf",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/distributions.py#L21-L40 | train | 238,472 |
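Because symfit patches sympy expressions to be callable (see the call helper from operators.py further on), the returned pdf can be evaluated numerically right away; a sketch:

from symfit import variables, parameters
from symfit.distributions import BivariateGaussian

x, y = variables('x, y')
mu_x, mu_y, sig_x, sig_y, rho = parameters('mu_x, mu_y, sig_x, sig_y, rho')

pdf = BivariateGaussian(x=x, y=y, mu_x=mu_x, mu_y=mu_y,
                        sig_x=sig_x, sig_y=sig_y, rho=rho)

# With rho = 0 this reduces to a product of two unit Gaussians, so the
# value at the mean is 1/(2*pi), roughly 0.159:
print(pdf(x=0.0, y=0.0, mu_x=0.0, mu_y=0.0, sig_x=1.0, sig_y=1.0, rho=0.0))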
tBuLi/symfit | symfit/core/fit.py | r_squared | def r_squared(model, fit_result, data):
"""
Calculates the coefficient of determination, R^2, for the fit.
(Is not defined properly for vector-valued functions.)
:param model: Model instance
:param fit_result: FitResults instance
:param data: data with which the fit was performed.
"""
# First filter out the dependent vars
y_is = [data[var] for var in model if var in data]
x_is = [value for var, value in data.items() if var.name in model.__signature__.parameters]
y_bars = [np.mean(y_i) if y_i is not None else None for y_i in y_is]
f_is = model(*x_is, **fit_result.params)
SS_res = np.sum([np.sum((y_i - f_i)**2) for y_i, f_i in zip(y_is, f_is) if y_i is not None])
SS_tot = np.sum([np.sum((y_i - y_bar)**2) for y_i, y_bar in zip(y_is, y_bars) if y_i is not None])
return 1 - SS_res/SS_tot | python | def r_squared(model, fit_result, data):
"""
Calculates the coefficient of determination, R^2, for the fit.
(Is not defined properly for vector-valued functions.)
:param model: Model instance
:param fit_result: FitResults instance
:param data: data with which the fit was performed.
"""
# First filter out the dependent vars
y_is = [data[var] for var in model if var in data]
x_is = [value for var, value in data.items() if var.name in model.__signature__.parameters]
y_bars = [np.mean(y_i) if y_i is not None else None for y_i in y_is]
f_is = model(*x_is, **fit_result.params)
SS_res = np.sum([np.sum((y_i - f_i)**2) for y_i, f_i in zip(y_is, f_is) if y_i is not None])
SS_tot = np.sum([np.sum((y_i - y_bar)**2) for y_i, y_bar in zip(y_is, y_bars) if y_i is not None])
return 1 - SS_res/SS_tot | [
"def",
"r_squared",
"(",
"model",
",",
"fit_result",
",",
"data",
")",
":",
"# First filter out the dependent vars",
"y_is",
"=",
"[",
"data",
"[",
"var",
"]",
"for",
"var",
"in",
"model",
"if",
"var",
"in",
"data",
"]",
"x_is",
"=",
"[",
"value",
"for",
"var",
",",
"value",
"in",
"data",
".",
"items",
"(",
")",
"if",
"var",
".",
"name",
"in",
"model",
".",
"__signature__",
".",
"parameters",
"]",
"y_bars",
"=",
"[",
"np",
".",
"mean",
"(",
"y_i",
")",
"if",
"y_i",
"is",
"not",
"None",
"else",
"None",
"for",
"y_i",
"in",
"y_is",
"]",
"f_is",
"=",
"model",
"(",
"*",
"x_is",
",",
"*",
"*",
"fit_result",
".",
"params",
")",
"SS_res",
"=",
"np",
".",
"sum",
"(",
"[",
"np",
".",
"sum",
"(",
"(",
"y_i",
"-",
"f_i",
")",
"**",
"2",
")",
"for",
"y_i",
",",
"f_i",
"in",
"zip",
"(",
"y_is",
",",
"f_is",
")",
"if",
"y_i",
"is",
"not",
"None",
"]",
")",
"SS_tot",
"=",
"np",
".",
"sum",
"(",
"[",
"np",
".",
"sum",
"(",
"(",
"y_i",
"-",
"y_bar",
")",
"**",
"2",
")",
"for",
"y_i",
",",
"y_bar",
"in",
"zip",
"(",
"y_is",
",",
"y_bars",
")",
"if",
"y_i",
"is",
"not",
"None",
"]",
")",
"return",
"1",
"-",
"SS_res",
"/",
"SS_tot"
] | Calculates the coefficient of determination, R^2, for the fit.
(Is not defined properly for vector-valued functions.)
:param model: Model instance
:param fit_result: FitResults instance
:param data: data with which the fit was performed. | [
"Calculates",
"the",
"coefficient",
"of",
"determination",
"R^2",
"for",
"the",
"fit",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/fit.py#L1431-L1448 | train | 238,473 |
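Fit.execute (later in this file) stores this number under gof_qualifiers, but the function can also be called directly; a sketch with synthetic data, assuming r_squared is importable from symfit.core.fit as the path above indicates.

import numpy as np
from symfit import variables, parameters, Fit
from symfit.core.fit import r_squared

x, y = variables('x, y')
a, b = parameters('a, b')

xdata = np.linspace(0, 10, 25)
ydata = 3.0 * xdata + 2.0 + np.random.normal(0, 0.4, xdata.shape)

fit = Fit({y: a * x + b}, x=xdata, y=ydata)
result = fit.execute()
print(result.gof_qualifiers['r_squared'])                  # stored by Fit.execute
print(r_squared(fit.model, result, {x: xdata, y: ydata}))  # same value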
tBuLi/symfit | symfit/core/fit.py | _partial_subs | def _partial_subs(func, func2vars):
"""
Partial-bug proof substitution. Works by making the substitutions on
the expression inside the derivative first, and then rebuilding the
derivative safely without evaluating it using `_partial_diff`.
"""
if isinstance(func, sympy.Derivative):
new_func = func.expr.xreplace(func2vars)
new_variables = tuple(var.xreplace(func2vars)
for var in func.variables)
return _partial_diff(new_func, *new_variables)
else:
return func.xreplace(func2vars) | python | def _partial_subs(func, func2vars):
"""
Partial-bug proof substitution. Works by making the substitutions on
the expression inside the derivative first, and then rebuilding the
derivative safely without evaluating it using `_partial_diff`.
"""
if isinstance(func, sympy.Derivative):
new_func = func.expr.xreplace(func2vars)
new_variables = tuple(var.xreplace(func2vars)
for var in func.variables)
return _partial_diff(new_func, *new_variables)
else:
return func.xreplace(func2vars) | [
"def",
"_partial_subs",
"(",
"func",
",",
"func2vars",
")",
":",
"if",
"isinstance",
"(",
"func",
",",
"sympy",
".",
"Derivative",
")",
":",
"new_func",
"=",
"func",
".",
"expr",
".",
"xreplace",
"(",
"func2vars",
")",
"new_variables",
"=",
"tuple",
"(",
"var",
".",
"xreplace",
"(",
"func2vars",
")",
"for",
"var",
"in",
"func",
".",
"variables",
")",
"return",
"_partial_diff",
"(",
"new_func",
",",
"*",
"new_variables",
")",
"else",
":",
"return",
"func",
".",
"xreplace",
"(",
"func2vars",
")"
] | Partial-bug proof substitution. Works by making the substitutions on
the expression inside the derivative first, and then rebuilding the
derivative safely without evaluating it using `_partial_diff`. | [
"Partial",
"-",
"bug",
"proof",
"substitution",
".",
"Works",
"by",
"making",
"the",
"substitutions",
"on",
"the",
"expression",
"inside",
"the",
"derivative",
"first",
"and",
"then",
"rebuilding",
"the",
"derivative",
"safely",
"without",
"evaluating",
"it",
"using",
"_partial_diff",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/fit.py#L1693-L1705 | train | 238,474 |
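The core trick, substituting on the inner expression and then rebuilding the Derivative unevaluated, can be shown with plain sympy; the names below are illustrative only.

import sympy

x = sympy.Symbol('x')
f = sympy.Function('f')(x)
d = sympy.Derivative(f, x)

# Substitute inside the derivative first, then rebuild it without
# evaluating (the analogue of _partial_diff used above):
rebuilt = sympy.Derivative(d.expr.xreplace({f: x**2}), *d.variables)
print(rebuilt)         # Derivative(x**2, x)
print(rebuilt.doit())  # 2*x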
tBuLi/symfit | symfit/core/fit.py | BaseModel._init_from_dict | def _init_from_dict(self, model_dict):
"""
Initialize self from a model_dict to make sure attributes such as vars, params are available.
Creates lists of alphabetically sorted independent vars, dependent vars, sigma vars, and parameters.
Finally it creates a signature for this model so it can be called nicely. This signature only contains
independent vars and params, as one would expect.
:param model_dict: dict of (dependent_var, expression) pairs.
"""
sort_func = lambda symbol: symbol.name
self.model_dict = OrderedDict(sorted(model_dict.items(),
key=lambda i: sort_func(i[0])))
# Everything at the bottom of the toposort is independent, at the top
# dependent, and the rest interdependent.
ordered = list(toposort(self.connectivity_mapping))
independent = sorted(ordered.pop(0), key=sort_func)
self.dependent_vars = sorted(ordered.pop(-1), key=sort_func)
self.interdependent_vars = sorted(
[item for items in ordered for item in items],
key=sort_func
)
# `independent` contains both params and vars, needs to be separated
self.independent_vars = [s for s in independent if
not isinstance(s, Parameter) and not s in self]
self.params = [s for s in independent if isinstance(s, Parameter)]
try:
assert not any(isinstance(var, Parameter)
for var in self.dependent_vars)
assert not any(isinstance(var, Parameter)
for var in self.interdependent_vars)
except AssertionError:
raise ModelError('`Parameter`\'s can not feature in the role '
'of `Variable`')
# Make Variable object corresponding to each dependent var.
self.sigmas = {var: Variable(name='sigma_{}'.format(var.name))
for var in self.dependent_vars} | python | def _init_from_dict(self, model_dict):
"""
Initialize self from a model_dict to make sure attributes such as vars, params are available.
Creates lists of alphabetically sorted independent vars, dependent vars, sigma vars, and parameters.
Finally it creates a signature for this model so it can be called nicely. This signature only contains
independent vars and params, as one would expect.
:param model_dict: dict of (dependent_var, expression) pairs.
"""
sort_func = lambda symbol: symbol.name
self.model_dict = OrderedDict(sorted(model_dict.items(),
key=lambda i: sort_func(i[0])))
# Everything at the bottom of the toposort is independent, at the top
# dependent, and the rest interdependent.
ordered = list(toposort(self.connectivity_mapping))
independent = sorted(ordered.pop(0), key=sort_func)
self.dependent_vars = sorted(ordered.pop(-1), key=sort_func)
self.interdependent_vars = sorted(
[item for items in ordered for item in items],
key=sort_func
)
# `independent` contains both params and vars, needs to be separated
self.independent_vars = [s for s in independent if
not isinstance(s, Parameter) and not s in self]
self.params = [s for s in independent if isinstance(s, Parameter)]
try:
assert not any(isinstance(var, Parameter)
for var in self.dependent_vars)
assert not any(isinstance(var, Parameter)
for var in self.interdependent_vars)
except AssertionError:
raise ModelError('`Parameter`\'s can not feature in the role '
'of `Variable`')
# Make Variable object corresponding to each dependent var.
self.sigmas = {var: Variable(name='sigma_{}'.format(var.name))
for var in self.dependent_vars} | [
"def",
"_init_from_dict",
"(",
"self",
",",
"model_dict",
")",
":",
"sort_func",
"=",
"lambda",
"symbol",
":",
"symbol",
".",
"name",
"self",
".",
"model_dict",
"=",
"OrderedDict",
"(",
"sorted",
"(",
"model_dict",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"i",
":",
"sort_func",
"(",
"i",
"[",
"0",
"]",
")",
")",
")",
"# Everything at the bottom of the toposort is independent, at the top",
"# dependent, and the rest interdependent.",
"ordered",
"=",
"list",
"(",
"toposort",
"(",
"self",
".",
"connectivity_mapping",
")",
")",
"independent",
"=",
"sorted",
"(",
"ordered",
".",
"pop",
"(",
"0",
")",
",",
"key",
"=",
"sort_func",
")",
"self",
".",
"dependent_vars",
"=",
"sorted",
"(",
"ordered",
".",
"pop",
"(",
"-",
"1",
")",
",",
"key",
"=",
"sort_func",
")",
"self",
".",
"interdependent_vars",
"=",
"sorted",
"(",
"[",
"item",
"for",
"items",
"in",
"ordered",
"for",
"item",
"in",
"items",
"]",
",",
"key",
"=",
"sort_func",
")",
"# `independent` contains both params and vars, needs to be separated",
"self",
".",
"independent_vars",
"=",
"[",
"s",
"for",
"s",
"in",
"independent",
"if",
"not",
"isinstance",
"(",
"s",
",",
"Parameter",
")",
"and",
"not",
"s",
"in",
"self",
"]",
"self",
".",
"params",
"=",
"[",
"s",
"for",
"s",
"in",
"independent",
"if",
"isinstance",
"(",
"s",
",",
"Parameter",
")",
"]",
"try",
":",
"assert",
"not",
"any",
"(",
"isinstance",
"(",
"var",
",",
"Parameter",
")",
"for",
"var",
"in",
"self",
".",
"dependent_vars",
")",
"assert",
"not",
"any",
"(",
"isinstance",
"(",
"var",
",",
"Parameter",
")",
"for",
"var",
"in",
"self",
".",
"interdependent_vars",
")",
"except",
"AssertionError",
":",
"raise",
"ModelError",
"(",
"'`Parameter`\\'s can not feature in the role '",
"'of `Variable`'",
")",
"# Make Variable object corresponding to each depedent var.",
"self",
".",
"sigmas",
"=",
"{",
"var",
":",
"Variable",
"(",
"name",
"=",
"'sigma_{}'",
".",
"format",
"(",
"var",
".",
"name",
")",
")",
"for",
"var",
"in",
"self",
".",
"dependent_vars",
"}"
] | Initialize self from a model_dict to make sure attributes such as vars, params are available.
Creates lists of alphabetically sorted independent vars, dependent vars, sigma vars, and parameters.
Finally it creates a signature for this model so it can be called nicely. This signature only contains
independent vars and params, as one would expect.
:param model_dict: dict of (dependent_var, expression) pairs. | [
"Initialize",
"self",
"from",
"a",
"model_dict",
"to",
"make",
"sure",
"attributes",
"such",
"as",
"vars",
"params",
"are",
"available",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/fit.py#L273-L310 | train | 238,475 |
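The resulting classification can be inspected on a model with a chained dependency. This sketch assumes the installed symfit version supports interdependent variables, as the toposort logic above implies:

from symfit import variables, parameters, Model

x, y, z = variables('x, y, z')
a, b = parameters('a, b')

# y depends on x; z depends on y, so y sits in the middle of the toposort:
model = Model({y: a * x, z: b * y})

print(model.independent_vars)     # [x]
print(model.interdependent_vars)  # [y]
print(model.dependent_vars)       # [z]
print(model.params)               # [a, b]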
tBuLi/symfit | symfit/core/fit.py | BaseModel.function_dict | def function_dict(self):
"""
Equivalent to ``self.model_dict``, but with all variables replaced by
functions if applicable. Sorted by the evaluation order according to
``self.ordered_symbols``, not alphabetical like ``self.model_dict``!
"""
func_dict = OrderedDict()
for var, func in self.vars_as_functions.items():
expr = self.model_dict[var].xreplace(self.vars_as_functions)
func_dict[func] = expr
return func_dict | python | def function_dict(self):
"""
Equivalent to ``self.model_dict``, but with all variables replaced by
functions if applicable. Sorted by the evaluation order according to
``self.ordered_symbols``, not alphabetical like ``self.model_dict``!
"""
func_dict = OrderedDict()
for var, func in self.vars_as_functions.items():
expr = self.model_dict[var].xreplace(self.vars_as_functions)
func_dict[func] = expr
return func_dict | [
"def",
"function_dict",
"(",
"self",
")",
":",
"func_dict",
"=",
"OrderedDict",
"(",
")",
"for",
"var",
",",
"func",
"in",
"self",
".",
"vars_as_functions",
".",
"items",
"(",
")",
":",
"expr",
"=",
"self",
".",
"model_dict",
"[",
"var",
"]",
".",
"xreplace",
"(",
"self",
".",
"vars_as_functions",
")",
"func_dict",
"[",
"func",
"]",
"=",
"expr",
"return",
"func_dict"
] | Equivalent to ``self.model_dict``, but with all variables replaced by
functions if applicable. Sorted by the evaluation order according to
``self.ordered_symbols``, not alphabetical like ``self.model_dict``! | [
"Equivalent",
"to",
"self",
".",
"model_dict",
"but",
"with",
"all",
"variables",
"replaced",
"by",
"functions",
"if",
"applicable",
".",
"Sorted",
"by",
"the",
"evaluation",
"order",
"according",
"to",
"self",
".",
"ordered_symbols",
"not",
"alphabetical",
"like",
"self",
".",
"model_dict",
"!"
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/fit.py#L340-L350 | train | 238,476 |
tBuLi/symfit | symfit/core/fit.py | TakesData._model_sanity | def _model_sanity(model):
"""
Perform some basic sanity checking on the model to warn users when they
might be trying something ill advised.
:param model: model instance.
"""
if not isinstance(model, ODEModel) and not isinstance(model, BaseNumericalModel):
# Such a model should probably not contain derivatives
for var, expr in model.items():
if isinstance(var, sympy.Derivative) or expr.has(sympy.Derivative):
warnings.warn(RuntimeWarning(
'The model contains derivatives in its definition. '
'Are you sure you don\'t mean to use `symfit.ODEModel`?'
)) | python | def _model_sanity(model):
"""
Perform some basic sanity checking on the model to warn users when they
might be trying something ill advised.
:param model: model instance.
"""
if not isinstance(model, ODEModel) and not isinstance(model, BaseNumericalModel):
# Such a model should probably not contain derivatives
for var, expr in model.items():
if isinstance(var, sympy.Derivative) or expr.has(sympy.Derivative):
warnings.warn(RuntimeWarning(
'The model contains derivatives in its definition. '
'Are you sure you don\'t mean to use `symfit.ODEModel`?'
)) | [
"def",
"_model_sanity",
"(",
"model",
")",
":",
"if",
"not",
"isinstance",
"(",
"model",
",",
"ODEModel",
")",
"and",
"not",
"isinstance",
"(",
"model",
",",
"BaseNumericalModel",
")",
":",
"# Such a model should probably not contain derivatives",
"for",
"var",
",",
"expr",
"in",
"model",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"var",
",",
"sympy",
".",
"Derivative",
")",
"or",
"expr",
".",
"has",
"(",
"sympy",
".",
"Derivative",
")",
":",
"warnings",
".",
"warn",
"(",
"RuntimeWarning",
"(",
"'The model contains derivatives in its definition. '",
"'Are you sure you don\\'t mean to use `symfit.ODEModel`?'",
")",
")"
] | Perform some basic sanity checking on the model to warn users when they
might be trying something ill advised.
:param model: model instance. | [
"Perform",
"some",
"basic",
"sanity",
"checking",
"on",
"the",
"model",
"to",
"warn",
"users",
"when",
"they",
"might",
"be",
"trying",
"something",
"ill",
"advised",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/fit.py#L1014-L1028 | train | 238,477 |
tBuLi/symfit | symfit/core/fit.py | TakesData.data_shapes | def data_shapes(self):
"""
Returns the shape of the data. In most cases this will be the same for
all variables of the same type; if not, this raises an Exception.
Ignores variables which are set to None by design so we know that those
None variables can be assumed to have the same shape as the other in
calculations where this is needed, such as the covariance matrix.
:return: Tuple of all independent var shapes, dependent var shapes.
"""
independent_shapes = []
for var, data in self.independent_data.items():
if data is not None:
independent_shapes.append(data.shape)
dependent_shapes = []
for var, data in self.dependent_data.items():
if data is not None:
dependent_shapes.append(data.shape)
return list(set(independent_shapes)), list(set(dependent_shapes)) | python | def data_shapes(self):
"""
Returns the shape of the data. In most cases this will be the same for
all variables of the same type; if not, this raises an Exception.
Ignores variables which are set to None by design so we know that those
None variables can be assumed to have the same shape as the other in
calculations where this is needed, such as the covariance matrix.
:return: Tuple of all independent var shapes, dependent var shapes.
"""
independent_shapes = []
for var, data in self.independent_data.items():
if data is not None:
independent_shapes.append(data.shape)
dependent_shapes = []
for var, data in self.dependent_data.items():
if data is not None:
dependent_shapes.append(data.shape)
return list(set(independent_shapes)), list(set(dependent_shapes)) | [
"def",
"data_shapes",
"(",
"self",
")",
":",
"independent_shapes",
"=",
"[",
"]",
"for",
"var",
",",
"data",
"in",
"self",
".",
"independent_data",
".",
"items",
"(",
")",
":",
"if",
"data",
"is",
"not",
"None",
":",
"independent_shapes",
".",
"append",
"(",
"data",
".",
"shape",
")",
"dependent_shapes",
"=",
"[",
"]",
"for",
"var",
",",
"data",
"in",
"self",
".",
"dependent_data",
".",
"items",
"(",
")",
":",
"if",
"data",
"is",
"not",
"None",
":",
"dependent_shapes",
".",
"append",
"(",
"data",
".",
"shape",
")",
"return",
"list",
"(",
"set",
"(",
"independent_shapes",
")",
")",
",",
"list",
"(",
"set",
"(",
"dependent_shapes",
")",
")"
] | Returns the shape of the data. In most cases this will be the same for
all variables of the same type; if not, this raises an Exception.
Ignores variables which are set to None by design so we know that those
None variables can be assumed to have the same shape as the other in
calculations where this is needed, such as the covariance matrix.
:return: Tuple of all independent var shapes, dependent var shapes. | [
"Returns",
"the",
"shape",
"of",
"the",
"data",
".",
"In",
"most",
"cases",
"this",
"will",
"be",
"the",
"same",
"for",
"all",
"variables",
"of",
"the",
"same",
"type",
"if",
"not",
"this",
"raises",
"an",
"Exception",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/fit.py#L1067-L1088 | train | 238,478 |
tBuLi/symfit | symfit/core/fit.py | Fit.execute | def execute(self, **minimize_options):
"""
Execute the fit.
:param minimize_options: keyword arguments to be passed to the specified
minimizer.
:return: FitResults instance
"""
minimizer_ans = self.minimizer.execute(**minimize_options)
try: # to build covariance matrix
cov_matrix = minimizer_ans.covariance_matrix
except AttributeError:
cov_matrix = self.covariance_matrix(dict(zip(self.model.params, minimizer_ans._popt)))
else:
if cov_matrix is None:
cov_matrix = self.covariance_matrix(dict(zip(self.model.params, minimizer_ans._popt)))
finally:
minimizer_ans.covariance_matrix = cov_matrix
# Overwrite the DummyModel with the current model
minimizer_ans.model = self.model
minimizer_ans.gof_qualifiers['r_squared'] = r_squared(self.model, minimizer_ans, self.data)
return minimizer_ans | python | def execute(self, **minimize_options):
"""
Execute the fit.
:param minimize_options: keyword arguments to be passed to the specified
minimizer.
:return: FitResults instance
"""
minimizer_ans = self.minimizer.execute(**minimize_options)
try: # to build covariance matrix
cov_matrix = minimizer_ans.covariance_matrix
except AttributeError:
cov_matrix = self.covariance_matrix(dict(zip(self.model.params, minimizer_ans._popt)))
else:
if cov_matrix is None:
cov_matrix = self.covariance_matrix(dict(zip(self.model.params, minimizer_ans._popt)))
finally:
minimizer_ans.covariance_matrix = cov_matrix
# Overwrite the DummyModel with the current model
minimizer_ans.model = self.model
minimizer_ans.gof_qualifiers['r_squared'] = r_squared(self.model, minimizer_ans, self.data)
return minimizer_ans | [
"def",
"execute",
"(",
"self",
",",
"*",
"*",
"minimize_options",
")",
":",
"minimizer_ans",
"=",
"self",
".",
"minimizer",
".",
"execute",
"(",
"*",
"*",
"minimize_options",
")",
"try",
":",
"# to build covariance matrix",
"cov_matrix",
"=",
"minimizer_ans",
".",
"covariance_matrix",
"except",
"AttributeError",
":",
"cov_matrix",
"=",
"self",
".",
"covariance_matrix",
"(",
"dict",
"(",
"zip",
"(",
"self",
".",
"model",
".",
"params",
",",
"minimizer_ans",
".",
"_popt",
")",
")",
")",
"else",
":",
"if",
"cov_matrix",
"is",
"None",
":",
"cov_matrix",
"=",
"self",
".",
"covariance_matrix",
"(",
"dict",
"(",
"zip",
"(",
"self",
".",
"model",
".",
"params",
",",
"minimizer_ans",
".",
"_popt",
")",
")",
")",
"finally",
":",
"minimizer_ans",
".",
"covariance_matrix",
"=",
"cov_matrix",
"# Overwrite the DummyModel with the current model",
"minimizer_ans",
".",
"model",
"=",
"self",
".",
"model",
"minimizer_ans",
".",
"gof_qualifiers",
"[",
"'r_squared'",
"]",
"=",
"r_squared",
"(",
"self",
".",
"model",
",",
"minimizer_ans",
",",
"self",
".",
"data",
")",
"return",
"minimizer_ans"
] | Execute the fit.
:param minimize_options: keyword arguments to be passed to the specified
minimizer.
:return: FitResults instance | [
"Execute",
"the",
"fit",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/fit.py#L1407-L1428 | train | 238,479 |
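A sketch of the bookkeeping this method performs: when the minimizer did not supply a covariance matrix, one is computed in the try/finally above and attached to the result, together with r_squared. Data and model are invented.

import numpy as np
from symfit import variables, parameters, Fit

x, y = variables('x, y')
a, b = parameters('a, b')

xdata = np.linspace(0, 10, 25)
ydata = 3.0 * xdata + 2.0 + np.random.normal(0, 0.4, xdata.shape)

result = Fit({y: a * x + b}, x=xdata, y=ydata).execute()
print(result.params)             # OrderedDict of best-fit values
print(result.covariance_matrix)  # guaranteed to be set by this method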
tBuLi/symfit | symfit/core/fit.py | ODEModel.eval_components | def eval_components(self, *args, **kwargs):
"""
Numerically integrate the system of ODEs.
:param args: Ordered arguments for the parameters and independent
variables
:param kwargs: Keyword arguments for the parameters and independent
variables
:return:
"""
bound_arguments = self.__signature__.bind(*args, **kwargs)
t_like = bound_arguments.arguments[self.independent_vars[0].name]
# System of functions to be integrated
f = lambda ys, t, *a: [c(t, *(list(ys) + list(a))) for c in self._ncomponents]
Dfun = lambda ys, t, *a: [[c(t, *(list(ys) + list(a))) for c in row] for row in self._njacobian]
initial_dependent = [self.initial[var] for var in self.dependent_vars]
t_initial = self.initial[self.independent_vars[0]] # Assuming there's only one
# Check if the time-like data includes the initial value, because integration should start there.
try:
t_like[0]
except (TypeError, IndexError): # Python scalar gives TypeError, numpy scalars IndexError
t_like = np.array([t_like]) # Allow evaluation at one point.
# The strategy is to split the time axis in a part above and below the
# initial value, and to integrate those seperately. At the end we rejoin them.
# np.flip is needed because odeint wants the first point to be t_initial
# and so t_smaller is a declining series.
if t_initial in t_like:
t_bigger = t_like[t_like >= t_initial]
t_smaller = t_like[t_like <= t_initial][::-1]
else:
t_bigger = np.concatenate(
(np.array([t_initial]), t_like[t_like > t_initial])
)
t_smaller = np.concatenate(
(np.array([t_initial]), t_like[t_like < t_initial][::-1])
)
# Properly ordered time axis containing t_initial
t_total = np.concatenate((t_smaller[::-1][:-1], t_bigger))
ans_bigger = odeint(
f,
initial_dependent,
t_bigger,
args=tuple(
bound_arguments.arguments[param.name] for param in self.params),
Dfun=Dfun,
*self.lsoda_args, **self.lsoda_kwargs
)
ans_smaller = odeint(
f,
initial_dependent,
t_smaller,
args=tuple(
bound_arguments.arguments[param.name] for param in self.params),
Dfun=Dfun,
*self.lsoda_args, **self.lsoda_kwargs
)
ans = np.concatenate((ans_smaller[1:][::-1], ans_bigger))
if t_initial in t_like:
# The user also requested to know the value at t_initial, so keep it.
return ans.T
else:
# The user didn't ask for the value at t_initial, so exclude it.
# (t_total contains all the t-points used for the integration,
# and so is t_like with t_initial inserted at the right position).
return ans[t_total != t_initial].T | python | def eval_components(self, *args, **kwargs):
"""
Numerically integrate the system of ODEs.
:param args: Ordered arguments for the parameters and independent
variables
:param kwargs: Keyword arguments for the parameters and independent
variables
:return:
"""
bound_arguments = self.__signature__.bind(*args, **kwargs)
t_like = bound_arguments.arguments[self.independent_vars[0].name]
# System of functions to be integrated
f = lambda ys, t, *a: [c(t, *(list(ys) + list(a))) for c in self._ncomponents]
Dfun = lambda ys, t, *a: [[c(t, *(list(ys) + list(a))) for c in row] for row in self._njacobian]
initial_dependent = [self.initial[var] for var in self.dependent_vars]
t_initial = self.initial[self.independent_vars[0]] # Assuming there's only one
# Check if the time-like data includes the initial value, because integration should start there.
try:
t_like[0]
except (TypeError, IndexError): # Python scalar gives TypeError, numpy scalars IndexError
t_like = np.array([t_like]) # Allow evaluation at one point.
# The strategy is to split the time axis in a part above and below the
# initial value, and to integrate those separately. At the end we rejoin them.
# np.flip is needed because odeint wants the first point to be t_initial
# and so t_smaller is a declining series.
if t_initial in t_like:
t_bigger = t_like[t_like >= t_initial]
t_smaller = t_like[t_like <= t_initial][::-1]
else:
t_bigger = np.concatenate(
(np.array([t_initial]), t_like[t_like > t_initial])
)
t_smaller = np.concatenate(
(np.array([t_initial]), t_like[t_like < t_initial][::-1])
)
# Properly ordered time axis containing t_initial
t_total = np.concatenate((t_smaller[::-1][:-1], t_bigger))
ans_bigger = odeint(
f,
initial_dependent,
t_bigger,
args=tuple(
bound_arguments.arguments[param.name] for param in self.params),
Dfun=Dfun,
*self.lsoda_args, **self.lsoda_kwargs
)
ans_smaller = odeint(
f,
initial_dependent,
t_smaller,
args=tuple(
bound_arguments.arguments[param.name] for param in self.params),
Dfun=Dfun,
*self.lsoda_args, **self.lsoda_kwargs
)
ans = np.concatenate((ans_smaller[1:][::-1], ans_bigger))
if t_initial in t_like:
# The user also requested to know the value at t_initial, so keep it.
return ans.T
else:
# The user didn't ask for the value at t_initial, so exclude it.
# (t_total contains all the t-points used for the integration,
# and so is t_like with t_initial inserted at the right position).
return ans[t_total != t_initial].T | [
"def",
"eval_components",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"bound_arguments",
"=",
"self",
".",
"__signature__",
".",
"bind",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"t_like",
"=",
"bound_arguments",
".",
"arguments",
"[",
"self",
".",
"independent_vars",
"[",
"0",
"]",
".",
"name",
"]",
"# System of functions to be integrated",
"f",
"=",
"lambda",
"ys",
",",
"t",
",",
"*",
"a",
":",
"[",
"c",
"(",
"t",
",",
"*",
"(",
"list",
"(",
"ys",
")",
"+",
"list",
"(",
"a",
")",
")",
")",
"for",
"c",
"in",
"self",
".",
"_ncomponents",
"]",
"Dfun",
"=",
"lambda",
"ys",
",",
"t",
",",
"*",
"a",
":",
"[",
"[",
"c",
"(",
"t",
",",
"*",
"(",
"list",
"(",
"ys",
")",
"+",
"list",
"(",
"a",
")",
")",
")",
"for",
"c",
"in",
"row",
"]",
"for",
"row",
"in",
"self",
".",
"_njacobian",
"]",
"initial_dependent",
"=",
"[",
"self",
".",
"initial",
"[",
"var",
"]",
"for",
"var",
"in",
"self",
".",
"dependent_vars",
"]",
"t_initial",
"=",
"self",
".",
"initial",
"[",
"self",
".",
"independent_vars",
"[",
"0",
"]",
"]",
"# Assuming there's only one",
"# Check if the time-like data includes the initial value, because integration should start there.",
"try",
":",
"t_like",
"[",
"0",
"]",
"except",
"(",
"TypeError",
",",
"IndexError",
")",
":",
"# Python scalar gives TypeError, numpy scalars IndexError",
"t_like",
"=",
"np",
".",
"array",
"(",
"[",
"t_like",
"]",
")",
"# Allow evaluation at one point.",
"# The strategy is to split the time axis in a part above and below the",
"# initial value, and to integrate those seperately. At the end we rejoin them.",
"# np.flip is needed because odeint wants the first point to be t_initial",
"# and so t_smaller is a declining series.",
"if",
"t_initial",
"in",
"t_like",
":",
"t_bigger",
"=",
"t_like",
"[",
"t_like",
">=",
"t_initial",
"]",
"t_smaller",
"=",
"t_like",
"[",
"t_like",
"<=",
"t_initial",
"]",
"[",
":",
":",
"-",
"1",
"]",
"else",
":",
"t_bigger",
"=",
"np",
".",
"concatenate",
"(",
"(",
"np",
".",
"array",
"(",
"[",
"t_initial",
"]",
")",
",",
"t_like",
"[",
"t_like",
">",
"t_initial",
"]",
")",
")",
"t_smaller",
"=",
"np",
".",
"concatenate",
"(",
"(",
"np",
".",
"array",
"(",
"[",
"t_initial",
"]",
")",
",",
"t_like",
"[",
"t_like",
"<",
"t_initial",
"]",
"[",
":",
":",
"-",
"1",
"]",
")",
")",
"# Properly ordered time axis containing t_initial",
"t_total",
"=",
"np",
".",
"concatenate",
"(",
"(",
"t_smaller",
"[",
":",
":",
"-",
"1",
"]",
"[",
":",
"-",
"1",
"]",
",",
"t_bigger",
")",
")",
"ans_bigger",
"=",
"odeint",
"(",
"f",
",",
"initial_dependent",
",",
"t_bigger",
",",
"args",
"=",
"tuple",
"(",
"bound_arguments",
".",
"arguments",
"[",
"param",
".",
"name",
"]",
"for",
"param",
"in",
"self",
".",
"params",
")",
",",
"Dfun",
"=",
"Dfun",
",",
"*",
"self",
".",
"lsoda_args",
",",
"*",
"*",
"self",
".",
"lsoda_kwargs",
")",
"ans_smaller",
"=",
"odeint",
"(",
"f",
",",
"initial_dependent",
",",
"t_smaller",
",",
"args",
"=",
"tuple",
"(",
"bound_arguments",
".",
"arguments",
"[",
"param",
".",
"name",
"]",
"for",
"param",
"in",
"self",
".",
"params",
")",
",",
"Dfun",
"=",
"Dfun",
",",
"*",
"self",
".",
"lsoda_args",
",",
"*",
"*",
"self",
".",
"lsoda_kwargs",
")",
"ans",
"=",
"np",
".",
"concatenate",
"(",
"(",
"ans_smaller",
"[",
"1",
":",
"]",
"[",
":",
":",
"-",
"1",
"]",
",",
"ans_bigger",
")",
")",
"if",
"t_initial",
"in",
"t_like",
":",
"# The user also requested to know the value at t_initial, so keep it.",
"return",
"ans",
".",
"T",
"else",
":",
"# The user didn't ask for the value at t_initial, so exclude it.",
"# (t_total contains all the t-points used for the integration,",
"# and so is t_like with t_initial inserted at the right position).",
"return",
"ans",
"[",
"t_total",
"!=",
"t_initial",
"]",
".",
"T"
] | Numerically integrate the system of ODEs.
:param args: Ordered arguments for the parameters and independent
variables
:param kwargs: Keyword arguments for the parameters and independent
variables
:return: | [
"Numerically",
"integrate",
"the",
"system",
"of",
"ODEs",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/fit.py#L1590-L1660 | train | 238,480 |
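A sketch of both direct evaluation and fitting, using an invented exponential-decay system:

import numpy as np
from symfit import variables, parameters, Fit, ODEModel, D

t, y = variables('t, y')
k, = parameters('k')

# dy/dt = -k*y with y(0) = 1:
ode = ODEModel({D(y, t): -k * y}, initial={t: 0.0, y: 1.0})

# Direct evaluation: integration starts from t_initial internally, even
# though the requested points do not include it.
y_vals, = ode(t=np.array([0.5, 1.0, 2.0]), k=0.8)
print(y_vals)  # close to exp(-0.8 * t)

tdata = np.linspace(0, 5, 20)
fit = Fit(ode, t=tdata, y=np.exp(-0.8 * tdata))
print(fit.execute().value(k))  # close to 0.8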
tBuLi/symfit | symfit/core/operators.py | call | def call(self, *values, **named_values):
"""
Call an expression to evaluate it at the given point.
Future improvements: I would like it if func and signature could be buffered after the
first call so they don't have to be recalculated for every call. However, nothing
can be stored on self as sympy uses __slots__ for efficiency. This means there is no
instance dict to put stuff in! And I'm pretty sure it's ill advised to hack into the
__slots__ of Expr.
However, for the moment I don't really notice a performance penalty in running tests.
p.s. In the current setup signature is not even needed since no introspection is possible
on the Expr before calling it anyway, which makes calculating the signature absolutely useless.
However, I hope that someday some monkey patching expert in shining armour comes by and finds
a way to store it in __signature__ upon __init__ of any ``symfit`` expr such that calling
inspect_sig.signature on a symbolic expression will tell you which arguments to provide.
:param self: Any subclass of sympy.Expr
:param values: Values for the Parameters and Variables of the Expr.
:param named_values: Values for the vars and params by name. ``named_values`` is
allowed to contain too many values, as this sometimes happens when using
\*\*fit_result.params on a submodel. The irrelevant params are simply ignored.
:return: The function evaluated at ``values``. The type depends entirely on the input.
Typically an array or a float but nothing is enforced.
"""
independent_vars, params = seperate_symbols(self)
# Convert to a pythonic function
func = sympy_to_py(self, independent_vars + params)
# Handle args and kwargs according to the allowed names.
parameters = [ # Note that these are inspect_sig.Parameter's, not symfit parameters!
inspect_sig.Parameter(arg.name, inspect_sig.Parameter.POSITIONAL_OR_KEYWORD)
for arg in independent_vars + params
]
arg_names = [arg.name for arg in independent_vars + params]
relevant_named_values = {
name: value for name, value in named_values.items() if name in arg_names
}
signature = inspect_sig.Signature(parameters=parameters)
bound_arguments = signature.bind(*values, **relevant_named_values)
return func(**bound_arguments.arguments) | python | def call(self, *values, **named_values):
"""
Call an expression to evaluate it at the given point.
Future improvements: I would like it if func and signature could be buffered after the
first call so they don't have to be recalculated for every call. However, nothing
can be stored on self as sympy uses __slots__ for efficiency. This means there is no
instance dict to put stuff in! And I'm pretty sure it's ill advised to hack into the
__slots__ of Expr.
However, for the moment I don't really notice a performance penalty in running tests.
p.s. In the current setup signature is not even needed since no introspection is possible
on the Expr before calling it anyway, which makes calculating the signature absolutely useless.
However, I hope that someday some monkey patching expert in shining armour comes by and finds
a way to store it in __signature__ upon __init__ of any ``symfit`` expr such that calling
inspect_sig.signature on a symbolic expression will tell you which arguments to provide.
:param self: Any subclass of sympy.Expr
:param values: Values for the Parameters and Variables of the Expr.
:param named_values: Values for the vars and params by name. ``named_values`` is
allowed to contain too many values, as this sometimes happens when using
\*\*fit_result.params on a submodel. The irrelevant params are simply ignored.
:return: The function evaluated at ``values``. The type depends entirely on the input.
Typically an array or a float but nothing is enforced.
"""
independent_vars, params = seperate_symbols(self)
# Convert to a pythonic function
func = sympy_to_py(self, independent_vars + params)
# Handle args and kwargs according to the allowed names.
parameters = [ # Note that these are inspect_sig.Parameter's, not symfit parameters!
inspect_sig.Parameter(arg.name, inspect_sig.Parameter.POSITIONAL_OR_KEYWORD)
for arg in independent_vars + params
]
arg_names = [arg.name for arg in independent_vars + params]
relevant_named_values = {
name: value for name, value in named_values.items() if name in arg_names
}
signature = inspect_sig.Signature(parameters=parameters)
bound_arguments = signature.bind(*values, **relevant_named_values)
return func(**bound_arguments.arguments) | [
"def",
"call",
"(",
"self",
",",
"*",
"values",
",",
"*",
"*",
"named_values",
")",
":",
"independent_vars",
",",
"params",
"=",
"seperate_symbols",
"(",
"self",
")",
"# Convert to a pythonic function",
"func",
"=",
"sympy_to_py",
"(",
"self",
",",
"independent_vars",
"+",
"params",
")",
"# Handle args and kwargs according to the allowed names.",
"parameters",
"=",
"[",
"# Note that these are inspect_sig.Parameter's, not symfit parameters!",
"inspect_sig",
".",
"Parameter",
"(",
"arg",
".",
"name",
",",
"inspect_sig",
".",
"Parameter",
".",
"POSITIONAL_OR_KEYWORD",
")",
"for",
"arg",
"in",
"independent_vars",
"+",
"params",
"]",
"arg_names",
"=",
"[",
"arg",
".",
"name",
"for",
"arg",
"in",
"independent_vars",
"+",
"params",
"]",
"relevant_named_values",
"=",
"{",
"name",
":",
"value",
"for",
"name",
",",
"value",
"in",
"named_values",
".",
"items",
"(",
")",
"if",
"name",
"in",
"arg_names",
"}",
"signature",
"=",
"inspect_sig",
".",
"Signature",
"(",
"parameters",
"=",
"parameters",
")",
"bound_arguments",
"=",
"signature",
".",
"bind",
"(",
"*",
"values",
",",
"*",
"*",
"relevant_named_values",
")",
"return",
"func",
"(",
"*",
"*",
"bound_arguments",
".",
"arguments",
")"
] | Call an expression to evaluate it at the given point.
Future improvements: I would like it if func and signature could be buffered after the
first call so they don't have to be recalculated for every call. However, nothing
can be stored on self as sympy uses __slots__ for efficiency. This means there is no
instance dict to put stuff in! And I'm pretty sure it's ill advised to hack into the
__slots__ of Expr.
However, for the moment I don't really notice a performance penalty in running tests.
p.s. In the current setup signature is not even needed since no introspection is possible
on the Expr before calling it anyway, which makes calculating the signature absolutely useless.
However, I hope that someday some monkey patching expert in shining armour comes by and finds
a way to store it in __signature__ upon __init__ of any ``symfit`` expr such that calling
inspect_sig.signature on a symbolic expression will tell you which arguments to provide.
:param self: Any subclass of sympy.Expr
:param values: Values for the Parameters and Variables of the Expr.
:param named_values: Values for the vars and params by name. ``named_values`` is
allowed to contain too many values, as this sometimes happens when using
\*\*fit_result.params on a submodel. The irrelevant params are simply ignored.
:return: The function evaluated at ``values``. The type depends entirely on the input.
Typically an array or a float but nothing is enforced. | [
"Call",
"an",
"expression",
"to",
"evaluate",
"it",
"at",
"the",
"given",
"point",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/operators.py#L48-L92 | train | 238,481 |
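A short demonstration; the expression and numbers are arbitrary:

from symfit import variables, parameters

x, = variables('x')
a, b = parameters('a, b')

expr = a * x**2 + b
# Any symfit expression becomes directly callable through this patch:
print(expr(x=2, a=3, b=1))        # 13
# Irrelevant extra names are ignored, so **fit_result.params is safe:
print(expr(x=2, a=3, b=1, c=99))  # still 13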
tBuLi/symfit | symfit/core/fit_results.py | FitResults.variance | def variance(self, param):
"""
Return the variance in a given parameter as found by the fit.
:param param: ``Parameter`` Instance.
:return: Variance of ``param``.
"""
param_number = self.model.params.index(param)
try:
return self.covariance_matrix[param_number, param_number]
except TypeError:
# covariance_matrix can be None
return None | python | def variance(self, param):
"""
Return the variance in a given parameter as found by the fit.
:param param: ``Parameter`` Instance.
:return: Variance of ``param``.
"""
param_number = self.model.params.index(param)
try:
return self.covariance_matrix[param_number, param_number]
except TypeError:
# covariance_matrix can be None
return None | [
"def",
"variance",
"(",
"self",
",",
"param",
")",
":",
"param_number",
"=",
"self",
".",
"model",
".",
"params",
".",
"index",
"(",
"param",
")",
"try",
":",
"return",
"self",
".",
"covariance_matrix",
"[",
"param_number",
",",
"param_number",
"]",
"except",
"TypeError",
":",
"# covariance_matrix can be None",
"return",
"None"
] | Return the variance in a given parameter as found by the fit.
:param param: ``Parameter`` Instance.
:return: Variance of ``param``. | [
"Return",
"the",
"variance",
"in",
"a",
"given",
"parameter",
"as",
"found",
"by",
"the",
"fit",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/fit_results.py#L99-L111 | train | 238,482 |
tBuLi/symfit | symfit/core/fit_results.py | FitResults.covariance | def covariance(self, param_1, param_2):
"""
Return the covariance between param_1 and param_2.
:param param_1: ``Parameter`` Instance.
:param param_2: ``Parameter`` Instance.
:return: Covariance of the two params.
"""
param_1_number = self.model.params.index(param_1)
param_2_number = self.model.params.index(param_2)
return self.covariance_matrix[param_1_number, param_2_number] | python | def covariance(self, param_1, param_2):
"""
Return the covariance between param_1 and param_2.
:param param_1: ``Parameter`` Instance.
:param param_2: ``Parameter`` Instance.
:return: Covariance of the two params.
"""
param_1_number = self.model.params.index(param_1)
param_2_number = self.model.params.index(param_2)
return self.covariance_matrix[param_1_number, param_2_number] | [
"def",
"covariance",
"(",
"self",
",",
"param_1",
",",
"param_2",
")",
":",
"param_1_number",
"=",
"self",
".",
"model",
".",
"params",
".",
"index",
"(",
"param_1",
")",
"param_2_number",
"=",
"self",
".",
"model",
".",
"params",
".",
"index",
"(",
"param_2",
")",
"return",
"self",
".",
"covariance_matrix",
"[",
"param_1_number",
",",
"param_2_number",
"]"
] | Return the covariance between param_1 and param_2.
:param param_1: ``Parameter`` Instance.
:param param_2: ``Parameter`` Instance.
:return: Covariance of the two params. | [
"Return",
"the",
"covariance",
"between",
"param_1",
"and",
"param_2",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/fit_results.py#L113-L123 | train | 238,483 |
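Continuing the sketch above: `covariance` reads the off-diagonal entries. Unlike `variance`, it does not guard against `covariance_matrix` being `None`, so a failed fit would raise here:

print(fit_result.covariance(a, b))  # cov(a, b)
print(fit_result.covariance(b, a))  # the matrix is symmetric, same value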
tBuLi/symfit | symfit/core/fit_results.py | FitResults._array_safe_dict_eq | def _array_safe_dict_eq(one_dict, other_dict):
"""
Dicts containing arrays are hard to compare. This function uses
numpy.allclose to compare arrays, and does normal comparison for all
other types.
:param one_dict:
:param other_dict:
:return: bool
"""
for key in one_dict:
try:
assert one_dict[key] == other_dict[key]
except ValueError as err:
# When dealing with arrays, we need to use numpy for comparison
if isinstance(one_dict[key], dict):
assert FitResults._array_safe_dict_eq(one_dict[key], other_dict[key])
else:
assert np.allclose(one_dict[key], other_dict[key])
except AssertionError:
return False
else: return True | python | def _array_safe_dict_eq(one_dict, other_dict):
"""
Dicts containing arrays are hard to compare. This function uses
numpy.allclose to compare arrays, and does normal comparison for all
other types.
:param one_dict:
:param other_dict:
:return: bool
"""
for key in one_dict:
try:
assert one_dict[key] == other_dict[key]
except ValueError as err:
# When dealing with arrays, we need to use numpy for comparison
if isinstance(one_dict[key], dict):
assert FitResults._array_safe_dict_eq(one_dict[key], other_dict[key])
else:
assert np.allclose(one_dict[key], other_dict[key])
except AssertionError:
return False
else: return True | [
"def",
"_array_safe_dict_eq",
"(",
"one_dict",
",",
"other_dict",
")",
":",
"for",
"key",
"in",
"one_dict",
":",
"try",
":",
"assert",
"one_dict",
"[",
"key",
"]",
"==",
"other_dict",
"[",
"key",
"]",
"except",
"ValueError",
"as",
"err",
":",
"# When dealing with arrays, we need to use numpy for comparison",
"if",
"isinstance",
"(",
"one_dict",
"[",
"key",
"]",
",",
"dict",
")",
":",
"assert",
"FitResults",
".",
"_array_safe_dict_eq",
"(",
"one_dict",
"[",
"key",
"]",
",",
"other_dict",
"[",
"key",
"]",
")",
"else",
":",
"assert",
"np",
".",
"allclose",
"(",
"one_dict",
"[",
"key",
"]",
",",
"other_dict",
"[",
"key",
"]",
")",
"except",
"AssertionError",
":",
"return",
"False",
"else",
":",
"return",
"True"
] | Dicts containing arrays are hard to compare. This function uses
numpy.allclose to compare arrays, and does normal comparison for all
other types.
:param one_dict:
:param other_dict:
:return: bool | [
"Dicts",
"containing",
"arrays",
"are",
"hard",
"to",
"compare",
".",
"This",
"function",
"uses",
"numpy",
".",
"allclose",
"to",
"compare",
"arrays",
"and",
"does",
"normal",
"comparison",
"for",
"all",
"other",
"types",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/symfit/core/fit_results.py#L126-L147 | train | 238,484 |
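Why this helper exists: comparing dicts that hold numpy arrays with plain `==` raises `ValueError`, because the element-wise result has no single truth value. A small illustration, calling the private helper directly:

from symfit.core.fit_results import FitResults
import numpy as np

d1 = {'w': np.array([1.0, 2.0]), 'meta': {'n': 2}}
d2 = {'w': np.array([1.0, 2.0]), 'meta': {'n': 2}}
# `assert d1 == d2` raises ValueError on the array value; the helper catches
# that and falls back to np.allclose, recursing into nested dicts.
print(FitResults._array_safe_dict_eq(d1, d2))  # -> True

Note that only the keys of the first dict are iterated, so extra keys in the second dict are ignored, while a key missing from it raises an uncaught `KeyError`.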
tBuLi/symfit | examples/callable_numerical_model.py | nonanalytical_func | def nonanalytical_func(x, a, b):
"""
This can be any pythonic function which should be fitted, typically one
which is not easily written or supported as an analytical expression.
"""
# Do your non-trivial magic here. In this case a Piecewise, although this
# could also be done symbolically.
y = np.zeros_like(x)
y[x > b] = (a * (x - b) + b)[x > b]
y[x <= b] = b
return y | python | def nonanalytical_func(x, a, b):
"""
This can be any pythonic function which should be fitted, typically one
which is not easily written or supported as an analytical expression.
"""
# Do your non-trivial magic here. In this case a Piecewise, although this
# could also be done symbolically.
y = np.zeros_like(x)
y[x > b] = (a * (x - b) + b)[x > b]
y[x <= b] = b
return y | [
"def",
"nonanalytical_func",
"(",
"x",
",",
"a",
",",
"b",
")",
":",
"# Do your non-trivial magic here. In this case a Piecewise, although this",
"# could also be done symbolically.",
"y",
"=",
"np",
".",
"zeros_like",
"(",
"x",
")",
"y",
"[",
"x",
">",
"b",
"]",
"=",
"(",
"a",
"*",
"(",
"x",
"-",
"b",
")",
"+",
"b",
")",
"[",
"x",
">",
"b",
"]",
"y",
"[",
"x",
"<=",
"b",
"]",
"=",
"b",
"return",
"y"
] | This can be any pythonic function which should be fitted, typically one
which is not easily written or supported as an analytical expression. | [
"This",
"can",
"be",
"any",
"pythonic",
"function",
"which",
"should",
"be",
"fitted",
"typically",
"one",
"which",
"is",
"not",
"easily",
"written",
"or",
"supported",
"as",
"an",
"analytical",
"expression",
"."
] | 759dd3d1d4270510d651f40b23dd26b1b10eee83 | https://github.com/tBuLi/symfit/blob/759dd3d1d4270510d651f40b23dd26b1b10eee83/examples/callable_numerical_model.py#L5-L15 | train | 238,485 |
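Evaluated directly, the function is a clamped linear ramp; in the example file it is presumably wrapped in a symfit `CallableNumericalModel`, but this sketch only shows the raw call:

import numpy as np

x = np.linspace(0.0, 8.0, 9)
print(nonanalytical_func(x, a=2.0, b=4.0))
# entries with x <= 4 are clamped to 4.0; above that the result is 2*(x - 4) + 4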
mixcloud/django-experiments | experiments/admin.py | ExperimentAdmin.get_form | def get_form(self, request, obj=None, **kwargs):
"""
Add the default alternative dropdown with appropriate choices
"""
if obj:
if obj.alternatives:
choices = [(alternative, alternative) for alternative in obj.alternatives.keys()]
else:
choices = [(conf.CONTROL_GROUP, conf.CONTROL_GROUP)]
class ExperimentModelForm(forms.ModelForm):
default_alternative = forms.ChoiceField(choices=choices,
initial=obj.default_alternative,
required=False)
kwargs['form'] = ExperimentModelForm
return super(ExperimentAdmin, self).get_form(request, obj=obj, **kwargs) | python | def get_form(self, request, obj=None, **kwargs):
"""
Add the default alternative dropdown with appropriate choices
"""
if obj:
if obj.alternatives:
choices = [(alternative, alternative) for alternative in obj.alternatives.keys()]
else:
choices = [(conf.CONTROL_GROUP, conf.CONTROL_GROUP)]
class ExperimentModelForm(forms.ModelForm):
default_alternative = forms.ChoiceField(choices=choices,
initial=obj.default_alternative,
required=False)
kwargs['form'] = ExperimentModelForm
return super(ExperimentAdmin, self).get_form(request, obj=obj, **kwargs) | [
"def",
"get_form",
"(",
"self",
",",
"request",
",",
"obj",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"obj",
":",
"if",
"obj",
".",
"alternatives",
":",
"choices",
"=",
"[",
"(",
"alternative",
",",
"alternative",
")",
"for",
"alternative",
"in",
"obj",
".",
"alternatives",
".",
"keys",
"(",
")",
"]",
"else",
":",
"choices",
"=",
"[",
"(",
"conf",
".",
"CONTROL_GROUP",
",",
"conf",
".",
"CONTROL_GROUP",
")",
"]",
"class",
"ExperimentModelForm",
"(",
"forms",
".",
"ModelForm",
")",
":",
"default_alternative",
"=",
"forms",
".",
"ChoiceField",
"(",
"choices",
"=",
"choices",
",",
"initial",
"=",
"obj",
".",
"default_alternative",
",",
"required",
"=",
"False",
")",
"kwargs",
"[",
"'form'",
"]",
"=",
"ExperimentModelForm",
"return",
"super",
"(",
"ExperimentAdmin",
",",
"self",
")",
".",
"get_form",
"(",
"request",
",",
"obj",
"=",
"obj",
",",
"*",
"*",
"kwargs",
")"
] | Add the default alternative dropdown with appropriate choices | [
"Add",
"the",
"default",
"alternative",
"dropdown",
"with",
"appropriate",
"choices"
] | 1f45e9f8a108b51e44918daa647269b2b8d43f1d | https://github.com/mixcloud/django-experiments/blob/1f45e9f8a108b51e44918daa647269b2b8d43f1d/experiments/admin.py#L46-L61 | train | 238,486 |
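The pattern worth noting is the per-request form class: a fresh `ModelForm` subclass is defined inside `get_form` so the field choices can depend on the object being edited. A reduced, framework-agnostic sketch with hypothetical names:

from django import forms

def make_form_class(choices, initial):
    # A new class per call, so `choices` is baked in at definition time
    class DynamicForm(forms.Form):
        default_alternative = forms.ChoiceField(
            choices=choices, initial=initial, required=False)
    return DynamicForm

FormClass = make_form_class([('control', 'control'), ('v1', 'v1')], 'control')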
mixcloud/django-experiments | experiments/admin.py | ExperimentAdmin.set_alternative_view | def set_alternative_view(self, request):
"""
Allows the admin user to change their assigned alternative
"""
if not request.user.has_perm('experiments.change_experiment'):
return HttpResponseForbidden()
experiment_name = request.POST.get("experiment")
alternative_name = request.POST.get("alternative")
if not (experiment_name and alternative_name):
return HttpResponseBadRequest()
participant(request).set_alternative(experiment_name, alternative_name)
return JsonResponse({
'success': True,
'alternative': participant(request).get_alternative(experiment_name)
}) | python | def set_alternative_view(self, request):
"""
Allows the admin user to change their assigned alternative
"""
if not request.user.has_perm('experiments.change_experiment'):
return HttpResponseForbidden()
experiment_name = request.POST.get("experiment")
alternative_name = request.POST.get("alternative")
if not (experiment_name and alternative_name):
return HttpResponseBadRequest()
participant(request).set_alternative(experiment_name, alternative_name)
return JsonResponse({
'success': True,
'alternative': participant(request).get_alternative(experiment_name)
}) | [
"def",
"set_alternative_view",
"(",
"self",
",",
"request",
")",
":",
"if",
"not",
"request",
".",
"user",
".",
"has_perm",
"(",
"'experiments.change_experiment'",
")",
":",
"return",
"HttpResponseForbidden",
"(",
")",
"experiment_name",
"=",
"request",
".",
"POST",
".",
"get",
"(",
"\"experiment\"",
")",
"alternative_name",
"=",
"request",
".",
"POST",
".",
"get",
"(",
"\"alternative\"",
")",
"if",
"not",
"(",
"experiment_name",
"and",
"alternative_name",
")",
":",
"return",
"HttpResponseBadRequest",
"(",
")",
"participant",
"(",
"request",
")",
".",
"set_alternative",
"(",
"experiment_name",
",",
"alternative_name",
")",
"return",
"JsonResponse",
"(",
"{",
"'success'",
":",
"True",
",",
"'alternative'",
":",
"participant",
"(",
"request",
")",
".",
"get_alternative",
"(",
"experiment_name",
")",
"}",
")"
] | Allows the admin user to change their assigned alternative | [
"Allows",
"the",
"admin",
"user",
"to",
"change",
"their",
"assigned",
"alternative"
] | 1f45e9f8a108b51e44918daa647269b2b8d43f1d | https://github.com/mixcloud/django-experiments/blob/1f45e9f8a108b51e44918daa647269b2b8d43f1d/experiments/admin.py#L112-L128 | train | 238,487 |
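A hedged sketch of exercising this endpoint with Django's test client; the URL below is an assumption, since the route is wired up via the admin class's `get_urls`, which is not shown in this section:

from django.test import Client

client = Client()
client.login(username='admin', password='secret')  # needs change_experiment perm
resp = client.post('/admin/experiments/experiment/set_alternative/',  # assumed URL
                   {'experiment': 'signup_banner', 'alternative': 'variant_a'})
# Expected JSON on success: {"success": true, "alternative": "variant_a"}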
mixcloud/django-experiments | experiments/admin.py | ExperimentAdmin.set_state_view | def set_state_view(self, request):
"""
Changes the experiment state
"""
if not request.user.has_perm('experiments.change_experiment'):
return HttpResponseForbidden()
try:
state = int(request.POST.get("state", ""))
except ValueError:
return HttpResponseBadRequest()
try:
experiment = Experiment.objects.get(name=request.POST.get("experiment"))
except Experiment.DoesNotExist:
return HttpResponseBadRequest()
experiment.state = state
if state == 0:
experiment.end_date = timezone.now()
else:
experiment.end_date = None
experiment.save()
return HttpResponse() | python | def set_state_view(self, request):
"""
Changes the experiment state
"""
if not request.user.has_perm('experiments.change_experiment'):
return HttpResponseForbidden()
try:
state = int(request.POST.get("state", ""))
except ValueError:
return HttpResponseBadRequest()
try:
experiment = Experiment.objects.get(name=request.POST.get("experiment"))
except Experiment.DoesNotExist:
return HttpResponseBadRequest()
experiment.state = state
if state == 0:
experiment.end_date = timezone.now()
else:
experiment.end_date = None
experiment.save()
return HttpResponse() | [
"def",
"set_state_view",
"(",
"self",
",",
"request",
")",
":",
"if",
"not",
"request",
".",
"user",
".",
"has_perm",
"(",
"'experiments.change_experiment'",
")",
":",
"return",
"HttpResponseForbidden",
"(",
")",
"try",
":",
"state",
"=",
"int",
"(",
"request",
".",
"POST",
".",
"get",
"(",
"\"state\"",
",",
"\"\"",
")",
")",
"except",
"ValueError",
":",
"return",
"HttpResponseBadRequest",
"(",
")",
"try",
":",
"experiment",
"=",
"Experiment",
".",
"objects",
".",
"get",
"(",
"name",
"=",
"request",
".",
"POST",
".",
"get",
"(",
"\"experiment\"",
")",
")",
"except",
"Experiment",
".",
"DoesNotExist",
":",
"return",
"HttpResponseBadRequest",
"(",
")",
"experiment",
".",
"state",
"=",
"state",
"if",
"state",
"==",
"0",
":",
"experiment",
".",
"end_date",
"=",
"timezone",
".",
"now",
"(",
")",
"else",
":",
"experiment",
".",
"end_date",
"=",
"None",
"experiment",
".",
"save",
"(",
")",
"return",
"HttpResponse",
"(",
")"
] | Changes the experiment state | [
"Changes",
"the",
"experiment",
"state"
] | 1f45e9f8a108b51e44918daa647269b2b8d43f1d | https://github.com/mixcloud/django-experiments/blob/1f45e9f8a108b51e44918daa647269b2b8d43f1d/experiments/admin.py#L130-L156 | train | 238,488 |
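The companion endpoint, with the same caveat about the assumed URL. State `0` disables the experiment and stamps `end_date`; any other integer clears it:

resp = client.post('/admin/experiments/experiment/set_state/',  # assumed URL
                   {'experiment': 'signup_banner', 'state': '0'})
# A non-integer state or an unknown experiment name yields HTTP 400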
mixcloud/django-experiments | experiments/utils.py | WebUser.get_alternative | def get_alternative(self, experiment_name):
"""
Get the alternative this user is enrolled in.
"""
experiment = None
try:
# catching the KeyError instead of using .get so that the experiment is auto created if desired
experiment = experiment_manager[experiment_name]
except KeyError:
pass
if experiment:
if experiment.is_displaying_alternatives():
alternative = self._get_enrollment(experiment)
if alternative is not None:
return alternative
else:
return experiment.default_alternative
return conf.CONTROL_GROUP | python | def get_alternative(self, experiment_name):
"""
Get the alternative this user is enrolled in.
"""
experiment = None
try:
# catching the KeyError instead of using .get so that the experiment is auto created if desired
experiment = experiment_manager[experiment_name]
except KeyError:
pass
if experiment:
if experiment.is_displaying_alternatives():
alternative = self._get_enrollment(experiment)
if alternative is not None:
return alternative
else:
return experiment.default_alternative
return conf.CONTROL_GROUP | [
"def",
"get_alternative",
"(",
"self",
",",
"experiment_name",
")",
":",
"experiment",
"=",
"None",
"try",
":",
"# catching the KeyError instead of using .get so that the experiment is auto created if desired",
"experiment",
"=",
"experiment_manager",
"[",
"experiment_name",
"]",
"except",
"KeyError",
":",
"pass",
"if",
"experiment",
":",
"if",
"experiment",
".",
"is_displaying_alternatives",
"(",
")",
":",
"alternative",
"=",
"self",
".",
"_get_enrollment",
"(",
"experiment",
")",
"if",
"alternative",
"is",
"not",
"None",
":",
"return",
"alternative",
"else",
":",
"return",
"experiment",
".",
"default_alternative",
"return",
"conf",
".",
"CONTROL_GROUP"
] | Get the alternative this user is enrolled in. | [
"Get",
"the",
"alternative",
"this",
"user",
"is",
"enrolled",
"in",
"."
] | 1f45e9f8a108b51e44918daa647269b2b8d43f1d | https://github.com/mixcloud/django-experiments/blob/1f45e9f8a108b51e44918daa647269b2b8d43f1d/experiments/utils.py#L102-L119 | train | 238,489 |
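A typical call site, sketched; `participant` is the factory used by the admin views earlier in this dump to build a `WebUser` from a request:

from experiments.utils import participant

def signup_view(request):
    alternative = participant(request).get_alternative('signup_banner')
    if alternative == 'variant_a':
        pass  # render the variant; the control group name is the fallback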
mixcloud/django-experiments | experiments/utils.py | WebUser.set_alternative | def set_alternative(self, experiment_name, alternative):
"""Explicitly set the alternative the user is enrolled in for the specified experiment.
This allows you to change a user between alternatives. The user and goal counts for the new
alternative will be incremented, but those for the old one will not be decremented. The user will
be enrolled in the experiment even if the experiment would not normally accept this user."""
experiment = experiment_manager.get_experiment(experiment_name)
if experiment:
self._set_enrollment(experiment, alternative) | python | def set_alternative(self, experiment_name, alternative):
"""Explicitly set the alternative the user is enrolled in for the specified experiment.
This allows you to change a user between alternatives. The user and goal counts for the new
alternative will be incremented, but those for the old one will not be decremented. The user will
be enrolled in the experiment even if the experiment would not normally accept this user."""
experiment = experiment_manager.get_experiment(experiment_name)
if experiment:
self._set_enrollment(experiment, alternative) | [
"def",
"set_alternative",
"(",
"self",
",",
"experiment_name",
",",
"alternative",
")",
":",
"experiment",
"=",
"experiment_manager",
".",
"get_experiment",
"(",
"experiment_name",
")",
"if",
"experiment",
":",
"self",
".",
"_set_enrollment",
"(",
"experiment",
",",
"alternative",
")"
] | Explicitly set the alternative the user is enrolled in for the specified experiment.
This allows you to change a user between alternatives. The user and goal counts for the new
alternative will be incremented, but those for the old one will not be decremented. The user will
be enrolled in the experiment even if the experiment would not normally accept this user. | [
"Explicitly",
"set",
"the",
"alternative",
"the",
"user",
"is",
"enrolled",
"in",
"for",
"the",
"specified",
"experiment",
"."
] | 1f45e9f8a108b51e44918daa647269b2b8d43f1d | https://github.com/mixcloud/django-experiments/blob/1f45e9f8a108b51e44918daa647269b2b8d43f1d/experiments/utils.py#L121-L129 | train | 238,490 |
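Inside a view, forcing an enrollment per the docstring above; note that counts for the previously assigned alternative are not rolled back:

participant(request).set_alternative('signup_banner', 'variant_b')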
mixcloud/django-experiments | experiments/utils.py | WebUser.goal | def goal(self, goal_name, count=1):
"""Record that this user has performed a particular goal
This will update the goal stats for all experiments the user is enrolled in."""
for enrollment in self._get_all_enrollments():
if enrollment.experiment.is_displaying_alternatives():
self._experiment_goal(enrollment.experiment, enrollment.alternative, goal_name, count) | python | def goal(self, goal_name, count=1):
"""Record that this user has performed a particular goal
This will update the goal stats for all experiments the user is enrolled in."""
for enrollment in self._get_all_enrollments():
if enrollment.experiment.is_displaying_alternatives():
self._experiment_goal(enrollment.experiment, enrollment.alternative, goal_name, count) | [
"def",
"goal",
"(",
"self",
",",
"goal_name",
",",
"count",
"=",
"1",
")",
":",
"for",
"enrollment",
"in",
"self",
".",
"_get_all_enrollments",
"(",
")",
":",
"if",
"enrollment",
".",
"experiment",
".",
"is_displaying_alternatives",
"(",
")",
":",
"self",
".",
"_experiment_goal",
"(",
"enrollment",
".",
"experiment",
",",
"enrollment",
".",
"alternative",
",",
"goal_name",
",",
"count",
")"
] | Record that this user has performed a particular goal
This will update the goal stats for all experiments the user is enrolled in. | [
"Record",
"that",
"this",
"user",
"has",
"performed",
"a",
"particular",
"goal"
] | 1f45e9f8a108b51e44918daa647269b2b8d43f1d | https://github.com/mixcloud/django-experiments/blob/1f45e9f8a108b51e44918daa647269b2b8d43f1d/experiments/utils.py#L131-L137 | train | 238,491 |
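Recording conversions from inside a view; the goal name is free-form and the count defaults to 1:

participant(request).goal('signed_up')        # count of 1
participant(request).goal('items_bought', 3)  # explicit count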
mixcloud/django-experiments | experiments/utils.py | WebUser.incorporate | def incorporate(self, other_user):
"""Incorporate all enrollments and goals performed by the other user
If this user is not enrolled in a given experiment, the results for the
other user are incorporated. For experiments this user is already
enrolled in, the results of the other user are discarded.
This takes a relatively large amount of time for each experiment the other
user is enrolled in."""
for enrollment in other_user._get_all_enrollments():
if not self._get_enrollment(enrollment.experiment):
self._set_enrollment(enrollment.experiment, enrollment.alternative, enrollment.enrollment_date, enrollment.last_seen)
goals = self.experiment_counter.participant_goal_frequencies(enrollment.experiment, enrollment.alternative, other_user._participant_identifier())
for goal_name, count in goals:
self.experiment_counter.increment_goal_count(enrollment.experiment, enrollment.alternative, goal_name, self._participant_identifier(), count)
other_user._cancel_enrollment(enrollment.experiment) | python | def incorporate(self, other_user):
"""Incorporate all enrollments and goals performed by the other user
If this user is not enrolled in a given experiment, the results for the
other user are incorporated. For experiments this user is already
enrolled in, the results of the other user are discarded.
This takes a relatively large amount of time for each experiment the other
user is enrolled in."""
for enrollment in other_user._get_all_enrollments():
if not self._get_enrollment(enrollment.experiment):
self._set_enrollment(enrollment.experiment, enrollment.alternative, enrollment.enrollment_date, enrollment.last_seen)
goals = self.experiment_counter.participant_goal_frequencies(enrollment.experiment, enrollment.alternative, other_user._participant_identifier())
for goal_name, count in goals:
self.experiment_counter.increment_goal_count(enrollment.experiment, enrollment.alternative, goal_name, self._participant_identifier(), count)
other_user._cancel_enrollment(enrollment.experiment) | [
"def",
"incorporate",
"(",
"self",
",",
"other_user",
")",
":",
"for",
"enrollment",
"in",
"other_user",
".",
"_get_all_enrollments",
"(",
")",
":",
"if",
"not",
"self",
".",
"_get_enrollment",
"(",
"enrollment",
".",
"experiment",
")",
":",
"self",
".",
"_set_enrollment",
"(",
"enrollment",
".",
"experiment",
",",
"enrollment",
".",
"alternative",
",",
"enrollment",
".",
"enrollment_date",
",",
"enrollment",
".",
"last_seen",
")",
"goals",
"=",
"self",
".",
"experiment_counter",
".",
"participant_goal_frequencies",
"(",
"enrollment",
".",
"experiment",
",",
"enrollment",
".",
"alternative",
",",
"other_user",
".",
"_participant_identifier",
"(",
")",
")",
"for",
"goal_name",
",",
"count",
"in",
"goals",
":",
"self",
".",
"experiment_counter",
".",
"increment_goal_count",
"(",
"enrollment",
".",
"experiment",
",",
"enrollment",
".",
"alternative",
",",
"goal_name",
",",
"self",
".",
"_participant_identifier",
"(",
")",
",",
"count",
")",
"other_user",
".",
"_cancel_enrollment",
"(",
"enrollment",
".",
"experiment",
")"
] | Incorporate all enrollments and goals performed by the other user
If this user is not enrolled in a given experiment, the results for the
other user are incorporated. For experiments this user is already
enrolled in, the results of the other user are discarded.
This takes a relatively large amount of time for each experiment the other
user is enrolled in. | [
"Incorporate",
"all",
"enrollments",
"and",
"goals",
"performed",
"by",
"the",
"other",
"user"
] | 1f45e9f8a108b51e44918daa647269b2b8d43f1d | https://github.com/mixcloud/django-experiments/blob/1f45e9f8a108b51e44918daa647269b2b8d43f1d/experiments/utils.py#L143-L158 | train | 238,492 |
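A plausible use is merging an anonymous session participant into the authenticated one at login. The keyword arguments to `participant` below are assumptions, since its signature is not part of this excerpt:

session_user = participant(session=request.session)  # assumed kwargs
auth_user = participant(user=request.user)           # assumed kwargs
auth_user.incorporate(session_user)
# On conflict the authenticated user's existing enrollments win, per the docstring.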
mixcloud/django-experiments | experiments/utils.py | WebUser.visit | def visit(self):
"""Record that the user has visited the site for the purposes of retention tracking"""
for enrollment in self._get_all_enrollments():
if enrollment.experiment.is_displaying_alternatives():
# We have two different goals, VISIT_NOT_PRESENT_COUNT_GOAL and VISIT_PRESENT_COUNT_GOAL.
# VISIT_PRESENT_COUNT_GOAL will avoid firing on the first time we set last_seen as it is assumed that the user is
# on the page and therefore it would automatically trigger and be valueless.
# This should be used for experiments when we enroll the user as part of the pageview,
# alternatively we can use the NOT_PRESENT GOAL which will increment on the first pageview,
# this is mainly useful for notification actions when the user isn't initially present.
if not enrollment.last_seen:
self._experiment_goal(enrollment.experiment, enrollment.alternative, conf.VISIT_NOT_PRESENT_COUNT_GOAL, 1)
self._set_last_seen(enrollment.experiment, now())
elif now() - enrollment.last_seen >= timedelta(hours=conf.SESSION_LENGTH):
self._experiment_goal(enrollment.experiment, enrollment.alternative, conf.VISIT_NOT_PRESENT_COUNT_GOAL, 1)
self._experiment_goal(enrollment.experiment, enrollment.alternative, conf.VISIT_PRESENT_COUNT_GOAL, 1)
self._set_last_seen(enrollment.experiment, now()) | python | def visit(self):
"""Record that the user has visited the site for the purposes of retention tracking"""
for enrollment in self._get_all_enrollments():
if enrollment.experiment.is_displaying_alternatives():
# We have two different goals, VISIT_NOT_PRESENT_COUNT_GOAL and VISIT_PRESENT_COUNT_GOAL.
# VISIT_PRESENT_COUNT_GOAL will avoid firing on the first time we set last_seen as it is assumed that the user is
# on the page and therefore it would automatically trigger and be valueless.
# This should be used for experiments when we enroll the user as part of the pageview,
# alternatively we can use the NOT_PRESENT GOAL which will increment on the first pageview,
# this is mainly useful for notification actions when the user isn't initially present.
if not enrollment.last_seen:
self._experiment_goal(enrollment.experiment, enrollment.alternative, conf.VISIT_NOT_PRESENT_COUNT_GOAL, 1)
self._set_last_seen(enrollment.experiment, now())
elif now() - enrollment.last_seen >= timedelta(hours=conf.SESSION_LENGTH):
self._experiment_goal(enrollment.experiment, enrollment.alternative, conf.VISIT_NOT_PRESENT_COUNT_GOAL, 1)
self._experiment_goal(enrollment.experiment, enrollment.alternative, conf.VISIT_PRESENT_COUNT_GOAL, 1)
self._set_last_seen(enrollment.experiment, now()) | [
"def",
"visit",
"(",
"self",
")",
":",
"for",
"enrollment",
"in",
"self",
".",
"_get_all_enrollments",
"(",
")",
":",
"if",
"enrollment",
".",
"experiment",
".",
"is_displaying_alternatives",
"(",
")",
":",
"# We have two different goals, VISIT_NOT_PRESENT_COUNT_GOAL and VISIT_PRESENT_COUNT_GOAL.",
"# VISIT_PRESENT_COUNT_GOAL will avoid firing on the first time we set last_seen as it is assumed that the user is",
"# on the page and therefore it would automatically trigger and be valueless.",
"# This should be used for experiments when we enroll the user as part of the pageview,",
"# alternatively we can use the NOT_PRESENT GOAL which will increment on the first pageview,",
"# this is mainly useful for notification actions when the users isn't initially present.",
"if",
"not",
"enrollment",
".",
"last_seen",
":",
"self",
".",
"_experiment_goal",
"(",
"enrollment",
".",
"experiment",
",",
"enrollment",
".",
"alternative",
",",
"conf",
".",
"VISIT_NOT_PRESENT_COUNT_GOAL",
",",
"1",
")",
"self",
".",
"_set_last_seen",
"(",
"enrollment",
".",
"experiment",
",",
"now",
"(",
")",
")",
"elif",
"now",
"(",
")",
"-",
"enrollment",
".",
"last_seen",
">=",
"timedelta",
"(",
"hours",
"=",
"conf",
".",
"SESSION_LENGTH",
")",
":",
"self",
".",
"_experiment_goal",
"(",
"enrollment",
".",
"experiment",
",",
"enrollment",
".",
"alternative",
",",
"conf",
".",
"VISIT_NOT_PRESENT_COUNT_GOAL",
",",
"1",
")",
"self",
".",
"_experiment_goal",
"(",
"enrollment",
".",
"experiment",
",",
"enrollment",
".",
"alternative",
",",
"conf",
".",
"VISIT_PRESENT_COUNT_GOAL",
",",
"1",
")",
"self",
".",
"_set_last_seen",
"(",
"enrollment",
".",
"experiment",
",",
"now",
"(",
")",
")"
] | Record that the user has visited the site for the purposes of retention tracking | [
"Record",
"that",
"the",
"user",
"has",
"visited",
"the",
"site",
"for",
"the",
"purposes",
"of",
"retention",
"tracking"
] | 1f45e9f8a108b51e44918daa647269b2b8d43f1d | https://github.com/mixcloud/django-experiments/blob/1f45e9f8a108b51e44918daa647269b2b8d43f1d/experiments/utils.py#L160-L177 | train | 238,493 |
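`visit` is the kind of call made once per request, e.g. from middleware. This sketch uses Django's modern middleware style and is not the package's own implementation:

class RetentionMiddleware:
    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        response = self.get_response(request)
        participant(request).visit()  # updates last_seen / retention goals
        return response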
jgrassler/mkdocs-pandoc | mkdocs_pandoc/pandoc_converter.py | PandocConverter.flatten_pages | def flatten_pages(self, pages, level=1):
"""Recursively flattens pages data structure into a one-dimensional data structure"""
flattened = []
for page in pages:
if type(page) is list:
flattened.append(
{
'file': page[0],
'title': page[1],
'level': level,
})
if type(page) is dict:
if type(list(page.values())[0]) is str:
flattened.append(
{
'file': list(page.values())[0],
'title': list(page.keys())[0],
'level': level,
})
if type(list(page.values())[0]) is list:
flattened.extend(
self.flatten_pages(
list(page.values())[0],
level + 1)
)
return flattened | python | def flatten_pages(self, pages, level=1):
"""Recursively flattens pages data structure into a one-dimensional data structure"""
flattened = []
for page in pages:
if type(page) is list:
flattened.append(
{
'file': page[0],
'title': page[1],
'level': level,
})
if type(page) is dict:
if type(list(page.values())[0]) is str:
flattened.append(
{
'file': list(page.values())[0],
'title': list(page.keys())[0],
'level': level,
})
if type(list(page.values())[0]) is list:
flattened.extend(
self.flatten_pages(
list(page.values())[0],
level + 1)
)
return flattened | [
"def",
"flatten_pages",
"(",
"self",
",",
"pages",
",",
"level",
"=",
"1",
")",
":",
"flattened",
"=",
"[",
"]",
"for",
"page",
"in",
"pages",
":",
"if",
"type",
"(",
"page",
")",
"is",
"list",
":",
"flattened",
".",
"append",
"(",
"{",
"'file'",
":",
"page",
"[",
"0",
"]",
",",
"'title'",
":",
"page",
"[",
"1",
"]",
",",
"'level'",
":",
"level",
",",
"}",
")",
"if",
"type",
"(",
"page",
")",
"is",
"dict",
":",
"if",
"type",
"(",
"list",
"(",
"page",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
")",
"is",
"str",
":",
"flattened",
".",
"append",
"(",
"{",
"'file'",
":",
"list",
"(",
"page",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
",",
"'title'",
":",
"list",
"(",
"page",
".",
"keys",
"(",
")",
")",
"[",
"0",
"]",
",",
"'level'",
":",
"level",
",",
"}",
")",
"if",
"type",
"(",
"list",
"(",
"page",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
")",
"is",
"list",
":",
"flattened",
".",
"extend",
"(",
"self",
".",
"flatten_pages",
"(",
"list",
"(",
"page",
".",
"values",
"(",
")",
")",
"[",
"0",
"]",
",",
"level",
"+",
"1",
")",
")",
"return",
"flattened"
] | Recursively flattens pages data structure into a one-dimensional data structure | [
"Recursively",
"flattens",
"pages",
"data",
"structure",
"into",
"a",
"one",
"-",
"dimensional",
"data",
"structure"
] | 11edfb90830325dca85bd0369bb8e2da8d6815b3 | https://github.com/jgrassler/mkdocs-pandoc/blob/11edfb90830325dca85bd0369bb8e2da8d6815b3/mkdocs_pandoc/pandoc_converter.py#L68-L96 | train | 238,494 |
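A worked example traced from the code above; note that a section whose value is a list contributes no entry of its own, only a deeper `level` for its children:

pages = [
    {'Home': 'index.md'},
    {'User Guide': [{'Install': 'install.md'}, {'Usage': 'usage.md'}]},
]
# flatten_pages(pages) ->
# [{'file': 'index.md',   'title': 'Home',    'level': 1},
#  {'file': 'install.md', 'title': 'Install', 'level': 2},
#  {'file': 'usage.md',   'title': 'Usage',   'level': 2}]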
jgrassler/mkdocs-pandoc | mkdocs_pandoc/pandoc_converter.py | PandocConverter.convert | def convert(self):
"""User-facing conversion method. Returns pandoc document as a list of
lines."""
lines = []
pages = self.flatten_pages(self.config['pages'])
f_exclude = mkdocs_pandoc.filters.exclude.ExcludeFilter(
exclude=self.exclude)
f_include = mkdocs_pandoc.filters.include.IncludeFilter(
base_path=self.config['docs_dir'],
encoding=self.encoding)
# First, do the processing that must be done on a per-file basis:
# Adjust header levels, insert chapter headings and adjust image paths.
f_headlevel = mkdocs_pandoc.filters.headlevels.HeadlevelFilter(pages)
for page in pages:
fname = os.path.join(self.config['docs_dir'], page['file'])
try:
p = codecs.open(fname, 'r', self.encoding)
except IOError as e:
raise FatalError("Couldn't open %s for reading: %s" % (fname,
e.strerror), 1)
f_chapterhead = mkdocs_pandoc.filters.chapterhead.ChapterheadFilter(
headlevel=page['level'],
title=page['title']
)
f_image = mkdocs_pandoc.filters.images.ImageFilter(
filename=page['file'],
image_path=self.config['site_dir'],
image_ext=self.image_ext)
lines_tmp = []
for line in p.readlines():
lines_tmp.append(line.rstrip())
if self.exclude:
lines_tmp = f_exclude.run(lines_tmp)
if self.filter_include:
lines_tmp = f_include.run(lines_tmp)
lines_tmp = f_headlevel.run(lines_tmp)
lines_tmp = f_chapterhead.run(lines_tmp)
lines_tmp = f_image.run(lines_tmp)
lines.extend(lines_tmp)
# Add an empty line between pages to prevent text from a previous
# file from butting up against headers in a subsequent file.
lines.append('')
# Strip anchor tags
if self.strip_anchors:
lines = mkdocs_pandoc.filters.anchors.AnchorFilter().run(lines)
# Fix cross references
if self.filter_xrefs:
lines = mkdocs_pandoc.filters.xref.XrefFilter().run(lines)
if self.filter_toc:
lines = mkdocs_pandoc.filters.toc.TocFilter().run(lines)
if self.filter_tables:
lines = mkdocs_pandoc.filters.tables.TableFilter().run(lines)
return(lines) | python | def convert(self):
"""User-facing conversion method. Returns pandoc document as a list of
lines."""
lines = []
pages = self.flatten_pages(self.config['pages'])
f_exclude = mkdocs_pandoc.filters.exclude.ExcludeFilter(
exclude=self.exclude)
f_include = mkdocs_pandoc.filters.include.IncludeFilter(
base_path=self.config['docs_dir'],
encoding=self.encoding)
# First, do the processing that must be done on a per-file basis:
# Adjust header levels, insert chapter headings and adjust image paths.
f_headlevel = mkdocs_pandoc.filters.headlevels.HeadlevelFilter(pages)
for page in pages:
fname = os.path.join(self.config['docs_dir'], page['file'])
try:
p = codecs.open(fname, 'r', self.encoding)
except IOError as e:
raise FatalError("Couldn't open %s for reading: %s" % (fname,
e.strerror), 1)
f_chapterhead = mkdocs_pandoc.filters.chapterhead.ChapterheadFilter(
headlevel=page['level'],
title=page['title']
)
f_image = mkdocs_pandoc.filters.images.ImageFilter(
filename=page['file'],
image_path=self.config['site_dir'],
image_ext=self.image_ext)
lines_tmp = []
for line in p.readlines():
lines_tmp.append(line.rstrip())
if self.exclude:
lines_tmp = f_exclude.run(lines_tmp)
if self.filter_include:
lines_tmp = f_include.run(lines_tmp)
lines_tmp = f_headlevel.run(lines_tmp)
lines_tmp = f_chapterhead.run(lines_tmp)
lines_tmp = f_image.run(lines_tmp)
lines.extend(lines_tmp)
# Add an empty line between pages to prevent text from a previous
# file from butting up against headers in a subsequent file.
lines.append('')
# Strip anchor tags
if self.strip_anchors:
lines = mkdocs_pandoc.filters.anchors.AnchorFilter().run(lines)
# Fix cross references
if self.filter_xrefs:
lines = mkdocs_pandoc.filters.xref.XrefFilter().run(lines)
if self.filter_toc:
lines = mkdocs_pandoc.filters.toc.TocFilter().run(lines)
if self.filter_tables:
lines = mkdocs_pandoc.filters.tables.TableFilter().run(lines)
return(lines) | [
"def",
"convert",
"(",
"self",
")",
":",
"lines",
"=",
"[",
"]",
"pages",
"=",
"self",
".",
"flatten_pages",
"(",
"self",
".",
"config",
"[",
"'pages'",
"]",
")",
"f_exclude",
"=",
"mkdocs_pandoc",
".",
"filters",
".",
"exclude",
".",
"ExcludeFilter",
"(",
"exclude",
"=",
"self",
".",
"exclude",
")",
"f_include",
"=",
"mkdocs_pandoc",
".",
"filters",
".",
"include",
".",
"IncludeFilter",
"(",
"base_path",
"=",
"self",
".",
"config",
"[",
"'docs_dir'",
"]",
",",
"encoding",
"=",
"self",
".",
"encoding",
")",
"# First, do the processing that must be done on a per-file basis:",
"# Adjust header levels, insert chapter headings and adjust image paths.",
"f_headlevel",
"=",
"mkdocs_pandoc",
".",
"filters",
".",
"headlevels",
".",
"HeadlevelFilter",
"(",
"pages",
")",
"for",
"page",
"in",
"pages",
":",
"fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"config",
"[",
"'docs_dir'",
"]",
",",
"page",
"[",
"'file'",
"]",
")",
"try",
":",
"p",
"=",
"codecs",
".",
"open",
"(",
"fname",
",",
"'r'",
",",
"self",
".",
"encoding",
")",
"except",
"IOError",
"as",
"e",
":",
"raise",
"FatalError",
"(",
"\"Couldn't open %s for reading: %s\"",
"%",
"(",
"fname",
",",
"e",
".",
"strerror",
")",
",",
"1",
")",
"f_chapterhead",
"=",
"mkdocs_pandoc",
".",
"filters",
".",
"chapterhead",
".",
"ChapterheadFilter",
"(",
"headlevel",
"=",
"page",
"[",
"'level'",
"]",
",",
"title",
"=",
"page",
"[",
"'title'",
"]",
")",
"f_image",
"=",
"mkdocs_pandoc",
".",
"filters",
".",
"images",
".",
"ImageFilter",
"(",
"filename",
"=",
"page",
"[",
"'file'",
"]",
",",
"image_path",
"=",
"self",
".",
"config",
"[",
"'site_dir'",
"]",
",",
"image_ext",
"=",
"self",
".",
"image_ext",
")",
"lines_tmp",
"=",
"[",
"]",
"for",
"line",
"in",
"p",
".",
"readlines",
"(",
")",
":",
"lines_tmp",
".",
"append",
"(",
"line",
".",
"rstrip",
"(",
")",
")",
"if",
"self",
".",
"exclude",
":",
"lines_tmp",
"=",
"f_exclude",
".",
"run",
"(",
"lines_tmp",
")",
"if",
"self",
".",
"filter_include",
":",
"lines_tmp",
"=",
"f_include",
".",
"run",
"(",
"lines_tmp",
")",
"lines_tmp",
"=",
"f_headlevel",
".",
"run",
"(",
"lines_tmp",
")",
"lines_tmp",
"=",
"f_chapterhead",
".",
"run",
"(",
"lines_tmp",
")",
"lines_tmp",
"=",
"f_image",
".",
"run",
"(",
"lines_tmp",
")",
"lines",
".",
"extend",
"(",
"lines_tmp",
")",
"# Add an empty line between pages to prevent text from a previous",
"# file from butting up against headers in a subsequent file.",
"lines",
".",
"append",
"(",
"''",
")",
"# Strip anchor tags",
"if",
"self",
".",
"strip_anchors",
":",
"lines",
"=",
"mkdocs_pandoc",
".",
"filters",
".",
"anchors",
".",
"AnchorFilter",
"(",
")",
".",
"run",
"(",
"lines",
")",
"# Fix cross references",
"if",
"self",
".",
"filter_xrefs",
":",
"lines",
"=",
"mkdocs_pandoc",
".",
"filters",
".",
"xref",
".",
"XrefFilter",
"(",
")",
".",
"run",
"(",
"lines",
")",
"if",
"self",
".",
"filter_toc",
":",
"lines",
"=",
"mkdocs_pandoc",
".",
"filters",
".",
"toc",
".",
"TocFilter",
"(",
")",
".",
"run",
"(",
"lines",
")",
"if",
"self",
".",
"filter_tables",
":",
"lines",
"=",
"mkdocs_pandoc",
".",
"filters",
".",
"tables",
".",
"TableFilter",
"(",
")",
".",
"run",
"(",
"lines",
")",
"return",
"(",
"lines",
")"
] | User-facing conversion method. Returns pandoc document as a list of
lines. | [
"User",
"-",
"facing",
"conversion",
"method",
".",
"Returns",
"pandoc",
"document",
"as",
"a",
"list",
"of",
"lines",
"."
] | 11edfb90830325dca85bd0369bb8e2da8d6815b3 | https://github.com/jgrassler/mkdocs-pandoc/blob/11edfb90830325dca85bd0369bb8e2da8d6815b3/mkdocs_pandoc/pandoc_converter.py#L98-L167 | train | 238,495 |
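A hedged driver sketch; the constructor arguments are assumptions, since only `convert` itself appears in this section:

converter = PandocConverter(config_file='mkdocs.yml')  # assumed constructor
with open('combined.md', 'w') as out:
    out.write('\n'.join(converter.convert()))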
jgrassler/mkdocs-pandoc | mkdocs_pandoc/filters/tables.py | TableFilter.blocks | def blocks(self, lines):
"""Groups lines into markdown blocks"""
state = markdown.blockparser.State()
blocks = []
# We use three states: start, ``` and '\n'
state.set('start')
# index of current block
currblock = 0
for line in lines:
line += '\n'
if state.isstate('start'):
if line[:3] == '```':
state.set('```')
else:
state.set('\n')
blocks.append('')
currblock = len(blocks) - 1
else:
marker = line[:3] # Will capture either '\n' or '```'
if state.isstate(marker):
state.reset()
blocks[currblock] += line
return blocks | python | def blocks(self, lines):
"""Groups lines into markdown blocks"""
state = markdown.blockparser.State()
blocks = []
# We use three states: start, ``` and '\n'
state.set('start')
# index of current block
currblock = 0
for line in lines:
line += '\n'
if state.isstate('start'):
if line[:3] == '```':
state.set('```')
else:
state.set('\n')
blocks.append('')
currblock = len(blocks) - 1
else:
marker = line[:3] # Will capture either '\n' or '```'
if state.isstate(marker):
state.reset()
blocks[currblock] += line
return blocks | [
"def",
"blocks",
"(",
"self",
",",
"lines",
")",
":",
"state",
"=",
"markdown",
".",
"blockparser",
".",
"State",
"(",
")",
"blocks",
"=",
"[",
"]",
"# We use three states: start, ``` and '\\n'",
"state",
".",
"set",
"(",
"'start'",
")",
"# index of current block",
"currblock",
"=",
"0",
"for",
"line",
"in",
"lines",
":",
"line",
"+=",
"'\\n'",
"if",
"state",
".",
"isstate",
"(",
"'start'",
")",
":",
"if",
"line",
"[",
":",
"3",
"]",
"==",
"'```'",
":",
"state",
".",
"set",
"(",
"'```'",
")",
"else",
":",
"state",
".",
"set",
"(",
"'\\n'",
")",
"blocks",
".",
"append",
"(",
"''",
")",
"currblock",
"=",
"len",
"(",
"blocks",
")",
"-",
"1",
"else",
":",
"marker",
"=",
"line",
"[",
":",
"3",
"]",
"# Will capture either '\\n' or '```'",
"if",
"state",
".",
"isstate",
"(",
"marker",
")",
":",
"state",
".",
"reset",
"(",
")",
"blocks",
"[",
"currblock",
"]",
"+=",
"line",
"return",
"blocks"
] | Groups lines into markdown blocks | [
"Groups",
"lines",
"into",
"markdown",
"blocks"
] | 11edfb90830325dca85bd0369bb8e2da8d6815b3 | https://github.com/jgrassler/mkdocs-pandoc/blob/11edfb90830325dca85bd0369bb8e2da8d6815b3/mkdocs_pandoc/filters/tables.py#L31-L57 | train | 238,496 |
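Tracing the three-state grouping: blank lines close ordinary blocks, while a ``` fence keeps everything up to the matching fence in a single block. The no-argument `TableFilter()` construction is an assumption:

f = TableFilter()  # assumed no-arg constructor
md = ['Intro', '', '```', 'code', '', 'still code', '```', 'After']
# f.blocks(md) ->
# ['Intro\n\n', '```\ncode\n\nstill code\n```\n', 'After\n']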
jgrassler/mkdocs-pandoc | mkdocs_pandoc/filters/tables.py | TableFilter.ruler_line | def ruler_line(self, widths, linetype='-'):
"""Generates a ruler line for separating rows from each other"""
cells = []
for w in widths:
cells.append(linetype * (w+2))
return '+' + '+'.join(cells) + '+' | python | def ruler_line(self, widths, linetype='-'):
"""Generates a ruler line for separating rows from each other"""
cells = []
for w in widths:
cells.append(linetype * (w+2))
return '+' + '+'.join(cells) + '+' | [
"def",
"ruler_line",
"(",
"self",
",",
"widths",
",",
"linetype",
"=",
"'-'",
")",
":",
"cells",
"=",
"[",
"]",
"for",
"w",
"in",
"widths",
":",
"cells",
".",
"append",
"(",
"linetype",
"*",
"(",
"w",
"+",
"2",
")",
")",
"return",
"'+'",
"+",
"'+'",
".",
"join",
"(",
"cells",
")",
"+",
"'+'"
] | Generates a ruler line for separating rows from each other | [
"Generates",
"a",
"ruler",
"line",
"for",
"separating",
"rows",
"from",
"each",
"other"
] | 11edfb90830325dca85bd0369bb8e2da8d6815b3 | https://github.com/jgrassler/mkdocs-pandoc/blob/11edfb90830325dca85bd0369bb8e2da8d6815b3/mkdocs_pandoc/filters/tables.py#L182-L187 | train | 238,497 |
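Each cell is padded by two because `wrap_row` adds one space of gutter per side, so the rulers line up with its `'| '` / `' | '` separators:

# f.ruler_line([3, 5])               -> '+-----+-------+'
# f.ruler_line([3, 5], linetype='=') -> '+=====+=======+'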
jgrassler/mkdocs-pandoc | mkdocs_pandoc/filters/tables.py | TableFilter.wrap_row | def wrap_row(self, widths, row, width_default=None):
"""Wraps a single line table row into a fixed width, multi-line table."""
lines = []
longest = 0 # longest wrapped column in row
if not width_default:
width_default = self.width_default
# Wrap column contents
for i in range(0, len(row)):
w=width_default # column width
# Only set column width dynamically for non-rogue rows
if i < len(widths):
w = widths[i]
tw = textwrap.TextWrapper(width=w, break_on_hyphens=False)
# Wrap and left-justify
row[i] = tw.wrap(textwrap.dedent(row[i]))
# Pad with spaces up to the fixed column width
for l in range(0, len(row[i])):
row[i][l] += (w - len(row[i][l])) * ' '
if len(row[i]) > longest:
longest = len(row[i])
# Pad all columns to have the same number of lines
for i in range(0, len(row)):
w=width_default # column width
# Only set column width dynamically for non-rogue rows
if i < len(widths):
w = widths[i]
if len(row[i]) < longest:
for j in range(len(row[i]), longest):
row[i].append(w * ' ')
for l in range(0,longest):
line = []
for c in range(len(row)):
line.append(row[c][l])
line = '| ' + ' | '.join(line) + ' |'
lines.append(line)
return lines | python | def wrap_row(self, widths, row, width_default=None):
"""Wraps a single line table row into a fixed width, multi-line table."""
lines = []
longest = 0 # longest wrapped column in row
if not width_default:
width_default = self.width_default
# Wrap column contents
for i in range(0, len(row)):
w=width_default # column width
# Only set column width dynamically for non-rogue rows
if i < len(widths):
w = widths[i]
tw = textwrap.TextWrapper(width=w, break_on_hyphens=False)
# Wrap and left-justify
row[i] = tw.wrap(textwrap.dedent(row[i]))
# Pad with spaces up to the fixed column width
for l in range(0, len(row[i])):
row[i][l] += (w - len(row[i][l])) * ' '
if len(row[i]) > longest:
longest = len(row[i])
# Pad all columns to have the same number of lines
for i in range(0, len(row)):
w=width_default # column width
# Only set column width dynamically for non-rogue rows
if i < len(widths):
w = widths[i]
if len(row[i]) < longest:
for j in range(len(row[i]), longest):
row[i].append(w * ' ')
for l in range(0,longest):
line = []
for c in range(len(row)):
line.append(row[c][l])
line = '| ' + ' | '.join(line) + ' |'
lines.append(line)
return lines | [
"def",
"wrap_row",
"(",
"self",
",",
"widths",
",",
"row",
",",
"width_default",
"=",
"None",
")",
":",
"lines",
"=",
"[",
"]",
"longest",
"=",
"0",
"# longest wrapped column in row",
"if",
"not",
"width_default",
":",
"width_default",
"=",
"self",
".",
"width_default",
"# Wrap column contents",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"row",
")",
")",
":",
"w",
"=",
"width_default",
"# column width",
"# Only set column width dynamicaly for non-rogue rows",
"if",
"i",
"<",
"len",
"(",
"widths",
")",
":",
"w",
"=",
"widths",
"[",
"i",
"]",
"tw",
"=",
"textwrap",
".",
"TextWrapper",
"(",
"width",
"=",
"w",
",",
"break_on_hyphens",
"=",
"False",
")",
"# Wrap and left-justify",
"row",
"[",
"i",
"]",
"=",
"tw",
".",
"wrap",
"(",
"textwrap",
".",
"dedent",
"(",
"row",
"[",
"i",
"]",
")",
")",
"# Pad with spaces up to to fixed column width",
"for",
"l",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"row",
"[",
"i",
"]",
")",
")",
":",
"row",
"[",
"i",
"]",
"[",
"l",
"]",
"+=",
"(",
"w",
"-",
"len",
"(",
"row",
"[",
"i",
"]",
"[",
"l",
"]",
")",
")",
"*",
"' '",
"if",
"len",
"(",
"row",
"[",
"i",
"]",
")",
">",
"longest",
":",
"longest",
"=",
"len",
"(",
"row",
"[",
"i",
"]",
")",
"# Pad all columns to have the same number of lines",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"row",
")",
")",
":",
"w",
"=",
"width_default",
"# column width",
"# Only set column width dynamicaly for non-rogue rows",
"if",
"i",
"<",
"len",
"(",
"widths",
")",
":",
"w",
"=",
"widths",
"[",
"i",
"]",
"if",
"len",
"(",
"row",
"[",
"i",
"]",
")",
"<",
"longest",
":",
"for",
"j",
"in",
"range",
"(",
"len",
"(",
"row",
"[",
"i",
"]",
")",
",",
"longest",
")",
":",
"row",
"[",
"i",
"]",
".",
"append",
"(",
"w",
"*",
"' '",
")",
"for",
"l",
"in",
"range",
"(",
"0",
",",
"longest",
")",
":",
"line",
"=",
"[",
"]",
"for",
"c",
"in",
"range",
"(",
"len",
"(",
"row",
")",
")",
":",
"line",
".",
"append",
"(",
"row",
"[",
"c",
"]",
"[",
"l",
"]",
")",
"line",
"=",
"'| '",
"+",
"' | '",
".",
"join",
"(",
"line",
")",
"+",
"' |'",
"lines",
".",
"append",
"(",
"line",
")",
"return",
"lines"
] | Wraps a single line table row into a fixed width, multi-line table. | [
"Wraps",
"a",
"single",
"line",
"table",
"row",
"into",
"a",
"fixed",
"width",
"multi",
"-",
"line",
"table",
"."
] | 11edfb90830325dca85bd0369bb8e2da8d6815b3 | https://github.com/jgrassler/mkdocs-pandoc/blob/11edfb90830325dca85bd0369bb8e2da8d6815b3/mkdocs_pandoc/filters/tables.py#L190-L234 | train | 238,498 |
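A traced example; note that `wrap_row` mutates `row` in place while building the output:

widths = [6, 10]
row = ['name', 'short words only here']
# f.wrap_row(widths, row) ->
# ['| name   | short      |',
#  '|        | words only |',
#  '|        | here       |']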
mishbahr/djangocms-forms | djangocms_forms/admin.py | FormSubmissionAdmin.render_export_form | def render_export_form(self, request, context, form_url=''):
"""
Render the form submission export form.
"""
context.update({
'has_change_permission': self.has_change_permission(request),
'form_url': mark_safe(form_url),
'opts': self.opts,
'add': True,
'save_on_top': self.save_on_top,
})
return TemplateResponse(request, self.export_form_template, context) | python | def render_export_form(self, request, context, form_url=''):
"""
Render the form submission export form.
"""
context.update({
'has_change_permission': self.has_change_permission(request),
'form_url': mark_safe(form_url),
'opts': self.opts,
'add': True,
'save_on_top': self.save_on_top,
})
return TemplateResponse(request, self.export_form_template, context) | [
"def",
"render_export_form",
"(",
"self",
",",
"request",
",",
"context",
",",
"form_url",
"=",
"''",
")",
":",
"context",
".",
"update",
"(",
"{",
"'has_change_permission'",
":",
"self",
".",
"has_change_permission",
"(",
"request",
")",
",",
"'form_url'",
":",
"mark_safe",
"(",
"form_url",
")",
",",
"'opts'",
":",
"self",
".",
"opts",
",",
"'add'",
":",
"True",
",",
"'save_on_top'",
":",
"self",
".",
"save_on_top",
",",
"}",
")",
"return",
"TemplateResponse",
"(",
"request",
",",
"self",
".",
"export_form_template",
",",
"context",
")"
] | Render the form submission export form. | [
"Render",
"the",
"from",
"submission",
"export",
"form",
"."
] | 9d7a4ef9769fd5e1526921c084d6da7b8070a2c1 | https://github.com/mishbahr/djangocms-forms/blob/9d7a4ef9769fd5e1526921c084d6da7b8070a2c1/djangocms_forms/admin.py#L260-L272 | train | 238,499 |
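A hedged sketch of a caller; in practice this would be another view method on the same admin class, and the context keys here are invented:

def export_view(self, request):
    context = {'title': 'Export form submissions'}  # invented context
    return self.render_export_form(request, context, form_url='export/')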