repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
eyurtsev/FlowCytometryTools | FlowCytometryTools/gui/fc_widget.py | BaseGate.get_generation_code | def get_generation_code(self, **gencode):
"""
Generates python code that can create the gate.
"""
channels, verts = self.coordinates
channels = ', '.join(["'{}'".format(ch) for ch in channels])
verts = list(verts)
## Formatting the vertexes
# List level (must be first), used for gates that may have multiple vertexes like a polygon
if len(verts) == 1:
verts = verts[0]
# Tuple level (must be second), used for catching the number of dimensions
# on which a vertex is defined
if len(verts) == 1:
verts = verts[0]
# Format vertices to include less sigfigs
verts = apply_format(verts, '{:.3e}')
gencode.setdefault('name', self.name)
gencode.setdefault('region', self.region)
gencode.setdefault('gate_type', self._gencode_gate_class)
gencode.setdefault('verts', verts)
gencode.setdefault('channels', channels)
format_string = "{name} = {gate_type}({verts}, ({channels}), region='{region}', name='{name}')"
return format_string.format(**gencode) | python | def get_generation_code(self, **gencode):
"""
Generates python code that can create the gate.
"""
channels, verts = self.coordinates
channels = ', '.join(["'{}'".format(ch) for ch in channels])
verts = list(verts)
## Formatting the vertexes
# List level (must be first), used for gates that may have multiple vertexes like a polygon
if len(verts) == 1:
verts = verts[0]
# Tuple level (must be second), used for catching the number of dimensions
# on which a vertex is defined
if len(verts) == 1:
verts = verts[0]
# Format vertices to include less sigfigs
verts = apply_format(verts, '{:.3e}')
gencode.setdefault('name', self.name)
gencode.setdefault('region', self.region)
gencode.setdefault('gate_type', self._gencode_gate_class)
gencode.setdefault('verts', verts)
gencode.setdefault('channels', channels)
format_string = "{name} = {gate_type}({verts}, ({channels}), region='{region}', name='{name}')"
return format_string.format(**gencode) | [
"def",
"get_generation_code",
"(",
"self",
",",
"*",
"*",
"gencode",
")",
":",
"channels",
",",
"verts",
"=",
"self",
".",
"coordinates",
"channels",
"=",
"', '",
".",
"join",
"(",
"[",
"\"'{}'\"",
".",
"format",
"(",
"ch",
")",
"for",
"ch",
"in",
"c... | Generates python code that can create the gate. | [
"Generates",
"python",
"code",
"that",
"can",
"create",
"the",
"gate",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/gui/fc_widget.py#L347-L375 | train | 211,100 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/gui/fc_widget.py | BaseGate._gencode_gate_class | def _gencode_gate_class(self):
""" Returns the class name that generates this gate. """
channels, verts = self.coordinates
num_channels = len(channels)
gate_type_name = self.gate_type.__name__
if gate_type_name == 'ThresholdGate' and num_channels == 2:
gate_type_name = 'QuadGate'
return gate_type_name | python | def _gencode_gate_class(self):
""" Returns the class name that generates this gate. """
channels, verts = self.coordinates
num_channels = len(channels)
gate_type_name = self.gate_type.__name__
if gate_type_name == 'ThresholdGate' and num_channels == 2:
gate_type_name = 'QuadGate'
return gate_type_name | [
"def",
"_gencode_gate_class",
"(",
"self",
")",
":",
"channels",
",",
"verts",
"=",
"self",
".",
"coordinates",
"num_channels",
"=",
"len",
"(",
"channels",
")",
"gate_type_name",
"=",
"self",
".",
"gate_type",
".",
"__name__",
"if",
"gate_type_name",
"==",
... | Returns the class name that generates this gate. | [
"Returns",
"the",
"class",
"name",
"that",
"generates",
"this",
"gate",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/gui/fc_widget.py#L378-L385 | train | 211,101 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/gui/fc_widget.py | BaseGate.source_channels | def source_channels(self):
""" Returns a set describing the source channels on which the gate is defined. """
source_channels = [v.coordinates.keys() for v in self.verts]
return set(itertools.chain(*source_channels)) | python | def source_channels(self):
""" Returns a set describing the source channels on which the gate is defined. """
source_channels = [v.coordinates.keys() for v in self.verts]
return set(itertools.chain(*source_channels)) | [
"def",
"source_channels",
"(",
"self",
")",
":",
"source_channels",
"=",
"[",
"v",
".",
"coordinates",
".",
"keys",
"(",
")",
"for",
"v",
"in",
"self",
".",
"verts",
"]",
"return",
"set",
"(",
"itertools",
".",
"chain",
"(",
"*",
"source_channels",
")"... | Returns a set describing the source channels on which the gate is defined. | [
"Returns",
"a",
"set",
"describing",
"the",
"source",
"channels",
"on",
"which",
"the",
"gate",
"is",
"defined",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/gui/fc_widget.py#L393-L396 | train | 211,102 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/gui/fc_widget.py | FCGateManager.pick_event_handler | def pick_event_handler(self, event):
""" Handles pick events """
info = {'options': self.get_available_channels(),
'guiEvent': event.mouseevent.guiEvent,
}
if hasattr(self, 'xlabel_artist') and (event.artist == self.xlabel_artist):
info['axis_num'] = 0
self.callback(Event('axis_click', info))
if hasattr(self, 'ylabel_artist') and (event.artist == self.ylabel_artist):
info['axis_num'] = 1
self.callback(Event('axis_click', info)) | python | def pick_event_handler(self, event):
""" Handles pick events """
info = {'options': self.get_available_channels(),
'guiEvent': event.mouseevent.guiEvent,
}
if hasattr(self, 'xlabel_artist') and (event.artist == self.xlabel_artist):
info['axis_num'] = 0
self.callback(Event('axis_click', info))
if hasattr(self, 'ylabel_artist') and (event.artist == self.ylabel_artist):
info['axis_num'] = 1
self.callback(Event('axis_click', info)) | [
"def",
"pick_event_handler",
"(",
"self",
",",
"event",
")",
":",
"info",
"=",
"{",
"'options'",
":",
"self",
".",
"get_available_channels",
"(",
")",
",",
"'guiEvent'",
":",
"event",
".",
"mouseevent",
".",
"guiEvent",
",",
"}",
"if",
"hasattr",
"(",
"s... | Handles pick events | [
"Handles",
"pick",
"events"
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/gui/fc_widget.py#L603-L615 | train | 211,103 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/gui/fc_widget.py | FCGateManager.plot_data | def plot_data(self):
"""Plots the loaded data"""
# Clear the plot before plotting onto it
self.ax.cla()
if self.sample is None:
return
if self.current_channels is None:
self.current_channels = self.sample.channel_names[:2]
channels = self.current_channels
channels_to_plot = channels[0] if len(channels) == 1 else channels
self.sample.plot(channels_to_plot, ax=self.ax)
xaxis = self.ax.get_xaxis()
yaxis = self.ax.get_yaxis()
self.xlabel_artist = xaxis.get_label()
self.ylabel_artist = yaxis.get_label()
self.xlabel_artist.set_picker(5)
self.ylabel_artist.set_picker(5)
self.fig.canvas.draw() | python | def plot_data(self):
"""Plots the loaded data"""
# Clear the plot before plotting onto it
self.ax.cla()
if self.sample is None:
return
if self.current_channels is None:
self.current_channels = self.sample.channel_names[:2]
channels = self.current_channels
channels_to_plot = channels[0] if len(channels) == 1 else channels
self.sample.plot(channels_to_plot, ax=self.ax)
xaxis = self.ax.get_xaxis()
yaxis = self.ax.get_yaxis()
self.xlabel_artist = xaxis.get_label()
self.ylabel_artist = yaxis.get_label()
self.xlabel_artist.set_picker(5)
self.ylabel_artist.set_picker(5)
self.fig.canvas.draw() | [
"def",
"plot_data",
"(",
"self",
")",
":",
"# Clear the plot before plotting onto it",
"self",
".",
"ax",
".",
"cla",
"(",
")",
"if",
"self",
".",
"sample",
"is",
"None",
":",
"return",
"if",
"self",
".",
"current_channels",
"is",
"None",
":",
"self",
".",... | Plots the loaded data | [
"Plots",
"the",
"loaded",
"data"
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/gui/fc_widget.py#L784-L806 | train | 211,104 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/gui/fc_widget.py | FCGateManager.get_generation_code | def get_generation_code(self):
"""Return python code that generates all drawn gates."""
if len(self.gates) < 1:
code = ''
else:
import_list = set([gate._gencode_gate_class for gate in self.gates])
import_list = 'from FlowCytometryTools import ' + ', '.join(import_list)
code_list = [gate.get_generation_code() for gate in self.gates]
code_list.sort()
code_list = '\n'.join(code_list)
code = import_list + 2 * '\n' + code_list
self.callback(Event('generated_code',
{'code': code}))
return code | python | def get_generation_code(self):
"""Return python code that generates all drawn gates."""
if len(self.gates) < 1:
code = ''
else:
import_list = set([gate._gencode_gate_class for gate in self.gates])
import_list = 'from FlowCytometryTools import ' + ', '.join(import_list)
code_list = [gate.get_generation_code() for gate in self.gates]
code_list.sort()
code_list = '\n'.join(code_list)
code = import_list + 2 * '\n' + code_list
self.callback(Event('generated_code',
{'code': code}))
return code | [
"def",
"get_generation_code",
"(",
"self",
")",
":",
"if",
"len",
"(",
"self",
".",
"gates",
")",
"<",
"1",
":",
"code",
"=",
"''",
"else",
":",
"import_list",
"=",
"set",
"(",
"[",
"gate",
".",
"_gencode_gate_class",
"for",
"gate",
"in",
"self",
"."... | Return python code that generates all drawn gates. | [
"Return",
"python",
"code",
"that",
"generates",
"all",
"drawn",
"gates",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/gui/fc_widget.py#L808-L822 | train | 211,105 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/docstring.py | DocReplacer.replace | def replace(self):
"""Reformat values inside the self.doc_dict using self.doc_dict
TODO: Make support for partial_formatting
"""
doc_dict = self.doc_dict.copy()
for k, v in doc_dict.items():
if '{' and '}' in v:
self.doc_dict[k] = v.format(**doc_dict) | python | def replace(self):
"""Reformat values inside the self.doc_dict using self.doc_dict
TODO: Make support for partial_formatting
"""
doc_dict = self.doc_dict.copy()
for k, v in doc_dict.items():
if '{' and '}' in v:
self.doc_dict[k] = v.format(**doc_dict) | [
"def",
"replace",
"(",
"self",
")",
":",
"doc_dict",
"=",
"self",
".",
"doc_dict",
".",
"copy",
"(",
")",
"for",
"k",
",",
"v",
"in",
"doc_dict",
".",
"items",
"(",
")",
":",
"if",
"'{'",
"and",
"'}'",
"in",
"v",
":",
"self",
".",
"doc_dict",
"... | Reformat values inside the self.doc_dict using self.doc_dict
TODO: Make support for partial_formatting | [
"Reformat",
"values",
"inside",
"the",
"self",
".",
"doc_dict",
"using",
"self",
".",
"doc_dict"
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/docstring.py#L60-L68 | train | 211,106 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/docstring.py | DocReplacer._format | def _format(self, doc):
""" Formats the docstring using self.doc_dict """
if self.allow_partial_formatting:
mapping = FormatDict(self.doc_dict)
else:
mapping = self.doc_dict
formatter = string.Formatter()
return formatter.vformat(doc, (), mapping) | python | def _format(self, doc):
""" Formats the docstring using self.doc_dict """
if self.allow_partial_formatting:
mapping = FormatDict(self.doc_dict)
else:
mapping = self.doc_dict
formatter = string.Formatter()
return formatter.vformat(doc, (), mapping) | [
"def",
"_format",
"(",
"self",
",",
"doc",
")",
":",
"if",
"self",
".",
"allow_partial_formatting",
":",
"mapping",
"=",
"FormatDict",
"(",
"self",
".",
"doc_dict",
")",
"else",
":",
"mapping",
"=",
"self",
".",
"doc_dict",
"formatter",
"=",
"string",
".... | Formats the docstring using self.doc_dict | [
"Formats",
"the",
"docstring",
"using",
"self",
".",
"doc_dict"
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/docstring.py#L74-L81 | train | 211,107 |
eyurtsev/FlowCytometryTools | setup.py | get_package_version | def get_package_version(path):
'''Extracts the version'''
with open(VERSION_FILE, "rt") as f:
verstrline = f.read()
VERSION = r"^version = ['\"]([^'\"]*)['\"]"
results = re.search(VERSION, verstrline, re.M)
if results:
version = results.group(1)
else:
raise RuntimeError("Unable to find version string in {}.".format(path))
return version | python | def get_package_version(path):
'''Extracts the version'''
with open(VERSION_FILE, "rt") as f:
verstrline = f.read()
VERSION = r"^version = ['\"]([^'\"]*)['\"]"
results = re.search(VERSION, verstrline, re.M)
if results:
version = results.group(1)
else:
raise RuntimeError("Unable to find version string in {}.".format(path))
return version | [
"def",
"get_package_version",
"(",
"path",
")",
":",
"with",
"open",
"(",
"VERSION_FILE",
",",
"\"rt\"",
")",
"as",
"f",
":",
"verstrline",
"=",
"f",
".",
"read",
"(",
")",
"VERSION",
"=",
"r\"^version = ['\\\"]([^'\\\"]*)['\\\"]\"",
"results",
"=",
"re",
".... | Extracts the version | [
"Extracts",
"the",
"version"
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/setup.py#L13-L26 | train | 211,108 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/bases.py | _assign_IDS_to_datafiles | def _assign_IDS_to_datafiles(datafiles, parser, measurement_class=None, **kwargs):
"""
Assign measurement IDS to datafiles using specified parser.
Parameters
----------
datafiles : iterable of str
Path to datafiles. An ID will be assigned to each.
Note that this function does not check for uniqueness of IDs!
{_bases_filename_parser}
measurement_class: object
Used to create a temporary object when reading the ID from the datafile.
The measurement class needs to have an `ID_from_data` method.
Only used when parser='read'.
kwargs: dict
Additional parameters to be passed to parser is it is a callable, or 'read'.
If parser is 'read', kwargs are passed to the measurement class's `ID_from_data` method.
Returns
-------
Dict of ID:datafile
"""
if isinstance(parser, collections.Mapping):
fparse = lambda x: parser[x]
elif hasattr(parser, '__call__'):
fparse = lambda x: parser(x, **kwargs)
elif parser == 'name':
kwargs.setdefault('pre', 'Well_')
kwargs.setdefault('post', ['_', '\.', '$'])
kwargs.setdefault('tagtype', str)
fparse = lambda x: get_tag_value(os.path.basename(x), **kwargs)
elif parser == 'number':
fparse = lambda x: int(x.split('.')[-2])
elif parser == 'read':
fparse = lambda x: measurement_class(ID='temporary', datafile=x).ID_from_data(**kwargs)
else:
raise ValueError('Encountered unsupported value "%s" for parser parameter.' % parser)
d = dict((fparse(dfile), dfile) for dfile in datafiles)
return d | python | def _assign_IDS_to_datafiles(datafiles, parser, measurement_class=None, **kwargs):
"""
Assign measurement IDS to datafiles using specified parser.
Parameters
----------
datafiles : iterable of str
Path to datafiles. An ID will be assigned to each.
Note that this function does not check for uniqueness of IDs!
{_bases_filename_parser}
measurement_class: object
Used to create a temporary object when reading the ID from the datafile.
The measurement class needs to have an `ID_from_data` method.
Only used when parser='read'.
kwargs: dict
Additional parameters to be passed to parser is it is a callable, or 'read'.
If parser is 'read', kwargs are passed to the measurement class's `ID_from_data` method.
Returns
-------
Dict of ID:datafile
"""
if isinstance(parser, collections.Mapping):
fparse = lambda x: parser[x]
elif hasattr(parser, '__call__'):
fparse = lambda x: parser(x, **kwargs)
elif parser == 'name':
kwargs.setdefault('pre', 'Well_')
kwargs.setdefault('post', ['_', '\.', '$'])
kwargs.setdefault('tagtype', str)
fparse = lambda x: get_tag_value(os.path.basename(x), **kwargs)
elif parser == 'number':
fparse = lambda x: int(x.split('.')[-2])
elif parser == 'read':
fparse = lambda x: measurement_class(ID='temporary', datafile=x).ID_from_data(**kwargs)
else:
raise ValueError('Encountered unsupported value "%s" for parser parameter.' % parser)
d = dict((fparse(dfile), dfile) for dfile in datafiles)
return d | [
"def",
"_assign_IDS_to_datafiles",
"(",
"datafiles",
",",
"parser",
",",
"measurement_class",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"parser",
",",
"collections",
".",
"Mapping",
")",
":",
"fparse",
"=",
"lambda",
"x",
":"... | Assign measurement IDS to datafiles using specified parser.
Parameters
----------
datafiles : iterable of str
Path to datafiles. An ID will be assigned to each.
Note that this function does not check for uniqueness of IDs!
{_bases_filename_parser}
measurement_class: object
Used to create a temporary object when reading the ID from the datafile.
The measurement class needs to have an `ID_from_data` method.
Only used when parser='read'.
kwargs: dict
Additional parameters to be passed to parser is it is a callable, or 'read'.
If parser is 'read', kwargs are passed to the measurement class's `ID_from_data` method.
Returns
-------
Dict of ID:datafile | [
"Assign",
"measurement",
"IDS",
"to",
"datafiles",
"using",
"specified",
"parser",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/bases.py#L30-L68 | train | 211,109 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/bases.py | Measurement.set_data | def set_data(self, data=None, **kwargs):
'''
Read data into memory, applying all actions in queue.
Additionally, update queue and history.
'''
if data is None:
data = self.get_data(**kwargs)
setattr(self, '_data', data)
self.history += self.queue
self.queue = [] | python | def set_data(self, data=None, **kwargs):
'''
Read data into memory, applying all actions in queue.
Additionally, update queue and history.
'''
if data is None:
data = self.get_data(**kwargs)
setattr(self, '_data', data)
self.history += self.queue
self.queue = [] | [
"def",
"set_data",
"(",
"self",
",",
"data",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"data",
"is",
"None",
":",
"data",
"=",
"self",
".",
"get_data",
"(",
"*",
"*",
"kwargs",
")",
"setattr",
"(",
"self",
",",
"'_data'",
",",
"data",... | Read data into memory, applying all actions in queue.
Additionally, update queue and history. | [
"Read",
"data",
"into",
"memory",
"applying",
"all",
"actions",
"in",
"queue",
".",
"Additionally",
"update",
"queue",
"and",
"history",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/bases.py#L246-L255 | train | 211,110 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/bases.py | Measurement.set_meta | def set_meta(self, meta=None, **kwargs):
'''
Assign values to self.meta.
Meta is not returned
'''
if meta is None:
meta = self.get_meta(**kwargs)
setattr(self, '_meta', meta) | python | def set_meta(self, meta=None, **kwargs):
'''
Assign values to self.meta.
Meta is not returned
'''
if meta is None:
meta = self.get_meta(**kwargs)
setattr(self, '_meta', meta) | [
"def",
"set_meta",
"(",
"self",
",",
"meta",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"meta",
"is",
"None",
":",
"meta",
"=",
"self",
".",
"get_meta",
"(",
"*",
"*",
"kwargs",
")",
"setattr",
"(",
"self",
",",
"'_meta'",
",",
"meta",... | Assign values to self.meta.
Meta is not returned | [
"Assign",
"values",
"to",
"self",
".",
"meta",
".",
"Meta",
"is",
"not",
"returned"
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/bases.py#L257-L264 | train | 211,111 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/bases.py | Measurement.get_data | def get_data(self, **kwargs):
'''
Get the measurement data.
If data is not set, read from 'self.datafile' using 'self.read_data'.
'''
if self.queue:
new = self.apply_queued()
return new.get_data()
else:
return self._get_attr_from_file('data', **kwargs) | python | def get_data(self, **kwargs):
'''
Get the measurement data.
If data is not set, read from 'self.datafile' using 'self.read_data'.
'''
if self.queue:
new = self.apply_queued()
return new.get_data()
else:
return self._get_attr_from_file('data', **kwargs) | [
"def",
"get_data",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"queue",
":",
"new",
"=",
"self",
".",
"apply_queued",
"(",
")",
"return",
"new",
".",
"get_data",
"(",
")",
"else",
":",
"return",
"self",
".",
"_get_attr_from_fil... | Get the measurement data.
If data is not set, read from 'self.datafile' using 'self.read_data'. | [
"Get",
"the",
"measurement",
"data",
".",
"If",
"data",
"is",
"not",
"set",
"read",
"from",
"self",
".",
"datafile",
"using",
"self",
".",
"read_data",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/bases.py#L287-L296 | train | 211,112 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/bases.py | Measurement.apply | def apply(self, func, applyto='measurement', noneval=nan, setdata=False):
"""
Apply func either to self or to associated data.
If data is not already parsed, try and read it.
Parameters
----------
func : callable
The function either accepts a measurement object or an FCS object.
Does some calculation and returns the result.
applyto : ['data' | 'measurement']
* 'data' : apply to associated data
* 'measurement' : apply to measurement object itself.
noneval : obj
Value to return if `applyto` is 'data', but no data is available.
setdata : bool
Used only if data is not already set.
If true parsed data will be assigned to self.data
Otherwise data will be discarded at end of apply.
"""
applyto = applyto.lower()
if applyto == 'data':
if self.data is not None:
data = self.data
elif self.datafile is None:
return noneval
else:
data = self.read_data()
if setdata:
self.data = data
return func(data)
elif applyto == 'measurement':
return func(self)
else:
raise ValueError('Encountered unsupported value "%s" for applyto parameter.' % applyto) | python | def apply(self, func, applyto='measurement', noneval=nan, setdata=False):
"""
Apply func either to self or to associated data.
If data is not already parsed, try and read it.
Parameters
----------
func : callable
The function either accepts a measurement object or an FCS object.
Does some calculation and returns the result.
applyto : ['data' | 'measurement']
* 'data' : apply to associated data
* 'measurement' : apply to measurement object itself.
noneval : obj
Value to return if `applyto` is 'data', but no data is available.
setdata : bool
Used only if data is not already set.
If true parsed data will be assigned to self.data
Otherwise data will be discarded at end of apply.
"""
applyto = applyto.lower()
if applyto == 'data':
if self.data is not None:
data = self.data
elif self.datafile is None:
return noneval
else:
data = self.read_data()
if setdata:
self.data = data
return func(data)
elif applyto == 'measurement':
return func(self)
else:
raise ValueError('Encountered unsupported value "%s" for applyto parameter.' % applyto) | [
"def",
"apply",
"(",
"self",
",",
"func",
",",
"applyto",
"=",
"'measurement'",
",",
"noneval",
"=",
"nan",
",",
"setdata",
"=",
"False",
")",
":",
"applyto",
"=",
"applyto",
".",
"lower",
"(",
")",
"if",
"applyto",
"==",
"'data'",
":",
"if",
"self",... | Apply func either to self or to associated data.
If data is not already parsed, try and read it.
Parameters
----------
func : callable
The function either accepts a measurement object or an FCS object.
Does some calculation and returns the result.
applyto : ['data' | 'measurement']
* 'data' : apply to associated data
* 'measurement' : apply to measurement object itself.
noneval : obj
Value to return if `applyto` is 'data', but no data is available.
setdata : bool
Used only if data is not already set.
If true parsed data will be assigned to self.data
Otherwise data will be discarded at end of apply. | [
"Apply",
"func",
"either",
"to",
"self",
"or",
"to",
"associated",
"data",
".",
"If",
"data",
"is",
"not",
"already",
"parsed",
"try",
"and",
"read",
"it",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/bases.py#L327-L361 | train | 211,113 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/bases.py | MeasurementCollection.from_files | def from_files(cls, ID, datafiles, parser, readdata_kwargs={}, readmeta_kwargs={}, **ID_kwargs):
"""
Create a Collection of measurements from a set of data files.
Parameters
----------
{_bases_ID}
{_bases_data_files}
{_bases_filename_parser}
{_bases_ID_kwargs}
"""
d = _assign_IDS_to_datafiles(datafiles, parser, cls._measurement_class, **ID_kwargs)
measurements = []
for sID, dfile in d.items():
try:
measurements.append(cls._measurement_class(sID, datafile=dfile,
readdata_kwargs=readdata_kwargs,
readmeta_kwargs=readmeta_kwargs))
except:
msg = 'Error occurred while trying to parse file: %s' % dfile
raise IOError(msg)
return cls(ID, measurements) | python | def from_files(cls, ID, datafiles, parser, readdata_kwargs={}, readmeta_kwargs={}, **ID_kwargs):
"""
Create a Collection of measurements from a set of data files.
Parameters
----------
{_bases_ID}
{_bases_data_files}
{_bases_filename_parser}
{_bases_ID_kwargs}
"""
d = _assign_IDS_to_datafiles(datafiles, parser, cls._measurement_class, **ID_kwargs)
measurements = []
for sID, dfile in d.items():
try:
measurements.append(cls._measurement_class(sID, datafile=dfile,
readdata_kwargs=readdata_kwargs,
readmeta_kwargs=readmeta_kwargs))
except:
msg = 'Error occurred while trying to parse file: %s' % dfile
raise IOError(msg)
return cls(ID, measurements) | [
"def",
"from_files",
"(",
"cls",
",",
"ID",
",",
"datafiles",
",",
"parser",
",",
"readdata_kwargs",
"=",
"{",
"}",
",",
"readmeta_kwargs",
"=",
"{",
"}",
",",
"*",
"*",
"ID_kwargs",
")",
":",
"d",
"=",
"_assign_IDS_to_datafiles",
"(",
"datafiles",
",",
... | Create a Collection of measurements from a set of data files.
Parameters
----------
{_bases_ID}
{_bases_data_files}
{_bases_filename_parser}
{_bases_ID_kwargs} | [
"Create",
"a",
"Collection",
"of",
"measurements",
"from",
"a",
"set",
"of",
"data",
"files",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/bases.py#L399-L420 | train | 211,114 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/bases.py | MeasurementCollection.filter | def filter(self, criteria, applyto='measurement', ID=None):
"""
Filter measurements according to given criteria.
Retain only Measurements for which criteria returns True.
TODO: add support for multiple criteria
Parameters
----------
criteria : callable
Returns bool.
applyto : 'measurement' | 'keys' | 'data' | mapping
'measurement' : criteria is applied to Measurement objects
'keys' : criteria is applied to the keys.
'data' : criteria is applied to the Measurement objects' data.
mapping : for each key criteria is applied to mapping value with same key.
ID : str
ID of the filtered collection.
If None is given, append '.filterd' to the current sample ID.
Returns
-------
Filtered Collection.
"""
fil = criteria
new = self.copy()
if isinstance(applyto, collections.Mapping):
remove = (k for k, v in self.items() if not fil(applyto[k]))
elif applyto == 'measurement':
remove = (k for k, v in self.items() if not fil(v))
elif applyto == 'keys':
remove = (k for k, v in self.items() if not fil(k))
elif applyto == 'data':
remove = (k for k, v in self.items() if not fil(v.get_data()))
else:
raise ValueError('Unsupported value "%s" for applyto parameter.' % applyto)
for r in remove:
del new[r]
if ID is None:
ID = self.ID
new.ID = ID
return new | python | def filter(self, criteria, applyto='measurement', ID=None):
"""
Filter measurements according to given criteria.
Retain only Measurements for which criteria returns True.
TODO: add support for multiple criteria
Parameters
----------
criteria : callable
Returns bool.
applyto : 'measurement' | 'keys' | 'data' | mapping
'measurement' : criteria is applied to Measurement objects
'keys' : criteria is applied to the keys.
'data' : criteria is applied to the Measurement objects' data.
mapping : for each key criteria is applied to mapping value with same key.
ID : str
ID of the filtered collection.
If None is given, append '.filterd' to the current sample ID.
Returns
-------
Filtered Collection.
"""
fil = criteria
new = self.copy()
if isinstance(applyto, collections.Mapping):
remove = (k for k, v in self.items() if not fil(applyto[k]))
elif applyto == 'measurement':
remove = (k for k, v in self.items() if not fil(v))
elif applyto == 'keys':
remove = (k for k, v in self.items() if not fil(k))
elif applyto == 'data':
remove = (k for k, v in self.items() if not fil(v.get_data()))
else:
raise ValueError('Unsupported value "%s" for applyto parameter.' % applyto)
for r in remove:
del new[r]
if ID is None:
ID = self.ID
new.ID = ID
return new | [
"def",
"filter",
"(",
"self",
",",
"criteria",
",",
"applyto",
"=",
"'measurement'",
",",
"ID",
"=",
"None",
")",
":",
"fil",
"=",
"criteria",
"new",
"=",
"self",
".",
"copy",
"(",
")",
"if",
"isinstance",
"(",
"applyto",
",",
"collections",
".",
"Ma... | Filter measurements according to given criteria.
Retain only Measurements for which criteria returns True.
TODO: add support for multiple criteria
Parameters
----------
criteria : callable
Returns bool.
applyto : 'measurement' | 'keys' | 'data' | mapping
'measurement' : criteria is applied to Measurement objects
'keys' : criteria is applied to the keys.
'data' : criteria is applied to the Measurement objects' data.
mapping : for each key criteria is applied to mapping value with same key.
ID : str
ID of the filtered collection.
If None is given, append '.filterd' to the current sample ID.
Returns
-------
Filtered Collection. | [
"Filter",
"measurements",
"according",
"to",
"given",
"criteria",
".",
"Retain",
"only",
"Measurements",
"for",
"which",
"criteria",
"returns",
"True",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/bases.py#L599-L640 | train | 211,115 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/bases.py | MeasurementCollection.filter_by_key | def filter_by_key(self, keys, ID=None):
"""
Keep only Measurements with given keys.
"""
keys = to_list(keys)
fil = lambda x: x in keys
if ID is None:
ID = self.ID
return self.filter(fil, applyto='keys', ID=ID) | python | def filter_by_key(self, keys, ID=None):
"""
Keep only Measurements with given keys.
"""
keys = to_list(keys)
fil = lambda x: x in keys
if ID is None:
ID = self.ID
return self.filter(fil, applyto='keys', ID=ID) | [
"def",
"filter_by_key",
"(",
"self",
",",
"keys",
",",
"ID",
"=",
"None",
")",
":",
"keys",
"=",
"to_list",
"(",
"keys",
")",
"fil",
"=",
"lambda",
"x",
":",
"x",
"in",
"keys",
"if",
"ID",
"is",
"None",
":",
"ID",
"=",
"self",
".",
"ID",
"retur... | Keep only Measurements with given keys. | [
"Keep",
"only",
"Measurements",
"with",
"given",
"keys",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/bases.py#L642-L650 | train | 211,116 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/bases.py | MeasurementCollection.filter_by_IDs | def filter_by_IDs(self, ids, ID=None):
"""
Keep only Measurements with given IDs.
"""
fil = lambda x: x in ids
return self.filter_by_attr('ID', fil, ID) | python | def filter_by_IDs(self, ids, ID=None):
"""
Keep only Measurements with given IDs.
"""
fil = lambda x: x in ids
return self.filter_by_attr('ID', fil, ID) | [
"def",
"filter_by_IDs",
"(",
"self",
",",
"ids",
",",
"ID",
"=",
"None",
")",
":",
"fil",
"=",
"lambda",
"x",
":",
"x",
"in",
"ids",
"return",
"self",
".",
"filter_by_attr",
"(",
"'ID'",
",",
"fil",
",",
"ID",
")"
] | Keep only Measurements with given IDs. | [
"Keep",
"only",
"Measurements",
"with",
"given",
"IDs",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/bases.py#L658-L663 | train | 211,117 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/bases.py | MeasurementCollection.filter_by_rows | def filter_by_rows(self, rows, ID=None):
"""
Keep only Measurements in corresponding rows.
"""
rows = to_list(rows)
fil = lambda x: x in rows
applyto = {k: self._positions[k][0] for k in self.keys()}
if ID is None:
ID = self.ID
return self.filter(fil, applyto=applyto, ID=ID) | python | def filter_by_rows(self, rows, ID=None):
"""
Keep only Measurements in corresponding rows.
"""
rows = to_list(rows)
fil = lambda x: x in rows
applyto = {k: self._positions[k][0] for k in self.keys()}
if ID is None:
ID = self.ID
return self.filter(fil, applyto=applyto, ID=ID) | [
"def",
"filter_by_rows",
"(",
"self",
",",
"rows",
",",
"ID",
"=",
"None",
")",
":",
"rows",
"=",
"to_list",
"(",
"rows",
")",
"fil",
"=",
"lambda",
"x",
":",
"x",
"in",
"rows",
"applyto",
"=",
"{",
"k",
":",
"self",
".",
"_positions",
"[",
"k",
... | Keep only Measurements in corresponding rows. | [
"Keep",
"only",
"Measurements",
"in",
"corresponding",
"rows",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/bases.py#L668-L677 | train | 211,118 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/bases.py | MeasurementCollection.filter_by_cols | def filter_by_cols(self, cols, ID=None):
"""
Keep only Measurements in corresponding columns.
"""
rows = to_list(cols)
fil = lambda x: x in rows
applyto = {k: self._positions[k][1] for k in self.keys()}
if ID is None:
ID = self.ID + '.filtered_by_cols'
return self.filter(fil, applyto=applyto, ID=ID) | python | def filter_by_cols(self, cols, ID=None):
"""
Keep only Measurements in corresponding columns.
"""
rows = to_list(cols)
fil = lambda x: x in rows
applyto = {k: self._positions[k][1] for k in self.keys()}
if ID is None:
ID = self.ID + '.filtered_by_cols'
return self.filter(fil, applyto=applyto, ID=ID) | [
"def",
"filter_by_cols",
"(",
"self",
",",
"cols",
",",
"ID",
"=",
"None",
")",
":",
"rows",
"=",
"to_list",
"(",
"cols",
")",
"fil",
"=",
"lambda",
"x",
":",
"x",
"in",
"rows",
"applyto",
"=",
"{",
"k",
":",
"self",
".",
"_positions",
"[",
"k",
... | Keep only Measurements in corresponding columns. | [
"Keep",
"only",
"Measurements",
"in",
"corresponding",
"columns",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/bases.py#L679-L688 | train | 211,119 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/bases.py | OrderedCollection.from_files | def from_files(cls, ID, datafiles, parser='name',
position_mapper=None,
readdata_kwargs={}, readmeta_kwargs={}, ID_kwargs={}, **kwargs):
"""
Create an OrderedCollection of measurements from a set of data files.
Parameters
----------
{_bases_ID}
{_bases_data_files}
{_bases_filename_parser}
{_bases_position_mapper}
{_bases_ID_kwargs}
kwargs : dict
Additional key word arguments to be passed to constructor.
"""
if position_mapper is None:
if isinstance(parser, six.string_types):
position_mapper = parser
else:
msg = "When using a custom parser, you must specify the position_mapper keyword."
raise ValueError(msg)
d = _assign_IDS_to_datafiles(datafiles, parser, cls._measurement_class, **ID_kwargs)
measurements = []
for sID, dfile in d.items():
try:
measurements.append(cls._measurement_class(sID, datafile=dfile,
readdata_kwargs=readdata_kwargs,
readmeta_kwargs=readmeta_kwargs))
except:
msg = 'Error occured while trying to parse file: %s' % dfile
raise IOError(msg)
return cls(ID, measurements, position_mapper, **kwargs) | python | def from_files(cls, ID, datafiles, parser='name',
position_mapper=None,
readdata_kwargs={}, readmeta_kwargs={}, ID_kwargs={}, **kwargs):
"""
Create an OrderedCollection of measurements from a set of data files.
Parameters
----------
{_bases_ID}
{_bases_data_files}
{_bases_filename_parser}
{_bases_position_mapper}
{_bases_ID_kwargs}
kwargs : dict
Additional key word arguments to be passed to constructor.
"""
if position_mapper is None:
if isinstance(parser, six.string_types):
position_mapper = parser
else:
msg = "When using a custom parser, you must specify the position_mapper keyword."
raise ValueError(msg)
d = _assign_IDS_to_datafiles(datafiles, parser, cls._measurement_class, **ID_kwargs)
measurements = []
for sID, dfile in d.items():
try:
measurements.append(cls._measurement_class(sID, datafile=dfile,
readdata_kwargs=readdata_kwargs,
readmeta_kwargs=readmeta_kwargs))
except:
msg = 'Error occured while trying to parse file: %s' % dfile
raise IOError(msg)
return cls(ID, measurements, position_mapper, **kwargs) | [
"def",
"from_files",
"(",
"cls",
",",
"ID",
",",
"datafiles",
",",
"parser",
"=",
"'name'",
",",
"position_mapper",
"=",
"None",
",",
"readdata_kwargs",
"=",
"{",
"}",
",",
"readmeta_kwargs",
"=",
"{",
"}",
",",
"ID_kwargs",
"=",
"{",
"}",
",",
"*",
... | Create an OrderedCollection of measurements from a set of data files.
Parameters
----------
{_bases_ID}
{_bases_data_files}
{_bases_filename_parser}
{_bases_position_mapper}
{_bases_ID_kwargs}
kwargs : dict
Additional key word arguments to be passed to constructor. | [
"Create",
"an",
"OrderedCollection",
"of",
"measurements",
"from",
"a",
"set",
"of",
"data",
"files",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/bases.py#L751-L783 | train | 211,120 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/bases.py | OrderedCollection._is_valid_position | def _is_valid_position(self, position):
'''
check if given position is valid for this collection
'''
row, col = position
valid_r = row in self.row_labels
valid_c = col in self.col_labels
return valid_r and valid_c | python | def _is_valid_position(self, position):
'''
check if given position is valid for this collection
'''
row, col = position
valid_r = row in self.row_labels
valid_c = col in self.col_labels
return valid_r and valid_c | [
"def",
"_is_valid_position",
"(",
"self",
",",
"position",
")",
":",
"row",
",",
"col",
"=",
"position",
"valid_r",
"=",
"row",
"in",
"self",
".",
"row_labels",
"valid_c",
"=",
"col",
"in",
"self",
".",
"col_labels",
"return",
"valid_r",
"and",
"valid_c"
] | check if given position is valid for this collection | [
"check",
"if",
"given",
"position",
"is",
"valid",
"for",
"this",
"collection"
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/bases.py#L838-L845 | train | 211,121 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/bases.py | OrderedCollection._get_ID2position_mapper | def _get_ID2position_mapper(self, position_mapper):
'''
Defines a position parser that is used
to map between sample IDs and positions.
Parameters
--------------
{_bases_position_mapper}
TODO: Fix the name to work with more than 26 letters
of the alphabet.
'''
def num_parser(x, order):
i, j = unravel_index(int(x - 1), self.shape, order=order)
return (self.row_labels[i], self.col_labels[j])
if hasattr(position_mapper, '__call__'):
mapper = position_mapper
elif isinstance(position_mapper, collections.Mapping):
mapper = lambda x: position_mapper[x]
elif position_mapper == 'name':
mapper = lambda x: (x[0], int(x[1:]))
elif position_mapper in ('row_first_enumerator', 'number'):
mapper = lambda x: num_parser(x, 'F')
elif position_mapper == 'col_first_enumerator':
mapper = lambda x: num_parser(x, 'C')
else:
msg = '"{}" is not a known key_to_position_parser.'.format(position_mapper)
raise ValueError(msg)
return mapper | python | def _get_ID2position_mapper(self, position_mapper):
'''
Defines a position parser that is used
to map between sample IDs and positions.
Parameters
--------------
{_bases_position_mapper}
TODO: Fix the name to work with more than 26 letters
of the alphabet.
'''
def num_parser(x, order):
i, j = unravel_index(int(x - 1), self.shape, order=order)
return (self.row_labels[i], self.col_labels[j])
if hasattr(position_mapper, '__call__'):
mapper = position_mapper
elif isinstance(position_mapper, collections.Mapping):
mapper = lambda x: position_mapper[x]
elif position_mapper == 'name':
mapper = lambda x: (x[0], int(x[1:]))
elif position_mapper in ('row_first_enumerator', 'number'):
mapper = lambda x: num_parser(x, 'F')
elif position_mapper == 'col_first_enumerator':
mapper = lambda x: num_parser(x, 'C')
else:
msg = '"{}" is not a known key_to_position_parser.'.format(position_mapper)
raise ValueError(msg)
return mapper | [
"def",
"_get_ID2position_mapper",
"(",
"self",
",",
"position_mapper",
")",
":",
"def",
"num_parser",
"(",
"x",
",",
"order",
")",
":",
"i",
",",
"j",
"=",
"unravel_index",
"(",
"int",
"(",
"x",
"-",
"1",
")",
",",
"self",
".",
"shape",
",",
"order",... | Defines a position parser that is used
to map between sample IDs and positions.
Parameters
--------------
{_bases_position_mapper}
TODO: Fix the name to work with more than 26 letters
of the alphabet. | [
"Defines",
"a",
"position",
"parser",
"that",
"is",
"used",
"to",
"map",
"between",
"sample",
"IDs",
"and",
"positions",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/bases.py#L848-L878 | train | 211,122 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/bases.py | OrderedCollection.set_positions | def set_positions(self, positions=None, position_mapper='name', ids=None):
'''
checks for position validity & collisions,
but not that all measurements are assigned.
Parameters
-----------
positions : is dict-like of measurement_key:(row,col)
parser :
callable - gets key and returns position
mapping - key:pos
'name' - parses things like 'A1', 'G12'
'number' - converts number to positions, going over rows first.
ids :
parser will be applied to specified ids only.
If None is given, parser will be applied to all measurements.
TODO: output a more informative message for position collisions
'''
if positions is None:
if ids is None:
ids = self.keys()
else:
ids = to_list(ids)
mapper = self._get_ID2position_mapper(position_mapper)
positions = dict((ID, mapper(ID)) for ID in ids)
else:
pass
# check that resulting assignment is unique (one measurement per position)
temp = self._positions.copy()
temp.update(positions)
if not len(temp.values()) == len(set(temp.values())):
msg = 'A position can only be occupied by a single measurement'
raise Exception(msg)
for k, pos in positions.items():
if not self._is_valid_position(pos):
msg = 'Position {} is not supported for this collection'.format(pos)
raise ValueError(msg)
self._positions[k] = pos
self[k]._set_position(self.ID, pos) | python | def set_positions(self, positions=None, position_mapper='name', ids=None):
'''
checks for position validity & collisions,
but not that all measurements are assigned.
Parameters
-----------
positions : is dict-like of measurement_key:(row,col)
parser :
callable - gets key and returns position
mapping - key:pos
'name' - parses things like 'A1', 'G12'
'number' - converts number to positions, going over rows first.
ids :
parser will be applied to specified ids only.
If None is given, parser will be applied to all measurements.
TODO: output a more informative message for position collisions
'''
if positions is None:
if ids is None:
ids = self.keys()
else:
ids = to_list(ids)
mapper = self._get_ID2position_mapper(position_mapper)
positions = dict((ID, mapper(ID)) for ID in ids)
else:
pass
# check that resulting assignment is unique (one measurement per position)
temp = self._positions.copy()
temp.update(positions)
if not len(temp.values()) == len(set(temp.values())):
msg = 'A position can only be occupied by a single measurement'
raise Exception(msg)
for k, pos in positions.items():
if not self._is_valid_position(pos):
msg = 'Position {} is not supported for this collection'.format(pos)
raise ValueError(msg)
self._positions[k] = pos
self[k]._set_position(self.ID, pos) | [
"def",
"set_positions",
"(",
"self",
",",
"positions",
"=",
"None",
",",
"position_mapper",
"=",
"'name'",
",",
"ids",
"=",
"None",
")",
":",
"if",
"positions",
"is",
"None",
":",
"if",
"ids",
"is",
"None",
":",
"ids",
"=",
"self",
".",
"keys",
"(",
... | checks for position validity & collisions,
but not that all measurements are assigned.
Parameters
-----------
positions : is dict-like of measurement_key:(row,col)
parser :
callable - gets key and returns position
mapping - key:pos
'name' - parses things like 'A1', 'G12'
'number' - converts number to positions, going over rows first.
ids :
parser will be applied to specified ids only.
If None is given, parser will be applied to all measurements.
TODO: output a more informative message for position collisions | [
"checks",
"for",
"position",
"validity",
"&",
"collisions",
"but",
"not",
"that",
"all",
"measurements",
"are",
"assigned",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/bases.py#L880-L920 | train | 211,123 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/bases.py | OrderedCollection.get_positions | def get_positions(self, copy=True):
'''
Get a dictionary of measurement positions.
'''
if copy:
return self._positions.copy()
else:
return self._positions | python | def get_positions(self, copy=True):
'''
Get a dictionary of measurement positions.
'''
if copy:
return self._positions.copy()
else:
return self._positions | [
"def",
"get_positions",
"(",
"self",
",",
"copy",
"=",
"True",
")",
":",
"if",
"copy",
":",
"return",
"self",
".",
"_positions",
".",
"copy",
"(",
")",
"else",
":",
"return",
"self",
".",
"_positions"
] | Get a dictionary of measurement positions. | [
"Get",
"a",
"dictionary",
"of",
"measurement",
"positions",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/bases.py#L922-L929 | train | 211,124 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/bases.py | OrderedCollection.dropna | def dropna(self):
'''
Remove rows and cols that have no assigned measurements.
Return new instance.
'''
new = self.copy()
tmp = self._dict2DF(self, nan, True)
new.row_labels = list(tmp.index)
new.col_labels = list(tmp.columns)
return new | python | def dropna(self):
'''
Remove rows and cols that have no assigned measurements.
Return new instance.
'''
new = self.copy()
tmp = self._dict2DF(self, nan, True)
new.row_labels = list(tmp.index)
new.col_labels = list(tmp.columns)
return new | [
"def",
"dropna",
"(",
"self",
")",
":",
"new",
"=",
"self",
".",
"copy",
"(",
")",
"tmp",
"=",
"self",
".",
"_dict2DF",
"(",
"self",
",",
"nan",
",",
"True",
")",
"new",
".",
"row_labels",
"=",
"list",
"(",
"tmp",
".",
"index",
")",
"new",
".",... | Remove rows and cols that have no assigned measurements.
Return new instance. | [
"Remove",
"rows",
"and",
"cols",
"that",
"have",
"no",
"assigned",
"measurements",
".",
"Return",
"new",
"instance",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/bases.py#L945-L954 | train | 211,125 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/gates.py | IntervalGate.validate_input | def validate_input(self):
"""Raise appropriate exception if gate was defined incorrectly."""
if self.vert[1] <= self.vert[0]:
raise ValueError(u'{} must be larger than {}'.format(self.vert[1], self.vert[0])) | python | def validate_input(self):
"""Raise appropriate exception if gate was defined incorrectly."""
if self.vert[1] <= self.vert[0]:
raise ValueError(u'{} must be larger than {}'.format(self.vert[1], self.vert[0])) | [
"def",
"validate_input",
"(",
"self",
")",
":",
"if",
"self",
".",
"vert",
"[",
"1",
"]",
"<=",
"self",
".",
"vert",
"[",
"0",
"]",
":",
"raise",
"ValueError",
"(",
"u'{} must be larger than {}'",
".",
"format",
"(",
"self",
".",
"vert",
"[",
"1",
"]... | Raise appropriate exception if gate was defined incorrectly. | [
"Raise",
"appropriate",
"exception",
"if",
"gate",
"was",
"defined",
"incorrectly",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/gates.py#L233-L236 | train | 211,126 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/gates.py | IntervalGate._identify | def _identify(self, dataframe):
"""Return bool series which is True for indexes that 'pass' the gate"""
idx = ((dataframe[self.channels[0]] <= self.vert[1]) &
(dataframe[self.channels[0]] >= self.vert[0]))
if self.region == 'out':
idx = ~idx
return idx | python | def _identify(self, dataframe):
"""Return bool series which is True for indexes that 'pass' the gate"""
idx = ((dataframe[self.channels[0]] <= self.vert[1]) &
(dataframe[self.channels[0]] >= self.vert[0]))
if self.region == 'out':
idx = ~idx
return idx | [
"def",
"_identify",
"(",
"self",
",",
"dataframe",
")",
":",
"idx",
"=",
"(",
"(",
"dataframe",
"[",
"self",
".",
"channels",
"[",
"0",
"]",
"]",
"<=",
"self",
".",
"vert",
"[",
"1",
"]",
")",
"&",
"(",
"dataframe",
"[",
"self",
".",
"channels",
... | Return bool series which is True for indexes that 'pass' the gate | [
"Return",
"bool",
"series",
"which",
"is",
"True",
"for",
"indexes",
"that",
"pass",
"the",
"gate"
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/gates.py#L238-L246 | train | 211,127 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/__init__.py | _get_paths | def _get_paths():
"""Generate paths to test data. Done in a function to protect namespace a bit."""
import os
base_path = os.path.dirname(os.path.abspath(__file__))
test_data_dir = os.path.join(base_path, 'tests', 'data', 'Plate01')
test_data_file = os.path.join(test_data_dir, 'RFP_Well_A3.fcs')
return test_data_dir, test_data_file | python | def _get_paths():
"""Generate paths to test data. Done in a function to protect namespace a bit."""
import os
base_path = os.path.dirname(os.path.abspath(__file__))
test_data_dir = os.path.join(base_path, 'tests', 'data', 'Plate01')
test_data_file = os.path.join(test_data_dir, 'RFP_Well_A3.fcs')
return test_data_dir, test_data_file | [
"def",
"_get_paths",
"(",
")",
":",
"import",
"os",
"base_path",
"=",
"os",
".",
"path",
".",
"dirname",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"__file__",
")",
")",
"test_data_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"base_path",
",",
... | Generate paths to test data. Done in a function to protect namespace a bit. | [
"Generate",
"paths",
"to",
"test",
"data",
".",
"Done",
"in",
"a",
"function",
"to",
"protect",
"namespace",
"a",
"bit",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/__init__.py#L15-L21 | train | 211,128 |
eyurtsev/FlowCytometryTools | doc/make.py | upload_prev | def upload_prev(ver, doc_root='./'):
'push a copy of older release to appropriate version directory'
local_dir = doc_root + 'build/html'
remote_dir = '/usr/share/nginx/pandas/pandas-docs/version/%s/' % ver
cmd = 'cd %s; rsync -avz . pandas@pandas.pydata.org:%s -essh'
cmd = cmd % (local_dir, remote_dir)
print cmd
if os.system(cmd):
raise SystemExit(
'Upload to %s from %s failed' % (remote_dir, local_dir))
local_dir = doc_root + 'build/latex'
pdf_cmd = 'cd %s; scp pandas.pdf pandas@pandas.pydata.org:%s'
pdf_cmd = pdf_cmd % (local_dir, remote_dir)
if os.system(pdf_cmd):
raise SystemExit('Upload PDF to %s from %s failed' % (ver, doc_root)) | python | def upload_prev(ver, doc_root='./'):
'push a copy of older release to appropriate version directory'
local_dir = doc_root + 'build/html'
remote_dir = '/usr/share/nginx/pandas/pandas-docs/version/%s/' % ver
cmd = 'cd %s; rsync -avz . pandas@pandas.pydata.org:%s -essh'
cmd = cmd % (local_dir, remote_dir)
print cmd
if os.system(cmd):
raise SystemExit(
'Upload to %s from %s failed' % (remote_dir, local_dir))
local_dir = doc_root + 'build/latex'
pdf_cmd = 'cd %s; scp pandas.pdf pandas@pandas.pydata.org:%s'
pdf_cmd = pdf_cmd % (local_dir, remote_dir)
if os.system(pdf_cmd):
raise SystemExit('Upload PDF to %s from %s failed' % (ver, doc_root)) | [
"def",
"upload_prev",
"(",
"ver",
",",
"doc_root",
"=",
"'./'",
")",
":",
"local_dir",
"=",
"doc_root",
"+",
"'build/html'",
"remote_dir",
"=",
"'/usr/share/nginx/pandas/pandas-docs/version/%s/'",
"%",
"ver",
"cmd",
"=",
"'cd %s; rsync -avz . pandas@pandas.pydata.org:%s -... | push a copy of older release to appropriate version directory | [
"push",
"a",
"copy",
"of",
"older",
"release",
"to",
"appropriate",
"version",
"directory"
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/doc/make.py#L53-L68 | train | 211,129 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/graph.py | plotFCM | def plotFCM(data, channel_names, kind='histogram', ax=None,
autolabel=True, xlabel_kwargs={}, ylabel_kwargs={},
colorbar=False, grid=False,
**kwargs):
"""
Plots the sample on the current axis.
Follow with a call to matplotlibs show() in order to see the plot.
Parameters
----------
data : DataFrame
{graph_plotFCM_pars}
{common_plot_ax}
Returns
-------
The output of the plot command used
"""
if ax == None: ax = pl.gca()
xlabel_kwargs.setdefault('size', 16)
ylabel_kwargs.setdefault('size', 16)
channel_names = to_list(channel_names)
if len(channel_names) == 1:
# 1D so histogram plot
kwargs.setdefault('color', 'gray')
kwargs.setdefault('histtype', 'stepfilled')
kwargs.setdefault('bins', 200) # Do not move above
x = data[channel_names[0]].values
if len(x) >= 1:
if (len(x) == 1) and isinstance(kwargs['bins'], int):
# Only needed for hist (not hist2d) due to hist function doing
# excessive input checking
warnings.warn("One of the data sets only has a single event. "
"This event won't be plotted unless the bin locations"
" are explicitly provided to the plotting function. ")
return None
plot_output = ax.hist(x, **kwargs)
else:
return None
elif len(channel_names) == 2:
x = data[channel_names[0]].values # value of first channel
y = data[channel_names[1]].values # value of second channel
if len(x) == 0:
# Don't draw a plot if there's no data
return None
if kind == 'scatter':
kwargs.setdefault('edgecolor', 'none')
plot_output = ax.scatter(x, y, **kwargs)
elif kind == 'histogram':
kwargs.setdefault('bins', 200) # Do not move above
kwargs.setdefault('cmin', 1)
kwargs.setdefault('cmap', pl.cm.copper)
kwargs.setdefault('norm', matplotlib.colors.LogNorm())
plot_output = ax.hist2d(x, y, **kwargs)
mappable = plot_output[-1]
if colorbar:
pl.colorbar(mappable, ax=ax)
else:
raise ValueError("Not a valid plot type. Must be 'scatter', 'histogram'")
else:
raise ValueError('Received an unexpected number of channels: "{}"'.format(channel_names))
pl.grid(grid)
if autolabel:
y_label_text = 'Counts' if len(channel_names) == 1 else channel_names[1]
ax.set_xlabel(channel_names[0], **xlabel_kwargs)
ax.set_ylabel(y_label_text, **ylabel_kwargs)
return plot_output | python | def plotFCM(data, channel_names, kind='histogram', ax=None,
autolabel=True, xlabel_kwargs={}, ylabel_kwargs={},
colorbar=False, grid=False,
**kwargs):
"""
Plots the sample on the current axis.
Follow with a call to matplotlibs show() in order to see the plot.
Parameters
----------
data : DataFrame
{graph_plotFCM_pars}
{common_plot_ax}
Returns
-------
The output of the plot command used
"""
if ax == None: ax = pl.gca()
xlabel_kwargs.setdefault('size', 16)
ylabel_kwargs.setdefault('size', 16)
channel_names = to_list(channel_names)
if len(channel_names) == 1:
# 1D so histogram plot
kwargs.setdefault('color', 'gray')
kwargs.setdefault('histtype', 'stepfilled')
kwargs.setdefault('bins', 200) # Do not move above
x = data[channel_names[0]].values
if len(x) >= 1:
if (len(x) == 1) and isinstance(kwargs['bins'], int):
# Only needed for hist (not hist2d) due to hist function doing
# excessive input checking
warnings.warn("One of the data sets only has a single event. "
"This event won't be plotted unless the bin locations"
" are explicitly provided to the plotting function. ")
return None
plot_output = ax.hist(x, **kwargs)
else:
return None
elif len(channel_names) == 2:
x = data[channel_names[0]].values # value of first channel
y = data[channel_names[1]].values # value of second channel
if len(x) == 0:
# Don't draw a plot if there's no data
return None
if kind == 'scatter':
kwargs.setdefault('edgecolor', 'none')
plot_output = ax.scatter(x, y, **kwargs)
elif kind == 'histogram':
kwargs.setdefault('bins', 200) # Do not move above
kwargs.setdefault('cmin', 1)
kwargs.setdefault('cmap', pl.cm.copper)
kwargs.setdefault('norm', matplotlib.colors.LogNorm())
plot_output = ax.hist2d(x, y, **kwargs)
mappable = plot_output[-1]
if colorbar:
pl.colorbar(mappable, ax=ax)
else:
raise ValueError("Not a valid plot type. Must be 'scatter', 'histogram'")
else:
raise ValueError('Received an unexpected number of channels: "{}"'.format(channel_names))
pl.grid(grid)
if autolabel:
y_label_text = 'Counts' if len(channel_names) == 1 else channel_names[1]
ax.set_xlabel(channel_names[0], **xlabel_kwargs)
ax.set_ylabel(y_label_text, **ylabel_kwargs)
return plot_output | [
"def",
"plotFCM",
"(",
"data",
",",
"channel_names",
",",
"kind",
"=",
"'histogram'",
",",
"ax",
"=",
"None",
",",
"autolabel",
"=",
"True",
",",
"xlabel_kwargs",
"=",
"{",
"}",
",",
"ylabel_kwargs",
"=",
"{",
"}",
",",
"colorbar",
"=",
"False",
",",
... | Plots the sample on the current axis.
Follow with a call to matplotlibs show() in order to see the plot.
Parameters
----------
data : DataFrame
{graph_plotFCM_pars}
{common_plot_ax}
Returns
-------
The output of the plot command used | [
"Plots",
"the",
"sample",
"on",
"the",
"current",
"axis",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/graph.py#L23-L101 | train | 211,130 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/graph.py | autoscale_subplots | def autoscale_subplots(subplots=None, axis='both'):
"""
Sets the x and y axis limits for each subplot to match the x and y axis
limits of the most extreme data points encountered.
The limits are set to the same values for all subplots.
Parameters
-----------
subplots : ndarray or list of matplotlib.axes.Axes
axis : ['x' | 'y' | 'both' / 'xy' / 'yx' | 'none' / '']
'x' : autoscales the x axis
'y' : autoscales the y axis
'both', 'xy', 'yx' : autoscales both axis
'none', '' : autoscales nothing
"""
axis_options = ('x', 'y', 'both', 'none', '', 'xy', 'yx')
if axis.lower() not in axis_options:
raise ValueError('axis must be in {0}'.format(axis_options))
if subplots is None:
subplots = plt.gcf().axes
data_limits = [(ax.xaxis.get_data_interval(), ax.yaxis.get_data_interval()) for loc, ax in
numpy.ndenumerate(subplots)] # TODO: Make a proper iterator
xlims, ylims = zip(*data_limits)
xmins_list, xmaxs_list = zip(*xlims)
ymins_list, ymaxs_list = zip(*ylims)
xmin = numpy.min(xmins_list)
xmax = numpy.max(xmaxs_list)
ymin = numpy.min(ymins_list)
ymax = numpy.max(ymaxs_list)
for loc, ax in numpy.ndenumerate(subplots):
if axis in ('x', 'both', 'xy', 'yx'):
ax.set_xlim((xmin, xmax))
if axis in ('y', 'both', 'xy', 'yx'):
ax.set_ylim((ymin, ymax)) | python | def autoscale_subplots(subplots=None, axis='both'):
"""
Sets the x and y axis limits for each subplot to match the x and y axis
limits of the most extreme data points encountered.
The limits are set to the same values for all subplots.
Parameters
-----------
subplots : ndarray or list of matplotlib.axes.Axes
axis : ['x' | 'y' | 'both' / 'xy' / 'yx' | 'none' / '']
'x' : autoscales the x axis
'y' : autoscales the y axis
'both', 'xy', 'yx' : autoscales both axis
'none', '' : autoscales nothing
"""
axis_options = ('x', 'y', 'both', 'none', '', 'xy', 'yx')
if axis.lower() not in axis_options:
raise ValueError('axis must be in {0}'.format(axis_options))
if subplots is None:
subplots = plt.gcf().axes
data_limits = [(ax.xaxis.get_data_interval(), ax.yaxis.get_data_interval()) for loc, ax in
numpy.ndenumerate(subplots)] # TODO: Make a proper iterator
xlims, ylims = zip(*data_limits)
xmins_list, xmaxs_list = zip(*xlims)
ymins_list, ymaxs_list = zip(*ylims)
xmin = numpy.min(xmins_list)
xmax = numpy.max(xmaxs_list)
ymin = numpy.min(ymins_list)
ymax = numpy.max(ymaxs_list)
for loc, ax in numpy.ndenumerate(subplots):
if axis in ('x', 'both', 'xy', 'yx'):
ax.set_xlim((xmin, xmax))
if axis in ('y', 'both', 'xy', 'yx'):
ax.set_ylim((ymin, ymax)) | [
"def",
"autoscale_subplots",
"(",
"subplots",
"=",
"None",
",",
"axis",
"=",
"'both'",
")",
":",
"axis_options",
"=",
"(",
"'x'",
",",
"'y'",
",",
"'both'",
",",
"'none'",
",",
"''",
",",
"'xy'",
",",
"'yx'",
")",
"if",
"axis",
".",
"lower",
"(",
"... | Sets the x and y axis limits for each subplot to match the x and y axis
limits of the most extreme data points encountered.
The limits are set to the same values for all subplots.
Parameters
-----------
subplots : ndarray or list of matplotlib.axes.Axes
axis : ['x' | 'y' | 'both' / 'xy' / 'yx' | 'none' / '']
'x' : autoscales the x axis
'y' : autoscales the y axis
'both', 'xy', 'yx' : autoscales both axis
'none', '' : autoscales nothing | [
"Sets",
"the",
"x",
"and",
"y",
"axis",
"limits",
"for",
"each",
"subplot",
"to",
"match",
"the",
"x",
"and",
"y",
"axis",
"limits",
"of",
"the",
"most",
"extreme",
"data",
"points",
"encountered",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/graph.py#L246-L287 | train | 211,131 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/graph.py | scale_subplots | def scale_subplots(subplots=None, xlim='auto', ylim='auto'):
"""Set the x and y axis limits for a collection of subplots.
Parameters
-----------
subplots : ndarray or list of matplotlib.axes.Axes
xlim : None | 'auto' | (xmin, xmax)
'auto' : sets the limits according to the most
extreme values of data encountered.
ylim : None | 'auto' | (ymin, ymax)
"""
auto_axis = ''
if xlim == 'auto':
auto_axis += 'x'
if ylim == 'auto':
auto_axis += 'y'
autoscale_subplots(subplots, auto_axis)
for loc, ax in numpy.ndenumerate(subplots):
if 'x' not in auto_axis:
ax.set_xlim(xlim)
if 'y' not in auto_axis:
ax.set_ylim(ylim) | python | def scale_subplots(subplots=None, xlim='auto', ylim='auto'):
"""Set the x and y axis limits for a collection of subplots.
Parameters
-----------
subplots : ndarray or list of matplotlib.axes.Axes
xlim : None | 'auto' | (xmin, xmax)
'auto' : sets the limits according to the most
extreme values of data encountered.
ylim : None | 'auto' | (ymin, ymax)
"""
auto_axis = ''
if xlim == 'auto':
auto_axis += 'x'
if ylim == 'auto':
auto_axis += 'y'
autoscale_subplots(subplots, auto_axis)
for loc, ax in numpy.ndenumerate(subplots):
if 'x' not in auto_axis:
ax.set_xlim(xlim)
if 'y' not in auto_axis:
ax.set_ylim(ylim) | [
"def",
"scale_subplots",
"(",
"subplots",
"=",
"None",
",",
"xlim",
"=",
"'auto'",
",",
"ylim",
"=",
"'auto'",
")",
":",
"auto_axis",
"=",
"''",
"if",
"xlim",
"==",
"'auto'",
":",
"auto_axis",
"+=",
"'x'",
"if",
"ylim",
"==",
"'auto'",
":",
"auto_axis"... | Set the x and y axis limits for a collection of subplots.
Parameters
-----------
subplots : ndarray or list of matplotlib.axes.Axes
xlim : None | 'auto' | (xmin, xmax)
'auto' : sets the limits according to the most
extreme values of data encountered.
ylim : None | 'auto' | (ymin, ymax) | [
"Set",
"the",
"x",
"and",
"y",
"axis",
"limits",
"for",
"a",
"collection",
"of",
"subplots",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/graph.py#L290-L314 | train | 211,132 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/graph.py | _plot_table | def _plot_table(matrix, text_format='{:.2f}', cmap=None, **kwargs):
"""
Plot a numpy matrix as a table. Uses the current axis bounding box to decide on limits.
text_format specifies the formatting to apply to the values.
Parameters
----------
matrix : ndarray
text_format : str
Indicates how to format the the values
text_format = {:.2} -> keeps all digits until the first 2 significant digits past the decimal
text_format = {:.2f} -> keeps only 2 digits past the decimal
cmap : None | colormap
if a colormap is provided, this colormap will be used to choose the color of the text.
**kwargs : all other arguments passed to plt.text function
Examples
----------
plot_table(numpy.random.random((3,3))
plt.show()
"""
shape = matrix.shape
xtick_pos = numpy.arange(shape[1])
ytick_pos = numpy.arange(shape[0])
xtick_grid, ytick_grid = numpy.meshgrid(xtick_pos, ytick_pos)
vmax = numpy.nanmax(matrix)
vmin = numpy.nanmin(matrix)
if not kwargs.get('color', None) and cmap is not None:
use_cmap = True
norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax, clip=False)
else:
use_cmap = False
for (row, col), w in numpy.ndenumerate(matrix):
x = xtick_grid[row, col]
y = ytick_grid[row, col]
if use_cmap:
kwargs['color'] = cmap(norm(w))
plt.text(x, y, text_format.format(w), horizontalalignment='center',
verticalalignment='center', transform=plt.gca().transData, **kwargs) | python | def _plot_table(matrix, text_format='{:.2f}', cmap=None, **kwargs):
"""
Plot a numpy matrix as a table. Uses the current axis bounding box to decide on limits.
text_format specifies the formatting to apply to the values.
Parameters
----------
matrix : ndarray
text_format : str
Indicates how to format the the values
text_format = {:.2} -> keeps all digits until the first 2 significant digits past the decimal
text_format = {:.2f} -> keeps only 2 digits past the decimal
cmap : None | colormap
if a colormap is provided, this colormap will be used to choose the color of the text.
**kwargs : all other arguments passed to plt.text function
Examples
----------
plot_table(numpy.random.random((3,3))
plt.show()
"""
shape = matrix.shape
xtick_pos = numpy.arange(shape[1])
ytick_pos = numpy.arange(shape[0])
xtick_grid, ytick_grid = numpy.meshgrid(xtick_pos, ytick_pos)
vmax = numpy.nanmax(matrix)
vmin = numpy.nanmin(matrix)
if not kwargs.get('color', None) and cmap is not None:
use_cmap = True
norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax, clip=False)
else:
use_cmap = False
for (row, col), w in numpy.ndenumerate(matrix):
x = xtick_grid[row, col]
y = ytick_grid[row, col]
if use_cmap:
kwargs['color'] = cmap(norm(w))
plt.text(x, y, text_format.format(w), horizontalalignment='center',
verticalalignment='center', transform=plt.gca().transData, **kwargs) | [
"def",
"_plot_table",
"(",
"matrix",
",",
"text_format",
"=",
"'{:.2f}'",
",",
"cmap",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"shape",
"=",
"matrix",
".",
"shape",
"xtick_pos",
"=",
"numpy",
".",
"arange",
"(",
"shape",
"[",
"1",
"]",
")",
... | Plot a numpy matrix as a table. Uses the current axis bounding box to decide on limits.
text_format specifies the formatting to apply to the values.
Parameters
----------
matrix : ndarray
text_format : str
Indicates how to format the the values
text_format = {:.2} -> keeps all digits until the first 2 significant digits past the decimal
text_format = {:.2f} -> keeps only 2 digits past the decimal
cmap : None | colormap
if a colormap is provided, this colormap will be used to choose the color of the text.
**kwargs : all other arguments passed to plt.text function
Examples
----------
plot_table(numpy.random.random((3,3))
plt.show() | [
"Plot",
"a",
"numpy",
"matrix",
"as",
"a",
"table",
".",
"Uses",
"the",
"current",
"axis",
"bounding",
"box",
"to",
"decide",
"on",
"limits",
".",
"text_format",
"specifies",
"the",
"formatting",
"to",
"apply",
"to",
"the",
"values",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/graph.py#L636-L686 | train | 211,133 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/graph.py | _set_tick_lines_visibility | def _set_tick_lines_visibility(ax, visible=True):
"""Set the visibility of the tick lines of the requested axis."""
for i, thisAxis in enumerate((ax.get_xaxis(), ax.get_yaxis())):
for thisItem in thisAxis.get_ticklines():
if isinstance(visible, list):
thisItem.set_visible(visible[i])
else:
thisItem.set_visible(visible) | python | def _set_tick_lines_visibility(ax, visible=True):
"""Set the visibility of the tick lines of the requested axis."""
for i, thisAxis in enumerate((ax.get_xaxis(), ax.get_yaxis())):
for thisItem in thisAxis.get_ticklines():
if isinstance(visible, list):
thisItem.set_visible(visible[i])
else:
thisItem.set_visible(visible) | [
"def",
"_set_tick_lines_visibility",
"(",
"ax",
",",
"visible",
"=",
"True",
")",
":",
"for",
"i",
",",
"thisAxis",
"in",
"enumerate",
"(",
"(",
"ax",
".",
"get_xaxis",
"(",
")",
",",
"ax",
".",
"get_yaxis",
"(",
")",
")",
")",
":",
"for",
"thisItem"... | Set the visibility of the tick lines of the requested axis. | [
"Set",
"the",
"visibility",
"of",
"the",
"tick",
"lines",
"of",
"the",
"requested",
"axis",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/graph.py#L689-L696 | train | 211,134 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/graph.py | _set_tick_labels_visibility | def _set_tick_labels_visibility(ax, visible=True):
"""Set the visibility of the tick labels of the requested axis."""
for i, thisAxis in enumerate((ax.get_xaxis(), ax.get_yaxis())):
for thisItem in thisAxis.get_ticklabels():
if isinstance(visible, list):
thisItem.set_visible(visible[i])
else:
thisItem.set_visible(visible) | python | def _set_tick_labels_visibility(ax, visible=True):
"""Set the visibility of the tick labels of the requested axis."""
for i, thisAxis in enumerate((ax.get_xaxis(), ax.get_yaxis())):
for thisItem in thisAxis.get_ticklabels():
if isinstance(visible, list):
thisItem.set_visible(visible[i])
else:
thisItem.set_visible(visible) | [
"def",
"_set_tick_labels_visibility",
"(",
"ax",
",",
"visible",
"=",
"True",
")",
":",
"for",
"i",
",",
"thisAxis",
"in",
"enumerate",
"(",
"(",
"ax",
".",
"get_xaxis",
"(",
")",
",",
"ax",
".",
"get_yaxis",
"(",
")",
")",
")",
":",
"for",
"thisItem... | Set the visibility of the tick labels of the requested axis. | [
"Set",
"the",
"visibility",
"of",
"the",
"tick",
"labels",
"of",
"the",
"requested",
"axis",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/graph.py#L699-L706 | train | 211,135 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/graph.py | extract_annotation | def extract_annotation(data):
"""Extract names and values of rows and columns.
Parameter:
data : DataFrame | Panel
Returns:
col_name, col_values, row_name, row_values
"""
xlabel = None
xvalues = None
ylabel = None
yvalues = None
if hasattr(data, 'minor_axis'):
xvalues = data.minor_axis
if hasattr(data.minor_axis, 'name'):
xlabel = data.minor_axis.name
if hasattr(data, 'columns'):
xvalues = data.columns
if hasattr(data.columns, 'name'):
xlabel = data.columns.name
if hasattr(data, 'major_axis'):
yvalues = data.major_axis
if hasattr(data.major_axis, 'name'):
ylabel = data.major_axis.name
if hasattr(data, 'index'):
yvalues = data.index
if hasattr(data.index, 'name'):
ylabel = data.index.name
return xlabel, xvalues, ylabel, yvalues | python | def extract_annotation(data):
"""Extract names and values of rows and columns.
Parameter:
data : DataFrame | Panel
Returns:
col_name, col_values, row_name, row_values
"""
xlabel = None
xvalues = None
ylabel = None
yvalues = None
if hasattr(data, 'minor_axis'):
xvalues = data.minor_axis
if hasattr(data.minor_axis, 'name'):
xlabel = data.minor_axis.name
if hasattr(data, 'columns'):
xvalues = data.columns
if hasattr(data.columns, 'name'):
xlabel = data.columns.name
if hasattr(data, 'major_axis'):
yvalues = data.major_axis
if hasattr(data.major_axis, 'name'):
ylabel = data.major_axis.name
if hasattr(data, 'index'):
yvalues = data.index
if hasattr(data.index, 'name'):
ylabel = data.index.name
return xlabel, xvalues, ylabel, yvalues | [
"def",
"extract_annotation",
"(",
"data",
")",
":",
"xlabel",
"=",
"None",
"xvalues",
"=",
"None",
"ylabel",
"=",
"None",
"yvalues",
"=",
"None",
"if",
"hasattr",
"(",
"data",
",",
"'minor_axis'",
")",
":",
"xvalues",
"=",
"data",
".",
"minor_axis",
"if"... | Extract names and values of rows and columns.
Parameter:
data : DataFrame | Panel
Returns:
col_name, col_values, row_name, row_values | [
"Extract",
"names",
"and",
"values",
"of",
"rows",
"and",
"columns",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/graph.py#L709-L738 | train | 211,136 |
eyurtsev/FlowCytometryTools | doc/source/pyplots/arbitrary_manipulation/transformation.py | transform_using_this_method | def transform_using_this_method(original_sample):
""" This function implements a log transformation on the data. """
# Copy the original sample
new_sample = original_sample.copy()
new_data = new_sample.data
# Our transformation goes here
new_data['Y2-A'] = log(new_data['Y2-A'])
new_data = new_data.dropna() # Removes all NaN entries
new_sample.data = new_data
return new_sample | python | def transform_using_this_method(original_sample):
""" This function implements a log transformation on the data. """
# Copy the original sample
new_sample = original_sample.copy()
new_data = new_sample.data
# Our transformation goes here
new_data['Y2-A'] = log(new_data['Y2-A'])
new_data = new_data.dropna() # Removes all NaN entries
new_sample.data = new_data
return new_sample | [
"def",
"transform_using_this_method",
"(",
"original_sample",
")",
":",
"# Copy the original sample",
"new_sample",
"=",
"original_sample",
".",
"copy",
"(",
")",
"new_data",
"=",
"new_sample",
".",
"data",
"# Our transformation goes here",
"new_data",
"[",
"'Y2-A'",
"]... | This function implements a log transformation on the data. | [
"This",
"function",
"implements",
"a",
"log",
"transformation",
"on",
"the",
"data",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/doc/source/pyplots/arbitrary_manipulation/transformation.py#L17-L27 | train | 211,137 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/containers.py | FCMeasurement.read_data | def read_data(self, **kwargs):
'''
Read the datafile specified in Sample.datafile and
return the resulting object.
Does NOT assign the data to self.data
It's advised not to use this method, but instead to access
the data through the FCMeasurement.data attribute.
'''
meta, data = parse_fcs(self.datafile, **kwargs)
return data | python | def read_data(self, **kwargs):
'''
Read the datafile specified in Sample.datafile and
return the resulting object.
Does NOT assign the data to self.data
It's advised not to use this method, but instead to access
the data through the FCMeasurement.data attribute.
'''
meta, data = parse_fcs(self.datafile, **kwargs)
return data | [
"def",
"read_data",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"meta",
",",
"data",
"=",
"parse_fcs",
"(",
"self",
".",
"datafile",
",",
"*",
"*",
"kwargs",
")",
"return",
"data"
] | Read the datafile specified in Sample.datafile and
return the resulting object.
Does NOT assign the data to self.data
It's advised not to use this method, but instead to access
the data through the FCMeasurement.data attribute. | [
"Read",
"the",
"datafile",
"specified",
"in",
"Sample",
".",
"datafile",
"and",
"return",
"the",
"resulting",
"object",
".",
"Does",
"NOT",
"assign",
"the",
"data",
"to",
"self",
".",
"data"
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/containers.py#L41-L51 | train | 211,138 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/containers.py | FCMeasurement.get_meta_fields | def get_meta_fields(self, fields, kwargs={}):
'''
Return a dictionary of metadata fields
'''
fields = to_list(fields)
meta = self.get_meta()
return {field: meta.get(field) for field in fields} | python | def get_meta_fields(self, fields, kwargs={}):
'''
Return a dictionary of metadata fields
'''
fields = to_list(fields)
meta = self.get_meta()
return {field: meta.get(field) for field in fields} | [
"def",
"get_meta_fields",
"(",
"self",
",",
"fields",
",",
"kwargs",
"=",
"{",
"}",
")",
":",
"fields",
"=",
"to_list",
"(",
"fields",
")",
"meta",
"=",
"self",
".",
"get_meta",
"(",
")",
"return",
"{",
"field",
":",
"meta",
".",
"get",
"(",
"field... | Return a dictionary of metadata fields | [
"Return",
"a",
"dictionary",
"of",
"metadata",
"fields"
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/containers.py#L71-L77 | train | 211,139 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/containers.py | FCMeasurement.plot | def plot(self, channel_names, kind='histogram',
gates=None, gate_colors=None, gate_lw=1, **kwargs):
"""Plot the flow cytometry data associated with the sample on the current axis.
To produce the plot, follow up with a call to matplotlib's show() function.
Parameters
----------
{graph_plotFCM_pars}
{FCMeasurement_plot_pars}
{common_plot_ax}
gates : [None, Gate, list of Gate]
Gate must be of type {_gate_available_classes}.
gate_lw: float | iterable
line width to use when drawing gates
if float, uses the same line width for all gates
if iterable, then cycles between the values
kwargs : dict
Additional keyword arguments to be passed to graph.plotFCM
Returns
-------
None : if no data is present
plot_output : output of plot command used to draw (e.g., output of hist)
Examples
--------
>>> sample.plot('Y2-A', bins=100, alpha=0.7, color='green', normed=1) # 1d histogram
>>> sample.plot(['B1-A', 'Y2-A'], cmap=cm.Oranges, colorbar=False) # 2d histogram
"""
ax = kwargs.get('ax')
channel_names = to_list(channel_names)
gates = to_list(gates)
plot_output = graph.plotFCM(self.data, channel_names, kind=kind, **kwargs)
if gates is not None:
if gate_colors is None:
gate_colors = cycle(('b', 'g', 'r', 'm', 'c', 'y'))
if not isinstance(gate_lw, collections.Iterable):
gate_lw = [gate_lw]
gate_lw = cycle(gate_lw)
for (g, c, lw) in zip(gates, gate_colors, gate_lw):
g.plot(ax=ax, ax_channels=channel_names, color=c, lw=lw)
return plot_output | python | def plot(self, channel_names, kind='histogram',
gates=None, gate_colors=None, gate_lw=1, **kwargs):
"""Plot the flow cytometry data associated with the sample on the current axis.
To produce the plot, follow up with a call to matplotlib's show() function.
Parameters
----------
{graph_plotFCM_pars}
{FCMeasurement_plot_pars}
{common_plot_ax}
gates : [None, Gate, list of Gate]
Gate must be of type {_gate_available_classes}.
gate_lw: float | iterable
line width to use when drawing gates
if float, uses the same line width for all gates
if iterable, then cycles between the values
kwargs : dict
Additional keyword arguments to be passed to graph.plotFCM
Returns
-------
None : if no data is present
plot_output : output of plot command used to draw (e.g., output of hist)
Examples
--------
>>> sample.plot('Y2-A', bins=100, alpha=0.7, color='green', normed=1) # 1d histogram
>>> sample.plot(['B1-A', 'Y2-A'], cmap=cm.Oranges, colorbar=False) # 2d histogram
"""
ax = kwargs.get('ax')
channel_names = to_list(channel_names)
gates = to_list(gates)
plot_output = graph.plotFCM(self.data, channel_names, kind=kind, **kwargs)
if gates is not None:
if gate_colors is None:
gate_colors = cycle(('b', 'g', 'r', 'm', 'c', 'y'))
if not isinstance(gate_lw, collections.Iterable):
gate_lw = [gate_lw]
gate_lw = cycle(gate_lw)
for (g, c, lw) in zip(gates, gate_colors, gate_lw):
g.plot(ax=ax, ax_channels=channel_names, color=c, lw=lw)
return plot_output | [
"def",
"plot",
"(",
"self",
",",
"channel_names",
",",
"kind",
"=",
"'histogram'",
",",
"gates",
"=",
"None",
",",
"gate_colors",
"=",
"None",
",",
"gate_lw",
"=",
"1",
",",
"*",
"*",
"kwargs",
")",
":",
"ax",
"=",
"kwargs",
".",
"get",
"(",
"'ax'"... | Plot the flow cytometry data associated with the sample on the current axis.
To produce the plot, follow up with a call to matplotlib's show() function.
Parameters
----------
{graph_plotFCM_pars}
{FCMeasurement_plot_pars}
{common_plot_ax}
gates : [None, Gate, list of Gate]
Gate must be of type {_gate_available_classes}.
gate_lw: float | iterable
line width to use when drawing gates
if float, uses the same line width for all gates
if iterable, then cycles between the values
kwargs : dict
Additional keyword arguments to be passed to graph.plotFCM
Returns
-------
None : if no data is present
plot_output : output of plot command used to draw (e.g., output of hist)
Examples
--------
>>> sample.plot('Y2-A', bins=100, alpha=0.7, color='green', normed=1) # 1d histogram
>>> sample.plot(['B1-A', 'Y2-A'], cmap=cm.Oranges, colorbar=False) # 2d histogram | [
"Plot",
"the",
"flow",
"cytometry",
"data",
"associated",
"with",
"the",
"sample",
"on",
"the",
"current",
"axis",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/containers.py#L93-L142 | train | 211,140 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/containers.py | FCMeasurement.view | def view(self, channel_names='auto',
gates=None,
diag_kw={}, offdiag_kw={},
gate_colors=None, **kwargs):
"""
Generates a matrix of subplots allowing for a quick way
to examine how the sample looks in different channels.
Parameters
----------
channel_names : [list | 'auto']
List of channel names to plot.
offdiag_plot : ['histogram' | 'scatter']
Specifies the type of plot for the off-diagonal elements.
diag_kw : dict
Not implemented
Returns
------------
axes references
"""
if channel_names == 'auto':
channel_names = list(self.channel_names)
def plot_region(channels, **kwargs):
if channels[0] == channels[1]:
channels = channels[0]
kind = 'histogram'
self.plot(channels, kind=kind, gates=gates,
gate_colors=gate_colors, autolabel=False)
channel_list = np.array(list(channel_names), dtype=object)
channel_mat = [[(x, y) for x in channel_list] for y in channel_list]
channel_mat = DataFrame(channel_mat, columns=channel_list, index=channel_list)
kwargs.setdefault('wspace', 0.1)
kwargs.setdefault('hspace', 0.1)
return plot_ndpanel(channel_mat, plot_region, **kwargs) | python | def view(self, channel_names='auto',
gates=None,
diag_kw={}, offdiag_kw={},
gate_colors=None, **kwargs):
"""
Generates a matrix of subplots allowing for a quick way
to examine how the sample looks in different channels.
Parameters
----------
channel_names : [list | 'auto']
List of channel names to plot.
offdiag_plot : ['histogram' | 'scatter']
Specifies the type of plot for the off-diagonal elements.
diag_kw : dict
Not implemented
Returns
------------
axes references
"""
if channel_names == 'auto':
channel_names = list(self.channel_names)
def plot_region(channels, **kwargs):
if channels[0] == channels[1]:
channels = channels[0]
kind = 'histogram'
self.plot(channels, kind=kind, gates=gates,
gate_colors=gate_colors, autolabel=False)
channel_list = np.array(list(channel_names), dtype=object)
channel_mat = [[(x, y) for x in channel_list] for y in channel_list]
channel_mat = DataFrame(channel_mat, columns=channel_list, index=channel_list)
kwargs.setdefault('wspace', 0.1)
kwargs.setdefault('hspace', 0.1)
return plot_ndpanel(channel_mat, plot_region, **kwargs) | [
"def",
"view",
"(",
"self",
",",
"channel_names",
"=",
"'auto'",
",",
"gates",
"=",
"None",
",",
"diag_kw",
"=",
"{",
"}",
",",
"offdiag_kw",
"=",
"{",
"}",
",",
"gate_colors",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"channel_names",
... | Generates a matrix of subplots allowing for a quick way
to examine how the sample looks in different channels.
Parameters
----------
channel_names : [list | 'auto']
List of channel names to plot.
offdiag_plot : ['histogram' | 'scatter']
Specifies the type of plot for the off-diagonal elements.
diag_kw : dict
Not implemented
Returns
------------
axes references | [
"Generates",
"a",
"matrix",
"of",
"subplots",
"allowing",
"for",
"a",
"quick",
"way",
"to",
"examine",
"how",
"the",
"sample",
"looks",
"in",
"different",
"channels",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/containers.py#L144-L182 | train | 211,141 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/containers.py | FCMeasurement.view_interactively | def view_interactively(self, backend='wx'):
'''Loads the current sample in a graphical interface for drawing gates.
Parameters
----------
backend: 'auto' | 'wx' | 'webagg'
Specifies which backend should be used to view the sample.
'''
if backend == 'auto':
if matplotlib.__version__ >= '1.4.3':
backend = 'WebAgg'
else:
backend = 'wx'
if backend == 'wx':
from FlowCytometryTools.gui.wx_backend import gui
elif backend == 'webagg':
from FlowCytometryTools.gui.webagg_backend import gui
else:
raise ValueError('No support for backend {}'.format(backend))
gui.GUILauncher(measurement=self) | python | def view_interactively(self, backend='wx'):
'''Loads the current sample in a graphical interface for drawing gates.
Parameters
----------
backend: 'auto' | 'wx' | 'webagg'
Specifies which backend should be used to view the sample.
'''
if backend == 'auto':
if matplotlib.__version__ >= '1.4.3':
backend = 'WebAgg'
else:
backend = 'wx'
if backend == 'wx':
from FlowCytometryTools.gui.wx_backend import gui
elif backend == 'webagg':
from FlowCytometryTools.gui.webagg_backend import gui
else:
raise ValueError('No support for backend {}'.format(backend))
gui.GUILauncher(measurement=self) | [
"def",
"view_interactively",
"(",
"self",
",",
"backend",
"=",
"'wx'",
")",
":",
"if",
"backend",
"==",
"'auto'",
":",
"if",
"matplotlib",
".",
"__version__",
">=",
"'1.4.3'",
":",
"backend",
"=",
"'WebAgg'",
"else",
":",
"backend",
"=",
"'wx'",
"if",
"b... | Loads the current sample in a graphical interface for drawing gates.
Parameters
----------
backend: 'auto' | 'wx' | 'webagg'
Specifies which backend should be used to view the sample. | [
"Loads",
"the",
"current",
"sample",
"in",
"a",
"graphical",
"interface",
"for",
"drawing",
"gates",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/containers.py#L184-L205 | train | 211,142 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/containers.py | FCMeasurement.transform | def transform(self, transform, direction='forward',
channels=None, return_all=True, auto_range=True,
use_spln=True, get_transformer=False, ID=None,
apply_now=True,
args=(), **kwargs):
"""
Applies a transformation to the specified channels.
The transformation parameters are shared between all transformed channels.
If different parameters need to be applied to different channels,
use several calls to `transform`.
Parameters
----------
{FCMeasurement_transform_pars}
ID : hashable | None
ID for the resulting collection. If None is passed, the original ID is used.
Returns
-------
new : FCMeasurement
New measurement containing the transformed data.
transformer : Transformation
The Transformation applied to the input measurement.
Only returned if get_transformer=True.
Examples
--------
{FCMeasurement_transform_examples}
"""
# Create new measurement
new = self.copy()
data = new.data
channels = to_list(channels)
if channels is None:
channels = data.columns
## create transformer
if isinstance(transform, Transformation):
transformer = transform
else:
if auto_range: # determine transformation range
if 'd' in kwargs:
warnings.warn(
'Encountered both auto_range=True and user-specified range value in '
'parameter d.\n Range value specified in parameter d is used.')
else:
channel_meta = self.channels
# the -1 below because the channel numbers begin from 1 instead of 0
# (this is fragile code)
ranges = [float(r['$PnR']) for i, r in channel_meta.iterrows() if
self.channel_names[i - 1] in channels]
if not np.allclose(ranges, ranges[0]):
raise Exception("""Not all specified channels have the same data range,
therefore they cannot be transformed together.\n
HINT: Try transforming one channel at a time.
You'll need to provide the name of the channel in the transform.""")
if transform in {'hlog', 'tlog', 'hlog_inv', 'tlog_inv'}:
# Hacky fix to make sure that 'd' is provided only
# for hlog / tlog transformations
kwargs['d'] = np.log10(ranges[0])
transformer = Transformation(transform, direction, args, **kwargs)
## create new data
transformed = transformer(data[channels], use_spln)
if return_all:
new_data = data
else:
new_data = data.filter(channels)
new_data[channels] = transformed
## update new Measurement
new.data = new_data
if ID is not None:
new.ID = ID
if get_transformer:
return new, transformer
else:
return new | python | def transform(self, transform, direction='forward',
channels=None, return_all=True, auto_range=True,
use_spln=True, get_transformer=False, ID=None,
apply_now=True,
args=(), **kwargs):
"""
Applies a transformation to the specified channels.
The transformation parameters are shared between all transformed channels.
If different parameters need to be applied to different channels,
use several calls to `transform`.
Parameters
----------
{FCMeasurement_transform_pars}
ID : hashable | None
ID for the resulting collection. If None is passed, the original ID is used.
Returns
-------
new : FCMeasurement
New measurement containing the transformed data.
transformer : Transformation
The Transformation applied to the input measurement.
Only returned if get_transformer=True.
Examples
--------
{FCMeasurement_transform_examples}
"""
# Create new measurement
new = self.copy()
data = new.data
channels = to_list(channels)
if channels is None:
channels = data.columns
## create transformer
if isinstance(transform, Transformation):
transformer = transform
else:
if auto_range: # determine transformation range
if 'd' in kwargs:
warnings.warn(
'Encountered both auto_range=True and user-specified range value in '
'parameter d.\n Range value specified in parameter d is used.')
else:
channel_meta = self.channels
# the -1 below because the channel numbers begin from 1 instead of 0
# (this is fragile code)
ranges = [float(r['$PnR']) for i, r in channel_meta.iterrows() if
self.channel_names[i - 1] in channels]
if not np.allclose(ranges, ranges[0]):
raise Exception("""Not all specified channels have the same data range,
therefore they cannot be transformed together.\n
HINT: Try transforming one channel at a time.
You'll need to provide the name of the channel in the transform.""")
if transform in {'hlog', 'tlog', 'hlog_inv', 'tlog_inv'}:
# Hacky fix to make sure that 'd' is provided only
# for hlog / tlog transformations
kwargs['d'] = np.log10(ranges[0])
transformer = Transformation(transform, direction, args, **kwargs)
## create new data
transformed = transformer(data[channels], use_spln)
if return_all:
new_data = data
else:
new_data = data.filter(channels)
new_data[channels] = transformed
## update new Measurement
new.data = new_data
if ID is not None:
new.ID = ID
if get_transformer:
return new, transformer
else:
return new | [
"def",
"transform",
"(",
"self",
",",
"transform",
",",
"direction",
"=",
"'forward'",
",",
"channels",
"=",
"None",
",",
"return_all",
"=",
"True",
",",
"auto_range",
"=",
"True",
",",
"use_spln",
"=",
"True",
",",
"get_transformer",
"=",
"False",
",",
... | Applies a transformation to the specified channels.
The transformation parameters are shared between all transformed channels.
If different parameters need to be applied to different channels,
use several calls to `transform`.
Parameters
----------
{FCMeasurement_transform_pars}
ID : hashable | None
ID for the resulting collection. If None is passed, the original ID is used.
Returns
-------
new : FCMeasurement
New measurement containing the transformed data.
transformer : Transformation
The Transformation applied to the input measurement.
Only returned if get_transformer=True.
Examples
--------
{FCMeasurement_transform_examples} | [
"Applies",
"a",
"transformation",
"to",
"the",
"specified",
"channels",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/containers.py#L209-L287 | train | 211,143 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/containers.py | FCCollection.transform | def transform(self, transform, direction='forward', share_transform=True,
channels=None, return_all=True, auto_range=True,
use_spln=True, get_transformer=False, ID=None,
apply_now=True,
args=(), **kwargs):
'''
Apply transform to each Measurement in the Collection.
Return a new Collection with transformed data.
{_containers_held_in_memory_warning}
Parameters
----------
{FCMeasurement_transform_pars}
ID : hashable | None
ID for the resulting collection. If None is passed, the original ID is used.
Returns
-------
new : FCCollection
New collection containing the transformed measurements.
transformer : Transformation
The Transformation applied to the measurements.
Only returned if get_transformer=True & share_transform=True.
Examples
--------
{FCMeasurement_transform_examples}
'''
new = self.copy()
if share_transform:
channel_meta = list(self.values())[0].channels
channel_names = list(self.values())[0].channel_names
if channels is None:
channels = list(channel_names)
else:
channels = to_list(channels)
## create transformer
if isinstance(transform, Transformation):
transformer = transform
else:
if auto_range: # determine transformation range
if 'd' in kwargs:
warnings.warn('Encountered both auto_range=True and user-specified range '
'value in parameter d.\n '
'Range value specified in parameter d is used.')
else:
# the -1 below because the channel numbers begin from 1 instead of 0 (this is fragile code)
ranges = [float(r['$PnR']) for i, r in channel_meta.iterrows() if
channel_names[i - 1] in channels]
if not np.allclose(ranges, ranges[0]):
raise Exception('Not all specified channels have the same '
'data range, therefore they cannot be '
'transformed together.')
if transform in {'hlog', 'tlog', 'hlog_inv', 'tlog_inv'}:
# Hacky fix to make sure that 'd' is provided only
# for hlog / tlog transformations
kwargs['d'] = np.log10(ranges[0])
transformer = Transformation(transform, direction, args, **kwargs)
if use_spln:
xmax = self.apply(lambda x: x[channels].max().max(), applyto='data').max().max()
xmin = self.apply(lambda x: x[channels].min().min(), applyto='data').min().min()
transformer.set_spline(xmin, xmax)
## transform all measurements
for k, v in new.items():
new[k] = v.transform(transformer, channels=channels, return_all=return_all,
use_spln=use_spln, apply_now=apply_now)
else:
for k, v in new.items():
new[k] = v.transform(transform, direction=direction, channels=channels,
return_all=return_all, auto_range=auto_range,
get_transformer=False,
use_spln=use_spln, apply_now=apply_now, args=args, **kwargs)
if ID is not None:
new.ID = ID
if share_transform and get_transformer:
return new, transformer
else:
return new | python | def transform(self, transform, direction='forward', share_transform=True,
channels=None, return_all=True, auto_range=True,
use_spln=True, get_transformer=False, ID=None,
apply_now=True,
args=(), **kwargs):
'''
Apply transform to each Measurement in the Collection.
Return a new Collection with transformed data.
{_containers_held_in_memory_warning}
Parameters
----------
{FCMeasurement_transform_pars}
ID : hashable | None
ID for the resulting collection. If None is passed, the original ID is used.
Returns
-------
new : FCCollection
New collection containing the transformed measurements.
transformer : Transformation
The Transformation applied to the measurements.
Only returned if get_transformer=True & share_transform=True.
Examples
--------
{FCMeasurement_transform_examples}
'''
new = self.copy()
if share_transform:
channel_meta = list(self.values())[0].channels
channel_names = list(self.values())[0].channel_names
if channels is None:
channels = list(channel_names)
else:
channels = to_list(channels)
## create transformer
if isinstance(transform, Transformation):
transformer = transform
else:
if auto_range: # determine transformation range
if 'd' in kwargs:
warnings.warn('Encountered both auto_range=True and user-specified range '
'value in parameter d.\n '
'Range value specified in parameter d is used.')
else:
# the -1 below because the channel numbers begin from 1 instead of 0 (this is fragile code)
ranges = [float(r['$PnR']) for i, r in channel_meta.iterrows() if
channel_names[i - 1] in channels]
if not np.allclose(ranges, ranges[0]):
raise Exception('Not all specified channels have the same '
'data range, therefore they cannot be '
'transformed together.')
if transform in {'hlog', 'tlog', 'hlog_inv', 'tlog_inv'}:
# Hacky fix to make sure that 'd' is provided only
# for hlog / tlog transformations
kwargs['d'] = np.log10(ranges[0])
transformer = Transformation(transform, direction, args, **kwargs)
if use_spln:
xmax = self.apply(lambda x: x[channels].max().max(), applyto='data').max().max()
xmin = self.apply(lambda x: x[channels].min().min(), applyto='data').min().min()
transformer.set_spline(xmin, xmax)
## transform all measurements
for k, v in new.items():
new[k] = v.transform(transformer, channels=channels, return_all=return_all,
use_spln=use_spln, apply_now=apply_now)
else:
for k, v in new.items():
new[k] = v.transform(transform, direction=direction, channels=channels,
return_all=return_all, auto_range=auto_range,
get_transformer=False,
use_spln=use_spln, apply_now=apply_now, args=args, **kwargs)
if ID is not None:
new.ID = ID
if share_transform and get_transformer:
return new, transformer
else:
return new | [
"def",
"transform",
"(",
"self",
",",
"transform",
",",
"direction",
"=",
"'forward'",
",",
"share_transform",
"=",
"True",
",",
"channels",
"=",
"None",
",",
"return_all",
"=",
"True",
",",
"auto_range",
"=",
"True",
",",
"use_spln",
"=",
"True",
",",
"... | Apply transform to each Measurement in the Collection.
Return a new Collection with transformed data.
{_containers_held_in_memory_warning}
Parameters
----------
{FCMeasurement_transform_pars}
ID : hashable | None
ID for the resulting collection. If None is passed, the original ID is used.
Returns
-------
new : FCCollection
New collection containing the transformed measurements.
transformer : Transformation
The Transformation applied to the measurements.
Only returned if get_transformer=True & share_transform=True.
Examples
--------
{FCMeasurement_transform_examples} | [
"Apply",
"transform",
"to",
"each",
"Measurement",
"in",
"the",
"Collection",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/containers.py#L387-L469 | train | 211,144 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/containers.py | FCCollection.gate | def gate(self, gate, ID=None, apply_now=True):
'''
Applies the gate to each Measurement in the Collection, returning a new Collection with gated data.
{_containers_held_in_memory_warning}
Parameters
----------
gate : {_gate_available_classes}
ID : [ str, numeric, None]
New ID to be given to the output. If None, the ID of the current collection will be used.
'''
def func(well):
return well.gate(gate, apply_now=apply_now)
return self.apply(func, output_format='collection', ID=ID) | python | def gate(self, gate, ID=None, apply_now=True):
'''
Applies the gate to each Measurement in the Collection, returning a new Collection with gated data.
{_containers_held_in_memory_warning}
Parameters
----------
gate : {_gate_available_classes}
ID : [ str, numeric, None]
New ID to be given to the output. If None, the ID of the current collection will be used.
'''
def func(well):
return well.gate(gate, apply_now=apply_now)
return self.apply(func, output_format='collection', ID=ID) | [
"def",
"gate",
"(",
"self",
",",
"gate",
",",
"ID",
"=",
"None",
",",
"apply_now",
"=",
"True",
")",
":",
"def",
"func",
"(",
"well",
")",
":",
"return",
"well",
".",
"gate",
"(",
"gate",
",",
"apply_now",
"=",
"apply_now",
")",
"return",
"self",
... | Applies the gate to each Measurement in the Collection, returning a new Collection with gated data.
{_containers_held_in_memory_warning}
Parameters
----------
gate : {_gate_available_classes}
ID : [ str, numeric, None]
New ID to be given to the output. If None, the ID of the current collection will be used. | [
"Applies",
"the",
"gate",
"to",
"each",
"Measurement",
"in",
"the",
"Collection",
"returning",
"a",
"new",
"Collection",
"with",
"gated",
"data",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/containers.py#L472-L489 | train | 211,145 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/containers.py | FCCollection.counts | def counts(self, ids=None, setdata=False, output_format='DataFrame'):
"""
Return the counts in each of the specified measurements.
Parameters
----------
ids : [hashable | iterable of hashables | None]
Keys of measurements to get counts of.
If None is given get counts of all measurements.
setdata : bool
Whether to set the data in the Measurement object.
Used only if data is not already set.
output_format : DataFrame | dict
Specifies the output format for that data.
Returns
-------
[DataFrame | Dictionary]
Dictionary keys correspond to measurement keys.
"""
return self.apply(lambda x: x.counts, ids=ids, setdata=setdata, output_format=output_format) | python | def counts(self, ids=None, setdata=False, output_format='DataFrame'):
"""
Return the counts in each of the specified measurements.
Parameters
----------
ids : [hashable | iterable of hashables | None]
Keys of measurements to get counts of.
If None is given get counts of all measurements.
setdata : bool
Whether to set the data in the Measurement object.
Used only if data is not already set.
output_format : DataFrame | dict
Specifies the output format for that data.
Returns
-------
[DataFrame | Dictionary]
Dictionary keys correspond to measurement keys.
"""
return self.apply(lambda x: x.counts, ids=ids, setdata=setdata, output_format=output_format) | [
"def",
"counts",
"(",
"self",
",",
"ids",
"=",
"None",
",",
"setdata",
"=",
"False",
",",
"output_format",
"=",
"'DataFrame'",
")",
":",
"return",
"self",
".",
"apply",
"(",
"lambda",
"x",
":",
"x",
".",
"counts",
",",
"ids",
"=",
"ids",
",",
"setd... | Return the counts in each of the specified measurements.
Parameters
----------
ids : [hashable | iterable of hashables | None]
Keys of measurements to get counts of.
If None is given get counts of all measurements.
setdata : bool
Whether to set the data in the Measurement object.
Used only if data is not already set.
output_format : DataFrame | dict
Specifies the output format for that data.
Returns
-------
[DataFrame | Dictionary]
Dictionary keys correspond to measurement keys. | [
"Return",
"the",
"counts",
"in",
"each",
"of",
"the",
"specified",
"measurements",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/containers.py#L516-L536 | train | 211,146 |
eyurtsev/FlowCytometryTools | FlowCytometryTools/core/containers.py | FCOrderedCollection.plot | def plot(self, channel_names, kind='histogram',
gates=None, gate_colors=None,
ids=None, row_labels=None, col_labels=None,
xlim='auto', ylim='auto',
autolabel=True,
**kwargs):
"""
Produces a grid plot with each subplot corresponding to the data at the given position.
Parameters
---------------
{FCMeasurement_plot_pars}
{graph_plotFCM_pars}
{_graph_grid_layout}
Returns
-------
{_graph_grid_layout_returns}
Examples
--------
Below, plate is an instance of FCOrderedCollection
>>> plate.plot(['SSC-A', 'FSC-A'], kind='histogram', autolabel=True)
>>> plate.plot(['SSC-A', 'FSC-A'], xlim=(0, 10000))
>>> plate.plot(['B1-A', 'Y2-A'], kind='scatter', color='red', s=1, alpha=0.3)
>>> plate.plot(['B1-A', 'Y2-A'], bins=100, alpha=0.3)
>>> plate.plot(['B1-A', 'Y2-A'], bins=[linspace(-1000, 10000, 100), linspace(-1000, 10000, 100)], alpha=0.3)
.. note::
For more details see documentation for FCMeasurement.plot
**kwargs passes arguments to both grid_plot and to FCMeasurement.plot.
"""
##
# Note
# -------
# The function assumes that grid_plot and FCMeasurement.plot use unique key words.
# Any key word arguments that appear in both functions are passed only to grid_plot in the end.
##
# Automatically figure out which of the kwargs should
# be sent to grid_plot instead of two sample.plot
# (May not be a robust solution, we'll see as the code evolves
grid_arg_list = inspect.getargspec(OrderedCollection.grid_plot).args
grid_plot_kwargs = {'ids': ids,
'row_labels': row_labels,
'col_labels': col_labels}
for key, value in list(kwargs.items()):
if key in grid_arg_list:
kwargs.pop(key)
grid_plot_kwargs[key] = value
##
# Make sure channel names is a list to make the code simpler below
channel_names = to_list(channel_names)
##
# Determine data limits for binning
#
if kind == 'histogram':
nbins = kwargs.get('bins', 200)
if isinstance(nbins, int):
min_list = []
max_list = []
for sample in self:
min_list.append(self[sample].data[channel_names].min().values)
max_list.append(self[sample].data[channel_names].max().values)
min_list = list(zip(*min_list))
max_list = list(zip(*max_list))
bins = []
for i, c in enumerate(channel_names):
min_v = min(min_list[i])
max_v = max(max_list[i])
bins.append(np.linspace(min_v, max_v, nbins))
# Check if 1d
if len(channel_names) == 1:
bins = bins[0] # bins should be an ndarray, not a list of ndarrays
kwargs['bins'] = bins
##########
# Defining the plotting function that will be used.
# At the moment grid_plot handles the labeling
# (rather than sample.plot or the base function
# in GoreUtilities.graph
def plot_sample(sample, ax):
return sample.plot(channel_names, ax=ax,
gates=gates, gate_colors=gate_colors,
colorbar=False,
kind=kind, autolabel=False, **kwargs)
xlabel, ylabel = None, None
if autolabel:
cnames = to_list(channel_names)
xlabel = cnames[0]
if len(cnames) == 2:
ylabel = cnames[1]
return self.grid_plot(plot_sample, xlim=xlim, ylim=ylim,
xlabel=xlabel, ylabel=ylabel,
**grid_plot_kwargs) | python | def plot(self, channel_names, kind='histogram',
gates=None, gate_colors=None,
ids=None, row_labels=None, col_labels=None,
xlim='auto', ylim='auto',
autolabel=True,
**kwargs):
"""
Produces a grid plot with each subplot corresponding to the data at the given position.
Parameters
---------------
{FCMeasurement_plot_pars}
{graph_plotFCM_pars}
{_graph_grid_layout}
Returns
-------
{_graph_grid_layout_returns}
Examples
--------
Below, plate is an instance of FCOrderedCollection
>>> plate.plot(['SSC-A', 'FSC-A'], kind='histogram', autolabel=True)
>>> plate.plot(['SSC-A', 'FSC-A'], xlim=(0, 10000))
>>> plate.plot(['B1-A', 'Y2-A'], kind='scatter', color='red', s=1, alpha=0.3)
>>> plate.plot(['B1-A', 'Y2-A'], bins=100, alpha=0.3)
>>> plate.plot(['B1-A', 'Y2-A'], bins=[linspace(-1000, 10000, 100), linspace(-1000, 10000, 100)], alpha=0.3)
.. note::
For more details see documentation for FCMeasurement.plot
**kwargs passes arguments to both grid_plot and to FCMeasurement.plot.
"""
##
# Note
# -------
# The function assumes that grid_plot and FCMeasurement.plot use unique key words.
# Any key word arguments that appear in both functions are passed only to grid_plot in the end.
##
# Automatically figure out which of the kwargs should
# be sent to grid_plot instead of two sample.plot
# (May not be a robust solution, we'll see as the code evolves
grid_arg_list = inspect.getargspec(OrderedCollection.grid_plot).args
grid_plot_kwargs = {'ids': ids,
'row_labels': row_labels,
'col_labels': col_labels}
for key, value in list(kwargs.items()):
if key in grid_arg_list:
kwargs.pop(key)
grid_plot_kwargs[key] = value
##
# Make sure channel names is a list to make the code simpler below
channel_names = to_list(channel_names)
##
# Determine data limits for binning
#
if kind == 'histogram':
nbins = kwargs.get('bins', 200)
if isinstance(nbins, int):
min_list = []
max_list = []
for sample in self:
min_list.append(self[sample].data[channel_names].min().values)
max_list.append(self[sample].data[channel_names].max().values)
min_list = list(zip(*min_list))
max_list = list(zip(*max_list))
bins = []
for i, c in enumerate(channel_names):
min_v = min(min_list[i])
max_v = max(max_list[i])
bins.append(np.linspace(min_v, max_v, nbins))
# Check if 1d
if len(channel_names) == 1:
bins = bins[0] # bins should be an ndarray, not a list of ndarrays
kwargs['bins'] = bins
##########
# Defining the plotting function that will be used.
# At the moment grid_plot handles the labeling
# (rather than sample.plot or the base function
# in GoreUtilities.graph
def plot_sample(sample, ax):
return sample.plot(channel_names, ax=ax,
gates=gates, gate_colors=gate_colors,
colorbar=False,
kind=kind, autolabel=False, **kwargs)
xlabel, ylabel = None, None
if autolabel:
cnames = to_list(channel_names)
xlabel = cnames[0]
if len(cnames) == 2:
ylabel = cnames[1]
return self.grid_plot(plot_sample, xlim=xlim, ylim=ylim,
xlabel=xlabel, ylabel=ylabel,
**grid_plot_kwargs) | [
"def",
"plot",
"(",
"self",
",",
"channel_names",
",",
"kind",
"=",
"'histogram'",
",",
"gates",
"=",
"None",
",",
"gate_colors",
"=",
"None",
",",
"ids",
"=",
"None",
",",
"row_labels",
"=",
"None",
",",
"col_labels",
"=",
"None",
",",
"xlim",
"=",
... | Produces a grid plot with each subplot corresponding to the data at the given position.
Parameters
---------------
{FCMeasurement_plot_pars}
{graph_plotFCM_pars}
{_graph_grid_layout}
Returns
-------
{_graph_grid_layout_returns}
Examples
--------
Below, plate is an instance of FCOrderedCollection
>>> plate.plot(['SSC-A', 'FSC-A'], kind='histogram', autolabel=True)
>>> plate.plot(['SSC-A', 'FSC-A'], xlim=(0, 10000))
>>> plate.plot(['B1-A', 'Y2-A'], kind='scatter', color='red', s=1, alpha=0.3)
>>> plate.plot(['B1-A', 'Y2-A'], bins=100, alpha=0.3)
>>> plate.plot(['B1-A', 'Y2-A'], bins=[linspace(-1000, 10000, 100), linspace(-1000, 10000, 100)], alpha=0.3)
.. note::
For more details see documentation for FCMeasurement.plot
**kwargs passes arguments to both grid_plot and to FCMeasurement.plot. | [
"Produces",
"a",
"grid",
"plot",
"with",
"each",
"subplot",
"corresponding",
"to",
"the",
"data",
"at",
"the",
"given",
"position",
"."
] | 4355632508b875273d68c7e2972c17668bcf7b40 | https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/FlowCytometryTools/core/containers.py#L545-L658 | train | 211,147 |
foutaise/texttable | texttable.py | obj2unicode | def obj2unicode(obj):
"""Return a unicode representation of a python object
"""
if isinstance(obj, unicode_type):
return obj
elif isinstance(obj, bytes_type):
try:
return unicode_type(obj, 'utf-8')
except UnicodeDecodeError as strerror:
sys.stderr.write("UnicodeDecodeError exception for string '%s': %s\n" % (obj, strerror))
return unicode_type(obj, 'utf-8', 'replace')
else:
return unicode_type(obj) | python | def obj2unicode(obj):
"""Return a unicode representation of a python object
"""
if isinstance(obj, unicode_type):
return obj
elif isinstance(obj, bytes_type):
try:
return unicode_type(obj, 'utf-8')
except UnicodeDecodeError as strerror:
sys.stderr.write("UnicodeDecodeError exception for string '%s': %s\n" % (obj, strerror))
return unicode_type(obj, 'utf-8', 'replace')
else:
return unicode_type(obj) | [
"def",
"obj2unicode",
"(",
"obj",
")",
":",
"if",
"isinstance",
"(",
"obj",
",",
"unicode_type",
")",
":",
"return",
"obj",
"elif",
"isinstance",
"(",
"obj",
",",
"bytes_type",
")",
":",
"try",
":",
"return",
"unicode_type",
"(",
"obj",
",",
"'utf-8'",
... | Return a unicode representation of a python object | [
"Return",
"a",
"unicode",
"representation",
"of",
"a",
"python",
"object"
] | 8eea49c20458ec40478e2f26b4b260ad47550838 | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L143-L155 | train | 211,148 |
foutaise/texttable | texttable.py | Texttable.set_chars | def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
return self | python | def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) != 4:
raise ArraySizeError("array should contain 4 characters")
array = [ x[:1] for x in [ str(s) for s in array ] ]
(self._char_horiz, self._char_vert,
self._char_corner, self._char_header) = array
return self | [
"def",
"set_chars",
"(",
"self",
",",
"array",
")",
":",
"if",
"len",
"(",
"array",
")",
"!=",
"4",
":",
"raise",
"ArraySizeError",
"(",
"\"array should contain 4 characters\"",
")",
"array",
"=",
"[",
"x",
"[",
":",
"1",
"]",
"for",
"x",
"in",
"[",
... | Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '='] | [
"Set",
"the",
"characters",
"used",
"to",
"draw",
"lines",
"between",
"rows",
"and",
"columns"
] | 8eea49c20458ec40478e2f26b4b260ad47550838 | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L227-L244 | train | 211,149 |
foutaise/texttable | texttable.py | Texttable.set_header_align | def set_header_align(self, array):
"""Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._header_align = array
return self | python | def set_header_align(self, array):
"""Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._header_align = array
return self | [
"def",
"set_header_align",
"(",
"self",
",",
"array",
")",
":",
"self",
".",
"_check_row_size",
"(",
"array",
")",
"self",
".",
"_header_align",
"=",
"array",
"return",
"self"
] | Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right | [
"Set",
"the",
"desired",
"header",
"alignment"
] | 8eea49c20458ec40478e2f26b4b260ad47550838 | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L266-L278 | train | 211,150 |
foutaise/texttable | texttable.py | Texttable.set_cols_align | def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
return self | python | def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
return self | [
"def",
"set_cols_align",
"(",
"self",
",",
"array",
")",
":",
"self",
".",
"_check_row_size",
"(",
"array",
")",
"self",
".",
"_align",
"=",
"array",
"return",
"self"
] | Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right | [
"Set",
"the",
"desired",
"columns",
"alignment"
] | 8eea49c20458ec40478e2f26b4b260ad47550838 | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L280-L292 | train | 211,151 |
foutaise/texttable | texttable.py | Texttable.set_cols_valign | def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
return self | python | def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
return self | [
"def",
"set_cols_valign",
"(",
"self",
",",
"array",
")",
":",
"self",
".",
"_check_row_size",
"(",
"array",
")",
"self",
".",
"_valign",
"=",
"array",
"return",
"self"
] | Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell | [
"Set",
"the",
"desired",
"columns",
"vertical",
"alignment"
] | 8eea49c20458ec40478e2f26b4b260ad47550838 | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L294-L306 | train | 211,152 |
foutaise/texttable | texttable.py | Texttable.set_cols_dtype | def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either a callable or any of
"a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
* a callable: should return formatted string for any value given
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
return self | python | def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either a callable or any of
"a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
* a callable: should return formatted string for any value given
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
return self | [
"def",
"set_cols_dtype",
"(",
"self",
",",
"array",
")",
":",
"self",
".",
"_check_row_size",
"(",
"array",
")",
"self",
".",
"_dtype",
"=",
"array",
"return",
"self"
] | Set the desired columns datatype for the cols.
- the elements of the array should be either a callable or any of
"a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
* a callable: should return formatted string for any value given
- by default, automatic datatyping is used for each column | [
"Set",
"the",
"desired",
"columns",
"datatype",
"for",
"the",
"cols",
"."
] | 8eea49c20458ec40478e2f26b4b260ad47550838 | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L308-L326 | train | 211,153 |
foutaise/texttable | texttable.py | Texttable.add_rows | def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a by-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
# nb: don't use 'iter' on by-dimensional arrays, to get a
# usable code for python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
self.header(rows.next())
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
return self | python | def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a by-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
# nb: don't use 'iter' on by-dimensional arrays, to get a
# usable code for python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
self.header(rows.next())
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
return self | [
"def",
"add_rows",
"(",
"self",
",",
"rows",
",",
"header",
"=",
"True",
")",
":",
"# nb: don't use 'iter' on by-dimensional arrays, to get a",
"# usable code for python 2.1",
"if",
"header",
":",
"if",
"hasattr",
"(",
"rows",
",",
"'__iter__'",
")",
"and",
"has... | Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
or a by-dimensional array
- 'header' specifies if the first row should be used as the header
of the table | [
"Add",
"several",
"rows",
"in",
"the",
"rows",
"stack"
] | 8eea49c20458ec40478e2f26b4b260ad47550838 | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L386-L405 | train | 211,154 |
foutaise/texttable | texttable.py | Texttable.draw | def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1] | python | def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self._has_border():
out += self._hline()
if self._header:
out += self._draw_line(self._header, isheader=True)
if self._has_header():
out += self._hline_header()
length = 0
for row in self._rows:
length += 1
out += self._draw_line(row)
if self._has_hlines() and length < len(self._rows):
out += self._hline()
if self._has_border():
out += self._hline()
return out[:-1] | [
"def",
"draw",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_header",
"and",
"not",
"self",
".",
"_rows",
":",
"return",
"self",
".",
"_compute_cols_width",
"(",
")",
"self",
".",
"_check_align",
"(",
")",
"out",
"=",
"\"\"",
"if",
"self",
".",
... | Draw the table
- the table is returned as a whole string | [
"Draw",
"the",
"table"
] | 8eea49c20458ec40478e2f26b4b260ad47550838 | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L407-L432 | train | 211,155 |
foutaise/texttable | texttable.py | Texttable._fmt_int | def _fmt_int(cls, x, **kw):
"""Integer formatting class-method.
- x will be float-converted and then used.
"""
return str(int(round(cls._to_float(x)))) | python | def _fmt_int(cls, x, **kw):
"""Integer formatting class-method.
- x will be float-converted and then used.
"""
return str(int(round(cls._to_float(x)))) | [
"def",
"_fmt_int",
"(",
"cls",
",",
"x",
",",
"*",
"*",
"kw",
")",
":",
"return",
"str",
"(",
"int",
"(",
"round",
"(",
"cls",
".",
"_to_float",
"(",
"x",
")",
")",
")",
")"
] | Integer formatting class-method.
- x will be float-converted and then used. | [
"Integer",
"formatting",
"class",
"-",
"method",
"."
] | 8eea49c20458ec40478e2f26b4b260ad47550838 | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L444-L449 | train | 211,156 |
foutaise/texttable | texttable.py | Texttable._fmt_float | def _fmt_float(cls, x, **kw):
"""Float formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*f' % (n, cls._to_float(x)) | python | def _fmt_float(cls, x, **kw):
"""Float formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*f' % (n, cls._to_float(x)) | [
"def",
"_fmt_float",
"(",
"cls",
",",
"x",
",",
"*",
"*",
"kw",
")",
":",
"n",
"=",
"kw",
".",
"get",
"(",
"'n'",
")",
"return",
"'%.*f'",
"%",
"(",
"n",
",",
"cls",
".",
"_to_float",
"(",
"x",
")",
")"
] | Float formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument. | [
"Float",
"formatting",
"class",
"-",
"method",
"."
] | 8eea49c20458ec40478e2f26b4b260ad47550838 | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L452-L461 | train | 211,157 |
foutaise/texttable | texttable.py | Texttable._fmt_exp | def _fmt_exp(cls, x, **kw):
"""Exponential formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*e' % (n, cls._to_float(x)) | python | def _fmt_exp(cls, x, **kw):
"""Exponential formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*e' % (n, cls._to_float(x)) | [
"def",
"_fmt_exp",
"(",
"cls",
",",
"x",
",",
"*",
"*",
"kw",
")",
":",
"n",
"=",
"kw",
".",
"get",
"(",
"'n'",
")",
"return",
"'%.*e'",
"%",
"(",
"n",
",",
"cls",
".",
"_to_float",
"(",
"x",
")",
")"
] | Exponential formatting class-method.
- x parameter is ignored. Instead kw-argument f being x float-converted
will be used.
- precision will be taken from `n` kw-argument. | [
"Exponential",
"formatting",
"class",
"-",
"method",
"."
] | 8eea49c20458ec40478e2f26b4b260ad47550838 | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L464-L473 | train | 211,158 |
foutaise/texttable | texttable.py | Texttable._fmt_auto | def _fmt_auto(cls, x, **kw):
"""auto formatting class-method."""
f = cls._to_float(x)
if abs(f) > 1e8:
fn = cls._fmt_exp
else:
if f - round(f) == 0:
fn = cls._fmt_int
else:
fn = cls._fmt_float
return fn(x, **kw) | python | def _fmt_auto(cls, x, **kw):
"""auto formatting class-method."""
f = cls._to_float(x)
if abs(f) > 1e8:
fn = cls._fmt_exp
else:
if f - round(f) == 0:
fn = cls._fmt_int
else:
fn = cls._fmt_float
return fn(x, **kw) | [
"def",
"_fmt_auto",
"(",
"cls",
",",
"x",
",",
"*",
"*",
"kw",
")",
":",
"f",
"=",
"cls",
".",
"_to_float",
"(",
"x",
")",
"if",
"abs",
"(",
"f",
")",
">",
"1e8",
":",
"fn",
"=",
"cls",
".",
"_fmt_exp",
"else",
":",
"if",
"f",
"-",
"round",... | auto formatting class-method. | [
"auto",
"formatting",
"class",
"-",
"method",
"."
] | 8eea49c20458ec40478e2f26b4b260ad47550838 | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L481-L491 | train | 211,159 |
foutaise/texttable | texttable.py | Texttable._str | def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
FMT = {
'a':self._fmt_auto,
'i':self._fmt_int,
'f':self._fmt_float,
'e':self._fmt_exp,
't':self._fmt_text,
}
n = self._precision
dtype = self._dtype[i]
try:
if callable(dtype):
return dtype(x)
else:
return FMT[dtype](x, n=n)
except FallbackToText:
return self._fmt_text(x) | python | def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
FMT = {
'a':self._fmt_auto,
'i':self._fmt_int,
'f':self._fmt_float,
'e':self._fmt_exp,
't':self._fmt_text,
}
n = self._precision
dtype = self._dtype[i]
try:
if callable(dtype):
return dtype(x)
else:
return FMT[dtype](x, n=n)
except FallbackToText:
return self._fmt_text(x) | [
"def",
"_str",
"(",
"self",
",",
"i",
",",
"x",
")",
":",
"FMT",
"=",
"{",
"'a'",
":",
"self",
".",
"_fmt_auto",
",",
"'i'",
":",
"self",
".",
"_fmt_int",
",",
"'f'",
":",
"self",
".",
"_fmt_float",
",",
"'e'",
":",
"self",
".",
"_fmt_exp",
","... | Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format | [
"Handles",
"string",
"formatting",
"of",
"cell",
"data"
] | 8eea49c20458ec40478e2f26b4b260ad47550838 | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L493-L515 | train | 211,160 |
foutaise/texttable | texttable.py | Texttable._hline | def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string | python | def _hline(self):
"""Print an horizontal line
"""
if not self._hline_string:
self._hline_string = self._build_hline()
return self._hline_string | [
"def",
"_hline",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_hline_string",
":",
"self",
".",
"_hline_string",
"=",
"self",
".",
"_build_hline",
"(",
")",
"return",
"self",
".",
"_hline_string"
] | Print an horizontal line | [
"Print",
"an",
"horizontal",
"line"
] | 8eea49c20458ec40478e2f26b4b260ad47550838 | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L557-L563 | train | 211,161 |
foutaise/texttable | texttable.py | Texttable._build_hline | def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l | python | def _build_hline(self, is_header=False):
"""Return a string used to separated rows or separate header from
rows
"""
horiz = self._char_horiz
if (is_header):
horiz = self._char_header
# compute cell separator
s = "%s%s%s" % (horiz, [horiz, self._char_corner][self._has_vlines()],
horiz)
# build the line
l = s.join([horiz * n for n in self._width])
# add border if needed
if self._has_border():
l = "%s%s%s%s%s\n" % (self._char_corner, horiz, l, horiz,
self._char_corner)
else:
l += "\n"
return l | [
"def",
"_build_hline",
"(",
"self",
",",
"is_header",
"=",
"False",
")",
":",
"horiz",
"=",
"self",
".",
"_char_horiz",
"if",
"(",
"is_header",
")",
":",
"horiz",
"=",
"self",
".",
"_char_header",
"# compute cell separator",
"s",
"=",
"\"%s%s%s\"",
"%",
"(... | Return a string used to separated rows or separate header from
rows | [
"Return",
"a",
"string",
"used",
"to",
"separated",
"rows",
"or",
"separate",
"header",
"from",
"rows"
] | 8eea49c20458ec40478e2f26b4b260ad47550838 | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L565-L583 | train | 211,162 |
foutaise/texttable | texttable.py | Texttable._len_cell | def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi | python | def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi | [
"def",
"_len_cell",
"(",
"self",
",",
"cell",
")",
":",
"cell_lines",
"=",
"cell",
".",
"split",
"(",
"'\\n'",
")",
"maxi",
"=",
"0",
"for",
"line",
"in",
"cell_lines",
":",
"length",
"=",
"0",
"parts",
"=",
"line",
".",
"split",
"(",
"'\\t'",
")",... | Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs | [
"Return",
"the",
"width",
"of",
"the",
"cell"
] | 8eea49c20458ec40478e2f26b4b260ad47550838 | https://github.com/foutaise/texttable/blob/8eea49c20458ec40478e2f26b4b260ad47550838/texttable.py#L585-L602 | train | 211,163 |
agschwender/pilbox | pilbox/image.py | Image.region | def region(self, rect):
""" Selects a sub-region of the image using the supplied rectangle,
x, y, width, height.
"""
box = (int(rect[0]), int(rect[1]), int(rect[0]) + int(rect[2]),
int(rect[1]) + int(rect[3]))
if box[2] > self.img.size[0] or box[3] > self.img.size[1]:
raise errors.RectangleError("Region out-of-bounds")
self.img = self.img.crop(box)
return self | python | def region(self, rect):
""" Selects a sub-region of the image using the supplied rectangle,
x, y, width, height.
"""
box = (int(rect[0]), int(rect[1]), int(rect[0]) + int(rect[2]),
int(rect[1]) + int(rect[3]))
if box[2] > self.img.size[0] or box[3] > self.img.size[1]:
raise errors.RectangleError("Region out-of-bounds")
self.img = self.img.crop(box)
return self | [
"def",
"region",
"(",
"self",
",",
"rect",
")",
":",
"box",
"=",
"(",
"int",
"(",
"rect",
"[",
"0",
"]",
")",
",",
"int",
"(",
"rect",
"[",
"1",
"]",
")",
",",
"int",
"(",
"rect",
"[",
"0",
"]",
")",
"+",
"int",
"(",
"rect",
"[",
"2",
"... | Selects a sub-region of the image using the supplied rectangle,
x, y, width, height. | [
"Selects",
"a",
"sub",
"-",
"region",
"of",
"the",
"image",
"using",
"the",
"supplied",
"rectangle",
"x",
"y",
"width",
"height",
"."
] | 8b1d154436fd1b9f9740925549793561c58d4400 | https://github.com/agschwender/pilbox/blob/8b1d154436fd1b9f9740925549793561c58d4400/pilbox/image.py#L171-L180 | train | 211,164 |
agschwender/pilbox | pilbox/signature.py | derive_signature | def derive_signature(key, qs):
"""Derives the signature from the supplied query string using the key."""
key, qs = (key or "", qs or "")
return hmac.new(key.encode(), qs.encode(), hashlib.sha1).hexdigest() | python | def derive_signature(key, qs):
"""Derives the signature from the supplied query string using the key."""
key, qs = (key or "", qs or "")
return hmac.new(key.encode(), qs.encode(), hashlib.sha1).hexdigest() | [
"def",
"derive_signature",
"(",
"key",
",",
"qs",
")",
":",
"key",
",",
"qs",
"=",
"(",
"key",
"or",
"\"\"",
",",
"qs",
"or",
"\"\"",
")",
"return",
"hmac",
".",
"new",
"(",
"key",
".",
"encode",
"(",
")",
",",
"qs",
".",
"encode",
"(",
")",
... | Derives the signature from the supplied query string using the key. | [
"Derives",
"the",
"signature",
"from",
"the",
"supplied",
"query",
"string",
"using",
"the",
"key",
"."
] | 8b1d154436fd1b9f9740925549793561c58d4400 | https://github.com/agschwender/pilbox/blob/8b1d154436fd1b9f9740925549793561c58d4400/pilbox/signature.py#L35-L38 | train | 211,165 |
agschwender/pilbox | pilbox/signature.py | sign | def sign(key, qs):
"""Signs the query string using the key."""
sig = derive_signature(key, qs)
return "%s&%s" % (qs, urlencode([("sig", sig)])) | python | def sign(key, qs):
"""Signs the query string using the key."""
sig = derive_signature(key, qs)
return "%s&%s" % (qs, urlencode([("sig", sig)])) | [
"def",
"sign",
"(",
"key",
",",
"qs",
")",
":",
"sig",
"=",
"derive_signature",
"(",
"key",
",",
"qs",
")",
"return",
"\"%s&%s\"",
"%",
"(",
"qs",
",",
"urlencode",
"(",
"[",
"(",
"\"sig\"",
",",
"sig",
")",
"]",
")",
")"
] | Signs the query string using the key. | [
"Signs",
"the",
"query",
"string",
"using",
"the",
"key",
"."
] | 8b1d154436fd1b9f9740925549793561c58d4400 | https://github.com/agschwender/pilbox/blob/8b1d154436fd1b9f9740925549793561c58d4400/pilbox/signature.py#L41-L44 | train | 211,166 |
agschwender/pilbox | pilbox/signature.py | verify_signature | def verify_signature(key, qs):
"""Verifies that the signature in the query string is correct."""
unsigned_qs = re.sub(r'&?sig=[^&]*', '', qs)
sig = derive_signature(key, unsigned_qs)
return urlparse.parse_qs(qs).get("sig", [None])[0] == sig | python | def verify_signature(key, qs):
"""Verifies that the signature in the query string is correct."""
unsigned_qs = re.sub(r'&?sig=[^&]*', '', qs)
sig = derive_signature(key, unsigned_qs)
return urlparse.parse_qs(qs).get("sig", [None])[0] == sig | [
"def",
"verify_signature",
"(",
"key",
",",
"qs",
")",
":",
"unsigned_qs",
"=",
"re",
".",
"sub",
"(",
"r'&?sig=[^&]*'",
",",
"''",
",",
"qs",
")",
"sig",
"=",
"derive_signature",
"(",
"key",
",",
"unsigned_qs",
")",
"return",
"urlparse",
".",
"parse_qs"... | Verifies that the signature in the query string is correct. | [
"Verifies",
"that",
"the",
"signature",
"in",
"the",
"query",
"string",
"is",
"correct",
"."
] | 8b1d154436fd1b9f9740925549793561c58d4400 | https://github.com/agschwender/pilbox/blob/8b1d154436fd1b9f9740925549793561c58d4400/pilbox/signature.py#L47-L51 | train | 211,167 |
gwpy/gwpy | gwpy/signal/window.py | canonical_name | def canonical_name(name):
"""Find the canonical name for the given window in scipy.signal
Parameters
----------
name : `str`
the name of the window you want
Returns
-------
realname : `str`
the name of the window as implemented in `scipy.signal.window`
Raises
-------
ValueError
if ``name`` cannot be resolved to a window function in `scipy.signal`
Examples
--------
>>> from gwpy.signal.window import canonical_name
>>> canonical_name('hanning')
'hann'
>>> canonical_name('ksr')
'kaiser'
"""
if name.lower() == 'planck': # make sure to handle the Planck window
return 'planck'
try: # use equivalence introduced in scipy 0.16.0
# pylint: disable=protected-access
return scipy_windows._win_equiv[name.lower()].__name__
except AttributeError: # old scipy
try:
return getattr(scipy_windows, name.lower()).__name__
except AttributeError: # no match
pass # raise later
except KeyError: # no match
pass # raise later
raise ValueError('no window function in scipy.signal equivalent to %r'
% name,) | python | def canonical_name(name):
"""Find the canonical name for the given window in scipy.signal
Parameters
----------
name : `str`
the name of the window you want
Returns
-------
realname : `str`
the name of the window as implemented in `scipy.signal.window`
Raises
-------
ValueError
if ``name`` cannot be resolved to a window function in `scipy.signal`
Examples
--------
>>> from gwpy.signal.window import canonical_name
>>> canonical_name('hanning')
'hann'
>>> canonical_name('ksr')
'kaiser'
"""
if name.lower() == 'planck': # make sure to handle the Planck window
return 'planck'
try: # use equivalence introduced in scipy 0.16.0
# pylint: disable=protected-access
return scipy_windows._win_equiv[name.lower()].__name__
except AttributeError: # old scipy
try:
return getattr(scipy_windows, name.lower()).__name__
except AttributeError: # no match
pass # raise later
except KeyError: # no match
pass # raise later
raise ValueError('no window function in scipy.signal equivalent to %r'
% name,) | [
"def",
"canonical_name",
"(",
"name",
")",
":",
"if",
"name",
".",
"lower",
"(",
")",
"==",
"'planck'",
":",
"# make sure to handle the Planck window",
"return",
"'planck'",
"try",
":",
"# use equivalence introduced in scipy 0.16.0",
"# pylint: disable=protected-access",
... | Find the canonical name for the given window in scipy.signal
Parameters
----------
name : `str`
the name of the window you want
Returns
-------
realname : `str`
the name of the window as implemented in `scipy.signal.window`
Raises
-------
ValueError
if ``name`` cannot be resolved to a window function in `scipy.signal`
Examples
--------
>>> from gwpy.signal.window import canonical_name
>>> canonical_name('hanning')
'hann'
>>> canonical_name('ksr')
'kaiser' | [
"Find",
"the",
"canonical",
"name",
"for",
"the",
"given",
"window",
"in",
"scipy",
".",
"signal"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/window.py#L36-L76 | train | 211,168 |
gwpy/gwpy | gwpy/signal/window.py | recommended_overlap | def recommended_overlap(name, nfft=None):
"""Returns the recommended fractional overlap for the given window
If ``nfft`` is given, the return is in samples
Parameters
----------
name : `str`
the name of the window you are using
nfft : `int`, optional
the length of the window
Returns
-------
rov : `float`, `int`
the recommended overlap (ROV) for the given window, in samples if
``nfft` is given (`int`), otherwise fractional (`float`)
Examples
--------
>>> from gwpy.signal.window import recommended_overlap
>>> recommended_overlap('hann')
0.5
>>> recommended_overlap('blackmanharris', nfft=128)
85
"""
try:
name = canonical_name(name)
except KeyError as exc:
raise ValueError(str(exc))
try:
rov = ROV[name]
except KeyError:
raise ValueError("no recommended overlap for %r window" % name)
if nfft:
return int(ceil(nfft * rov))
return rov | python | def recommended_overlap(name, nfft=None):
"""Returns the recommended fractional overlap for the given window
If ``nfft`` is given, the return is in samples
Parameters
----------
name : `str`
the name of the window you are using
nfft : `int`, optional
the length of the window
Returns
-------
rov : `float`, `int`
the recommended overlap (ROV) for the given window, in samples if
``nfft` is given (`int`), otherwise fractional (`float`)
Examples
--------
>>> from gwpy.signal.window import recommended_overlap
>>> recommended_overlap('hann')
0.5
>>> recommended_overlap('blackmanharris', nfft=128)
85
"""
try:
name = canonical_name(name)
except KeyError as exc:
raise ValueError(str(exc))
try:
rov = ROV[name]
except KeyError:
raise ValueError("no recommended overlap for %r window" % name)
if nfft:
return int(ceil(nfft * rov))
return rov | [
"def",
"recommended_overlap",
"(",
"name",
",",
"nfft",
"=",
"None",
")",
":",
"try",
":",
"name",
"=",
"canonical_name",
"(",
"name",
")",
"except",
"KeyError",
"as",
"exc",
":",
"raise",
"ValueError",
"(",
"str",
"(",
"exc",
")",
")",
"try",
":",
"... | Returns the recommended fractional overlap for the given window
If ``nfft`` is given, the return is in samples
Parameters
----------
name : `str`
the name of the window you are using
nfft : `int`, optional
the length of the window
Returns
-------
rov : `float`, `int`
the recommended overlap (ROV) for the given window, in samples if
``nfft` is given (`int`), otherwise fractional (`float`)
Examples
--------
>>> from gwpy.signal.window import recommended_overlap
>>> recommended_overlap('hann')
0.5
>>> recommended_overlap('blackmanharris', nfft=128)
85 | [
"Returns",
"the",
"recommended",
"fractional",
"overlap",
"for",
"the",
"given",
"window"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/window.py#L95-L132 | train | 211,169 |
gwpy/gwpy | gwpy/signal/window.py | planck | def planck(N, nleft=0, nright=0):
"""Return a Planck taper window.
Parameters
----------
N : `int`
Number of samples in the output window
nleft : `int`, optional
Number of samples to taper on the left, should be less than `N/2`
nright : `int`, optional
Number of samples to taper on the right, should be less than `N/2`
Returns
-------
w : `ndarray`
The window, with the maximum value normalized to 1 and at least one
end tapered smoothly to 0.
Examples
--------
To taper 0.1 seconds on both ends of one second of data sampled at 2048 Hz:
>>> from gwpy.signal.window import planck
>>> w = planck(2048, nleft=205, nright=205)
References
----------
.. [1] McKechan, D.J.A., Robinson, C., and Sathyaprakash, B.S. (April
2010). "A tapering window for time-domain templates and simulated
signals in the detection of gravitational waves from coalescing
compact binaries". Classical and Quantum Gravity 27 (8).
:doi:`10.1088/0264-9381/27/8/084020`
.. [2] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function#Planck-taper_window
"""
# construct a Planck taper window
w = numpy.ones(N)
if nleft:
w[0] *= 0
zleft = numpy.array([nleft * (1./k + 1./(k-nleft))
for k in range(1, nleft)])
w[1:nleft] *= expit(-zleft)
if nright:
w[N-1] *= 0
zright = numpy.array([-nright * (1./(k-nright) + 1./k)
for k in range(1, nright)])
w[N-nright:N-1] *= expit(-zright)
return w | python | def planck(N, nleft=0, nright=0):
"""Return a Planck taper window.
Parameters
----------
N : `int`
Number of samples in the output window
nleft : `int`, optional
Number of samples to taper on the left, should be less than `N/2`
nright : `int`, optional
Number of samples to taper on the right, should be less than `N/2`
Returns
-------
w : `ndarray`
The window, with the maximum value normalized to 1 and at least one
end tapered smoothly to 0.
Examples
--------
To taper 0.1 seconds on both ends of one second of data sampled at 2048 Hz:
>>> from gwpy.signal.window import planck
>>> w = planck(2048, nleft=205, nright=205)
References
----------
.. [1] McKechan, D.J.A., Robinson, C., and Sathyaprakash, B.S. (April
2010). "A tapering window for time-domain templates and simulated
signals in the detection of gravitational waves from coalescing
compact binaries". Classical and Quantum Gravity 27 (8).
:doi:`10.1088/0264-9381/27/8/084020`
.. [2] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function#Planck-taper_window
"""
# construct a Planck taper window
w = numpy.ones(N)
if nleft:
w[0] *= 0
zleft = numpy.array([nleft * (1./k + 1./(k-nleft))
for k in range(1, nleft)])
w[1:nleft] *= expit(-zleft)
if nright:
w[N-1] *= 0
zright = numpy.array([-nright * (1./(k-nright) + 1./k)
for k in range(1, nright)])
w[N-nright:N-1] *= expit(-zright)
return w | [
"def",
"planck",
"(",
"N",
",",
"nleft",
"=",
"0",
",",
"nright",
"=",
"0",
")",
":",
"# construct a Planck taper window",
"w",
"=",
"numpy",
".",
"ones",
"(",
"N",
")",
"if",
"nleft",
":",
"w",
"[",
"0",
"]",
"*=",
"0",
"zleft",
"=",
"numpy",
".... | Return a Planck taper window.
Parameters
----------
N : `int`
Number of samples in the output window
nleft : `int`, optional
Number of samples to taper on the left, should be less than `N/2`
nright : `int`, optional
Number of samples to taper on the right, should be less than `N/2`
Returns
-------
w : `ndarray`
The window, with the maximum value normalized to 1 and at least one
end tapered smoothly to 0.
Examples
--------
To taper 0.1 seconds on both ends of one second of data sampled at 2048 Hz:
>>> from gwpy.signal.window import planck
>>> w = planck(2048, nleft=205, nright=205)
References
----------
.. [1] McKechan, D.J.A., Robinson, C., and Sathyaprakash, B.S. (April
2010). "A tapering window for time-domain templates and simulated
signals in the detection of gravitational waves from coalescing
compact binaries". Classical and Quantum Gravity 27 (8).
:doi:`10.1088/0264-9381/27/8/084020`
.. [2] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function#Planck-taper_window | [
"Return",
"a",
"Planck",
"taper",
"window",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/signal/window.py#L139-L189 | train | 211,170 |
gwpy/gwpy | gwpy/utils/env.py | bool_env | def bool_env(key, default=False):
"""Parse an environment variable as a boolean switch
`True` is returned if the variable value matches one of the following:
- ``'1'``
- ``'y'``
- ``'yes'``
- ``'true'``
The match is case-insensitive (so ``'Yes'`` will match as `True`)
Parameters
----------
key : `str`
the name of the environment variable to find
default : `bool`
the default return value if the key is not found
Returns
-------
True
if the environment variable matches as 'yes' or similar
False
otherwise
Examples
--------
>>> import os
>>> from gwpy.utils.env import bool_env
>>> os.environ['GWPY_VALUE'] = 'yes'
>>> print(bool_env('GWPY_VALUE'))
True
>>> os.environ['GWPY_VALUE'] = 'something else'
>>> print(bool_env('GWPY_VALUE'))
False
>>> print(bool_env('GWPY_VALUE2'))
False
"""
try:
return os.environ[key].lower() in TRUE
except KeyError:
return default | python | def bool_env(key, default=False):
"""Parse an environment variable as a boolean switch
`True` is returned if the variable value matches one of the following:
- ``'1'``
- ``'y'``
- ``'yes'``
- ``'true'``
The match is case-insensitive (so ``'Yes'`` will match as `True`)
Parameters
----------
key : `str`
the name of the environment variable to find
default : `bool`
the default return value if the key is not found
Returns
-------
True
if the environment variable matches as 'yes' or similar
False
otherwise
Examples
--------
>>> import os
>>> from gwpy.utils.env import bool_env
>>> os.environ['GWPY_VALUE'] = 'yes'
>>> print(bool_env('GWPY_VALUE'))
True
>>> os.environ['GWPY_VALUE'] = 'something else'
>>> print(bool_env('GWPY_VALUE'))
False
>>> print(bool_env('GWPY_VALUE2'))
False
"""
try:
return os.environ[key].lower() in TRUE
except KeyError:
return default | [
"def",
"bool_env",
"(",
"key",
",",
"default",
"=",
"False",
")",
":",
"try",
":",
"return",
"os",
".",
"environ",
"[",
"key",
"]",
".",
"lower",
"(",
")",
"in",
"TRUE",
"except",
"KeyError",
":",
"return",
"default"
] | Parse an environment variable as a boolean switch
`True` is returned if the variable value matches one of the following:
- ``'1'``
- ``'y'``
- ``'yes'``
- ``'true'``
The match is case-insensitive (so ``'Yes'`` will match as `True`)
Parameters
----------
key : `str`
the name of the environment variable to find
default : `bool`
the default return value if the key is not found
Returns
-------
True
if the environment variable matches as 'yes' or similar
False
otherwise
Examples
--------
>>> import os
>>> from gwpy.utils.env import bool_env
>>> os.environ['GWPY_VALUE'] = 'yes'
>>> print(bool_env('GWPY_VALUE'))
True
>>> os.environ['GWPY_VALUE'] = 'something else'
>>> print(bool_env('GWPY_VALUE'))
False
>>> print(bool_env('GWPY_VALUE2'))
False | [
"Parse",
"an",
"environment",
"variable",
"as",
"a",
"boolean",
"switch"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/utils/env.py#L34-L77 | train | 211,171 |
gwpy/gwpy | gwpy/utils/shell.py | call | def call(cmd, stdout=PIPE, stderr=PIPE, on_error='raise', **kwargs):
"""Call out to the shell using `subprocess.Popen`
Parameters
----------
stdout : `file-like`, optional
stream for stdout
stderr : `file-like`, optional
stderr for stderr
on_error : `str`, optional
what to do when the command fails, one of
- 'ignore' - do nothing
- 'warn' - print a warning
- 'raise' - raise an exception
**kwargs
other keyword arguments to pass to `subprocess.Popen`
Returns
-------
out : `str`
the output stream of the command
err : `str`
the error stream from the command
Raises
------
OSError
if `cmd` is a `str` (or `shell=True` is passed) and the executable
is not found
subprocess.CalledProcessError
if the command fails otherwise
"""
if isinstance(cmd, (list, tuple)):
cmdstr = ' '.join(cmd)
kwargs.setdefault('shell', False)
else:
cmdstr = str(cmd)
kwargs.setdefault('shell', True)
proc = Popen(cmd, stdout=stdout, stderr=stderr, **kwargs)
out, err = proc.communicate()
if proc.returncode:
if on_error == 'ignore':
pass
elif on_error == 'warn':
e = CalledProcessError(proc.returncode, cmdstr)
warnings.warn(str(e))
else:
raise CalledProcessError(proc.returncode, cmdstr)
return out.decode('utf-8'), err.decode('utf-8') | python | def call(cmd, stdout=PIPE, stderr=PIPE, on_error='raise', **kwargs):
"""Call out to the shell using `subprocess.Popen`
Parameters
----------
stdout : `file-like`, optional
stream for stdout
stderr : `file-like`, optional
stderr for stderr
on_error : `str`, optional
what to do when the command fails, one of
- 'ignore' - do nothing
- 'warn' - print a warning
- 'raise' - raise an exception
**kwargs
other keyword arguments to pass to `subprocess.Popen`
Returns
-------
out : `str`
the output stream of the command
err : `str`
the error stream from the command
Raises
------
OSError
if `cmd` is a `str` (or `shell=True` is passed) and the executable
is not found
subprocess.CalledProcessError
if the command fails otherwise
"""
if isinstance(cmd, (list, tuple)):
cmdstr = ' '.join(cmd)
kwargs.setdefault('shell', False)
else:
cmdstr = str(cmd)
kwargs.setdefault('shell', True)
proc = Popen(cmd, stdout=stdout, stderr=stderr, **kwargs)
out, err = proc.communicate()
if proc.returncode:
if on_error == 'ignore':
pass
elif on_error == 'warn':
e = CalledProcessError(proc.returncode, cmdstr)
warnings.warn(str(e))
else:
raise CalledProcessError(proc.returncode, cmdstr)
return out.decode('utf-8'), err.decode('utf-8') | [
"def",
"call",
"(",
"cmd",
",",
"stdout",
"=",
"PIPE",
",",
"stderr",
"=",
"PIPE",
",",
"on_error",
"=",
"'raise'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"isinstance",
"(",
"cmd",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"cmdstr",
"=",
... | Call out to the shell using `subprocess.Popen`
Parameters
----------
stdout : `file-like`, optional
stream for stdout
stderr : `file-like`, optional
stderr for stderr
on_error : `str`, optional
what to do when the command fails, one of
- 'ignore' - do nothing
- 'warn' - print a warning
- 'raise' - raise an exception
**kwargs
other keyword arguments to pass to `subprocess.Popen`
Returns
-------
out : `str`
the output stream of the command
err : `str`
the error stream from the command
Raises
------
OSError
if `cmd` is a `str` (or `shell=True` is passed) and the executable
is not found
subprocess.CalledProcessError
if the command fails otherwise | [
"Call",
"out",
"to",
"the",
"shell",
"using",
"subprocess",
".",
"Popen"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/utils/shell.py#L53-L105 | train | 211,172 |
gwpy/gwpy | gwpy/table/io/utils.py | read_with_columns | def read_with_columns(func):
"""Decorate a Table read method to use the ``columns`` keyword
"""
def wrapper(*args, **kwargs):
# parse columns argument
columns = kwargs.pop("columns", None)
# read table
tab = func(*args, **kwargs)
# filter on columns
if columns is None:
return tab
return tab[columns]
return _safe_wraps(wrapper, func) | python | def read_with_columns(func):
"""Decorate a Table read method to use the ``columns`` keyword
"""
def wrapper(*args, **kwargs):
# parse columns argument
columns = kwargs.pop("columns", None)
# read table
tab = func(*args, **kwargs)
# filter on columns
if columns is None:
return tab
return tab[columns]
return _safe_wraps(wrapper, func) | [
"def",
"read_with_columns",
"(",
"func",
")",
":",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# parse columns argument",
"columns",
"=",
"kwargs",
".",
"pop",
"(",
"\"columns\"",
",",
"None",
")",
"# read table",
"tab",
"=",
... | Decorate a Table read method to use the ``columns`` keyword | [
"Decorate",
"a",
"Table",
"read",
"method",
"to",
"use",
"the",
"columns",
"keyword"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/utils.py#L39-L54 | train | 211,173 |
gwpy/gwpy | gwpy/table/io/utils.py | read_with_selection | def read_with_selection(func):
"""Decorate a Table read method to apply ``selection`` keyword
"""
def wrapper(*args, **kwargs):
"""Execute a function, then apply a selection filter
"""
# parse selection
selection = kwargs.pop('selection', None) or []
# read table
tab = func(*args, **kwargs)
# apply selection
if selection:
return filter_table(tab, selection)
return tab
return _safe_wraps(wrapper, func) | python | def read_with_selection(func):
"""Decorate a Table read method to apply ``selection`` keyword
"""
def wrapper(*args, **kwargs):
"""Execute a function, then apply a selection filter
"""
# parse selection
selection = kwargs.pop('selection', None) or []
# read table
tab = func(*args, **kwargs)
# apply selection
if selection:
return filter_table(tab, selection)
return tab
return _safe_wraps(wrapper, func) | [
"def",
"read_with_selection",
"(",
"func",
")",
":",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Execute a function, then apply a selection filter\n \"\"\"",
"# parse selection",
"selection",
"=",
"kwargs",
".",
"pop",
"(",
"... | Decorate a Table read method to apply ``selection`` keyword | [
"Decorate",
"a",
"Table",
"read",
"method",
"to",
"apply",
"selection",
"keyword"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/utils.py#L57-L75 | train | 211,174 |
gwpy/gwpy | gwpy/table/io/utils.py | decorate_registered_reader | def decorate_registered_reader(
name,
data_class=EventTable,
columns=True,
selection=True,
):
"""Wrap an existing registered reader to use GWpy's input decorators
Parameters
----------
name : `str`
the name of the registered format
data_class : `type`, optional
the class for whom the format is registered
columns : `bool`, optional
use the `read_with_columns` decorator
selection : `bool`, optional
use the `read_with_selection` decorator
"""
reader = registry.get_reader(name, data_class)
wrapped = ( # noqa
read_with_columns( # use ``columns``
read_with_selection( # use ``selection``
reader
))
)
return registry.register_reader(name, data_class, wrapped, force=True) | python | def decorate_registered_reader(
name,
data_class=EventTable,
columns=True,
selection=True,
):
"""Wrap an existing registered reader to use GWpy's input decorators
Parameters
----------
name : `str`
the name of the registered format
data_class : `type`, optional
the class for whom the format is registered
columns : `bool`, optional
use the `read_with_columns` decorator
selection : `bool`, optional
use the `read_with_selection` decorator
"""
reader = registry.get_reader(name, data_class)
wrapped = ( # noqa
read_with_columns( # use ``columns``
read_with_selection( # use ``selection``
reader
))
)
return registry.register_reader(name, data_class, wrapped, force=True) | [
"def",
"decorate_registered_reader",
"(",
"name",
",",
"data_class",
"=",
"EventTable",
",",
"columns",
"=",
"True",
",",
"selection",
"=",
"True",
",",
")",
":",
"reader",
"=",
"registry",
".",
"get_reader",
"(",
"name",
",",
"data_class",
")",
"wrapped",
... | Wrap an existing registered reader to use GWpy's input decorators
Parameters
----------
name : `str`
the name of the registered format
data_class : `type`, optional
the class for whom the format is registered
columns : `bool`, optional
use the `read_with_columns` decorator
selection : `bool`, optional
use the `read_with_selection` decorator | [
"Wrap",
"an",
"existing",
"registered",
"reader",
"to",
"use",
"GWpy",
"s",
"input",
"decorators"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/utils.py#L83-L112 | train | 211,175 |
gwpy/gwpy | gwpy/table/io/root.py | table_from_root | def table_from_root(source, treename=None, columns=None, **kwargs):
"""Read a Table from a ROOT tree
"""
import root_numpy
# parse column filters into tree2array ``selection`` keyword
# NOTE: not all filters can be passed directly to root_numpy, so we store
# those separately and apply them after-the-fact before returning
try:
selection = kwargs.pop('selection')
except KeyError: # no filters
filters = None
else:
rootfilters = []
filters = []
for col, op_, value in parse_column_filters(selection):
try:
opstr = [key for key in OPERATORS if OPERATORS[key] is op_][0]
except (IndexError, KeyError): # cannot filter with root_numpy
filters.append((col, op_, value))
else: # can filter with root_numpy
rootfilters.append('{0} {1} {2!r}'.format(col, opstr, value))
kwargs['selection'] = ' && '.join(rootfilters)
# pass file name (not path)
if not isinstance(source, string_types):
source = source.name
# find single tree (if only one tree present)
if treename is None:
trees = root_numpy.list_trees(source)
if len(trees) == 1:
treename = trees[0]
elif not trees:
raise ValueError("No trees found in %s" % source)
else:
raise ValueError("Multiple trees found in %s, please select on "
"via the `treename` keyword argument, e.g. "
"`treename='events'`. Available trees are: %s."
% (source, ', '.join(map(repr, trees))))
# read, filter, and return
t = Table(root_numpy.root2array(
source,
treename,
branches=columns,
**kwargs
))
if filters:
return filter_table(t, *filters)
return t | python | def table_from_root(source, treename=None, columns=None, **kwargs):
"""Read a Table from a ROOT tree
"""
import root_numpy
# parse column filters into tree2array ``selection`` keyword
# NOTE: not all filters can be passed directly to root_numpy, so we store
# those separately and apply them after-the-fact before returning
try:
selection = kwargs.pop('selection')
except KeyError: # no filters
filters = None
else:
rootfilters = []
filters = []
for col, op_, value in parse_column_filters(selection):
try:
opstr = [key for key in OPERATORS if OPERATORS[key] is op_][0]
except (IndexError, KeyError): # cannot filter with root_numpy
filters.append((col, op_, value))
else: # can filter with root_numpy
rootfilters.append('{0} {1} {2!r}'.format(col, opstr, value))
kwargs['selection'] = ' && '.join(rootfilters)
# pass file name (not path)
if not isinstance(source, string_types):
source = source.name
# find single tree (if only one tree present)
if treename is None:
trees = root_numpy.list_trees(source)
if len(trees) == 1:
treename = trees[0]
elif not trees:
raise ValueError("No trees found in %s" % source)
else:
raise ValueError("Multiple trees found in %s, please select on "
"via the `treename` keyword argument, e.g. "
"`treename='events'`. Available trees are: %s."
% (source, ', '.join(map(repr, trees))))
# read, filter, and return
t = Table(root_numpy.root2array(
source,
treename,
branches=columns,
**kwargs
))
if filters:
return filter_table(t, *filters)
return t | [
"def",
"table_from_root",
"(",
"source",
",",
"treename",
"=",
"None",
",",
"columns",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"root_numpy",
"# parse column filters into tree2array ``selection`` keyword",
"# NOTE: not all filters can be passed directly to ... | Read a Table from a ROOT tree | [
"Read",
"a",
"Table",
"from",
"a",
"ROOT",
"tree"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/root.py#L32-L82 | train | 211,176 |
gwpy/gwpy | gwpy/table/io/root.py | table_to_root | def table_to_root(table, filename, **kwargs):
"""Write a Table to a ROOT file
"""
import root_numpy
root_numpy.array2root(table.as_array(), filename, **kwargs) | python | def table_to_root(table, filename, **kwargs):
"""Write a Table to a ROOT file
"""
import root_numpy
root_numpy.array2root(table.as_array(), filename, **kwargs) | [
"def",
"table_to_root",
"(",
"table",
",",
"filename",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"root_numpy",
"root_numpy",
".",
"array2root",
"(",
"table",
".",
"as_array",
"(",
")",
",",
"filename",
",",
"*",
"*",
"kwargs",
")"
] | Write a Table to a ROOT file | [
"Write",
"a",
"Table",
"to",
"a",
"ROOT",
"file"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/table/io/root.py#L85-L89 | train | 211,177 |
gwpy/gwpy | gwpy/plot/gps.py | _gps_scale_factory | def _gps_scale_factory(unit):
"""Construct a GPSScale for this unit
"""
class FixedGPSScale(GPSScale):
"""`GPSScale` for a specific GPS time unit
"""
name = str('{0}s'.format(unit.long_names[0] if unit.long_names else
unit.names[0]))
def __init__(self, axis, epoch=None):
"""
"""
super(FixedGPSScale, self).__init__(axis, epoch=epoch, unit=unit)
return FixedGPSScale | python | def _gps_scale_factory(unit):
"""Construct a GPSScale for this unit
"""
class FixedGPSScale(GPSScale):
"""`GPSScale` for a specific GPS time unit
"""
name = str('{0}s'.format(unit.long_names[0] if unit.long_names else
unit.names[0]))
def __init__(self, axis, epoch=None):
"""
"""
super(FixedGPSScale, self).__init__(axis, epoch=epoch, unit=unit)
return FixedGPSScale | [
"def",
"_gps_scale_factory",
"(",
"unit",
")",
":",
"class",
"FixedGPSScale",
"(",
"GPSScale",
")",
":",
"\"\"\"`GPSScale` for a specific GPS time unit\n \"\"\"",
"name",
"=",
"str",
"(",
"'{0}s'",
".",
"format",
"(",
"unit",
".",
"long_names",
"[",
"0",
"]... | Construct a GPSScale for this unit | [
"Construct",
"a",
"GPSScale",
"for",
"this",
"unit"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/gps.py#L470-L483 | train | 211,178 |
gwpy/gwpy | gwpy/plot/gps.py | GPSMixin.set_epoch | def set_epoch(self, epoch):
"""Set the GPS epoch
"""
if epoch is None:
self._epoch = None
return
if isinstance(epoch, (Number, Decimal)):
self._epoch = float(epoch)
else:
self._epoch = float(to_gps(epoch)) | python | def set_epoch(self, epoch):
"""Set the GPS epoch
"""
if epoch is None:
self._epoch = None
return
if isinstance(epoch, (Number, Decimal)):
self._epoch = float(epoch)
else:
self._epoch = float(to_gps(epoch)) | [
"def",
"set_epoch",
"(",
"self",
",",
"epoch",
")",
":",
"if",
"epoch",
"is",
"None",
":",
"self",
".",
"_epoch",
"=",
"None",
"return",
"if",
"isinstance",
"(",
"epoch",
",",
"(",
"Number",
",",
"Decimal",
")",
")",
":",
"self",
".",
"_epoch",
"="... | Set the GPS epoch | [
"Set",
"the",
"GPS",
"epoch"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/gps.py#L99-L108 | train | 211,179 |
gwpy/gwpy | gwpy/plot/gps.py | GPSMixin.set_unit | def set_unit(self, unit):
"""Set the GPS step scale
"""
# accept all core time units
if unit is None or (isinstance(unit, units.NamedUnit) and
unit.physical_type == 'time'):
self._unit = unit
return
# convert float to custom unit in seconds
if isinstance(unit, Number):
unit = units.Unit(unit * units.second)
# otherwise, should be able to convert to a time unit
try:
unit = units.Unit(unit)
except ValueError as exc:
# catch annoying plurals
try:
unit = units.Unit(str(unit).rstrip('s'))
except ValueError:
raise exc
# decompose and check that it's actually a time unit
dec = unit.decompose()
if dec.bases != [units.second]:
raise ValueError("Cannot set GPS unit to %s" % unit)
# check equivalent units
for other in TIME_UNITS:
if other.decompose().scale == dec.scale:
self._unit = other
return
raise ValueError("Unrecognised unit: %s" % unit) | python | def set_unit(self, unit):
"""Set the GPS step scale
"""
# accept all core time units
if unit is None or (isinstance(unit, units.NamedUnit) and
unit.physical_type == 'time'):
self._unit = unit
return
# convert float to custom unit in seconds
if isinstance(unit, Number):
unit = units.Unit(unit * units.second)
# otherwise, should be able to convert to a time unit
try:
unit = units.Unit(unit)
except ValueError as exc:
# catch annoying plurals
try:
unit = units.Unit(str(unit).rstrip('s'))
except ValueError:
raise exc
# decompose and check that it's actually a time unit
dec = unit.decompose()
if dec.bases != [units.second]:
raise ValueError("Cannot set GPS unit to %s" % unit)
# check equivalent units
for other in TIME_UNITS:
if other.decompose().scale == dec.scale:
self._unit = other
return
raise ValueError("Unrecognised unit: %s" % unit) | [
"def",
"set_unit",
"(",
"self",
",",
"unit",
")",
":",
"# accept all core time units",
"if",
"unit",
"is",
"None",
"or",
"(",
"isinstance",
"(",
"unit",
",",
"units",
".",
"NamedUnit",
")",
"and",
"unit",
".",
"physical_type",
"==",
"'time'",
")",
":",
"... | Set the GPS step scale | [
"Set",
"the",
"GPS",
"step",
"scale"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/gps.py#L118-L147 | train | 211,180 |
gwpy/gwpy | gwpy/plot/gps.py | GPSMixin.get_unit_name | def get_unit_name(self):
"""Returns the name of the unit for this GPS scale
Note that this returns a simply-pluralised version of the name.
"""
if not self.unit:
return None
name = sorted(self.unit.names, key=len)[-1]
return '%ss' % name | python | def get_unit_name(self):
"""Returns the name of the unit for this GPS scale
Note that this returns a simply-pluralised version of the name.
"""
if not self.unit:
return None
name = sorted(self.unit.names, key=len)[-1]
return '%ss' % name | [
"def",
"get_unit_name",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"unit",
":",
"return",
"None",
"name",
"=",
"sorted",
"(",
"self",
".",
"unit",
".",
"names",
",",
"key",
"=",
"len",
")",
"[",
"-",
"1",
"]",
"return",
"'%ss'",
"%",
"name"... | Returns the name of the unit for this GPS scale
Note that this returns a simply-pluralised version of the name. | [
"Returns",
"the",
"name",
"of",
"the",
"unit",
"for",
"this",
"GPS",
"scale"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/gps.py#L152-L160 | train | 211,181 |
gwpy/gwpy | gwpy/plot/gps.py | GPSTransformBase.transform_non_affine | def transform_non_affine(self, values):
"""Transform an array of GPS times.
This method is designed to filter out transformations that will
generate text elements that require exact precision, and use
`Decimal` objects to do the transformation, and simple `float`
otherwise.
"""
scale = self.scale or 1
epoch = self.epoch or 0
values = numpy.asarray(values)
# handle simple or data transformations with floats
if self._parents or ( # part of composite transform (from draw())
epoch == 0 and # no large additions
scale == 1 # no multiplications
):
return self._transform(values, float(epoch), float(scale))
# otherwise do things carefully (and slowly) with Decimals
# -- ideally this only gets called for transforming tick positions
flat = values.flatten()
def _trans(x):
return self._transform_decimal(x, epoch, scale)
return numpy.asarray(list(map(_trans, flat))).reshape(values.shape) | python | def transform_non_affine(self, values):
"""Transform an array of GPS times.
This method is designed to filter out transformations that will
generate text elements that require exact precision, and use
`Decimal` objects to do the transformation, and simple `float`
otherwise.
"""
scale = self.scale or 1
epoch = self.epoch or 0
values = numpy.asarray(values)
# handle simple or data transformations with floats
if self._parents or ( # part of composite transform (from draw())
epoch == 0 and # no large additions
scale == 1 # no multiplications
):
return self._transform(values, float(epoch), float(scale))
# otherwise do things carefully (and slowly) with Decimals
# -- ideally this only gets called for transforming tick positions
flat = values.flatten()
def _trans(x):
return self._transform_decimal(x, epoch, scale)
return numpy.asarray(list(map(_trans, flat))).reshape(values.shape) | [
"def",
"transform_non_affine",
"(",
"self",
",",
"values",
")",
":",
"scale",
"=",
"self",
".",
"scale",
"or",
"1",
"epoch",
"=",
"self",
".",
"epoch",
"or",
"0",
"values",
"=",
"numpy",
".",
"asarray",
"(",
"values",
")",
"# handle simple or data transfor... | Transform an array of GPS times.
This method is designed to filter out transformations that will
generate text elements that require exact precision, and use
`Decimal` objects to do the transformation, and simple `float`
otherwise. | [
"Transform",
"an",
"array",
"of",
"GPS",
"times",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/plot/gps.py#L196-L223 | train | 211,182 |
gwpy/gwpy | gwpy/utils/decorators.py | deprecated_function | def deprecated_function(func, warning=DEPRECATED_FUNCTION_WARNING):
"""Adds a `DeprecationWarning` to a function
Parameters
----------
func : `callable`
the function to decorate with a `DeprecationWarning`
warning : `str`, optional
the warning to present
Notes
-----
The final warning message is formatted as ``warning.format(func)``
so you can use attribute references to the function itself.
See the default message as an example.
"""
@wraps(func)
def wrapped_func(*args, **kwargs):
warnings.warn(
DEPRECATED_FUNCTION_WARNING.format(func),
category=DeprecationWarning,
stacklevel=2,
)
return func(*args, **kwargs)
return wrapped_func | python | def deprecated_function(func, warning=DEPRECATED_FUNCTION_WARNING):
"""Adds a `DeprecationWarning` to a function
Parameters
----------
func : `callable`
the function to decorate with a `DeprecationWarning`
warning : `str`, optional
the warning to present
Notes
-----
The final warning message is formatted as ``warning.format(func)``
so you can use attribute references to the function itself.
See the default message as an example.
"""
@wraps(func)
def wrapped_func(*args, **kwargs):
warnings.warn(
DEPRECATED_FUNCTION_WARNING.format(func),
category=DeprecationWarning,
stacklevel=2,
)
return func(*args, **kwargs)
return wrapped_func | [
"def",
"deprecated_function",
"(",
"func",
",",
"warning",
"=",
"DEPRECATED_FUNCTION_WARNING",
")",
":",
"@",
"wraps",
"(",
"func",
")",
"def",
"wrapped_func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"warnings",
".",
"warn",
"(",
"DEPRECATED_F... | Adds a `DeprecationWarning` to a function
Parameters
----------
func : `callable`
the function to decorate with a `DeprecationWarning`
warning : `str`, optional
the warning to present
Notes
-----
The final warning message is formatted as ``warning.format(func)``
so you can use attribute references to the function itself.
See the default message as an example. | [
"Adds",
"a",
"DeprecationWarning",
"to",
"a",
"function"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/utils/decorators.py#L64-L90 | train | 211,183 |
gwpy/gwpy | gwpy/utils/decorators.py | return_as | def return_as(returntype):
"""Decorator to cast return of function as the given type
Parameters
----------
returntype : `type`
the desired return type of the decorated function
"""
def decorator(func):
# @wraps(func) <- we can't use this as normal because it doesn't work
# on python < 3 for instance methods,
# see workaround below
def wrapped(*args, **kwargs):
result = func(*args, **kwargs)
try:
return returntype(result)
except (TypeError, ValueError) as exc:
exc.args = (
'failed to cast return from {0} as {1}: {2}'.format(
func.__name__, returntype.__name__, str(exc)),
)
raise
try:
return wraps(func)(wrapped)
except AttributeError: # python < 3.0.0
wrapped.__doc__ == func.__doc__
return wrapped
return decorator | python | def return_as(returntype):
"""Decorator to cast return of function as the given type
Parameters
----------
returntype : `type`
the desired return type of the decorated function
"""
def decorator(func):
# @wraps(func) <- we can't use this as normal because it doesn't work
# on python < 3 for instance methods,
# see workaround below
def wrapped(*args, **kwargs):
result = func(*args, **kwargs)
try:
return returntype(result)
except (TypeError, ValueError) as exc:
exc.args = (
'failed to cast return from {0} as {1}: {2}'.format(
func.__name__, returntype.__name__, str(exc)),
)
raise
try:
return wraps(func)(wrapped)
except AttributeError: # python < 3.0.0
wrapped.__doc__ == func.__doc__
return wrapped
return decorator | [
"def",
"return_as",
"(",
"returntype",
")",
":",
"def",
"decorator",
"(",
"func",
")",
":",
"# @wraps(func) <- we can't use this as normal because it doesn't work",
"# on python < 3 for instance methods,",
"# see workaround below",
"def",
"wrapped",
... | Decorator to cast return of function as the given type
Parameters
----------
returntype : `type`
the desired return type of the decorated function | [
"Decorator",
"to",
"cast",
"return",
"of",
"function",
"as",
"the",
"given",
"type"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/utils/decorators.py#L93-L121 | train | 211,184 |
gwpy/gwpy | gwpy/utils/sphinx/zenodo.py | format_citations | def format_citations(zid, url='https://zenodo.org/', hits=10, tag_prefix='v'):
"""Query and format a citations page from Zenodo entries
Parameters
----------
zid : `int`, `str`
the Zenodo ID of the target record
url : `str`, optional
the base URL of the Zenodo host, defaults to ``https://zenodo.org``
hist : `int`, optional
the maximum number of hits to show, default: ``10``
tag_prefix : `str`, optional
the prefix for git tags. This is removed to generate the section
headers in the output RST
Returns
-------
rst : `str`
an RST-formatted string of DOI badges with URLs
"""
# query for metadata
url = ('{url}/api/records/?'
'page=1&'
'size={hits}&'
'q=conceptrecid:"{id}"&'
'sort=-version&'
'all_versions=True'.format(id=zid, url=url, hits=hits))
metadata = requests.get(url).json()
lines = []
for i, hit in enumerate(metadata['hits']['hits']):
version = hit['metadata']['version'][len(tag_prefix):]
lines.append('-' * len(version))
lines.append(version)
lines.append('-' * len(version))
lines.append('')
lines.append('.. image:: {badge}\n'
' :target: {doi}'.format(**hit['links']))
if i < hits - 1:
lines.append('')
return '\n'.join(lines) | python | def format_citations(zid, url='https://zenodo.org/', hits=10, tag_prefix='v'):
"""Query and format a citations page from Zenodo entries
Parameters
----------
zid : `int`, `str`
the Zenodo ID of the target record
url : `str`, optional
the base URL of the Zenodo host, defaults to ``https://zenodo.org``
hist : `int`, optional
the maximum number of hits to show, default: ``10``
tag_prefix : `str`, optional
the prefix for git tags. This is removed to generate the section
headers in the output RST
Returns
-------
rst : `str`
an RST-formatted string of DOI badges with URLs
"""
# query for metadata
url = ('{url}/api/records/?'
'page=1&'
'size={hits}&'
'q=conceptrecid:"{id}"&'
'sort=-version&'
'all_versions=True'.format(id=zid, url=url, hits=hits))
metadata = requests.get(url).json()
lines = []
for i, hit in enumerate(metadata['hits']['hits']):
version = hit['metadata']['version'][len(tag_prefix):]
lines.append('-' * len(version))
lines.append(version)
lines.append('-' * len(version))
lines.append('')
lines.append('.. image:: {badge}\n'
' :target: {doi}'.format(**hit['links']))
if i < hits - 1:
lines.append('')
return '\n'.join(lines) | [
"def",
"format_citations",
"(",
"zid",
",",
"url",
"=",
"'https://zenodo.org/'",
",",
"hits",
"=",
"10",
",",
"tag_prefix",
"=",
"'v'",
")",
":",
"# query for metadata",
"url",
"=",
"(",
"'{url}/api/records/?'",
"'page=1&'",
"'size={hits}&'",
"'q=conceptrecid:\"{id}... | Query and format a citations page from Zenodo entries
Parameters
----------
zid : `int`, `str`
the Zenodo ID of the target record
url : `str`, optional
the base URL of the Zenodo host, defaults to ``https://zenodo.org``
hist : `int`, optional
the maximum number of hits to show, default: ``10``
tag_prefix : `str`, optional
the prefix for git tags. This is removed to generate the section
headers in the output RST
Returns
-------
rst : `str`
an RST-formatted string of DOI badges with URLs | [
"Query",
"and",
"format",
"a",
"citations",
"page",
"from",
"Zenodo",
"entries"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/utils/sphinx/zenodo.py#L46-L90 | train | 211,185 |
gwpy/gwpy | gwpy/timeseries/io/gwf/framecpp.py | read | def read(source, channels, start=None, end=None, scaled=None, type=None,
series_class=TimeSeries):
# pylint: disable=redefined-builtin
"""Read a dict of series from one or more GWF files
Parameters
----------
source : `str`, `list`
Source of data, any of the following:
- `str` path of single data file,
- `str` path of cache file,
- `list` of paths.
channels : `~gwpy.detector.ChannelList`, `list`
a list of channels to read from the source.
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str` optional
GPS start time of required data, anything parseable by
:func:`~gwpy.time.to_gps` is fine.
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS end time of required data, anything parseable by
:func:`~gwpy.time.to_gps` is fine.
scaled : `bool`, optional
apply slope and bias calibration to ADC data.
type : `dict`, optional
a `dict` of ``(name, channel-type)`` pairs, where ``channel-type``
can be one of ``'adc'``, ``'proc'``, or ``'sim'``.
series_class : `type`, optional
the `Series` sub-type to return.
Returns
-------
data : `~gwpy.timeseries.TimeSeriesDict` or similar
a dict of ``(channel, series)`` pairs read from the GWF source(s).
"""
# parse input source
source = file_list(source)
# parse type
ctype = channel_dict_kwarg(type, channels, (str,))
# read each individually and append
out = series_class.DictClass()
for i, file_ in enumerate(source):
if i == 1: # force data into fresh memory so that append works
for name in out:
out[name] = numpy.require(out[name], requirements=['O'])
# read frame
out.append(read_gwf(file_, channels, start=start, end=end, ctype=ctype,
scaled=scaled, series_class=series_class),
copy=False)
return out | python | def read(source, channels, start=None, end=None, scaled=None, type=None,
series_class=TimeSeries):
# pylint: disable=redefined-builtin
"""Read a dict of series from one or more GWF files
Parameters
----------
source : `str`, `list`
Source of data, any of the following:
- `str` path of single data file,
- `str` path of cache file,
- `list` of paths.
channels : `~gwpy.detector.ChannelList`, `list`
a list of channels to read from the source.
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str` optional
GPS start time of required data, anything parseable by
:func:`~gwpy.time.to_gps` is fine.
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS end time of required data, anything parseable by
:func:`~gwpy.time.to_gps` is fine.
scaled : `bool`, optional
apply slope and bias calibration to ADC data.
type : `dict`, optional
a `dict` of ``(name, channel-type)`` pairs, where ``channel-type``
can be one of ``'adc'``, ``'proc'``, or ``'sim'``.
series_class : `type`, optional
the `Series` sub-type to return.
Returns
-------
data : `~gwpy.timeseries.TimeSeriesDict` or similar
a dict of ``(channel, series)`` pairs read from the GWF source(s).
"""
# parse input source
source = file_list(source)
# parse type
ctype = channel_dict_kwarg(type, channels, (str,))
# read each individually and append
out = series_class.DictClass()
for i, file_ in enumerate(source):
if i == 1: # force data into fresh memory so that append works
for name in out:
out[name] = numpy.require(out[name], requirements=['O'])
# read frame
out.append(read_gwf(file_, channels, start=start, end=end, ctype=ctype,
scaled=scaled, series_class=series_class),
copy=False)
return out | [
"def",
"read",
"(",
"source",
",",
"channels",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"scaled",
"=",
"None",
",",
"type",
"=",
"None",
",",
"series_class",
"=",
"TimeSeries",
")",
":",
"# pylint: disable=redefined-builtin",
"# parse input s... | Read a dict of series from one or more GWF files
Parameters
----------
source : `str`, `list`
Source of data, any of the following:
- `str` path of single data file,
- `str` path of cache file,
- `list` of paths.
channels : `~gwpy.detector.ChannelList`, `list`
a list of channels to read from the source.
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str` optional
GPS start time of required data, anything parseable by
:func:`~gwpy.time.to_gps` is fine.
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS end time of required data, anything parseable by
:func:`~gwpy.time.to_gps` is fine.
scaled : `bool`, optional
apply slope and bias calibration to ADC data.
type : `dict`, optional
a `dict` of ``(name, channel-type)`` pairs, where ``channel-type``
can be one of ``'adc'``, ``'proc'``, or ``'sim'``.
series_class : `type`, optional
the `Series` sub-type to return.
Returns
-------
data : `~gwpy.timeseries.TimeSeriesDict` or similar
a dict of ``(channel, series)`` pairs read from the GWF source(s). | [
"Read",
"a",
"dict",
"of",
"series",
"from",
"one",
"or",
"more",
"GWF",
"files"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/gwf/framecpp.py#L83-L139 | train | 211,186 |
gwpy/gwpy | gwpy/timeseries/io/gwf/framecpp.py | read_gwf | def read_gwf(filename, channels, start=None, end=None, scaled=None,
ctype=None, series_class=TimeSeries):
"""Read a dict of series data from a single GWF file
Parameters
----------
filename : `str`
the GWF path from which to read
channels : `~gwpy.detector.ChannelList`, `list`
a list of channels to read from the source.
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str` optional
GPS start time of required data, anything parseable by
:func:`~gwpy.time.to_gps` is fine.
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS end time of required data, anything parseable by
:func:`~gwpy.time.to_gps` is fine.
scaled : `bool`, optional
apply slope and bias calibration to ADC data.
type : `dict`, optional
a `dict` of ``(name, channel-type)`` pairs, where ``channel-type``
can be one of ``'adc'``, ``'proc'``, or ``'sim'``.
series_class : `type`, optional
the `Series` sub-type to return.
Returns
-------
data : `~gwpy.timeseries.TimeSeriesDict` or similar
a dict of ``(channel, series)`` pairs read from the GWF file.
"""
# parse kwargs
if not start:
start = 0
if not end:
end = 0
span = Segment(start, end)
# open file
stream = io_gwf.open_gwf(filename, 'r')
nframes = stream.GetNumberOfFrames()
# find channels
out = series_class.DictClass()
# loop over frames in GWF
i = 0
while True:
this = i
i += 1
# read frame
try:
frame = stream.ReadFrameNSubset(this, 0)
except IndexError:
if this >= nframes:
break
raise
# check whether we need this frame at all
if not _need_frame(frame, start, end):
continue
# get epoch for this frame
epoch = LIGOTimeGPS(*frame.GetGTime())
# and read all the channels
for channel in channels:
_scaled = _dynamic_scaled(scaled, channel)
try:
new = _read_channel(stream, this, str(channel),
ctype.get(channel, None),
epoch, start, end, scaled=_scaled,
series_class=series_class)
except _Skip: # don't need this frame for this channel
continue
try:
out[channel].append(new)
except KeyError:
out[channel] = numpy.require(new, requirements=['O'])
# if we have all of the data we want, stop now
if all(span in out[channel].span for channel in out):
break
# if any channels weren't read, something went wrong
for channel in channels:
if channel not in out:
msg = "Failed to read {0!r} from {1!r}".format(
str(channel), filename)
if start or end:
msg += ' for {0}'.format(span)
raise ValueError(msg)
return out | python | def read_gwf(filename, channels, start=None, end=None, scaled=None,
ctype=None, series_class=TimeSeries):
"""Read a dict of series data from a single GWF file
Parameters
----------
filename : `str`
the GWF path from which to read
channels : `~gwpy.detector.ChannelList`, `list`
a list of channels to read from the source.
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str` optional
GPS start time of required data, anything parseable by
:func:`~gwpy.time.to_gps` is fine.
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS end time of required data, anything parseable by
:func:`~gwpy.time.to_gps` is fine.
scaled : `bool`, optional
apply slope and bias calibration to ADC data.
type : `dict`, optional
a `dict` of ``(name, channel-type)`` pairs, where ``channel-type``
can be one of ``'adc'``, ``'proc'``, or ``'sim'``.
series_class : `type`, optional
the `Series` sub-type to return.
Returns
-------
data : `~gwpy.timeseries.TimeSeriesDict` or similar
a dict of ``(channel, series)`` pairs read from the GWF file.
"""
# parse kwargs
if not start:
start = 0
if not end:
end = 0
span = Segment(start, end)
# open file
stream = io_gwf.open_gwf(filename, 'r')
nframes = stream.GetNumberOfFrames()
# find channels
out = series_class.DictClass()
# loop over frames in GWF
i = 0
while True:
this = i
i += 1
# read frame
try:
frame = stream.ReadFrameNSubset(this, 0)
except IndexError:
if this >= nframes:
break
raise
# check whether we need this frame at all
if not _need_frame(frame, start, end):
continue
# get epoch for this frame
epoch = LIGOTimeGPS(*frame.GetGTime())
# and read all the channels
for channel in channels:
_scaled = _dynamic_scaled(scaled, channel)
try:
new = _read_channel(stream, this, str(channel),
ctype.get(channel, None),
epoch, start, end, scaled=_scaled,
series_class=series_class)
except _Skip: # don't need this frame for this channel
continue
try:
out[channel].append(new)
except KeyError:
out[channel] = numpy.require(new, requirements=['O'])
# if we have all of the data we want, stop now
if all(span in out[channel].span for channel in out):
break
# if any channels weren't read, something went wrong
for channel in channels:
if channel not in out:
msg = "Failed to read {0!r} from {1!r}".format(
str(channel), filename)
if start or end:
msg += ' for {0}'.format(span)
raise ValueError(msg)
return out | [
"def",
"read_gwf",
"(",
"filename",
",",
"channels",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"scaled",
"=",
"None",
",",
"ctype",
"=",
"None",
",",
"series_class",
"=",
"TimeSeries",
")",
":",
"# parse kwargs",
"if",
"not",
"start",
":... | Read a dict of series data from a single GWF file
Parameters
----------
filename : `str`
the GWF path from which to read
channels : `~gwpy.detector.ChannelList`, `list`
a list of channels to read from the source.
start : `~gwpy.time.LIGOTimeGPS`, `float`, `str` optional
GPS start time of required data, anything parseable by
:func:`~gwpy.time.to_gps` is fine.
end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
GPS end time of required data, anything parseable by
:func:`~gwpy.time.to_gps` is fine.
scaled : `bool`, optional
apply slope and bias calibration to ADC data.
type : `dict`, optional
a `dict` of ``(name, channel-type)`` pairs, where ``channel-type``
can be one of ``'adc'``, ``'proc'``, or ``'sim'``.
series_class : `type`, optional
the `Series` sub-type to return.
Returns
-------
data : `~gwpy.timeseries.TimeSeriesDict` or similar
a dict of ``(channel, series)`` pairs read from the GWF file. | [
"Read",
"a",
"dict",
"of",
"series",
"data",
"from",
"a",
"single",
"GWF",
"file"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/gwf/framecpp.py#L142-L240 | train | 211,187 |
def _read_channel(stream, num, name, ctype, epoch, start, end,
                  scaled=True, series_class=TimeSeries):
    """Read a single channel from one frame of an open GWF stream.

    Parameters
    ----------
    stream : frameCPP stream object
        an open stream providing the ``ReadFr<Type>Data`` methods

    num : `int`
        the index of the frame within the stream

    name : `str`
        the name of the channel to read

    ctype : `str` or `None`
        the channel type (``'adc'``, ``'proc'``, ``'sim'``), or `None`
        to search all types

    epoch : `float`
        the GPS start time of the frame

    start : `float`
        the GPS start time of the user request

    end : `float`
        the GPS end time of the user request

    scaled : `bool`, optional
        apply slope and bias calibration to ADC data

    series_class : `type`, optional
        the `Series` sub-type to return

    Returns
    -------
    series : instance of ``series_class``
        the data read for this channel
    """
    frdata = _get_frdata(stream, num, name, ctype=ctype)
    return read_frdata(frdata, epoch, start, end, scaled=scaled,
                       series_class=series_class)
"def",
"_read_channel",
"(",
"stream",
",",
"num",
",",
"name",
",",
"ctype",
",",
"epoch",
",",
"start",
",",
"end",
",",
"scaled",
"=",
"True",
",",
"series_class",
"=",
"TimeSeries",
")",
":",
"data",
"=",
"_get_frdata",
"(",
"stream",
",",
"num",
... | Read a channel from a specific frame in a stream | [
"Read",
"a",
"channel",
"from",
"a",
"specific",
"frame",
"in",
"a",
"stream"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/gwf/framecpp.py#L243-L249 | train | 211,188 |
gwpy/gwpy | gwpy/timeseries/io/gwf/framecpp.py | _get_frdata | def _get_frdata(stream, num, name, ctype=None):
"""Brute force-ish method to return the FrData structure for a channel
This saves on pulling the channel type from the TOC
"""
ctypes = (ctype,) if ctype else ('adc', 'proc', 'sim')
for ctype in ctypes:
_reader = getattr(stream, 'ReadFr{0}Data'.format(ctype.title()))
try:
return _reader(num, name)
except IndexError as exc:
if FRERR_NO_CHANNEL_OF_TYPE.match(str(exc)):
continue
raise
raise ValueError("no Fr{{Adc,Proc,Sim}}Data structures with the "
"name {0}".format(name)) | python | def _get_frdata(stream, num, name, ctype=None):
"""Brute force-ish method to return the FrData structure for a channel
This saves on pulling the channel type from the TOC
"""
ctypes = (ctype,) if ctype else ('adc', 'proc', 'sim')
for ctype in ctypes:
_reader = getattr(stream, 'ReadFr{0}Data'.format(ctype.title()))
try:
return _reader(num, name)
except IndexError as exc:
if FRERR_NO_CHANNEL_OF_TYPE.match(str(exc)):
continue
raise
raise ValueError("no Fr{{Adc,Proc,Sim}}Data structures with the "
"name {0}".format(name)) | [
"def",
"_get_frdata",
"(",
"stream",
",",
"num",
",",
"name",
",",
"ctype",
"=",
"None",
")",
":",
"ctypes",
"=",
"(",
"ctype",
",",
")",
"if",
"ctype",
"else",
"(",
"'adc'",
",",
"'proc'",
",",
"'sim'",
")",
"for",
"ctype",
"in",
"ctypes",
":",
... | Brute force-ish method to return the FrData structure for a channel
This saves on pulling the channel type from the TOC | [
"Brute",
"force",
"-",
"ish",
"method",
"to",
"return",
"the",
"FrData",
"structure",
"for",
"a",
"channel"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/gwf/framecpp.py#L252-L267 | train | 211,189 |
def read_frdata(frdata, epoch, start, end, scaled=True,
                series_class=TimeSeries):
    """Read a complete series from an ``FrData`` structure.

    All of the ``FrVect`` structures attached to ``frdata`` are read
    and appended together into a single series.

    Parameters
    ----------
    frdata : `LDAStools.frameCPP.FrAdcData` or similar
        the data structure to read

    epoch : `float`
        the GPS start time of the containing frame
        (`LDAStools.frameCPP.FrameH.GTime`)

    start : `float`
        the GPS start time of the user request

    end : `float`
        the GPS end time of the user request

    scaled : `bool`, optional
        apply slope and bias calibration to ADC data.

    series_class : `type`, optional
        the `Series` sub-type to return.

    Returns
    -------
    series : `~gwpy.timeseries.TimeSeriesBase`
        the formatted data series

    Raises
    ------
    _Skip
        if this data structure doesn't overlap with the requested
        ``[start, end)`` interval.
    """
    datastart = epoch + frdata.GetTimeOffset()
    try:
        trange = frdata.GetTRange()
    except AttributeError:  # only proc channels carry a TRange
        trange = 0.
    # bail out early if this structure cannot overlap the request
    if (end and datastart >= end) or (trange and datastart + trange < start):
        raise _Skip()
    # pull out the ADC calibration, if there is one
    try:
        slope = frdata.GetSlope()
        bias = frdata.GetBias()
    except AttributeError:  # not an FrAdcData
        slope = bias = None
        trivial = True
    else:
        trivial = slope == 1. and bias == 0.
    out = None
    # index explicitly rather than iterating frdata.data directly,
    # which can segfault in some frameCPP versions
    for idx in range(frdata.data.size()):
        try:
            piece = read_frvect(frdata.data[idx], datastart, start, end,
                                name=frdata.GetName(),
                                series_class=series_class)
        except _Skip:  # this vect isn't needed for the request
            continue
        # apply the ADC scaling only when it actually changes the data;
        # this prevents unnecessary type-casting errors
        if scaled and not trivial:
            piece *= slope
            piece += bias
        elif slope is not None:
            # user has deliberately disabled the ADC calibration, so
            # the stored engineering unit is not valid, revert to 'counts'
            piece.override_unit('count')
        if out is None:
            out = piece
        else:
            out.append(piece)
    return out
"def",
"read_frdata",
"(",
"frdata",
",",
"epoch",
",",
"start",
",",
"end",
",",
"scaled",
"=",
"True",
",",
"series_class",
"=",
"TimeSeries",
")",
":",
"datastart",
"=",
"epoch",
"+",
"frdata",
".",
"GetTimeOffset",
"(",
")",
"try",
":",
"trange",
"... | Read a series from an `FrData` structure
Parameters
----------
frdata : `LDAStools.frameCPP.FrAdcData` or similar
the data structure to read
epoch : `float`
the GPS start time of the containing frame
(`LDAStools.frameCPP.FrameH.GTime`)
start : `float`
the GPS start time of the user request
end : `float`
the GPS end time of the user request
scaled : `bool`, optional
apply slope and bias calibration to ADC data.
series_class : `type`, optional
the `Series` sub-type to return.
Returns
-------
series : `~gwpy.timeseries.TimeSeriesBase`
the formatted data series
Raises
------
_Skip
if this data structure doesn't overlap with the requested
``[start, end)`` interval. | [
"Read",
"a",
"series",
"from",
"an",
"FrData",
"structure"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/gwf/framecpp.py#L282-L364 | train | 211,190 |
def read_frvect(vect, epoch, start, end, name=None, series_class=TimeSeries):
    """Read an array from an ``FrVect`` structure.

    Parameters
    ----------
    vect : `LDASTools.frameCPP.FrVect`
        the frame vector structure to read

    epoch : `float`
        the GPS start time of the containing ``FrData`` structure

    start : `float`
        the GPS start time of the request

    end : `float`
        the GPS end time of the request

    name : `str`, optional
        the name of the output ``series_class``; this is also used
        to ignore ``FrVect`` structures containing other information

    series_class : `type`, optional
        the `Series` sub-type to return.

    Returns
    -------
    series : `~gwpy.timeseries.TimeSeriesBase`
        the formatted data series

    Raises
    ------
    _Skip
        if this vect doesn't overlap with the requested
        ``[start, end)`` interval, or the name doesn't match.
    """
    # the frame spec allows arbitrary extra FrVects to hold other
    # information, so only read the one whose name matches (or is unset)
    vname = vect.GetName()
    if vname and name and vname != name:
        raise _Skip()
    # extract the data array and the (single) timing dimension
    samples = vect.GetDataArray()
    nsamp = samples.size
    dim = vect.GetDim(0)
    dx = dim.dx
    # GPS start and end times covered by this FrVect
    vectstart = epoch + dim.startX
    vectend = vectstart + nsamp * dx
    # index of the first sample we need
    first = int(max(0., float(start - vectstart)) / dx)
    if first >= nsamp:  # the request starts after this vect ends
        raise _Skip()
    # index just past the last sample we need
    if end:
        last = int(nsamp - ceil(max(0., float(vectend - end)) / dx))
    else:
        last = None
    if first or last:
        samples = samples[first:last]
    # -- cast as a series
    unit = vect.GetUnitY() or None
    series = series_class(samples, t0=vectstart + first * dx, dt=dx,
                          name=name, channel=name, unit=unit, copy=False)
    # propagate metadata onto the channel object
    series.channel.sample_rate = series.sample_rate.value
    series.channel.unit = unit
    series.channel.dtype = series.dtype
    return series
"def",
"read_frvect",
"(",
"vect",
",",
"epoch",
",",
"start",
",",
"end",
",",
"name",
"=",
"None",
",",
"series_class",
"=",
"TimeSeries",
")",
":",
"# only read FrVect with matching name (or no name set)",
"# frame spec allows for arbitrary other FrVects",
"# to ... | Read an array from an `FrVect` structure
Parameters
----------
vect : `LDASTools.frameCPP.FrVect`
the frame vector structur to read
start : `float`
the GPS start time of the request
end : `float`
the GPS end time of the request
epoch : `float`
the GPS start time of the containing `FrData` structure
name : `str`, optional
the name of the output `series_class`; this is also used
to ignore ``FrVect`` structures containing other information
series_class : `type`, optional
the `Series` sub-type to return.
Returns
-------
series : `~gwpy.timeseries.TimeSeriesBase`
the formatted data series
Raises
------
_Skip
if this vect doesn't overlap with the requested
``[start, end)`` interval, or the name doesn't match. | [
"Read",
"an",
"array",
"from",
"an",
"FrVect",
"structure"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/gwf/framecpp.py#L367-L451 | train | 211,191 |
def write(tsdict, outfile, start=None, end=None, name='gwpy', run=0,
          compression=257, compression_level=6):
    """Write a dict of series data to a GWF file using the frameCPP API.

    All series in ``tsdict`` are written into a single frame, so they
    must share the same start and end times (unless explicit ``start``
    and ``end`` are given, to which each series is cropped).

    Parameters
    ----------
    tsdict : `dict` of `TimeSeries`
        the data to write

    outfile : `str`
        the path of the output GWF file

    start, end : `float`, optional
        the GPS limits of the frame; default is taken from the data

    name : `str`, optional
        the name of the frame

    run : `int`, optional
        the run number for the frame header

    compression : `int`, optional
        the frameCPP compression scheme identifier

    compression_level : `int`, optional
        the compression level

    Raises
    ------
    RuntimeError
        if the series don't share a common start (or end) time and no
        explicit value was given
    """
    # resolve the frame epoch from the data if not given
    if not start:
        t0set = {LIGOTimeGPS(tsdict[key].x0.value) for key in tsdict}
        if len(t0set) != 1:
            raise RuntimeError("Cannot write multiple TimeSeries to a single "
                               "frame with different start times, "
                               "please write into different frames")
        start = t0set.pop()
    if not end:
        t1set = {tsdict[key].span[1] for key in tsdict}
        if len(t1set) != 1:
            raise RuntimeError("Cannot write multiple TimeSeries to a single "
                               "frame with different end times, "
                               "please write into different frames")
        end = t1set.pop()
    duration = end - start
    start = LIGOTimeGPS(start)
    # record only those IFOs for which frameCPP knows a detector location
    ifos = set()
    for series in tsdict.values():
        chan = series.channel
        if (chan and chan.ifo and
                hasattr(frameCPP, 'DETECTOR_LOCATION_{0}'.format(chan.ifo))):
            ifos.add(chan.ifo)
    # create the frame
    frame = io_gwf.create_frame(time=start, duration=duration, name=name,
                                run=run, ifos=ifos)
    # append each channel, preserving its channel type if recorded
    for num, key in enumerate(tsdict):
        try:
            # pylint: disable=protected-access
            ctype = tsdict[key].channel._ctype or 'proc'
        except AttributeError:
            ctype = 'proc'
        append_to_frame(frame, tsdict[key].crop(start, end),
                        type=ctype, channelid=num)
    # write the single frame out to disk
    io_gwf.write_frames(outfile, [frame], compression=compression,
                        compression_level=compression_level)
"def",
"write",
"(",
"tsdict",
",",
"outfile",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
",",
"name",
"=",
"'gwpy'",
",",
"run",
"=",
"0",
",",
"compression",
"=",
"257",
",",
"compression_level",
"=",
"6",
")",
":",
"# set frame header metad... | Write data to a GWF file using the frameCPP API | [
"Write",
"data",
"to",
"a",
"GWF",
"file",
"using",
"the",
"frameCPP",
"API"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/gwf/framecpp.py#L456-L497 | train | 211,192 |
def append_to_frame(frame, timeseries, type='proc', channelid=0):
    # pylint: disable=redefined-builtin
    """Append data from a `TimeSeries` to a `~frameCPP.FrameH`

    Parameters
    ----------
    frame : `~frameCPP.FrameH`
        frame object to append to

    timeseries : `TimeSeries`
        the timeseries to append

    type : `str`
        the type of the channel, one of 'adc', 'proc', 'sim'

    channelid : `int`, optional
        the ID of the channel within the group (only used for ADC channels)

    Raises
    ------
    RuntimeError
        if ``type`` is not one of the recognised channel types
    """
    # use the channel name if set, falling back to the series name
    if timeseries.channel:
        channel = str(timeseries.channel)
    else:
        channel = str(timeseries.name)
    # offset of the first sample relative to the frame epoch
    offset = float(LIGOTimeGPS(timeseries.t0.value) -
                   LIGOTimeGPS(*frame.GetGTime()))
    # create the data container
    ctype = type.lower()
    if ctype == 'adc':
        frdata = frameCPP.FrAdcData(
            channel,
            0,  # channel group
            channelid,  # channel number in group
            16,  # number of bits in ADC
            timeseries.sample_rate.value,  # sample rate
        )
        frdata.SetTimeOffset(offset)
        append = frame.AppendFrAdcData
    elif ctype == 'proc':
        frdata = frameCPP.FrProcData(
            channel,  # channel name
            str(timeseries.name),  # comment
            frameCPP.FrProcData.TIME_SERIES,  # ID as time-series
            frameCPP.FrProcData.UNKNOWN_SUB_TYPE,  # empty sub-type (fseries)
            offset,  # offset of first sample relative to frame start
            abs(timeseries.span),  # duration of data
            0.,  # heterodyne frequency
            0.,  # phase of heterodyne
            0.,  # frequency range
            0.,  # resolution bandwidth
        )
        append = frame.AppendFrProcData
    elif ctype == 'sim':
        # use the resolved channel name for consistency with the other
        # types (previously str(timeseries.channel) was used directly,
        # which gives 'None' when no channel is set)
        frdata = frameCPP.FrSimData(
            channel,  # channel name
            str(timeseries.name),  # comment
            timeseries.sample_rate.value,  # sample rate
            offset,  # time offset of first sample
            0.,  # heterodyne frequency
            0.,  # phase of heterodyne
        )
        append = frame.AppendFrSimData
    else:
        raise RuntimeError("Invalid channel type {!r}, please select one of "
                           "'adc', 'proc', or 'sim'".format(type))
    # attach the actual data as an FrVect, then append to the frame
    frdata.AppendData(create_frvect(timeseries))
    append(frdata)
"def",
"append_to_frame",
"(",
"frame",
",",
"timeseries",
",",
"type",
"=",
"'proc'",
",",
"channelid",
"=",
"0",
")",
":",
"# pylint: disable=redefined-builtin",
"if",
"timeseries",
".",
"channel",
":",
"channel",
"=",
"str",
"(",
"timeseries",
".",
"channel... | Append data from a `TimeSeries` to a `~frameCPP.FrameH`
Parameters
----------
frame : `~frameCPP.FrameH`
frame object to append to
timeseries : `TimeSeries`
the timeseries to append
type : `str`
the type of the channel, one of 'adc', 'proc', 'sim'
channelid : `int`, optional
the ID of the channel within the group (only used for ADC channels) | [
"Append",
"data",
"from",
"a",
"TimeSeries",
"to",
"a",
"~frameCPP",
".",
"FrameH"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/gwf/framecpp.py#L500-L566 | train | 211,193 |
def create_frvect(timeseries):
    """Create a `~frameCPP.FrVect` from a `TimeSeries`

    This method is primarily designed to make writing data to GWF files
    a bit easier.

    Parameters
    ----------
    timeseries : `TimeSeries`
        the input `TimeSeries`

    Returns
    -------
    frvect : `~frameCPP.FrVect`
        the output `FrVect`
    """
    # build the single (time) dimension for this vector
    timedim = frameCPP.Dimension(
        timeseries.size,
        timeseries.dx.value,
        str(timeseries.dx.unit),
        0,
    )
    # build the FrVect container itself
    frvect = frameCPP.FrVect(
        timeseries.name or '',
        FRVECT_TYPE_FROM_NUMPY[timeseries.dtype.type],
        1,
        timedim,
        str(timeseries.unit),
    )
    # copy the data in (frameCPP requires a C-contiguous array)
    contiguous = numpy.require(timeseries.value, requirements=['C'])
    frvect.GetDataArray()[:] = contiguous
    return frvect
"def",
"create_frvect",
"(",
"timeseries",
")",
":",
"# create timing dimension",
"dims",
"=",
"frameCPP",
".",
"Dimension",
"(",
"timeseries",
".",
"size",
",",
"timeseries",
".",
"dx",
".",
"value",
",",
"str",
"(",
"timeseries",
".",
"dx",
".",
"unit",
... | Create a `~frameCPP.FrVect` from a `TimeSeries`
This method is primarily designed to make writing data to GWF files a
bit easier.
Parameters
----------
timeseries : `TimeSeries`
the input `TimeSeries`
Returns
-------
frvect : `~frameCPP.FrVect`
the output `FrVect` | [
"Create",
"a",
"~frameCPP",
".",
"FrVect",
"from",
"a",
"TimeSeries"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/io/gwf/framecpp.py#L569-L596 | train | 211,194 |
gwpy/gwpy | gwpy/timeseries/statevector.py | _bool_segments | def _bool_segments(array, start=0, delta=1, minlen=1):
"""Yield segments of consecutive `True` values in a boolean array
Parameters
----------
array : `iterable`
An iterable of boolean-castable values.
start : `float`
The value of the first sample on the indexed axis
(e.g.the GPS start time of the array).
delta : `float`
The step size on the indexed axis (e.g. sample duration).
minlen : `int`, optional
The minimum number of consecutive `True` values for a segment.
Yields
------
segment : `tuple`
``(start + i * delta, start + (i + n) * delta)`` for a sequence
of ``n`` consecutive True values starting at position ``i``.
Notes
-----
This method is adapted from original code written by Kipp Cannon and
distributed under GPLv3.
The datatype of the values returned will be the larger of the types
of ``start`` and ``delta``.
Examples
--------
>>> print(list(_bool_segments([0, 1, 0, 0, 0, 1, 1, 1, 0, 1]))
[(1, 2), (5, 8), (9, 10)]
>>> print(list(_bool_segments([0, 1, 0, 0, 0, 1, 1, 1, 0, 1]
... start=100., delta=0.1))
[(100.1, 100.2), (100.5, 100.8), (100.9, 101.0)]
"""
array = iter(array)
i = 0
while True:
try: # get next value
val = next(array)
except StopIteration: # end of array
return
if val: # start of new segment
n = 1 # count consecutive True
try:
while next(array): # run until segment will end
n += 1
except StopIteration: # have reached the end
return # stop
finally: # yield segment (including at StopIteration)
if n >= minlen: # ... if long enough
yield (start + i * delta, start + (i + n) * delta)
i += n
i += 1 | python | def _bool_segments(array, start=0, delta=1, minlen=1):
"""Yield segments of consecutive `True` values in a boolean array
Parameters
----------
array : `iterable`
An iterable of boolean-castable values.
start : `float`
The value of the first sample on the indexed axis
(e.g.the GPS start time of the array).
delta : `float`
The step size on the indexed axis (e.g. sample duration).
minlen : `int`, optional
The minimum number of consecutive `True` values for a segment.
Yields
------
segment : `tuple`
``(start + i * delta, start + (i + n) * delta)`` for a sequence
of ``n`` consecutive True values starting at position ``i``.
Notes
-----
This method is adapted from original code written by Kipp Cannon and
distributed under GPLv3.
The datatype of the values returned will be the larger of the types
of ``start`` and ``delta``.
Examples
--------
>>> print(list(_bool_segments([0, 1, 0, 0, 0, 1, 1, 1, 0, 1]))
[(1, 2), (5, 8), (9, 10)]
>>> print(list(_bool_segments([0, 1, 0, 0, 0, 1, 1, 1, 0, 1]
... start=100., delta=0.1))
[(100.1, 100.2), (100.5, 100.8), (100.9, 101.0)]
"""
array = iter(array)
i = 0
while True:
try: # get next value
val = next(array)
except StopIteration: # end of array
return
if val: # start of new segment
n = 1 # count consecutive True
try:
while next(array): # run until segment will end
n += 1
except StopIteration: # have reached the end
return # stop
finally: # yield segment (including at StopIteration)
if n >= minlen: # ... if long enough
yield (start + i * delta, start + (i + n) * delta)
i += n
i += 1 | [
"def",
"_bool_segments",
"(",
"array",
",",
"start",
"=",
"0",
",",
"delta",
"=",
"1",
",",
"minlen",
"=",
"1",
")",
":",
"array",
"=",
"iter",
"(",
"array",
")",
"i",
"=",
"0",
"while",
"True",
":",
"try",
":",
"# get next value",
"val",
"=",
"n... | Yield segments of consecutive `True` values in a boolean array
Parameters
----------
array : `iterable`
An iterable of boolean-castable values.
start : `float`
The value of the first sample on the indexed axis
(e.g.the GPS start time of the array).
delta : `float`
The step size on the indexed axis (e.g. sample duration).
minlen : `int`, optional
The minimum number of consecutive `True` values for a segment.
Yields
------
segment : `tuple`
``(start + i * delta, start + (i + n) * delta)`` for a sequence
of ``n`` consecutive True values starting at position ``i``.
Notes
-----
This method is adapted from original code written by Kipp Cannon and
distributed under GPLv3.
The datatype of the values returned will be the larger of the types
of ``start`` and ``delta``.
Examples
--------
>>> print(list(_bool_segments([0, 1, 0, 0, 0, 1, 1, 1, 0, 1]))
[(1, 2), (5, 8), (9, 10)]
>>> print(list(_bool_segments([0, 1, 0, 0, 0, 1, 1, 1, 0, 1]
... start=100., delta=0.1))
[(100.1, 100.2), (100.5, 100.8), (100.9, 101.0)] | [
"Yield",
"segments",
"of",
"consecutive",
"True",
"values",
"in",
"a",
"boolean",
"array"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/statevector.py#L54-L113 | train | 211,195 |
def to_dqflag(self, name=None, minlen=1, dtype=None, round=False,
              label=None, description=None):
    """Convert this series into a `~gwpy.segments.DataQualityFlag`.

    Each contiguous set of `True` values are grouped as a
    `~gwpy.segments.Segment` running from the GPS time the first
    found `True`, to the GPS time of the next `False` (or the end
    of the series)

    Parameters
    ----------
    name : `str`, optional
        the name of the output flag; default is this series' name

    minlen : `int`, optional
        minimum number of consecutive `True` values to identify as a
        `~gwpy.segments.Segment`. This is useful to ignore single
        bit flips, for example.

    dtype : `type`, `callable`
        output segment entry type, can pass either a type for simple
        casting, or a callable function that accepts a float and returns
        another numeric type, defaults to the `dtype` of the time index

    round : `bool`, optional
        choose to round each `~gwpy.segments.Segment` to its
        inclusive integer boundaries

    label : `str`, optional
        the :attr:`~gwpy.segments.DataQualityFlag.label` for the
        output flag.

    description : `str`, optional
        the :attr:`~gwpy.segments.DataQualityFlag.description` for the
        output flag.

    Returns
    -------
    dqflag : `~gwpy.segments.DataQualityFlag`
        a segment representation of this `StateTimeSeries`, the span
        defines the `known` segments, while the contiguous `True`
        sets defined each of the `active` segments
    """
    from ..segments import DataQualityFlag

    # resolve the output segment type from the time index if not given
    if dtype is None:
        dtype = self.t0.dtype
    if isinstance(dtype, numpy.dtype):
        dtype = dtype.type  # need a callable, not a dtype instance
    t0 = dtype(self.t0.value)
    deltat = dtype(self.dt.value)
    # contiguous runs of True become the active segments
    active = _bool_segments(self.value, t0, deltat, minlen=int(minlen))
    # the full span of this series defines the known segments
    known = [tuple(map(dtype, self.span))]
    flag = DataQualityFlag(name=name or self.name, active=active,
                           known=known, label=label or self.name,
                           description=description)
    if round:
        flag = flag.round()
    return flag
"def",
"to_dqflag",
"(",
"self",
",",
"name",
"=",
"None",
",",
"minlen",
"=",
"1",
",",
"dtype",
"=",
"None",
",",
"round",
"=",
"False",
",",
"label",
"=",
"None",
",",
"description",
"=",
"None",
")",
":",
"from",
".",
".",
"segments",
"import",... | Convert this series into a `~gwpy.segments.DataQualityFlag`.
Each contiguous set of `True` values are grouped as a
`~gwpy.segments.Segment` running from the GPS time the first
found `True`, to the GPS time of the next `False` (or the end
of the series)
Parameters
----------
minlen : `int`, optional
minimum number of consecutive `True` values to identify as a
`~gwpy.segments.Segment`. This is useful to ignore single
bit flips, for example.
dtype : `type`, `callable`
output segment entry type, can pass either a type for simple
casting, or a callable function that accepts a float and returns
another numeric type, defaults to the `dtype` of the time index
round : `bool`, optional
choose to round each `~gwpy.segments.Segment` to its
inclusive integer boundaries
label : `str`, optional
the :attr:`~gwpy.segments.DataQualityFlag.label` for the
output flag.
description : `str`, optional
the :attr:`~gwpy.segments.DataQualityFlag.description` for the
output flag.
Returns
-------
dqflag : `~gwpy.segments.DataQualityFlag`
a segment representation of this `StateTimeSeries`, the span
defines the `known` segments, while the contiguous `True`
sets defined each of the `active` segments | [
"Convert",
"this",
"series",
"into",
"a",
"~gwpy",
".",
"segments",
".",
"DataQualityFlag",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/statevector.py#L230-L290 | train | 211,196 |
gwpy/gwpy | gwpy/timeseries/statevector.py | StateVector.bits | def bits(self):
"""list of `Bits` for this `StateVector`
:type: `Bits`
"""
try:
return self._bits
except AttributeError:
if self.dtype.name.startswith(('uint', 'int')):
nbits = self.itemsize * 8
self.bits = Bits(['Bit %d' % b for b in range(nbits)],
channel=self.channel, epoch=self.epoch)
return self.bits
elif hasattr(self.channel, 'bits'):
self.bits = self.channel.bits
return self.bits
return None | python | def bits(self):
"""list of `Bits` for this `StateVector`
:type: `Bits`
"""
try:
return self._bits
except AttributeError:
if self.dtype.name.startswith(('uint', 'int')):
nbits = self.itemsize * 8
self.bits = Bits(['Bit %d' % b for b in range(nbits)],
channel=self.channel, epoch=self.epoch)
return self.bits
elif hasattr(self.channel, 'bits'):
self.bits = self.channel.bits
return self.bits
return None | [
"def",
"bits",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"_bits",
"except",
"AttributeError",
":",
"if",
"self",
".",
"dtype",
".",
"name",
".",
"startswith",
"(",
"(",
"'uint'",
",",
"'int'",
")",
")",
":",
"nbits",
"=",
"self",
".... | list of `Bits` for this `StateVector`
:type: `Bits` | [
"list",
"of",
"Bits",
"for",
"this",
"StateVector"
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/statevector.py#L518-L534 | train | 211,197 |
gwpy/gwpy | gwpy/timeseries/statevector.py | StateVector.boolean | def boolean(self):
"""A mapping of this `StateVector` to a 2-D array containing all
binary bits as booleans, for each time point.
"""
try:
return self._boolean
except AttributeError:
nbits = len(self.bits)
boolean = numpy.zeros((self.size, nbits), dtype=bool)
for i, sample in enumerate(self.value):
boolean[i, :] = [int(sample) >> j & 1 for j in range(nbits)]
self._boolean = Array2D(boolean, name=self.name,
x0=self.x0, dx=self.dx, y0=0, dy=1)
return self.boolean | python | def boolean(self):
"""A mapping of this `StateVector` to a 2-D array containing all
binary bits as booleans, for each time point.
"""
try:
return self._boolean
except AttributeError:
nbits = len(self.bits)
boolean = numpy.zeros((self.size, nbits), dtype=bool)
for i, sample in enumerate(self.value):
boolean[i, :] = [int(sample) >> j & 1 for j in range(nbits)]
self._boolean = Array2D(boolean, name=self.name,
x0=self.x0, dx=self.dx, y0=0, dy=1)
return self.boolean | [
"def",
"boolean",
"(",
"self",
")",
":",
"try",
":",
"return",
"self",
".",
"_boolean",
"except",
"AttributeError",
":",
"nbits",
"=",
"len",
"(",
"self",
".",
"bits",
")",
"boolean",
"=",
"numpy",
".",
"zeros",
"(",
"(",
"self",
".",
"size",
",",
... | A mapping of this `StateVector` to a 2-D array containing all
binary bits as booleans, for each time point. | [
"A",
"mapping",
"of",
"this",
"StateVector",
"to",
"a",
"2",
"-",
"D",
"array",
"containing",
"all",
"binary",
"bits",
"as",
"booleans",
"for",
"each",
"time",
"point",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/statevector.py#L555-L568 | train | 211,198 |
gwpy/gwpy | gwpy/timeseries/statevector.py | StateVector.get_bit_series | def get_bit_series(self, bits=None):
"""Get the `StateTimeSeries` for each bit of this `StateVector`.
Parameters
----------
bits : `list`, optional
a list of bit indices or bit names, defaults to all bits
Returns
-------
bitseries : `StateTimeSeriesDict`
a `dict` of `StateTimeSeries`, one for each given bit
"""
if bits is None:
bits = [b for b in self.bits if b not in {None, ''}]
bindex = []
for bit in bits:
try:
bindex.append((self.bits.index(bit), bit))
except (IndexError, ValueError) as exc:
exc.args = ('Bit %r not found in StateVector' % bit,)
raise
self._bitseries = StateTimeSeriesDict()
for i, bit in bindex:
self._bitseries[bit] = StateTimeSeries(
self.value >> i & 1, name=bit, epoch=self.x0.value,
channel=self.channel, sample_rate=self.sample_rate)
return self._bitseries | python | def get_bit_series(self, bits=None):
"""Get the `StateTimeSeries` for each bit of this `StateVector`.
Parameters
----------
bits : `list`, optional
a list of bit indices or bit names, defaults to all bits
Returns
-------
bitseries : `StateTimeSeriesDict`
a `dict` of `StateTimeSeries`, one for each given bit
"""
if bits is None:
bits = [b for b in self.bits if b not in {None, ''}]
bindex = []
for bit in bits:
try:
bindex.append((self.bits.index(bit), bit))
except (IndexError, ValueError) as exc:
exc.args = ('Bit %r not found in StateVector' % bit,)
raise
self._bitseries = StateTimeSeriesDict()
for i, bit in bindex:
self._bitseries[bit] = StateTimeSeries(
self.value >> i & 1, name=bit, epoch=self.x0.value,
channel=self.channel, sample_rate=self.sample_rate)
return self._bitseries | [
"def",
"get_bit_series",
"(",
"self",
",",
"bits",
"=",
"None",
")",
":",
"if",
"bits",
"is",
"None",
":",
"bits",
"=",
"[",
"b",
"for",
"b",
"in",
"self",
".",
"bits",
"if",
"b",
"not",
"in",
"{",
"None",
",",
"''",
"}",
"]",
"bindex",
"=",
... | Get the `StateTimeSeries` for each bit of this `StateVector`.
Parameters
----------
bits : `list`, optional
a list of bit indices or bit names, defaults to all bits
Returns
-------
bitseries : `StateTimeSeriesDict`
a `dict` of `StateTimeSeries`, one for each given bit | [
"Get",
"the",
"StateTimeSeries",
"for",
"each",
"bit",
"of",
"this",
"StateVector",
"."
] | 7a92b917e7dd2d99b15895293a1fa1d66cdb210a | https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/statevector.py#L582-L609 | train | 211,199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.