code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
if self._resolved_time is None or self.source_clock_overflow_period is None:
    self._resolved_time = decimal.Decimal(source_clock_sample)
    self._prev_source_sample = source_clock_sample
    self._prev_target_sample = target_clock_sample
else:
    # Time betw... | def update(self, source_clock_sample, target_clock_sample) | Args:
source_clock_sample: Sample of the source clock, in seconds
target_clock_sample: Sample of the target clock, in seconds
Returns: Resolved absolute source clock value | 3.338574 | 3.33796 | 1.000184 |
pi = float(self._source_time_resolver.update(source_clock_sample, target_clock_sample))
qi = target_clock_sample
# Initialization
if self._p is None:
    self._p = pi
    self._q = qi
# Sync error - refer to the reference implementation of the algorithm... | def update(self, source_clock_sample, target_clock_sample) | Args:
source_clock_sample: E.g. value received from the source system, in seconds
target_clock_sample: E.g. target time sampled when the data arrived to the local system, in seconds
Returns: Event timestamp converted to the target time domain. | 4.881685 | 4.905222 | 0.995202 |
if not isinstance(obj, CompoundValue) and hasattr(obj, 'transfer'):
    if hasattr(obj, 'message'):
        payload = obj.message
        header = 'Message'
    elif hasattr(obj, 'request'):
        payload = obj.request
        header = 'Request'
    elif hasattr(obj, 'response'):
        ... | def to_yaml(obj) | This function returns a correct YAML representation of a UAVCAN structure (message, request, or response), or
a DSDL entity (array or primitive), or a UAVCAN transfer, with comments for human benefit.
Args:
obj: Object to convert.
Returns: Unicode string containing YAML representation of t... | 3.599547 | 3.12235 | 1.152833 |
# Extracting constants
uavcan_type = uavcan.get_uavcan_data_type(struct)
if uavcan.is_request(struct):
    consts = uavcan_type.request_constants
    fields = uavcan_type.request_fields
elif uavcan.is_response(struct):
    consts = uavcan_type.response_constants
    fields = uavcan_... | def value_to_constant_name(struct, field_name, keep_literal=False) | This function accepts a UAVCAN struct (message, request, or response), and a field name; and returns
the name of the constant or bit mask that matches the value. If no match could be established, the literal
value will be returned as is.
Args:
struct: UAVCAN struct to work with
field_name:... | 2.87944 | 2.736615 | 1.05219 |
windows_com_port = device_name.replace('\\', '').replace('.', '').lower().startswith('com')
unix_tty = device_name.startswith('/dev/')
if windows_com_port or unix_tty:
    return SLCAN(device_name, **kwargs)
elif SocketCAN is not None:
    return SocketCAN(device_name, **kwargs)
else:
    ... | def make_driver(device_name, **kwargs) | Creates an instance of a CAN driver.
The right driver class will be selected automatically based on the device_name.
:param device_name: This parameter is used to select driver class. E.g. "/dev/ttyACM0", "COM9", "can0".
:param kwargs: Passed directly to the constructor. | 4.651406 | 4.39569 | 1.058174 |
global DATATYPES, TYPENAMES
paths = list(paths)
# Try to prepend the built-in DSDL files
# TODO: why do we need try/except here?
# noinspection PyBroadException
try:
    if not args.get("exclude_dist", None):
        dsdl_path = pkg_resources.resource_filename(__name__, "dsdl_file... | def load_dsdl(*paths, **args) | Loads the DSDL files under the given directory/directories, and creates
types for each of them in the current module's namespace.
If the exclude_dist argument is not present, or False, the DSDL
definitions installed with this package will be loaded first.
Also adds entries for all datatype (ID, kind)s... | 4.040499 | 3.824323 | 1.056527 |
attr, _, subpath = attrpath.partition(".")
if attr not in self.__dict__:
    self.__dict__[attr] = Namespace()
    self.__namespaces.add(attr)
if subpath:
    return self.__dict__[attr]._path(subpath)
else:
    return self.__dict__[attr] | def _path(self, attrpath) | Returns the namespace object at the given .-separated path,
creating any namespaces in the path that don't already exist. | 3.37566 | 2.826932 | 1.194107 |
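A minimal standalone sketch of the lazily-created namespace tree this row implies; this `Namespace` class is an illustrative reconstruction, not the package's own:

```python
class Namespace(object):
    def __init__(self):
        self.__namespaces = set()

    def _path(self, attrpath):
        # Peel off the first attribute and recurse into the remainder,
        # creating intermediate Namespace nodes on demand.
        attr, _, subpath = attrpath.partition(".")
        if attr not in self.__dict__:
            self.__dict__[attr] = Namespace()
            self.__namespaces.add(attr)
        if subpath:
            return self.__dict__[attr]._path(subpath)
        return self.__dict__[attr]

root = Namespace()
leaf = root._path("uavcan.protocol.param")
assert leaf is root.uavcan.protocol.param  # intermediate nodes were auto-created
```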
can = driver.make_driver(can_device_name, **kwargs)
return Node(can, **kwargs) | def make_node(can_device_name, **kwargs) | Constructs a node instance with specified CAN device.
:param can_device_name: CAN device name, e.g. "/dev/ttyACM0", "COM9", "can0".
:param kwargs: These arguments will be supplied to the CAN driver factory and to the node constructor. | 5.194514 | 5.703785 | 0.910713 |
priority = 1
event = self._scheduler.enter(timeout_seconds, priority, callback, ())
return self._make_sched_handle(lambda: event) | def defer(self, timeout_seconds, callback) | This method invokes the callback once after the specified amount of time has elapsed.
:returns: EventHandle object. Call .remove() on it to cancel the event. | 10.028252 | 11.65363 | 0.860526 |
priority = 0
def caller(scheduled_deadline):
    # Event MUST be re-registered first in order to ensure that it can be cancelled from the callback
    scheduled_deadline += period_seconds
    event_holder[0] = self._scheduler.enterabs(scheduled_deadline, priority, calle... | def periodic(self, period_seconds, callback) | This method invokes the callback periodically at the specified time interval.
Note that the scheduler features zero phase drift.
:returns: EventHandle object. Call .remove() on it to cancel the event. | 5.463672 | 5.934519 | 0.92066 |
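A runnable sketch of the zero-phase-drift pattern the `periodic()` row relies on, built on the stdlib `sched` module; the names `periodic` and `event_holder` mirror the row, everything else here is an assumption:

```python
import sched
import time

scheduler = sched.scheduler(time.monotonic, time.sleep)

def periodic(period_seconds, callback):
    event_holder = [None]
    def caller(scheduled_deadline):
        # Advance from the *previous* deadline rather than from time.monotonic(),
        # so a late invocation does not shift every later one (zero phase drift).
        scheduled_deadline += period_seconds
        event_holder[0] = scheduler.enterabs(scheduled_deadline, 0, caller, (scheduled_deadline,))
        callback()
    deadline = time.monotonic() + period_seconds
    event_holder[0] = scheduler.enterabs(deadline, 0, caller, (deadline,))
    return event_holder

ticks = []
handle = periodic(0.02, lambda: ticks.append(time.monotonic()))
end = time.monotonic() + 0.1
while time.monotonic() < end:
    scheduler.run(blocking=False)  # in real code the node's spin loop pumps this
    time.sleep(0.005)
scheduler.cancel(handle[0])  # analogous to EventHandle.remove()
```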
return self._handler_dispatcher.add_handler(uavcan_type, handler, **kwargs) | def add_handler(self, uavcan_type, handler, **kwargs) | Adds a handler for the specified data type.
:param uavcan_type: DSDL data type. Only transfers of this type will be accepted for this handler.
:param handler: The handler. This must be either a callable or a class.
:param **kwargs: Extra arguments for the handler.
:return: A remov... | 3.749048 | 4.978752 | 0.753009 |
if timeout != 0:
    deadline = (time.monotonic() + timeout) if timeout is not None else sys.float_info.max
def execute_once():
next_event_at = self._poll_scheduler_and_get_next_deadline()
if next_event_at is None:
next_event_at =... | def spin(self, timeout=None) | Runs background processes until timeout expires.
Note that all processing is implemented in one thread.
:param timeout: The method will return once this amount of time expires.
If None, the method will never return.
If zero, the method will handle only tho... | 2.79848 | 2.847117 | 0.982917 |
'''Feed ASCII string or bytes to the signature function'''
try:
    if isinstance(data_bytes, basestring):  # Python 2.7 compatibility
        data_bytes = map(ord, data_bytes)
except NameError:
    if isinstance(data_bytes, str):  # This branch will be taken on Python 3
        ... | def add(self, data_bytes) | Feed ASCII string or bytes to the signature function | 3.540259 | 2.955063 | 1.198032 |
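On Python 3 alone, the normalization this row performs reduces to a few lines; a sketch with a hypothetical helper name:

```python
def _to_byte_values(data_bytes):
    """Normalize str/bytes input to a list of integer byte values."""
    if isinstance(data_bytes, str):
        # ASCII text: take the code point of each character.
        return [ord(ch) for ch in data_bytes]
    # bytes and bytearray already iterate as integers on Python 3.
    return list(data_bytes)

assert _to_byte_values("AB") == [65, 66]
assert _to_byte_values(b"AB") == [65, 66]
```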
# noinspection PyShadowingNames
def walk():
    import fnmatch
    from functools import partial
    def on_walk_error(directory, ex):
        raise DsdlException('OS error in [%s]: %s' % (directory, str(ex)))
    for source_dir in source_dirs:
        walker = os.walk(source_dir,... | def parse_namespaces(source_dirs, search_dirs=None) | Use only this function to parse DSDL definitions.
This function takes a list of root namespace directories (containing DSDL definition files to parse) and an
optional list of search directories (containing DSDL definition files that can be referenced from the types
that are going to be parsed).
Returns... | 3.305669 | 3.171792 | 1.042209 |
cast_mode = 'saturated' if self.cast_mode == PrimitiveType.CAST_MODE_SATURATED else 'truncated'
primary_type = {
    PrimitiveType.KIND_BOOLEAN: 'bool',
    PrimitiveType.KIND_UNSIGNED_INT: 'uint' + str(self.bitlen),
    PrimitiveType.KIND_SIGNED_INT: 'int' + str(self.bi... | def get_normalized_definition(self) | Please refer to the specification for details about normalized definitions. | 3.341041 | 3.092815 | 1.080259 |
low, high = self.value_range
if not low <= value <= high:
    error('Value [%s] is out of range %s', value, self.value_range) | def validate_value_range(self, value) | Args:
    value: Value to check. Raises DsdlException if the value cannot be represented by this type. | 4.374103 | 4.591408 | 0.952671 |
typedef = self.value_type.get_normalized_definition()
return ('%s[<=%d]' if self.mode == ArrayType.MODE_DYNAMIC else '%s[%d]') % (typedef, self.max_size) | def get_normalized_definition(self) | Please refer to the specification for details about normalized definitions. | 9.578068 | 8.70352 | 1.100482 |
payload_max_bitlen = self.max_size * self.value_type.get_max_bitlen()
return {
    self.MODE_DYNAMIC: payload_max_bitlen + self.max_size.bit_length(),
    self.MODE_STATIC: payload_max_bitlen
}[self.mode] | def get_max_bitlen(self) | Returns total maximum bit length of the array, including length field if applicable. | 3.852377 | 3.489232 | 1.104076 |
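A worked example of the arithmetic above, under the assumption that the type models something like `uint8[<=5]`: the payload needs 5 × 8 = 40 bits, and a dynamic array adds a length field wide enough to encode sizes 0..5, i.e. `(5).bit_length()` = 3 bits:

```python
# Hypothetical numbers for a uint8[<=5] array; only the arithmetic is being shown.
max_size, elem_bitlen = 5, 8
payload_max_bitlen = max_size * elem_bitlen              # 40 bits of payload
assert payload_max_bitlen + max_size.bit_length() == 43  # dynamic: + 3-bit length field
assert payload_max_bitlen == 40                          # static: payload only
```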
txt = StringIO()
txt.write(self.full_name + '\n')
def adjoin(attrs):
    return txt.write('\n'.join(x.get_normalized_definition() for x in attrs) + '\n')
if self.kind == CompoundType.KIND_SERVICE:
    if self.request_union:
        txt.write('\n@union\n... | def get_dsdl_signature_source_definition(self) | Returns normalized DSDL definition text.
Please refer to the specification for details about normalized DSDL definitions. | 3.278012 | 3.092102 | 1.060124 |
if self._data_type_signature is None:
    sig = Signature(self.get_dsdl_signature())
    fields = self.request_fields + self.response_fields if self.kind == CompoundType.KIND_SERVICE else self.fields
    for field in fields:
        field_sig = field.type.get_data_type_s... | def get_data_type_signature(self) | Computes data type signature of this type. The data type signature is
guaranteed to match only if all nested data structures are compatible.
Please refer to the specification for details about signatures. | 3.63905 | 3.431532 | 1.060474 |
self._handle.remove()
self._node_monitor_event_handle.remove()
self._allocation_table.close() | def close(self) | Stops the instance and closes the allocation table storage. | 14.933288 | 9.574369 | 1.559715 |
'''Returns a nice human readable path to 'filename'.'''
try:
    a = os.path.abspath(filename)
    r = os.path.relpath(filename)
except ValueError:
    # Catch the relpath exception. This happens because relpath cannot produce a
    # relative path if the working directory is on a different drive.
    a = r... | def pretty_filename(filename) | Returns a nice human readable path to 'filename'. | 8.605169 | 7.210204 | 1.193471 |
# Check if event is of type event_pb2.Event proto.
if not isinstance(event, event_pb2.Event):
    raise TypeError("expected an event_pb2.Event proto, "
                    "but got %s" % type(event))
return self._write_serialized_event(event.SerializeToString()) | def write_event(self, event) | Appends event to the file. | 3.888622 | 3.771599 | 1.031028 |
if self._num_outstanding_events == 0 or self._recordio_writer is None:
    return
self._recordio_writer.flush()
if self._logger is not None:
    self._logger.info('wrote %d %s to disk', self._num_outstanding_events,
                      'event' if self._num_outs... | def flush(self) | Flushes the event file to disk. | 2.798269 | 2.634749 | 1.062063 |
self.flush()
if self._recordio_writer is not None:
    self._recordio_writer.close()
    self._recordio_writer = None | def close(self) | Flushes the pending events and closes the writer after it is done. | 3.899686 | 3.083695 | 1.264615 |
if self._closed:
    self._worker = _EventLoggerThread(self._event_queue, self._ev_writer,
                                      self._flush_secs, self._sentinel_event)
    self._worker.start()
    self._closed = False | def reopen(self) | Reopens the EventFileWriter.
Can be called after `close()` to add more events in the same directory.
The events will go into a new events file.
Does nothing if the `EventFileWriter` was not closed. | 8.048051 | 6.410844 | 1.255381 |
if not self._closed:
    self.add_event(self._sentinel_event)
    self.flush()
    self._worker.join()
    self._ev_writer.close()
    self._closed = True | def close(self) | Flushes the event file to disk and closes the file.
Call this method when you do not need the summary writer anymore. | 5.802923 | 4.713627 | 1.231095 |
header = struct.pack('Q', len(event_str))
header += struct.pack('I', masked_crc32c(header))
footer = struct.pack('I', masked_crc32c(event_str))
self._writer.write(header + event_str + footer) | def write_record(self, event_str) | Writes a serialized event to file. | 3.243515 | 3.029092 | 1.070788 |
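The row above produces the TensorFlow events-file record framing: an 8-byte record length, a masked CRC32C of that header, the payload, and a masked CRC32C of the payload. A sketch of the matching reader, assuming the same `masked_crc32c()` helper the writer uses:

```python
import struct

def read_record(f, masked_crc32c):
    """Read one length/CRC-framed record back; returns None at EOF."""
    header = f.read(12)
    if len(header) < 12:
        return None
    length, header_crc = struct.unpack('QI', header)
    if masked_crc32c(header[:8]) != header_crc:
        raise IOError('corrupt record header')
    payload = f.read(length)
    (payload_crc,) = struct.unpack('I', f.read(4))
    if masked_crc32c(payload) != payload_crc:
        raise IOError('corrupt record payload')
    return payload
```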
if self._writer is not None:
    self.flush()
    self._writer.close()
    self._writer = None | def close(self) | Closes the record writer. | 3.440955 | 2.724323 | 1.26305 |
if isinstance(summary, bytes):
    summ = summary_pb2.Summary()
    summ.ParseFromString(summary)
    summary = summ
# We strip metadata from values with tags that we have seen before in order
# to save space - we just store the metadata on the first value with ... | def add_summary(self, summary, global_step=None) | Adds a `Summary` protocol buffer to the event file.
This method wraps the provided summary in an `Event` protocol buffer and adds it
to the event file.
Parameters
----------
summary : A `Summary` protocol buffer
Optionally serialized as a string.
global... | 4.284337 | 4.346186 | 0.985769 |
event = event_pb2.Event(graph_def=graph.SerializeToString())
self._add_event(event, None) | def add_graph(self, graph) | Adds a `Graph` protocol buffer to the event file. | 5.892663 | 4.112292 | 1.432939 |
if self._default_bins is None:
    v = 1E-12
    buckets = []
    neg_buckets = []
    while v < 1E20:
        buckets.append(v)
        neg_buckets.append(-v)
        v *= 1.1
    self._default_bins = neg_buckets[::-1] + [0] + buckets
    ... | def _get_default_bins(self) | Ported from the C++ function InitDefaultBucketsInner() in the following file.
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/lib/histogram/histogram.cc
See the following tutorial for more details on how TensorFlow initializes the bin distribution.
https://www.tensorflow.org/prog... | 3.028434 | 2.6545 | 1.140868 |
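The bucket generation in the row above can be reproduced standalone: positive bucket edges grow geometrically by a factor of 1.1 from 1e-12 up to 1e20, a mirrored negative copy is prepended, and a single zero sits in the middle:

```python
v, buckets, neg_buckets = 1e-12, [], []
while v < 1e20:
    buckets.append(v)
    neg_buckets.append(-v)
    v *= 1.1
default_bins = neg_buckets[::-1] + [0] + buckets

assert default_bins[len(default_bins) // 2] == 0              # zero is the middle edge
assert abs(default_bins[-1] / default_bins[-2] - 1.1) < 1e-9  # geometric spacing
```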
if tag not in self._scalar_dict.keys():
    self._scalar_dict[tag] = []
self._scalar_dict[tag].append([timestamp, global_step, float(scalar_value)]) | def _append_to_scalar_dict(self, tag, scalar_value, global_step, timestamp) | Adds a list [timestamp, step, value] to the value of `self._scalar_dict[tag]`.
This allows users to store scalars in memory and dump them to a json file later. | 2.126711 | 1.775644 | 1.197713 |
if isinstance(value, (tuple, list, dict)):
if isinstance(value, (tuple, list)):
if len(value) != 2:
raise ValueError('expected two elements in value, while received %d'
% len(value))
value = {value[0]: ... | def add_scalar(self, tag, value, global_step=None) | Adds scalar data to the event file.
Parameters
----------
tag : str
Name for the scalar plot.
value : float, tuple, list, or dict
If value is a float, the corresponding curve would have no name attached in the
plot.
... | 3.700557 | 3.549107 | 1.042673 |
timestamp = time.time()
fw_logdir = self._file_writer.get_logdir()
for scalar_name, scalar_value in scalar_dict.items():
fw_tag = fw_logdir + '/' + tag + '/' + scalar_name
if fw_tag in self._all_writers.keys():
fw = self._all_writers[fw_tag]
... | def _add_scalars(self, tag, scalar_dict, global_step=None) | Adds multiple scalars to summary. This enables drawing multiple curves in one plot.
Parameters
----------
tag : str
Name for the plot.
scalar_dict : dict
Values to be saved.
global_step : int
Global step value to record... | 2.38797 | 2.473337 | 0.965485 |
if os.path.exists(path) and os.path.isfile(path):
    logging.warning('%s already exists and will be overwritten by scalar dict', path)
with open(path, "w") as f:
    json.dump(self._scalar_dict, f) | def export_scalars(self, path) | Exports to the given path an ASCII file containing all the scalars written
so far by this instance, with the following format:
{writer_id : [[timestamp, step, value], ...], ...} | 3.301036 | 3.369184 | 0.979773 |
if bins == 'default':
    bins = self._get_default_bins()
self._file_writer.add_summary(histogram_summary(tag, values, bins), global_step) | def add_histogram(self, tag, values, global_step=None, bins='default') | Add histogram data to the event file.
Note: This function internally calls `asnumpy()` if `values` is an MXNet NDArray.
Since `asnumpy()` is a blocking function call, this function would block the main
thread till it returns. It may consequently affect the performance of async execution
... | 3.098844 | 4.673509 | 0.663066 |
self._file_writer.add_summary(image_summary(tag, image), global_step) | def add_image(self, tag, image, global_step=None) | Add image data to the event file.
This function supports input as a 2D, 3D, or 4D image.
If the input image is 2D, a channel axis is prepended as the first dimension
and the image will be replicated three times and concatenated along the channel axis.
If the input image is 3D, it will be rep... | 4.51201 | 7.896509 | 0.571393 |
self._file_writer.add_summary(audio_summary(tag, audio, sample_rate=sample_rate),
global_step) | def add_audio(self, tag, audio, sample_rate=44100, global_step=None) | Add audio data to the event file.
Note: This function internally calls `asnumpy()` for MXNet `NDArray` inputs.
Since `asnumpy()` is a blocking function call, this function would block the main
thread till it returns. It may consequently affect the performance of async execution
of the M... | 3.734565 | 5.866628 | 0.636578 |
self._file_writer.add_summary(text_summary(tag, text), global_step)
if tag not in self._text_tags:
self._text_tags.append(tag)
extension_dir = self.get_logdir() + '/plugins/tensorboard_text/'
if not os.path.exists(extension_dir):
os.makedirs(e... | def add_text(self, tag, text, global_step=None) | Add text data to the event file.
Parameters
----------
tag : str
Name for the `text`.
text : str
Text to be saved to the event file.
global_step : int
Global step value to record. | 2.814092 | 3.054695 | 0.921235 |
embedding_shape = embedding.shape
if len(embedding_shape) != 2:
raise ValueError('expected 2D NDArray as embedding data, while received an array with'
' ndim=%d' % len(embedding_shape))
data_dir = _get_embedding_dir(tag, global_step)
save... | def add_embedding(self, tag, embedding, labels=None, images=None, global_step=None) | Adds embedding projector data to the event file. It will also create a config file
used by the embedding projector in TensorBoard. The folder containing the embedding
data is named using the formula:
If global_step is not None, the folder name is `tag + '_' + str(global_step).zfill(6)`;
... | 2.808191 | 2.854127 | 0.983905 |
if num_thresholds < 2:
raise ValueError('num_thresholds must be >= 2')
labels = _make_numpy_array(labels)
predictions = _make_numpy_array(predictions)
self._file_writer.add_summary(pr_curve_summary(tag, labels, predictions,
... | def add_pr_curve(self, tag, labels, predictions, num_thresholds,
global_step=None, weights=None) | Adds precision-recall curve.
Note: This function internally calls `asnumpy()` for MXNet `NDArray` inputs.
Since `asnumpy()` is a blocking function call, this function would block the main
thread till it returns. It may consequently affect the performance of async execution
of the MXNet ... | 2.319356 | 3.061311 | 0.757635 |
for i in n:
    if len(i) != len(n[0]):
        return False
return True | def _rectangular(n) | Checks to see if a 2D list is a valid 2D matrix | 3.772285 | 2.420287 | 1.558611 |
return ((isinstance(matrix[0], list) and _rectangular(matrix) and
not isinstance(matrix[0][0], list)) or
            (not isinstance(matrix, list) and matrix.ndim == 2)) | def _is_2D_matrix(matrix) | Checks to see if an ndarray is 2D or a list of lists is 2D | 4.763127 | 4.292598 | 1.109614 |
if not isinstance(image, NDArray):
raise TypeError('MXNet NDArray expected, received {}'.format(str(type(image))))
image = _prepare_image(image, nrow=nrow, padding=padding, square_image=square_image)
if Image is None:
raise ImportError('saving image failed because PIL is not found')
... | def _save_image(image, filename, nrow=8, padding=2, square_image=True) | Saves a given Tensor into an image file. If the input tensor contains multiple images,
a grid of images will be saved.
Parameters
----------
image : `NDArray`
Input image(s) in the format of HW, CHW, or NCHW.
filename : str
Filename of the saved image(s).
nro... | 3.334473 | 3.675867 | 0.907126 |
if isinstance(img, np.ndarray):
img = nd.array(img, dtype=img.dtype, ctx=current_context())
if not isinstance(img, NDArray):
raise TypeError('expected MXNet NDArray or numpy.ndarray, '
'while received type {}'.format(str(type(img))))
assert img.ndim == 2 or img.n... | def _prepare_image(img, nrow=8, padding=2, square_image=False) | Given an image of format HW, CHW, or NCHW, returns an image of format HWC.
If the input is a batch of images, a grid of images is made by stitching them together.
If data type is float, values must be in the range [0, 1], and then they are rescaled to
range [0, 255]. If data type is `uint8`, values are uncha... | 2.167143 | 2.179743 | 0.994219 |
if isinstance(metadata, NDArray):
metadata = metadata.asnumpy()
elif isinstance(metadata, list):
metadata = np.array(metadata)
elif not isinstance(metadata, np.ndarray):
raise TypeError('expected NDArray or np.ndarray or 1D/2D list, while received '
'type... | def _make_metadata_tsv(metadata, save_path) | Given an `NDArray` or a `numpy.ndarray` or a list as metadata e.g. labels, save the
flattened array into the file metadata.tsv under the path provided by the user. The
labels can be 1D or 2D with multiple labels per data point.
Made to satisfy the requirement in the following link:
https://www.tensorflo... | 2.18149 | 2.022094 | 1.078827 |
if isinstance(images, np.ndarray):
images = nd.array(images, dtype=images.dtype, ctx=current_context())
elif not isinstance(images, (NDArray, np.ndarray)):
raise TypeError('images must be an MXNet NDArray or numpy.ndarray,'
' while received type {}'.format(str(type(i... | def _make_sprite_image(images, save_path) | Given an NDArray as a batch images, make a sprite image out of it following the rule
defined in
https://www.tensorflow.org/programmers_guide/embedding
and save it in sprite.png under the path provided by the user. | 3.436628 | 3.246198 | 1.058663 |
with open(os.path.join(file_path, 'projector_config.pbtxt'), 'a') as f:
s = 'embeddings {\n'
s += 'tensor_name: "{}"\n'.format(data_dir)
s += 'tensor_path: "{}"\n'.format(os.path.join(data_dir, 'tensors.tsv'))
if has_metadata:
s += 'metadata_path: "{}"\n'.format(os.p... | def _add_embedding_config(file_path, data_dir, has_metadata=False, label_img_shape=None) | Creates a config file used by the embedding projector.
Adapted from the TensorFlow function `visualize_embeddings()` at
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/tensorboard/plugins/projector/__init__.py | 2.403511 | 2.275091 | 1.056446 |
if isinstance(data, np.ndarray):
    data_list = data.tolist()
elif isinstance(data, NDArray):
    data_list = data.asnumpy().tolist()
else:
    raise TypeError('expected NDArray or np.ndarray, while received type {}'.format(
        str(type(data))))
with open(os.path.join(file_pat... | def _save_embedding_tsv(data, file_path) | Given a 2D `NDArray` or a `numpy.ndarray` as embedding,
save it in tensors.tsv under the path provided by the user. | 2.527216 | 2.103978 | 1.201161 |
# In the past, the first argument to summary ops was a tag, which allowed
# arbitrary characters. Now we are changing the first argument to be the node
# name. This has a number of advantages (users of summary ops now can
# take advantage of the tf name scope system) but risks breaking existing
... | def _clean_tag(name) | Cleans a tag. Removes illegal characters for instance.
Adapted from the TensorFlow function `clean_tag()` at
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/ops/summary_op_util.py
Parameters
----------
name : str
The original tag name to be processed.
Ret... | 6.506063 | 6.482557 | 1.003626 |
tag = _clean_tag(tag)
scalar = _make_numpy_array(scalar)
assert(scalar.squeeze().ndim == 0), 'scalar should be 0D'
scalar = float(scalar)
return Summary(value=[Summary.Value(tag=tag, simple_value=scalar)]) | def scalar_summary(tag, scalar) | Outputs a `Summary` protocol buffer containing a single scalar value.
The generated Summary has a Tensor.proto containing the input Tensor.
Adapted from the TensorFlow function `scalar()` at
https://github.com/tensorflow/tensorflow/blob/r1.6/tensorflow/python/summary/summary.py
Parameters
---------... | 3.452307 | 4.608978 | 0.74904 |
tag = _clean_tag(tag)
values = _make_numpy_array(values)
hist = _make_histogram(values.astype(float), bins)
return Summary(value=[Summary.Value(tag=tag, histo=hist)]) | def histogram_summary(tag, values, bins) | Outputs a `Summary` protocol buffer with a histogram.
Adding a histogram summary makes it possible to visualize the data's distribution in
TensorBoard. See detailed explanation of the TensorBoard histogram dashboard at
https://www.tensorflow.org/get_started/tensorboard_histograms
This op reports an `Inv... | 3.477409 | 4.649336 | 0.747937 |
values = values.reshape(-1)
counts, limits = np.histogram(values, bins=bins)
limits = limits[1:]
sum_sq = values.dot(values)
return HistogramProto(min=values.min(),
max=values.max(),
num=len(values),
sum=values.sum()... | def _make_histogram(values, bins) | Converts values into a histogram proto using logic from
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/lib/histogram/histogram.cc | 2.92291 | 2.467951 | 1.184347 |
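The same statistics in plain NumPy, mirroring the row above; the `HistogramProto` field names appear here only as dictionary keys:

```python
import numpy as np

values = np.random.RandomState(0).randn(1000).reshape(-1)
counts, limits = np.histogram(values, bins=30)
limits = limits[1:]            # the proto keeps only the upper edge of each bucket
sum_sq = values.dot(values)    # sum of squares lets TensorBoard derive the variance

hist = dict(min=values.min(), max=values.max(), num=len(values),
            sum=values.sum(), sum_squares=sum_sq,
            bucket_limit=limits.tolist(), bucket=counts.tolist())
assert sum(hist['bucket']) == hist['num']  # every value falls in some bucket
```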
tag = _clean_tag(tag)
image = _prepare_image(image)
image = _make_image(image)
return Summary(value=[Summary.Value(tag=tag, image=image)]) | def image_summary(tag, image) | Outputs a `Summary` protocol buffer with image(s).
Parameters
----------
tag : str
A name for the generated summary. Will also serve as a series name in TensorBoard.
image : MXNet `NDArray` or `numpy.ndarray`
Image data that is one of the following layout: (H, W), (C, H,... | 3.538652 | 4.650449 | 0.760927 |
assert isinstance(tensor, NDArray)
if Image is None:
    raise ImportError('need to install PIL for visualizing images')
height, width, channel = tensor.shape
tensor = _make_numpy_array(tensor)
image = Image.fromarray(tensor)
output = io.BytesIO()
image.save(output, format='PNG')
... | def _make_image(tensor) | Converts an NDArray type image to Image protobuf | 2.740776 | 2.393902 | 1.144899 |
audio = audio.squeeze()
if audio.ndim != 1:
    raise ValueError('input audio must be squeezable to 1D, input audio squeezed '
                     'shape is {}'.format(audio.shape))
audio = _make_numpy_array(audio)
tensor_list = [int(32767.0 * x) for x in audio]
fio = io.BytesIO()
... | def audio_summary(tag, audio, sample_rate=44100) | Outputs a `Summary` protocol buffer with audio data.
Parameters
----------
tag : str
A name for the generated summary. Will also serve as a series name in TensorBoard.
audio : MXNet `NDArray` or `numpy.ndarray`
Audio data that can be squeezed into 1D array. The values ar... | 2.516223 | 2.492583 | 1.009484 |
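The row above scales floating-point samples in [-1, 1] by 32767 to signed 16-bit PCM and wraps them in a WAV container; a stdlib-only sketch of that conversion (the function name is illustrative):

```python
import io
import math
import struct
import wave

def float_to_wav_bytes(samples, sample_rate=44100):
    pcm = [int(32767.0 * x) for x in samples]  # [-1.0, 1.0] floats -> int16
    fio = io.BytesIO()
    with wave.open(fio, 'wb') as wav:
        wav.setnchannels(1)            # mono
        wav.setsampwidth(2)            # 16-bit samples
        wav.setframerate(sample_rate)
        wav.writeframes(struct.pack('<%dh' % len(pcm), *pcm))
    return fio.getvalue()

tone = [math.sin(2 * math.pi * 440 * t / 44100) for t in range(441)]
assert float_to_wav_bytes(tone)[:4] == b'RIFF'
```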
plugin_data = [SummaryMetadata.PluginData(plugin_name='text')]
smd = SummaryMetadata(plugin_data=plugin_data)
tensor = TensorProto(dtype='DT_STRING',
string_val=[text.encode(encoding='utf_8')],
                    tensor_shape=TensorShapeProto(dim=[TensorShapeProto.Dim(siz... | def text_summary(tag, text) | Outputs a `Summary` protocol buffer with text data.
Parameters
----------
tag : str
A name for the generated summary. Will also serve as a series name in TensorBoard.
text : str
Text data.
Returns
-------
A `Summary` protobuf of the text data. | 3.350461 | 3.182843 | 1.052663 |
# num_thresholds > 127 results in failure of creating protobuf,
# probably a bug of protobuf
if num_thresholds > 127:
logging.warning('num_thresholds>127 would result in failure of creating pr_curve protobuf,'
' clipping it at 127')
num_thresholds = 127
label... | def pr_curve_summary(tag, labels, predictions, num_thresholds, weights=None) | Outputs a precision-recall curve `Summary` protocol buffer.
Parameters
----------
tag : str
A tag attached to the summary. Used by TensorBoard for organization.
labels : MXNet `NDArray` or `numpy.ndarray`.
The ground truth values. A tensor of 0/1 values with arbitrary sh... | 2.974266 | 3.127773 | 0.950921 |
if weights is None:
weights = 1.0
# Compute bins of true positives and false positives.
bucket_indices = np.int32(np.floor(predictions * (num_thresholds - 1)))
float_labels = labels.astype(np.float)
histogram_range = (0, num_thresholds - 1)
tp_buckets, _ = np.histogram(
bu... | def _compute_curve(labels, predictions, num_thresholds, weights=None) | This function is another implementation of functions in
https://github.com/tensorflow/tensorboard/blob/master/tensorboard/plugins/pr_curve/summary.py | 2.256516 | 2.253552 | 1.001315 |
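A sketch of the bucket-and-cumulative-sum idea the row above starts from: bin each prediction by threshold index, histogram the positives and negatives separately, then reverse cumulative sums yield TP/FP at every threshold. This is an illustrative standalone reconstruction, not the package's exact code:

```python
import numpy as np

def compute_curve(labels, predictions, num_thresholds, weights=None):
    weights = 1.0 if weights is None else weights
    bucket_indices = np.int32(np.floor(predictions * (num_thresholds - 1)))
    float_labels = labels.astype(float)
    hist_range = (0, num_thresholds - 1)
    tp_buckets, _ = np.histogram(bucket_indices, bins=num_thresholds,
                                 range=hist_range, weights=float_labels * weights)
    fp_buckets, _ = np.histogram(bucket_indices, bins=num_thresholds,
                                 range=hist_range, weights=(1.0 - float_labels) * weights)
    # tp[i] = weighted count of positives scored at or above threshold i.
    tp = np.cumsum(tp_buckets[::-1])[::-1]
    fp = np.cumsum(fp_buckets[::-1])[::-1]
    tn, fn = fp[0] - fp, tp[0] - tp
    precision = tp / np.maximum(1e-7, tp + fp)
    recall = tp / np.maximum(1e-7, tp + fn)
    return np.stack((tp, fp, tn, fn, precision, recall))

curve = compute_curve(np.array([0, 0, 1, 1]), np.array([0.1, 0.4, 0.35, 0.8]), 5)
assert curve.shape == (6, 5)
```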
if not isinstance(sym, Symbol):
    raise TypeError('sym must be an `mxnet.symbol.Symbol`,'
                    ' received type {}'.format(str(type(sym))))
conf = json.loads(sym.tojson())
nodes = conf['nodes']
data2op = {} # key: data id, value: list of ops to whom data is an input
for... | def _get_nodes_from_symbol(sym) | Given a symbol and shapes, return a list of `NodeDef`s for visualizing the
graph in TensorBoard. | 3.214533 | 3.12561 | 1.02845 |
mask = {}
indices = range(data.shape[0])
lags = lags or [0]
criteria = criteria or {'framewise_displacement': ('>', 0.5),
'std_dvars': ('>', 1.5)}
for metric, (criterion, threshold) in criteria.items():
if criterion == '<':
mask[metric] = set(np.w... | def spike_regressors(data, criteria=None, header_prefix='motion_outlier',
lags=None, minimum_contiguous=None, concatenate=True,
output='spikes') | Add spike regressors to a confound/nuisance matrix.
Parameters
----------
data: pandas DataFrame object
A tabulation of observations from which spike regressors should be
estimated.
criteria: dict{str: ('>' or '<', float)}
Criteria for generating a spike regressor. If, for a giv... | 2.74229 | 2.676166 | 1.024709 |
variables_deriv = OrderedDict()
data_deriv = OrderedDict()
if 0 in order:
data_deriv[0] = data[variables]
variables_deriv[0] = variables
order = set(order) - set([0])
for o in order:
variables_deriv[o] = ['{}_derivative{}'.format(v, o)
f... | def temporal_derivatives(order, variables, data) | Compute temporal derivative terms by the method of backwards differences.
Parameters
----------
order: range or list(int)
A list of temporal derivative terms to include. For instance, [1, 2]
indicates that the first and second derivative terms should be added.
To retain the original... | 2.682291 | 2.585475 | 1.037446 |
variables_exp = OrderedDict()
data_exp = OrderedDict()
if 1 in order:
data_exp[1] = data[variables]
variables_exp[1] = variables
order = set(order) - set([1])
for o in order:
variables_exp[o] = ['{}_power{}'.format(v, o) for v in variables]
data_exp[o] = data... | def exponential_terms(order, variables, data) | Compute exponential expansions.
Parameters
----------
order: range or list(int)
A list of exponential terms to include. For instance, [1, 2]
indicates that the first and second exponential terms should be added.
To retain the original terms, 1 *must* be included in the list.
var... | 2.787845 | 2.694917 | 1.034482 |
order = order.split('-')
order = [int(o) for o in order]
if len(order) > 1:
    order = range(order[0], (order[-1] + 1))
return order | def _order_as_range(order) | Convert a hyphenated string representing order for derivative or
exponential terms into a range object that can be passed as input to the
appropriate expansion function. | 2.567503 | 2.677654 | 0.958863 |
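A quick check of the behavior described above: `'2-4'` expands to the inclusive range 2..4, while a bare `'3'` stays a one-element list:

```python
def _order_as_range(order):
    order = [int(o) for o in order.split('-')]
    if len(order) > 1:
        order = range(order[0], order[-1] + 1)
    return order

assert list(_order_as_range('2-4')) == [2, 3, 4]
assert _order_as_range('3') == [3]
```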
if re.search(r'\^\^[0-9]+$', expr):
order = re.compile(r'\^\^([0-9]+)$').findall(expr)
order = range(1, int(*order) + 1)
variables, data = exponential_terms(order, variables, data)
elif re.search(r'\^[0-9]+[\-]?[0-9]*$', expr):
order = re.compile(r'\^([0-9]+[\-]?[0-9]*)').fi... | def _check_and_expand_exponential(expr, variables, data) | Check if the current operation specifies exponential expansion. ^^6
specifies all powers up to the 6th, ^5-6 the 5th and 6th powers, ^6 the
6th only. | 2.887493 | 2.860055 | 1.009593 |
if re.search(r'^dd[0-9]+', expr):
order = re.compile(r'^dd([0-9]+)').findall(expr)
order = range(0, int(*order) + 1)
(variables, data) = temporal_derivatives(order, variables, data)
elif re.search(r'^d[0-9]+[\-]?[0-9]*', expr):
order = re.compile(r'^d([0-9]+[\-]?[0-9]*)').fi... | def _check_and_expand_derivative(expr, variables, data) | Check if the current operation specifies a temporal derivative. dd6x
specifies all derivatives up to the 6th, d5-6x the 5th and 6th, d6x the
6th only. | 3.034133 | 2.704741 | 1.121783 |
grouping_depth = 0
for i, char in enumerate(expression):
if char == '(':
if grouping_depth == 0:
formula_delimiter = i + 1
grouping_depth += 1
elif char == ')':
grouping_depth -= 1
if grouping_depth == 0:
expr =... | def _check_and_expand_subformula(expression, parent_data, variables, data) | Check if the current operation contains a suboperation, and parse it
where appropriate. | 2.815257 | 2.767668 | 1.017195 |
variables = None
data = None
variables, data = _check_and_expand_subformula(expression,
parent_data,
variables,
data)
variables, data = _check_and... | def parse_expression(expression, parent_data) | Parse an expression in a model formula.
Parameters
----------
expression: str
Formula expression: either a single variable or a variable group
paired with an operation (exponentiation or differentiation).
parent_data: pandas DataFrame
The source data for the model expansion.
... | 3.104138 | 2.849263 | 1.089453 |
wm = 'white_matter'
gsr = 'global_signal'
rps = 'trans_x + trans_y + trans_z + rot_x + rot_y + rot_z'
fd = 'framewise_displacement'
acc = _get_matches_from_data('a_comp_cor_[0-9]+', variables)
tcc = _get_matches_from_data('t_comp_cor_[0-9]+', variables)
dv = _get_matches_from_data('^std... | def _expand_shorthand(model_formula, variables) | Expand shorthand terms in the model formula. | 2.400448 | 2.388506 | 1.005 |
matches = ['_power[0-9]+', '_derivative[0-9]+']
var = OrderedDict((c, deque()) for c in parent_data.columns)
for c in data.columns:
col = c
for m in matches:
col = re.sub(m, '', col)
if col == c:
var[col].appendleft(c)
else:
var[col].a... | def _unscramble_regressor_columns(parent_data, data) | Reorder the columns of a confound matrix such that the columns are in
the same order as the input data with any expansion columns inserted
immediately after the originals. | 3.94802 | 3.860067 | 1.022785 |
variables = {}
data = {}
expr_delimiter = 0
grouping_depth = 0
model_formula = _expand_shorthand(model_formula, parent_data.columns)
for i, char in enumerate(model_formula):
if char == '(':
grouping_depth += 1
elif char == ')':
grouping_depth -= 1
... | def parse_formula(model_formula, parent_data, unscramble=False) | Recursively parse a model formula by breaking it into additive atoms
and tracking grouping symbol depth.
Parameters
----------
model_formula: str
Expression for the model formula, e.g.
'(a + b)^^2 + dd1(c + (d + e)^3) + f'
Note that any expressions to be expanded *must* be in pa... | 2.510636 | 2.557268 | 0.981765 |
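The grouping-depth bookkeeping in the row above amounts to: scan the formula character by character, track parenthesis depth, and split into additive atoms only at `+` signs seen at depth zero. A minimal standalone tokenizer built on that idea (illustrative, not the package's full parser):

```python
def split_additive_atoms(model_formula):
    atoms, start, depth = [], 0, 0
    for i, char in enumerate(model_formula):
        if char == '(':
            depth += 1
        elif char == ')':
            depth -= 1
        elif char == '+' and depth == 0:
            # A '+' outside all parentheses ends the current atom.
            atoms.append(model_formula[start:i].strip())
            start = i + 1
    atoms.append(model_formula[start:].strip())
    return atoms

formula = '(a + b)^^2 + dd1(c + (d + e)^3) + f'
assert split_additive_atoms(formula) == ['(a + b)^^2', 'dd1(c + (d + e)^3)', 'f']
```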
import nibabel as nb
import os
# Load the input image
in_nii = nb.load(in_file)
# Load the mask image
mask_nii = nb.load(mask_file)
# Set all non-mask voxels in the input file to zero.
data = in_nii.get_data()
data[mask_nii.get_data() == 0] = 0
# Save the new masked image.
... | def mask(in_file, mask_file, new_name) | Apply a binary mask to an image.
Parameters
----------
in_file : str
Path to a NIfTI file to mask
mask_file : str
Path to a binary mask
new_name : str
Path/filename for the masked output image.
Returns
-------
str
Absolute path of the masked output image... | 1.915724 | 1.988675 | 0.963317 |
import os
import numpy as np
import nibabel as nb
from nipype.utils.filemanip import fname_presuffix
if out_path is None:
out_path = fname_presuffix(in_file, suffix='_cfm', newpath=os.getcwd())
else:
out_path = os.path.abspath(out_path)
if not global_mask and not lesio... | def create_cfm(in_file, lesion_mask=None, global_mask=True, out_path=None) | Create a mask to constrain registration.
Parameters
----------
in_file : str
Path to an existing image (usually a mask).
If global_mask = True, this is used as a size/dimension reference.
out_path : str
Path/filename for the new cost function mask.
lesion_mask : str, optiona... | 3.012302 | 2.869976 | 1.049591 |
# If user-defined settings exist...
if isdefined(self.inputs.settings):
# Note this in the log and return those settings.
NIWORKFLOWS_LOG.info('User-defined settings, overriding defaults')
return self.inputs.settings
# Define a prefix for output file... | def _get_settings(self) | Return any settings defined by the user, as well as any pre-defined
settings files that exist for the image modalities to be registered. | 6.934263 | 6.146121 | 1.128234 |
s = "\n".join(source)
if s.find("$") == -1:
return
# This searches for "$blah$" inside a pair of curly braces --
# don't change these, since they're probably coming from a nested
# math environment. So for each match, we replace it with a temporary
string, and later on we substi... | def dollars_to_math(source) | Replace dollar signs with backticks.
More precisely, do a regular expression search. Replace a plain
dollar sign ($) by a backtick (`). Replace an escaped dollar sign
(\$) by a dollar sign ($). Don't change a dollar sign preceded or
followed by a backtick (`$ or $`), because of strings like... | 5.486075 | 5.605058 | 0.978772 |
self._fixed_image = self.inputs.after
self._moving_image = self.inputs.before
self._contour = self.inputs.wm_seg if isdefined(self.inputs.wm_seg) else None
NIWORKFLOWS_LOG.info(
'Report - setting before (%s) and after (%s) images',
        self._fixed_image, self... | def _post_run_hook(self, runtime) | there is no inner interface to run | 6.514266 | 6.304049 | 1.033346 |
data_dir = data_dir or ''
default_dirs = [Path(d).expanduser().resolve()
for d in os.getenv('CRN_SHARED_DATA', '').split(os.pathsep)
if d.strip()]
default_dirs += [Path(d).expanduser().resolve()
for d in os.getenv('CRN_DATA', '').split(os.pa... | def _get_data_path(data_dir=None) | Get data storage directory
data_dir: str
Path of the data directory. Used to force data storage in
a specified location.
:returns:
a list of paths where the dataset could be stored,
ordered by priority | 3.111761 | 3.351086 | 0.928583 |
dataset_folder = dataset_name if not dataset_prefix \
else '%s%s' % (dataset_prefix, dataset_name)
default_paths = default_paths or ''
paths = [p / dataset_folder for p in _get_data_path(data_dir)]
all_paths = [Path(p) / dataset_folder
for p in default_paths.split(os.paths... | def _get_dataset(dataset_name, dataset_prefix=None,
data_dir=None, default_paths=None,
verbose=1) | Create if necessary and returns data directory of given dataset.
data_dir: str
Path of the data directory. Used to force data storage in
a specified location.
default_paths: list(str)
Default system paths in which the dataset may already have been installed
by a third party software. Th... | 2.975024 | 3.267021 | 0.910623 |
path = os.readlink(link)
if op.isabs(path):
    return path
return op.join(op.dirname(link), path) | def readlinkabs(link) | Return an absolute path for the destination
of a symlink | 2.582408 | 2.809552 | 0.919153 |
with Path(path).open('rb') as fhandle:
    md5sum = hashlib.md5()
    while True:
        data = fhandle.read(8192)
        if not data:
            break
        md5sum.update(data)
return md5sum.hexdigest() | def _md5_sum_file(path) | Calculates the MD5 sum of a file. | 1.808751 | 1.785344 | 1.013111 |
try:
if total_size is None:
total_size = response.info().get('Content-Length').strip()
total_size = int(total_size) + initial_size
except Exception as exc:
if verbose > 2:
NIWORKFLOWS_LOG.warn('Total size of chunk could not be determined')
if verb... | def _chunk_read_(response, local_file, chunk_size=8192, report_hook=None,
initial_size=0, total_size=None, verbose=1) | Download a file chunk by chunk and show advancement
:param urllib.response.addinfourl response: response to the download
request in order to get file size
:param str local_file: hard disk file where data should be written
:param int chunk_size: size of downloaded chunks. Default: 8192
:param bo... | 3.005439 | 3.110784 | 0.966136 |
if not total_size:
sys.stderr.write("\rDownloaded {0:d} of ? bytes.".format(bytes_so_far))
else:
# Estimate remaining download time
total_percent = float(bytes_so_far) / total_size
current_download_size = bytes_so_far - initial_size
bytes_remaining = total_size - ... | def _chunk_report_(bytes_so_far, total_size, initial_size, t_0) | Show downloading percentage.
:param int bytes_so_far: number of downloaded bytes
:param int total_size: total size of the file (may be 0/None, depending
on download method).
:param int t_0: the time in seconds (as returned by time.time()) at which
the download was resumed / started.
:pa... | 3.329981 | 3.32917 | 1.000244 |
# Read aseg data
bmask = aseg.copy()
bmask[bmask > 0] = 1
bmask = bmask.astype(np.uint8)
# Morphological operations
selem = sim.ball(ball_size)
newmask = sim.binary_closing(bmask, selem)
newmask = binary_fill_holes(newmask.astype(np.uint8), selem).astype(np.uint8)
return newma... | def refine_aseg(aseg, ball_size=4) | First step to reconcile ANTs' and FreeSurfer's brain masks.
Here, the ``aseg.mgz`` mask from FreeSurfer is refined in two
steps, using binary morphological operations:
1. With a binary closing operation the sulci are included
into the mask. This results in a smoother brain mask
that do... | 2.815914 | 3.11144 | 0.90502 |
selem = sim.ball(bw)
if ants_segs is None:
    ants_segs = np.zeros_like(aseg, dtype=np.uint8)
aseg[aseg == 42] = 3 # Collapse both hemispheres
gm = anat.copy()
gm[aseg != 3] = 0
refined = refine_aseg(aseg)
newrefmask = sim.binary_dilation(refined, selem) - refined
indices =... | def grow_mask(anat, aseg, ants_segs=None, ww=7, zval=2.0, bw=4) | Grow mask including pixels that have a high likelihood.
GM tissue parameters are sampled in image patches of ``ww`` size.
This is inspired by mindboggle's solution to the problem:
https://github.com/nipy/mindboggle/blob/master/mindboggle/guts/segment.py#L1660 | 3.529007 | 3.544843 | 0.995533 |
import nibabel as nb
import numpy as np
import os
fn = os.path.basename(in_file)
if not target_subject.startswith('fs'):
return in_file
cortex = nb.freesurfer.read_label(os.path.join(
subjects_dir, target_subject, 'label', '{}.cortex.label'.format(fn[:2])))
func = nb.l... | def medial_wall_to_nan(in_file, subjects_dir, target_subject, newpath=None) | Convert values on medial wall to NaNs | 2.432442 | 2.371516 | 1.025691 |
if self._is_at_section():
return
# If several signatures present, take the last one
while True:
summary = self._doc.read_to_next_empty_line()
summary_str = " ".join([s.strip() for s in summary]).strip()
if re.compile('^([\w., ]+=)?\s*[\w\... | def _parse_summary(self) | Grab signature (if given) and summary | 5.290898 | 4.821434 | 1.09737 |
''' generates a report showing nine slices, three per axis, of an
arbitrary volume of `in_files`, with the resulting segmentation
overlaid '''
self._anat_file = self.inputs.in_files[0]
outputs = self.aggregate_outputs(runtime=runtime)
self._mask_file = outputs.tissue_clas... | def _post_run_hook(self, runtime) | generates a report showing nine slices, three per axis, of an
arbitrary volume of `in_files`, with the resulting segmentation
overlaid | 10.092503 | 5.730394 | 1.761223 |
''' generates a report showing nine slices, three per axis, of an
arbitrary volume of `in_files`, with the resulting segmentation
overlaid '''
outputs = self.aggregate_outputs(runtime=runtime)
self._anat_file = os.path.join(outputs.subjects_dir,
... | def _post_run_hook(self, runtime) | generates a report showing nine slices, three per axis, of an
arbitrary volume of `in_files`, with the resulting segmentation
overlaid | 7.364975 | 3.642952 | 2.021705 |
''' generates a report showing nine slices, three per axis, of an
arbitrary volume of `in_files`, with the resulting segmentation
overlaid '''
outputs = self.aggregate_outputs(runtime=runtime)
self._melodic_dir = outputs.out_dir
NIWORKFLOWS_LOG.info('Generating report fo... | def _post_run_hook(self, runtime) | generates a report showing nine slices, three per axis, of an
arbitrary volume of `in_files`, with the resulting segmentation
overlaid | 17.795265 | 5.13713 | 3.464048 |
try:
base = app.config.github_project_url
if not base:
raise AttributeError
if not base.endswith('/'):
base += '/'
except AttributeError as err:
raise ValueError('github_project_url configuration value is not set (%s)' % str(err))
ref = base + t... | def make_link_node(rawtext, app, type, slug, options) | Create a link to a github resource.
:param rawtext: Text being replaced with link node.
:param app: Sphinx application context
:param type: Link type (issues, changeset, etc.)
:param slug: ID of the thing to link to
:param options: Options dictionary passed to role func. | 4.080116 | 3.781775 | 1.078889 |
try:
issue_num = int(text)
if issue_num <= 0:
raise ValueError
except ValueError:
msg = inliner.reporter.error(
'GitHub issue number must be a number greater than or equal to 1; '
'"%s" is invalid.' % text, line=lineno)
prb = inliner.prob... | def ghissue_role(name, rawtext, text, lineno, inliner, options={}, content=[]) | Link to a GitHub issue.
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
:param name: The role name used in the document.
:param rawtext: The entire markup snippet, with role.
:param text: The text marked wi... | 2.26643 | 2.255896 | 1.00467 |
app = inliner.document.settings.env.app
#app.info('user link %r' % text)
ref = 'https://www.github.com/' + text
node = nodes.reference(rawtext, text, refuri=ref, **options)
return [node], [] | def ghuser_role(name, rawtext, text, lineno, inliner, options={}, content=[]) | Link to a GitHub user.
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
:param name: The role name used in the document.
:param rawtext: The entire markup snippet, with role.
:param text: The text marked wit... | 3.34164 | 3.798531 | 0.879719 |
app = inliner.document.settings.env.app
#app.info('user link %r' % text)
try:
base = app.config.github_project_url
if not base:
raise AttributeError
if not base.endswith('/'):
base += '/'
except AttributeError as err:
raise ValueError('github_... | def ghcommit_role(name, rawtext, text, lineno, inliner, options={}, content=[]) | Link to a GitHub commit.
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
:param name: The role name used in the document.
:param rawtext: The entire markup snippet, with role.
:param text: The text marked w... | 3.804386 | 4.149671 | 0.916792 |
app.info('Initializing GitHub plugin')
app.add_role('ghissue', ghissue_role)
app.add_role('ghpull', ghissue_role)
app.add_role('ghuser', ghuser_role)
app.add_role('ghcommit', ghcommit_role)
app.add_config_value('github_project_url', None, 'env')
return | def setup(app) | Install the plugin.
:param app: Sphinx application context. | 2.650626 | 2.782974 | 0.952444 |
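Typical wiring for the roles above in a project's `conf.py`, assuming the module is importable as `github` (the URL is a placeholder):

```python
# conf.py (Sphinx build configuration)
extensions = ['github']  # the module defining setup(app) above

# Base URL that the ghissue/ghpull/ghcommit roles resolve against.
github_project_url = 'https://github.com/example-org/example-project'
```

With that in place, a role such as `:ghissue:` in reST renders as a link to the given issue number under the configured project URL.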
''' Set package_name
>>> docwriter = ApiDocWriter('sphinx')
>>> import sphinx
>>> docwriter.root_path == sphinx.__path__[0]
True
>>> docwriter.package_name = 'docutils'
>>> import docutils
>>> docwriter.root_path == docutils.__path__[0]
True
... | def set_package_name(self, package_name) | Set package_name
>>> docwriter = ApiDocWriter('sphinx')
>>> import sphinx
>>> docwriter.root_path == sphinx.__path__[0]
True
>>> docwriter.package_name = 'docutils'
>>> import docutils
>>> docwriter.root_path == docutils.__path__[0]
True | 5.231213 | 3.012271 | 1.736634 |
''' Import namespace package '''
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
    mod = getattr(mod, comp)
return mod | def _import(self, name) | Import namespace package | 2.934889 | 3.007656 | 0.975806 |
''' Return module sequence discovered from ``self.package_name``
Parameters
----------
None
Returns
-------
mods : sequence
Sequence of module names within ``self.package_name``
Examples
--------
>>> dw = ApiDocWriter('sphin... | def discover_modules(self) | Return module sequence discovered from ``self.package_name``
Parameters
----------
None
Returns
-------
mods : sequence
Sequence of module names within ``self.package_name``
Examples
--------
>>> dw = ApiDocWriter('sphinx')
... | 5.03092 | 3.18964 | 1.577269 |
if isinstance(bids_dir, BIDSLayout):
layout = bids_dir
else:
layout = BIDSLayout(str(bids_dir), validate=bids_validate)
all_participants = set(layout.get_subjects())
# Error: bids_dir does not contain subjects
if not all_participants:
raise BIDSError(
'Cou... | def collect_participants(bids_dir, participant_label=None, strict=False,
bids_validate=True) | List the participants under the BIDS root and checks that participants
designated with the participant_label argument exist in that folder.
Returns the list of participants to be finally processed.
Requesting all subjects in a BIDS directory root:
>>> collect_participants(str(datadir / 'ds114'), bids_va... | 2.872303 | 2.726972 | 1.053294 |
return _init_layout(in_file, bids_dir, validate).get_metadata(
str(in_file)) | def get_metadata_for_nifti(in_file, bids_dir=None, validate=True) | Fetch metadata for a given nifti file
>>> metadata = get_metadata_for_nifti(
... datadir / 'ds054' / 'sub-100185' / 'fmap' / 'sub-100185_phasediff.nii.gz',
... validate=False)
>>> metadata['Manufacturer']
'SIEMENS'
>>> | 8.314244 | 12.616259 | 0.65901 |