after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def __init__(
self,
info,
data,
events,
event_id=None,
tmin=-0.2,
tmax=0.5,
baseline=(None, 0),
raw=None,
picks=None,
reject=None,
flat=None,
decim=1,
reject_tmin=None,
reject_tmax=None,
detrend=None,
proj=True,
on_missing="raise",
preload_at_end=False,
selection=None,
drop_log=None,
filename=None,
metadata=None,
event_repeated="error",
verbose=None,
): # noqa: D102
self.verbose = verbose
if events is not None: # RtEpochs can have events=None
events_type = type(events)
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore") # deprecation for object array
events = np.asarray(events)
if not np.issubdtype(events.dtype, np.integer):
raise TypeError(
f"events should be a NumPy array of integers, got {events_type}"
)
if events.ndim != 2 or events.shape[1] != 3:
raise ValueError(f"events must be of shape (N, 3), got {events.shape}")
events_max = events.max()
if events_max > INT32_MAX:
raise ValueError(
f"events array values must not exceed {INT32_MAX}, got {events_max}"
)
event_id = _check_event_id(event_id, events)
self.event_id = event_id
del event_id
if events is not None: # RtEpochs can have events=None
for key, val in self.event_id.items():
if val not in events[:, 2]:
msg = "No matching events found for %s (event id %i)" % (key, val)
_on_missing(on_missing, msg)
# ensure metadata matches original events size
self.selection = np.arange(len(events))
self.events = events
self.metadata = metadata
del events
values = list(self.event_id.values())
selected = np.where(np.in1d(self.events[:, 2], values))[0]
if selection is None:
selection = selected
else:
selection = np.array(selection, int)
if selection.shape != (len(selected),):
raise ValueError(
"selection must be shape %s got shape %s"
% (selected.shape, selection.shape)
)
self.selection = selection
if drop_log is None:
self.drop_log = tuple(
() if k in self.selection else ("IGNORED",)
for k in range(max(len(self.events), max(self.selection) + 1))
)
else:
self.drop_log = drop_log
self.events = self.events[selected]
self.events, self.event_id, self.selection, self.drop_log = (
_handle_event_repeated(
self.events,
self.event_id,
event_repeated,
self.selection,
self.drop_log,
)
)
# then subselect
sub = np.where(np.in1d(selection, self.selection))[0]
if isinstance(metadata, list):
metadata = [metadata[s] for s in sub]
elif metadata is not None:
metadata = metadata.iloc[sub]
self.metadata = metadata
del metadata
n_events = len(self.events)
if n_events > 1:
if np.diff(self.events.astype(np.int64)[:, 0]).min() <= 0:
warn(
"The events passed to the Epochs constructor are not "
"chronologically ordered.",
RuntimeWarning,
)
if n_events > 0:
logger.info("%d matching events found" % n_events)
else:
raise ValueError("No desired events found.")
else:
self.drop_log = list()
self.selection = np.array([], int)
self.metadata = metadata
# do not set self.events here, let subclass do it
if (detrend not in [None, 0, 1]) or isinstance(detrend, bool):
raise ValueError("detrend must be None, 0, or 1")
self.detrend = detrend
self._raw = raw
info._check_consistency()
self.picks = _picks_to_idx(info, picks, none="all", exclude=(), allow_empty=False)
self.info = pick_info(info, self.picks)
del info
self._current = 0
if data is None:
self.preload = False
self._data = None
self._do_baseline = True
else:
assert decim == 1
if (
data.ndim != 3
or data.shape[2] != round((tmax - tmin) * self.info["sfreq"]) + 1
):
raise RuntimeError("bad data shape")
if data.shape[0] != len(self.events):
raise ValueError("The number of epochs and the number of events must match")
self.preload = True
self._data = data
self._do_baseline = False
self._offset = None
if tmin > tmax:
raise ValueError("tmin has to be less than or equal to tmax")
# Handle times
sfreq = float(self.info["sfreq"])
start_idx = int(round(tmin * sfreq))
self._raw_times = np.arange(start_idx, int(round(tmax * sfreq)) + 1) / sfreq
self._set_times(self._raw_times)
# check reject_tmin and reject_tmax
if reject_tmin is not None:
if np.isclose(reject_tmin, tmin):
# adjust for potential small deviations due to sampling freq
reject_tmin = self.tmin
elif reject_tmin < tmin:
raise ValueError(
f"reject_tmin needs to be None or >= tmin (got {reject_tmin})"
)
if reject_tmax is not None:
if np.isclose(reject_tmax, tmax):
# adjust for potential small deviations due to sampling freq
reject_tmax = self.tmax
elif reject_tmax > tmax:
raise ValueError(
f"reject_tmax needs to be None or <= tmax (got {reject_tmax})"
)
if (reject_tmin is not None) and (reject_tmax is not None):
if reject_tmin >= reject_tmax:
raise ValueError(
f"reject_tmin ({reject_tmin}) needs to be "
f" < reject_tmax ({reject_tmax})"
)
self.reject_tmin = reject_tmin
self.reject_tmax = reject_tmax
# decimation
self._decim = 1
self.decimate(decim)
# baseline correction: replace `None` tuple elements with actual times
self.baseline = _check_baseline(
baseline, times=self.times, sfreq=self.info["sfreq"]
)
if self.baseline is not None and self.baseline != baseline:
logger.info(
f"Setting baseline interval to [{self.baseline[0]}, {self.baseline[1]}] sec"
)
logger.info(_log_rescale(self.baseline))
# setup epoch rejection
self.reject = None
self.flat = None
self._reject_setup(reject, flat)
# do the rest
valid_proj = [True, "delayed", False]
if proj not in valid_proj:
raise ValueError('"proj" must be one of %s, not %s' % (valid_proj, proj))
if proj == "delayed":
self._do_delayed_proj = True
logger.info("Entering delayed SSP mode.")
else:
self._do_delayed_proj = False
activate = False if self._do_delayed_proj else proj
self._projector, self.info = setup_proj(self.info, False, activate=activate)
if preload_at_end:
assert self._data is None
assert self.preload is False
self.load_data() # this will do the projection
elif proj is True and self._projector is not None and data is not None:
# let's make sure we project if data was provided and proj
# requested
# we could do this with np.einsum, but iteration should be
# more memory safe in most instances
for ii, epoch in enumerate(self._data):
self._data[ii] = np.dot(self._projector, epoch)
self._filename = str(filename) if filename is not None else filename
self._check_consistency()
|
def __init__(
self,
info,
data,
events,
event_id=None,
tmin=-0.2,
tmax=0.5,
baseline=(None, 0),
raw=None,
picks=None,
reject=None,
flat=None,
decim=1,
reject_tmin=None,
reject_tmax=None,
detrend=None,
proj=True,
on_missing="raise",
preload_at_end=False,
selection=None,
drop_log=None,
filename=None,
metadata=None,
event_repeated="error",
verbose=None,
): # noqa: D102
self.verbose = verbose
if events is not None: # RtEpochs can have events=None
events_type = type(events)
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore") # deprecation for object array
events = np.asarray(events)
if not np.issubdtype(events.dtype, np.integer):
raise TypeError(
f"events should be a NumPy array of integers, got {events_type}"
)
if events.ndim != 2 or events.shape[1] != 3:
raise ValueError(f"events must be of shape (N, 3), got {events.shape}")
events_max = events.max()
if events_max > INT32_MAX:
raise ValueError(
f"events array values must not exceed {INT32_MAX}, got {events_max}"
)
event_id = _check_event_id(event_id, events)
self.event_id = event_id
del event_id
if events is not None: # RtEpochs can have events=None
for key, val in self.event_id.items():
if val not in events[:, 2]:
msg = "No matching events found for %s (event id %i)" % (key, val)
_on_missing(on_missing, msg)
# ensure metadata matches original events size
self.selection = np.arange(len(events))
self.events = events
self.metadata = metadata
del events
values = list(self.event_id.values())
selected = np.where(np.in1d(self.events[:, 2], values))[0]
if selection is None:
selection = selected
else:
selection = np.array(selection, int)
if selection.shape != (len(selected),):
raise ValueError(
"selection must be shape %s got shape %s"
% (selected.shape, selection.shape)
)
self.selection = selection
if drop_log is None:
self.drop_log = tuple(
() if k in self.selection else ("IGNORED",)
for k in range(max(len(self.events), max(self.selection) + 1))
)
else:
self.drop_log = drop_log
self.events = self.events[selected]
self.events, self.event_id, self.selection, self.drop_log = (
_handle_event_repeated(
self.events,
self.event_id,
event_repeated,
self.selection,
self.drop_log,
)
)
# then subselect
sub = np.where(np.in1d(selection, self.selection))[0]
if isinstance(metadata, list):
metadata = [metadata[s] for s in sub]
elif metadata is not None:
metadata = metadata.iloc[sub]
self.metadata = metadata
del metadata
n_events = len(self.events)
if n_events > 1:
if np.diff(self.events.astype(np.int64)[:, 0]).min() <= 0:
warn(
"The events passed to the Epochs constructor are not "
"chronologically ordered.",
RuntimeWarning,
)
if n_events > 0:
logger.info("%d matching events found" % n_events)
else:
raise ValueError("No desired events found.")
else:
self.drop_log = list()
self.selection = np.array([], int)
self.metadata = metadata
# do not set self.events here, let subclass do it
# check reject_tmin and reject_tmax
if (reject_tmin is not None) and (reject_tmin < tmin):
raise ValueError("reject_tmin needs to be None or >= tmin")
if (reject_tmax is not None) and (reject_tmax > tmax):
raise ValueError("reject_tmax needs to be None or <= tmax")
if (reject_tmin is not None) and (reject_tmax is not None):
if reject_tmin >= reject_tmax:
raise ValueError("reject_tmin needs to be < reject_tmax")
if (detrend not in [None, 0, 1]) or isinstance(detrend, bool):
raise ValueError("detrend must be None, 0, or 1")
self.reject_tmin = reject_tmin
self.reject_tmax = reject_tmax
self.detrend = detrend
self._raw = raw
info._check_consistency()
self.picks = _picks_to_idx(info, picks, none="all", exclude=(), allow_empty=False)
self.info = pick_info(info, self.picks)
del info
self._current = 0
if data is None:
self.preload = False
self._data = None
self._do_baseline = True
else:
assert decim == 1
if (
data.ndim != 3
or data.shape[2] != round((tmax - tmin) * self.info["sfreq"]) + 1
):
raise RuntimeError("bad data shape")
if data.shape[0] != len(self.events):
raise ValueError("The number of epochs and the number of events must match")
self.preload = True
self._data = data
self._do_baseline = False
self._offset = None
if tmin > tmax:
raise ValueError("tmin has to be less than or equal to tmax")
# Handle times
sfreq = float(self.info["sfreq"])
start_idx = int(round(tmin * sfreq))
self._raw_times = np.arange(start_idx, int(round(tmax * sfreq)) + 1) / sfreq
self._set_times(self._raw_times)
self._decim = 1
self.decimate(decim)
# baseline correction: replace `None` tuple elements with actual times
self.baseline = _check_baseline(
baseline, times=self.times, sfreq=self.info["sfreq"]
)
if self.baseline is not None and self.baseline != baseline:
logger.info(
f"Setting baseline interval to [{self.baseline[0]}, {self.baseline[1]}] sec"
)
logger.info(_log_rescale(self.baseline))
# setup epoch rejection
self.reject = None
self.flat = None
self._reject_setup(reject, flat)
# do the rest
valid_proj = [True, "delayed", False]
if proj not in valid_proj:
raise ValueError('"proj" must be one of %s, not %s' % (valid_proj, proj))
if proj == "delayed":
self._do_delayed_proj = True
logger.info("Entering delayed SSP mode.")
else:
self._do_delayed_proj = False
activate = False if self._do_delayed_proj else proj
self._projector, self.info = setup_proj(self.info, False, activate=activate)
if preload_at_end:
assert self._data is None
assert self.preload is False
self.load_data() # this will do the projection
elif proj is True and self._projector is not None and data is not None:
# let's make sure we project if data was provided and proj
# requested
# we could do this with np.einsum, but iteration should be
# more memory safe in most instances
for ii, epoch in enumerate(self._data):
self._data[ii] = np.dot(self._projector, epoch)
self._filename = str(filename) if filename is not None else filename
self._check_consistency()
|
https://github.com/mne-tools/mne-python/issues/8816
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/tmp/test.py in
25
26 epochs.save(epochs_fname, overwrite=True)
---> 27 epochs_read = mne.read_epochs(epochs_fname)
<decorator-gen-212> in read_epochs(fname, proj, preload, verbose)
~/Development/mne-python/mne/epochs.py in read_epochs(fname, proj, preload, verbose)
2704 The epochs.
2705 """
-> 2706 return EpochsFIF(fname, proj, preload, verbose)
2707
2708
<decorator-gen-213> in __init__(self, fname, proj, preload, verbose)
~/Development/mne-python/mne/epochs.py in __init__(self, fname, proj, preload, verbose)
2819 # again, ensure we're retaining the baseline period originally loaded
2820 # from disk without trying to re-apply baseline correction
-> 2821 super(EpochsFIF, self).__init__(
2822 info, data, events, event_id, tmin, tmax, baseline=None, raw=raw,
2823 proj=proj, preload_at_end=False, on_missing='ignore',
<decorator-gen-198> in __init__(self, info, data, events, event_id, tmin, tmax, baseline, raw, picks, reject, flat, decim, reject_tmin, reject_tmax, detrend, proj, on_missing, preload_at_end, selection, drop_log, filename, metadata, event_repeated, verbose)
~/Development/mne-python/mne/epochs.py in __init__(***failed resolving arguments***)
481 # check reject_tmin and reject_tmax
482 if (reject_tmin is not None) and (reject_tmin < tmin):
--> 483 raise ValueError("reject_tmin needs to be None or >= tmin")
484 if (reject_tmax is not None) and (reject_tmax > tmax):
485 raise ValueError("reject_tmax needs to be None or <= tmax")
ValueError: reject_tmin needs to be None or >= tmin
|
ValueError
|
def crop(self, tmin=None, tmax=None, include_tmax=True, verbose=None):
"""Crop a time interval from the epochs.
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
%(include_tmax)s
%(verbose_meth)s
Returns
-------
epochs : instance of Epochs
The cropped epochs object, modified in-place.
Notes
-----
%(notes_tmax_included_by_default)s
"""
# XXX this could be made to work on non-preloaded data...
_check_preload(self, "Modifying data of epochs")
if tmin is None:
tmin = self.tmin
elif tmin < self.tmin:
warn("tmin is not in epochs time interval. tmin is set to epochs.tmin")
tmin = self.tmin
if tmax is None:
tmax = self.tmax
elif tmax > self.tmax:
warn("tmax is not in epochs time interval. tmax is set to epochs.tmax")
tmax = self.tmax
tmask = _time_mask(
self.times, tmin, tmax, sfreq=self.info["sfreq"], include_tmax=include_tmax
)
self._set_times(self.times[tmask])
self._raw_times = self._raw_times[tmask]
self._data = self._data[:, :, tmask]
# Adjust rejection period
if self.reject_tmin is not None and self.reject_tmin < self.tmin:
logger.info(
f"reject_tmin is not in epochs time interval. "
f"Setting reject_tmin to epochs.tmin ({self.tmin} sec)"
)
self.reject_tmin = self.tmin
if self.reject_tmax is not None and self.reject_tmax > self.tmax:
logger.info(
f"reject_tmax is not in epochs time interval. "
f"Setting reject_tmax to epochs.tmax ({self.tmax} sec)"
)
self.reject_tmax = self.tmax
return self
|
def crop(self, tmin=None, tmax=None, include_tmax=True, verbose=None):
"""Crop a time interval from the epochs.
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
%(include_tmax)s
%(verbose_meth)s
Returns
-------
epochs : instance of Epochs
The cropped epochs object, modified in-place.
Notes
-----
%(notes_tmax_included_by_default)s
"""
# XXX this could be made to work on non-preloaded data...
_check_preload(self, "Modifying data of epochs")
if tmin is None:
tmin = self.tmin
elif tmin < self.tmin:
warn("tmin is not in epochs time interval. tmin is set to epochs.tmin")
tmin = self.tmin
if tmax is None:
tmax = self.tmax
elif tmax > self.tmax:
warn("tmax is not in epochs time interval. tmax is set to epochs.tmax")
tmax = self.tmax
tmask = _time_mask(
self.times, tmin, tmax, sfreq=self.info["sfreq"], include_tmax=include_tmax
)
self._set_times(self.times[tmask])
self._raw_times = self._raw_times[tmask]
self._data = self._data[:, :, tmask]
return self
|
https://github.com/mne-tools/mne-python/issues/8816
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/tmp/test.py in
25
26 epochs.save(epochs_fname, overwrite=True)
---> 27 epochs_read = mne.read_epochs(epochs_fname)
<decorator-gen-212> in read_epochs(fname, proj, preload, verbose)
~/Development/mne-python/mne/epochs.py in read_epochs(fname, proj, preload, verbose)
2704 The epochs.
2705 """
-> 2706 return EpochsFIF(fname, proj, preload, verbose)
2707
2708
<decorator-gen-213> in __init__(self, fname, proj, preload, verbose)
~/Development/mne-python/mne/epochs.py in __init__(self, fname, proj, preload, verbose)
2819 # again, ensure we're retaining the baseline period originally loaded
2820 # from disk without trying to re-apply baseline correction
-> 2821 super(EpochsFIF, self).__init__(
2822 info, data, events, event_id, tmin, tmax, baseline=None, raw=raw,
2823 proj=proj, preload_at_end=False, on_missing='ignore',
<decorator-gen-198> in __init__(self, info, data, events, event_id, tmin, tmax, baseline, raw, picks, reject, flat, decim, reject_tmin, reject_tmax, detrend, proj, on_missing, preload_at_end, selection, drop_log, filename, metadata, event_repeated, verbose)
~/Development/mne-python/mne/epochs.py in __init__(***failed resolving arguments***)
481 # check reject_tmin and reject_tmax
482 if (reject_tmin is not None) and (reject_tmin < tmin):
--> 483 raise ValueError("reject_tmin needs to be None or >= tmin")
484 if (reject_tmax is not None) and (reject_tmax > tmax):
485 raise ValueError("reject_tmax needs to be None or <= tmax")
ValueError: reject_tmin needs to be None or >= tmin
|
ValueError
|
def plot_head_positions(
pos,
mode="traces",
cmap="viridis",
direction="z",
show=True,
destination=None,
info=None,
color="k",
axes=None,
):
"""Plot head positions.
Parameters
----------
pos : ndarray, shape (n_pos, 10) | list of ndarray
The head position data. Can also be a list to treat as a
concatenation of runs.
mode : str
Can be 'traces' (default) to show position and quaternion traces,
or 'field' to show the position as a vector field over time.
The 'field' mode requires matplotlib 1.4+.
cmap : colormap
Colormap to use for the trace plot, default is "viridis".
direction : str
Can be any combination of "x", "y", or "z" (default: "z") to show
directional axes in "field" mode.
show : bool
Show figure if True. Defaults to True.
destination : str | array-like, shape (3,) | None
The destination location for the head, assumed to be in head
coordinates. See :func:`mne.preprocessing.maxwell_filter` for
details.
.. versionadded:: 0.16
info : instance of mne.Info | None
Measurement information. If provided, will be used to show the
destination position when ``destination is None``, and for
showing the MEG sensors.
.. versionadded:: 0.16
color : color object
The color to use for lines in ``mode == 'traces'`` and quiver
arrows in ``mode == 'field'``.
.. versionadded:: 0.16
axes : array-like, shape (3, 2)
The matplotlib axes to use. Only used for ``mode == 'traces'``.
.. versionadded:: 0.16
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from ..chpi import head_pos_to_trans_rot_t
from ..preprocessing.maxwell import _check_destination
import matplotlib.pyplot as plt
_check_option("mode", mode, ["traces", "field"])
dest_info = dict(dev_head_t=None) if info is None else info
destination = _check_destination(destination, dest_info, head_frame=True)
if destination is not None:
destination = _ensure_trans(destination, "head", "meg") # probably inv
destination = destination["trans"][:3].copy()
destination[:, 3] *= 1000
if not isinstance(pos, (list, tuple)):
pos = [pos]
for ii, p in enumerate(pos):
p = np.array(p, float)
if p.ndim != 2 or p.shape[1] != 10:
raise ValueError(
"pos (or each entry in pos if a list) must be "
"dimension (N, 10), got %s" % (p.shape,)
)
if ii > 0: # concatenation
p[:, 0] += pos[ii - 1][-1, 0] - p[0, 0]
pos[ii] = p
borders = np.cumsum([len(pp) for pp in pos])
pos = np.concatenate(pos, axis=0)
trans, rot, t = head_pos_to_trans_rot_t(pos) # also ensures pos is okay
# trans, rot, and t are for dev_head_t, but what we really want
# is head_dev_t (i.e., where the head origin is in device coords)
use_trans = einsum("ijk,ik->ij", rot[:, :3, :3].transpose([0, 2, 1]), -trans) * 1000
use_rot = rot.transpose([0, 2, 1])
use_quats = -pos[:, 1:4] # inverse (like doing rot.T)
surf = rrs = lims = None
if info is not None:
meg_picks = pick_types(info, meg=True, ref_meg=False, exclude=())
if len(meg_picks) > 0:
rrs = 1000 * np.array(
[info["chs"][pick]["loc"][:3] for pick in meg_picks], float
)
if mode == "traces":
lims = np.array((rrs.min(0), rrs.max(0))).T
else: # mode == 'field'
surf = get_meg_helmet_surf(info)
transform_surface_to(surf, "meg", info["dev_head_t"], copy=False)
surf["rr"] *= 1000.0
helmet_color = (0.0, 0.0, 0.6)
if mode == "traces":
if axes is None:
axes = plt.subplots(3, 2, sharex=True)[1]
else:
axes = np.array(axes)
if axes.shape != (3, 2):
raise ValueError("axes must have shape (3, 2), got %s" % (axes.shape,))
fig = axes[0, 0].figure
labels = ["xyz", ("$q_1$", "$q_2$", "$q_3$")]
for ii, (quat, coord) in enumerate(zip(use_quats.T, use_trans.T)):
axes[ii, 0].plot(t, coord, color, lw=1.0, zorder=3)
axes[ii, 0].set(ylabel=labels[0][ii], xlim=t[[0, -1]])
axes[ii, 1].plot(t, quat, color, lw=1.0, zorder=3)
axes[ii, 1].set(ylabel=labels[1][ii], xlim=t[[0, -1]])
for b in borders[:-1]:
for jj in range(2):
axes[ii, jj].axvline(t[b], color="r")
for ii, title in enumerate(("Position (mm)", "Rotation (quat)")):
axes[0, ii].set(title=title)
axes[-1, ii].set(xlabel="Time (s)")
if rrs is not None:
pos_bads = np.any(
[
(use_trans[:, ii] <= lims[ii, 0])
| (use_trans[:, ii] >= lims[ii, 1])
for ii in range(3)
],
axis=0,
)
for ii in range(3):
oidx = list(range(ii)) + list(range(ii + 1, 3))
# knowing it will generally be spherical, we can approximate
# how far away we are along the axis line by taking the
# point to the left and right with the smallest distance
from scipy.spatial.distance import cdist
dists = cdist(rrs[:, oidx], use_trans[:, oidx])
left = rrs[:, [ii]] < use_trans[:, ii]
left_dists_all = dists.copy()
left_dists_all[~left] = np.inf
# Don't show negative Z direction
if ii != 2 and np.isfinite(left_dists_all).any():
idx = np.argmin(left_dists_all, axis=0)
left_dists = rrs[idx, ii]
bads = (
~np.isfinite(left_dists_all[idx, np.arange(len(idx))])
| pos_bads
)
left_dists[bads] = np.nan
axes[ii, 0].plot(
t, left_dists, color=helmet_color, ls="-", lw=0.5, zorder=2
)
else:
axes[ii, 0].axhline(
lims[ii][0], color=helmet_color, ls="-", lw=0.5, zorder=2
)
right_dists_all = dists
right_dists_all[left] = np.inf
if np.isfinite(right_dists_all).any():
idx = np.argmin(right_dists_all, axis=0)
right_dists = rrs[idx, ii]
bads = (
~np.isfinite(right_dists_all[idx, np.arange(len(idx))])
| pos_bads
)
right_dists[bads] = np.nan
axes[ii, 0].plot(
t, right_dists, color=helmet_color, ls="-", lw=0.5, zorder=2
)
else:
axes[ii, 0].axhline(
lims[ii][1], color=helmet_color, ls="-", lw=0.5, zorder=2
)
for ii in range(3):
axes[ii, 1].set(ylim=[-1, 1])
if destination is not None:
vals = np.array(
[destination[:, 3], rot_to_quat(destination[:, :3])]
).T.ravel()
for ax, val in zip(fig.axes, vals):
ax.axhline(val, color="r", ls=":", zorder=2, lw=1.0)
else: # mode == 'field':
from matplotlib.colors import Normalize
from mpl_toolkits.mplot3d.art3d import Line3DCollection
from mpl_toolkits.mplot3d import Axes3D # noqa: F401, analysis:ignore
fig, ax = plt.subplots(1, subplot_kw=dict(projection="3d"))
# First plot the trajectory as a colormap:
# http://matplotlib.org/examples/pylab_examples/multicolored_line.html
pts = use_trans[:, np.newaxis]
segments = np.concatenate([pts[:-1], pts[1:]], axis=1)
norm = Normalize(t[0], t[-2])
lc = Line3DCollection(segments, cmap=cmap, norm=norm)
lc.set_array(t[:-1])
ax.add_collection(lc)
# now plot the head directions as a quiver
dir_idx = dict(x=0, y=1, z=2)
kwargs = dict(pivot="tail")
for d, length in zip(direction, [5.0, 2.5, 1.0]):
use_dir = use_rot[:, :, dir_idx[d]]
# draws stems, then heads
array = np.concatenate((t, np.repeat(t, 2)))
ax.quiver(
use_trans[:, 0],
use_trans[:, 1],
use_trans[:, 2],
use_dir[:, 0],
use_dir[:, 1],
use_dir[:, 2],
norm=norm,
cmap=cmap,
array=array,
length=length,
**kwargs,
)
if destination is not None:
ax.quiver(
destination[0, 3],
destination[1, 3],
destination[2, 3],
destination[dir_idx[d], 0],
destination[dir_idx[d], 1],
destination[dir_idx[d], 2],
color=color,
length=length,
**kwargs,
)
mins = use_trans.min(0)
maxs = use_trans.max(0)
if surf is not None:
ax.plot_trisurf(
*surf["rr"].T,
triangles=surf["tris"],
color=helmet_color,
alpha=0.1,
shade=False,
)
ax.scatter(*rrs.T, s=1, color=helmet_color)
mins = np.minimum(mins, rrs.min(0))
maxs = np.maximum(maxs, rrs.max(0))
scale = (maxs - mins).max() / 2.0
xlim, ylim, zlim = (maxs + mins)[:, np.newaxis] / 2.0 + [-scale, scale]
ax.set(xlabel="x", ylabel="y", zlabel="z", xlim=xlim, ylim=ylim, zlim=zlim)
_set_aspect_equal(ax)
ax.view_init(30, 45)
tight_layout(fig=fig)
plt_show(show)
return fig
|
def plot_head_positions(
pos,
mode="traces",
cmap="viridis",
direction="z",
show=True,
destination=None,
info=None,
color="k",
axes=None,
):
"""Plot head positions.
Parameters
----------
pos : ndarray, shape (n_pos, 10) | list of ndarray
The head position data. Can also be a list to treat as a
concatenation of runs.
mode : str
Can be 'traces' (default) to show position and quaternion traces,
or 'field' to show the position as a vector field over time.
The 'field' mode requires matplotlib 1.4+.
cmap : colormap
Colormap to use for the trace plot, default is "viridis".
direction : str
Can be any combination of "x", "y", or "z" (default: "z") to show
directional axes in "field" mode.
show : bool
Show figure if True. Defaults to True.
destination : str | array-like, shape (3,) | None
The destination location for the head, assumed to be in head
coordinates. See :func:`mne.preprocessing.maxwell_filter` for
details.
.. versionadded:: 0.16
info : instance of mne.Info | None
Measurement information. If provided, will be used to show the
destination position when ``destination is None``, and for
showing the MEG sensors.
.. versionadded:: 0.16
color : color object
The color to use for lines in ``mode == 'traces'`` and quiver
arrows in ``mode == 'field'``.
.. versionadded:: 0.16
axes : array-like, shape (3, 2)
The matplotlib axes to use. Only used for ``mode == 'traces'``.
.. versionadded:: 0.16
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from ..chpi import head_pos_to_trans_rot_t
from ..preprocessing.maxwell import _check_destination
import matplotlib.pyplot as plt
_check_option("mode", mode, ["traces", "field"])
dest_info = dict(dev_head_t=None) if info is None else info
destination = _check_destination(destination, dest_info, head_frame=True)
if destination is not None:
destination = _ensure_trans(destination, "head", "meg") # probably inv
destination = destination["trans"][:3].copy()
destination[:, 3] *= 1000
if not isinstance(pos, (list, tuple)):
pos = [pos]
for ii, p in enumerate(pos):
p = np.array(p, float)
if p.ndim != 2 or p.shape[1] != 10:
raise ValueError(
"pos (or each entry in pos if a list) must be "
"dimension (N, 10), got %s" % (p.shape,)
)
if ii > 0: # concatenation
p[:, 0] += pos[ii - 1][-1, 0] - p[0, 0]
pos[ii] = p
borders = np.cumsum([len(pp) for pp in pos])
pos = np.concatenate(pos, axis=0)
trans, rot, t = head_pos_to_trans_rot_t(pos) # also ensures pos is okay
# trans, rot, and t are for dev_head_t, but what we really want
# is head_dev_t (i.e., where the head origin is in device coords)
use_trans = einsum("ijk,ik->ij", rot[:, :3, :3].transpose([0, 2, 1]), -trans) * 1000
use_rot = rot.transpose([0, 2, 1])
use_quats = -pos[:, 1:4] # inverse (like doing rot.T)
surf = rrs = lims = None
if info is not None:
meg_picks = pick_types(info, meg=True, ref_meg=False, exclude=())
if len(meg_picks) > 0:
rrs = 1000 * np.array(
[info["chs"][pick]["loc"][:3] for pick in meg_picks], float
)
if mode == "traces":
lims = np.array((rrs.min(0), rrs.max(0))).T
else: # mode == 'field'
surf = get_meg_helmet_surf(info)
transform_surface_to(surf, "meg", info["dev_head_t"], copy=False)
surf["rr"] *= 1000.0
helmet_color = (0.0, 0.0, 0.6)
if mode == "traces":
if axes is None:
axes = plt.subplots(3, 2, sharex=True)[1]
else:
axes = np.array(axes)
if axes.shape != (3, 2):
raise ValueError("axes must have shape (3, 2), got %s" % (axes.shape,))
fig = axes[0, 0].figure
labels = ["xyz", ("$q_1$", "$q_2$", "$q_3$")]
for ii, (quat, coord) in enumerate(zip(use_quats.T, use_trans.T)):
axes[ii, 0].plot(t, coord, color, lw=1.0, zorder=3)
axes[ii, 0].set(ylabel=labels[0][ii], xlim=t[[0, -1]])
axes[ii, 1].plot(t, quat, color, lw=1.0, zorder=3)
axes[ii, 1].set(ylabel=labels[1][ii], xlim=t[[0, -1]])
for b in borders[:-1]:
for jj in range(2):
axes[ii, jj].axvline(t[b], color="r")
for ii, title in enumerate(("Position (mm)", "Rotation (quat)")):
axes[0, ii].set(title=title)
axes[-1, ii].set(xlabel="Time (s)")
if rrs is not None:
pos_bads = np.any(
[
(use_trans[:, ii] <= lims[ii, 0])
| (use_trans[:, ii] >= lims[ii, 1])
for ii in range(3)
],
axis=0,
)
for ii in range(3):
oidx = list(range(ii)) + list(range(ii + 1, 3))
# knowing it will generally be spherical, we can approximate
# how far away we are along the axis line by taking the
# point to the left and right with the smallest distance
from scipy.spatial.distance import cdist
dists = cdist(rrs[:, oidx], use_trans[:, oidx])
left = rrs[:, [ii]] < use_trans[:, ii]
left_dists_all = dists.copy()
left_dists_all[~left] = np.inf
# Don't show negative Z direction
if ii != 2 and np.isfinite(left_dists_all).any():
idx = np.argmin(left_dists_all, axis=0)
left_dists = rrs[idx, ii]
bads = (
~np.isfinite(left_dists_all[idx, np.arange(len(idx))])
| pos_bads
)
left_dists[bads] = np.nan
axes[ii, 0].plot(
t, left_dists, color=helmet_color, ls="-", lw=0.5, zorder=2
)
else:
axes[ii, 0].axhline(
lims[ii][0], color=helmet_color, ls="-", lw=0.5, zorder=2
)
right_dists_all = dists
right_dists_all[left] = np.inf
if np.isfinite(right_dists_all).any():
idx = np.argmin(right_dists_all, axis=0)
right_dists = rrs[idx, ii]
bads = (
~np.isfinite(right_dists_all[idx, np.arange(len(idx))])
| pos_bads
)
right_dists[bads] = np.nan
axes[ii, 0].plot(
t, right_dists, color=helmet_color, ls="-", lw=0.5, zorder=2
)
else:
axes[ii, 0].axhline(
lims[ii][1], color=helmet_color, ls="-", lw=0.5, zorder=2
)
for ii in range(3):
axes[ii, 1].set(ylim=[-1, 1])
if destination is not None:
vals = np.array(
[destination[:, 3], rot_to_quat(destination[:, :3])]
).T.ravel()
for ax, val in zip(fig.axes, vals):
ax.axhline(val, color="r", ls=":", zorder=2, lw=1.0)
else: # mode == 'field':
from matplotlib.colors import Normalize
from mpl_toolkits.mplot3d.art3d import Line3DCollection
from mpl_toolkits.mplot3d import axes3d # noqa: F401, analysis:ignore
fig, ax = plt.subplots(1, subplot_kw=dict(projection="3d"))
# First plot the trajectory as a colormap:
# http://matplotlib.org/examples/pylab_examples/multicolored_line.html
pts = use_trans[:, np.newaxis]
segments = np.concatenate([pts[:-1], pts[1:]], axis=1)
norm = Normalize(t[0], t[-2])
lc = Line3DCollection(segments, cmap=cmap, norm=norm)
lc.set_array(t[:-1])
ax.add_collection(lc)
# now plot the head directions as a quiver
dir_idx = dict(x=0, y=1, z=2)
kwargs = dict(pivot="tail")
for d, length in zip(direction, [5.0, 2.5, 1.0]):
use_dir = use_rot[:, :, dir_idx[d]]
# draws stems, then heads
array = np.concatenate((t, np.repeat(t, 2)))
ax.quiver(
use_trans[:, 0],
use_trans[:, 1],
use_trans[:, 2],
use_dir[:, 0],
use_dir[:, 1],
use_dir[:, 2],
norm=norm,
cmap=cmap,
array=array,
length=length,
**kwargs,
)
if destination is not None:
ax.quiver(
destination[0, 3],
destination[1, 3],
destination[2, 3],
destination[dir_idx[d], 0],
destination[dir_idx[d], 1],
destination[dir_idx[d], 2],
color=color,
length=length,
**kwargs,
)
mins = use_trans.min(0)
maxs = use_trans.max(0)
if surf is not None:
ax.plot_trisurf(
*surf["rr"].T,
triangles=surf["tris"],
color=helmet_color,
alpha=0.1,
shade=False,
)
ax.scatter(*rrs.T, s=1, color=helmet_color)
mins = np.minimum(mins, rrs.min(0))
maxs = np.maximum(maxs, rrs.max(0))
scale = (maxs - mins).max() / 2.0
xlim, ylim, zlim = (maxs + mins)[:, np.newaxis] / 2.0 + [-scale, scale]
ax.set(xlabel="x", ylabel="y", zlabel="z", xlim=xlim, ylim=ylim, zlim=zlim)
_set_aspect_equal(ax)
ax.view_init(30, 45)
tight_layout(fig=fig)
plt_show(show)
return fig
|
https://github.com/mne-tools/mne-python/issues/8810
|
Using control points [6.56993144e-11 7.80732925e-11 1.82440323e-10]
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
~/miniconda3/envs/hnn_dev/lib/python3.7/site-packages/matplotlib/projections/__init__.py in get_projection_class(projection)
57 try:
---> 58 return projection_registry.get_projection_class(projection)
59 except KeyError:
~/miniconda3/envs/hnn_dev/lib/python3.7/site-packages/matplotlib/projections/__init__.py in get_projection_class(self, name)
24 """
---> 25 return self._all_projection_types[name]
26
KeyError: '3d'
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
~/src/git/hnn-core/examples/plot_simulate_somato.py in <module>
----> 1 stc_mne.plot(backend='matplotlib')
~/src/git/mne-python/mne/source_estimate.py in plot(self, subject, surface, hemi, colormap, time_label, smoothing_steps, transparent, alpha, time_viewer, subjects_dir, figure, views, colorbar, clim, cortex, size, background, foreground, initial_time, time_unit, backend, spacing, title, show_traces, src, volume_options, view_layout, add_data_kwargs, brain_kwargs, verbose)
662 src=src, volume_options=volume_options, view_layout=view_layout,
663 add_data_kwargs=add_data_kwargs, brain_kwargs=brain_kwargs,
--> 664 verbose=verbose)
665 return brain
666
<decorator-gen-142> in plot_source_estimates(stc, subject, surface, hemi, colormap, time_label, smoothing_steps, transparent, alpha, time_viewer, subjects_dir, figure, views, colorbar, clim, cortex, size, background, foreground, initial_time, time_unit, backend, spacing, title, show_traces, src, volume_options, view_layout, add_data_kwargs, brain_kwargs, verbose)
~/src/git/mne-python/mne/viz/_3d.py in plot_source_estimates(stc, subject, surface, hemi, colormap, time_label, smoothing_steps, transparent, alpha, time_viewer, subjects_dir, figure, views, colorbar, clim, cortex, size, background, foreground, initial_time, time_unit, backend, spacing, title, show_traces, src, volume_options, view_layout, add_data_kwargs, brain_kwargs, verbose)
1826 transparent=transparent)
1827 if plot_mpl:
-> 1828 return _plot_mpl_stc(stc, spacing=spacing, **kwargs)
1829 return _plot_stc(
1830 stc, overlay_alpha=alpha, brain_alpha=alpha, vector_alpha=alpha,
~/src/git/mne-python/mne/viz/_3d.py in _plot_mpl_stc(***failed resolving arguments***)
1524 time_label, times = _handle_time(time_label, time_unit, stc.times)
1525 fig = plt.figure(figsize=(6, 6)) if figure is None else figure
-> 1526 ax = fig.gca(projection='3d')
1527 hemi_idx = 0 if hemi == 'lh' else 1
1528 surf = op.join(subjects_dir, subject, 'surf', '%s.%s' % (hemi, surface))
~/miniconda3/envs/hnn_dev/lib/python3.7/site-packages/matplotlib/figure.py in gca(self, **kwargs)
1930
1931 # no axes found, so create one which spans the figure
-> 1932 return self.add_subplot(1, 1, 1, **kwargs)
1933
1934 def sca(self, a):
~/miniconda3/envs/hnn_dev/lib/python3.7/site-packages/matplotlib/figure.py in add_subplot(self, *args, **kwargs)
1394 else:
1395 projection_class, kwargs, key = \
-> 1396 self._process_projection_requirements(*args, **kwargs)
1397
1398 # try to find the axes with this key in the stack
~/miniconda3/envs/hnn_dev/lib/python3.7/site-packages/matplotlib/figure.py in _process_projection_requirements(self, polar, projection, *args, **kwargs)
1118
1119 if isinstance(projection, str) or projection is None:
-> 1120 projection_class = projections.get_projection_class(projection)
1121 elif hasattr(projection, '_as_mpl_axes'):
1122 projection_class, extra_kwargs = projection._as_mpl_axes()
~/miniconda3/envs/hnn_dev/lib/python3.7/site-packages/matplotlib/projections/__init__.py in get_projection_class(projection)
58 return projection_registry.get_projection_class(projection)
59 except KeyError:
---> 60 raise ValueError("Unknown projection %r" % projection)
61
62
ValueError: Unknown projection '3d'
|
KeyError
|
def _plot_mpl_stc(
stc,
subject=None,
surface="inflated",
hemi="lh",
colormap="auto",
time_label="auto",
smoothing_steps=10,
subjects_dir=None,
views="lat",
clim="auto",
figure=None,
initial_time=None,
time_unit="s",
background="black",
spacing="oct6",
time_viewer=False,
colorbar=True,
transparent=True,
):
"""Plot source estimate using mpl."""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa: F401, analysis:ignore
from matplotlib import cm
from matplotlib.widgets import Slider
import nibabel as nib
from scipy import stats
from ..morph import _get_subject_sphere_tris
if hemi not in ["lh", "rh"]:
raise ValueError(
"hemi must be 'lh' or 'rh' when using matplotlib. Got %s." % hemi
)
lh_kwargs = {
"lat": {"elev": 0, "azim": 180},
"med": {"elev": 0, "azim": 0},
"ros": {"elev": 0, "azim": 90},
"cau": {"elev": 0, "azim": -90},
"dor": {"elev": 90, "azim": -90},
"ven": {"elev": -90, "azim": -90},
"fro": {"elev": 0, "azim": 106.739},
"par": {"elev": 30, "azim": -120},
}
rh_kwargs = {
"lat": {"elev": 0, "azim": 0},
"med": {"elev": 0, "azim": 180},
"ros": {"elev": 0, "azim": 90},
"cau": {"elev": 0, "azim": -90},
"dor": {"elev": 90, "azim": -90},
"ven": {"elev": -90, "azim": -90},
"fro": {"elev": 16.739, "azim": 60},
"par": {"elev": 30, "azim": -60},
}
time_viewer = False if time_viewer == "auto" else time_viewer
kwargs = dict(lh=lh_kwargs, rh=rh_kwargs)
views = "lat" if views == "auto" else views
_check_option("views", views, sorted(lh_kwargs.keys()))
mapdata = _process_clim(clim, colormap, transparent, stc.data)
_separate_map(mapdata)
colormap, scale_pts = _linearize_map(mapdata)
del transparent, mapdata
time_label, times = _handle_time(time_label, time_unit, stc.times)
fig = plt.figure(figsize=(6, 6)) if figure is None else figure
ax = fig.gca(projection="3d")
hemi_idx = 0 if hemi == "lh" else 1
surf = op.join(subjects_dir, subject, "surf", "%s.%s" % (hemi, surface))
if spacing == "all":
coords, faces = nib.freesurfer.read_geometry(surf)
inuse = slice(None)
else:
stype, sval, ico_surf, src_type_str = _check_spacing(spacing)
surf = _create_surf_spacing(surf, hemi, subject, stype, ico_surf, subjects_dir)
inuse = surf["vertno"]
faces = surf["use_tris"]
coords = surf["rr"][inuse]
shape = faces.shape
faces = stats.rankdata(faces, "dense").reshape(shape) - 1
faces = np.round(faces).astype(int) # should really be int-like anyway
del surf
vertices = stc.vertices[hemi_idx]
n_verts = len(vertices)
tris = _get_subject_sphere_tris(subject, subjects_dir)[hemi_idx]
cmap = cm.get_cmap(colormap)
greymap = cm.get_cmap("Greys")
curv = nib.freesurfer.read_morph_data(
op.join(subjects_dir, subject, "surf", "%s.curv" % hemi)
)[inuse]
curv = np.clip(np.array(curv > 0, np.int64), 0.33, 0.66)
params = dict(
ax=ax,
stc=stc,
coords=coords,
faces=faces,
hemi_idx=hemi_idx,
vertices=vertices,
tris=tris,
smoothing_steps=smoothing_steps,
n_verts=n_verts,
inuse=inuse,
cmap=cmap,
curv=curv,
scale_pts=scale_pts,
greymap=greymap,
time_label=time_label,
time_unit=time_unit,
)
_smooth_plot(initial_time, params)
ax.view_init(**kwargs[hemi][views])
try:
ax.set_facecolor(background)
except AttributeError:
ax.set_axis_bgcolor(background)
if time_viewer:
time_viewer = figure_nobar(figsize=(4.5, 0.25))
fig.time_viewer = time_viewer
ax_time = plt.axes()
if initial_time is None:
initial_time = 0
slider = Slider(
ax=ax_time,
label="Time",
valmin=times[0],
valmax=times[-1],
valinit=initial_time,
)
time_viewer.slider = slider
callback_slider = partial(_smooth_plot, params=params)
slider.on_changed(callback_slider)
callback_key = partial(_key_pressed_slider, params=params)
time_viewer.canvas.mpl_connect("key_press_event", callback_key)
time_viewer.subplots_adjust(left=0.12, bottom=0.05, right=0.75, top=0.95)
fig.subplots_adjust(left=0.0, bottom=0.0, right=1.0, top=1.0)
# add colorbar
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
sm = plt.cm.ScalarMappable(
cmap=cmap, norm=plt.Normalize(scale_pts[0], scale_pts[2])
)
cax = inset_axes(ax, width="80%", height="5%", loc=8, borderpad=3.0)
plt.setp(plt.getp(cax, "xticklabels"), color="w")
sm.set_array(np.linspace(scale_pts[0], scale_pts[2], 256))
if colorbar:
cb = plt.colorbar(sm, cax=cax, orientation="horizontal")
cb_yticks = plt.getp(cax, "yticklabels")
plt.setp(cb_yticks, color="w")
cax.tick_params(labelsize=16)
cb.patch.set_facecolor("0.5")
cax.set(xlim=(scale_pts[0], scale_pts[2]))
plt.show()
return fig
|
def _plot_mpl_stc(
stc,
subject=None,
surface="inflated",
hemi="lh",
colormap="auto",
time_label="auto",
smoothing_steps=10,
subjects_dir=None,
views="lat",
clim="auto",
figure=None,
initial_time=None,
time_unit="s",
background="black",
spacing="oct6",
time_viewer=False,
colorbar=True,
transparent=True,
):
"""Plot source estimate using mpl."""
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.widgets import Slider
import nibabel as nib
from scipy import stats
from ..morph import _get_subject_sphere_tris
if hemi not in ["lh", "rh"]:
raise ValueError(
"hemi must be 'lh' or 'rh' when using matplotlib. Got %s." % hemi
)
lh_kwargs = {
"lat": {"elev": 0, "azim": 180},
"med": {"elev": 0, "azim": 0},
"ros": {"elev": 0, "azim": 90},
"cau": {"elev": 0, "azim": -90},
"dor": {"elev": 90, "azim": -90},
"ven": {"elev": -90, "azim": -90},
"fro": {"elev": 0, "azim": 106.739},
"par": {"elev": 30, "azim": -120},
}
rh_kwargs = {
"lat": {"elev": 0, "azim": 0},
"med": {"elev": 0, "azim": 180},
"ros": {"elev": 0, "azim": 90},
"cau": {"elev": 0, "azim": -90},
"dor": {"elev": 90, "azim": -90},
"ven": {"elev": -90, "azim": -90},
"fro": {"elev": 16.739, "azim": 60},
"par": {"elev": 30, "azim": -60},
}
time_viewer = False if time_viewer == "auto" else time_viewer
kwargs = dict(lh=lh_kwargs, rh=rh_kwargs)
views = "lat" if views == "auto" else views
_check_option("views", views, sorted(lh_kwargs.keys()))
mapdata = _process_clim(clim, colormap, transparent, stc.data)
_separate_map(mapdata)
colormap, scale_pts = _linearize_map(mapdata)
del transparent, mapdata
time_label, times = _handle_time(time_label, time_unit, stc.times)
fig = plt.figure(figsize=(6, 6)) if figure is None else figure
ax = fig.gca(projection="3d")
hemi_idx = 0 if hemi == "lh" else 1
surf = op.join(subjects_dir, subject, "surf", "%s.%s" % (hemi, surface))
if spacing == "all":
coords, faces = nib.freesurfer.read_geometry(surf)
inuse = slice(None)
else:
stype, sval, ico_surf, src_type_str = _check_spacing(spacing)
surf = _create_surf_spacing(surf, hemi, subject, stype, ico_surf, subjects_dir)
inuse = surf["vertno"]
faces = surf["use_tris"]
coords = surf["rr"][inuse]
shape = faces.shape
faces = stats.rankdata(faces, "dense").reshape(shape) - 1
faces = np.round(faces).astype(int) # should really be int-like anyway
del surf
vertices = stc.vertices[hemi_idx]
n_verts = len(vertices)
tris = _get_subject_sphere_tris(subject, subjects_dir)[hemi_idx]
cmap = cm.get_cmap(colormap)
greymap = cm.get_cmap("Greys")
curv = nib.freesurfer.read_morph_data(
op.join(subjects_dir, subject, "surf", "%s.curv" % hemi)
)[inuse]
curv = np.clip(np.array(curv > 0, np.int64), 0.33, 0.66)
params = dict(
ax=ax,
stc=stc,
coords=coords,
faces=faces,
hemi_idx=hemi_idx,
vertices=vertices,
tris=tris,
smoothing_steps=smoothing_steps,
n_verts=n_verts,
inuse=inuse,
cmap=cmap,
curv=curv,
scale_pts=scale_pts,
greymap=greymap,
time_label=time_label,
time_unit=time_unit,
)
_smooth_plot(initial_time, params)
ax.view_init(**kwargs[hemi][views])
try:
ax.set_facecolor(background)
except AttributeError:
ax.set_axis_bgcolor(background)
if time_viewer:
time_viewer = figure_nobar(figsize=(4.5, 0.25))
fig.time_viewer = time_viewer
ax_time = plt.axes()
if initial_time is None:
initial_time = 0
slider = Slider(
ax=ax_time,
label="Time",
valmin=times[0],
valmax=times[-1],
valinit=initial_time,
)
time_viewer.slider = slider
callback_slider = partial(_smooth_plot, params=params)
slider.on_changed(callback_slider)
callback_key = partial(_key_pressed_slider, params=params)
time_viewer.canvas.mpl_connect("key_press_event", callback_key)
time_viewer.subplots_adjust(left=0.12, bottom=0.05, right=0.75, top=0.95)
fig.subplots_adjust(left=0.0, bottom=0.0, right=1.0, top=1.0)
# add colorbar
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
sm = plt.cm.ScalarMappable(
cmap=cmap, norm=plt.Normalize(scale_pts[0], scale_pts[2])
)
cax = inset_axes(ax, width="80%", height="5%", loc=8, borderpad=3.0)
plt.setp(plt.getp(cax, "xticklabels"), color="w")
sm.set_array(np.linspace(scale_pts[0], scale_pts[2], 256))
if colorbar:
cb = plt.colorbar(sm, cax=cax, orientation="horizontal")
cb_yticks = plt.getp(cax, "yticklabels")
plt.setp(cb_yticks, color="w")
cax.tick_params(labelsize=16)
cb.patch.set_facecolor("0.5")
cax.set(xlim=(scale_pts[0], scale_pts[2]))
plt.show()
return fig
|
https://github.com/mne-tools/mne-python/issues/8810
|
Using control points [6.56993144e-11 7.80732925e-11 1.82440323e-10]
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
~/miniconda3/envs/hnn_dev/lib/python3.7/site-packages/matplotlib/projections/__init__.py in get_projection_class(projection)
57 try:
---> 58 return projection_registry.get_projection_class(projection)
59 except KeyError:
~/miniconda3/envs/hnn_dev/lib/python3.7/site-packages/matplotlib/projections/__init__.py in get_projection_class(self, name)
24 """
---> 25 return self._all_projection_types[name]
26
KeyError: '3d'
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
~/src/git/hnn-core/examples/plot_simulate_somato.py in <module>
----> 1 stc_mne.plot(backend='matplotlib')
~/src/git/mne-python/mne/source_estimate.py in plot(self, subject, surface, hemi, colormap, time_label, smoothing_steps, transparent, alpha, time_viewer, subjects_dir, figure, views, colorbar, clim, cortex, size, background, foreground, initial_time, time_unit, backend, spacing, title, show_traces, src, volume_options, view_layout, add_data_kwargs, brain_kwargs, verbose)
662 src=src, volume_options=volume_options, view_layout=view_layout,
663 add_data_kwargs=add_data_kwargs, brain_kwargs=brain_kwargs,
--> 664 verbose=verbose)
665 return brain
666
<decorator-gen-142> in plot_source_estimates(stc, subject, surface, hemi, colormap, time_label, smoothing_steps, transparent, alpha, time_viewer, subjects_dir, figure, views, colorbar, clim, cortex, size, background, foreground, initial_time, time_unit, backend, spacing, title, show_traces, src, volume_options, view_layout, add_data_kwargs, brain_kwargs, verbose)
~/src/git/mne-python/mne/viz/_3d.py in plot_source_estimates(stc, subject, surface, hemi, colormap, time_label, smoothing_steps, transparent, alpha, time_viewer, subjects_dir, figure, views, colorbar, clim, cortex, size, background, foreground, initial_time, time_unit, backend, spacing, title, show_traces, src, volume_options, view_layout, add_data_kwargs, brain_kwargs, verbose)
1826 transparent=transparent)
1827 if plot_mpl:
-> 1828 return _plot_mpl_stc(stc, spacing=spacing, **kwargs)
1829 return _plot_stc(
1830 stc, overlay_alpha=alpha, brain_alpha=alpha, vector_alpha=alpha,
~/src/git/mne-python/mne/viz/_3d.py in _plot_mpl_stc(***failed resolving arguments***)
1524 time_label, times = _handle_time(time_label, time_unit, stc.times)
1525 fig = plt.figure(figsize=(6, 6)) if figure is None else figure
-> 1526 ax = fig.gca(projection='3d')
1527 hemi_idx = 0 if hemi == 'lh' else 1
1528 surf = op.join(subjects_dir, subject, 'surf', '%s.%s' % (hemi, surface))
~/miniconda3/envs/hnn_dev/lib/python3.7/site-packages/matplotlib/figure.py in gca(self, **kwargs)
1930
1931 # no axes found, so create one which spans the figure
-> 1932 return self.add_subplot(1, 1, 1, **kwargs)
1933
1934 def sca(self, a):
~/miniconda3/envs/hnn_dev/lib/python3.7/site-packages/matplotlib/figure.py in add_subplot(self, *args, **kwargs)
1394 else:
1395 projection_class, kwargs, key = \
-> 1396 self._process_projection_requirements(*args, **kwargs)
1397
1398 # try to find the axes with this key in the stack
~/miniconda3/envs/hnn_dev/lib/python3.7/site-packages/matplotlib/figure.py in _process_projection_requirements(self, polar, projection, *args, **kwargs)
1118
1119 if isinstance(projection, str) or projection is None:
-> 1120 projection_class = projections.get_projection_class(projection)
1121 elif hasattr(projection, '_as_mpl_axes'):
1122 projection_class, extra_kwargs = projection._as_mpl_axes()
~/miniconda3/envs/hnn_dev/lib/python3.7/site-packages/matplotlib/projections/__init__.py in get_projection_class(projection)
58 return projection_registry.get_projection_class(projection)
59 except KeyError:
---> 60 raise ValueError("Unknown projection %r" % projection)
61
62
ValueError: Unknown projection '3d'
|
KeyError
|
def _get_path(path, key, name):
"""Get a dataset path."""
# 1. Input
if path is not None:
if not isinstance(path, str):
raise ValueError("path must be a string or None")
return path
# 2. get_config(key)
# 3. get_config('MNE_DATA')
path = get_config(key, get_config("MNE_DATA"))
if path is not None:
if not op.exists(path):
msg = (
f"Download location {path} as specified by MNE_DATA does "
f"not exist. Either create this directory manually and try "
f"again, or set MNE_DATA to an existing directory."
)
raise FileNotFoundError(msg)
return path
# 4. ~/mne_data (but use a fake home during testing so we don't
# unnecessarily create ~/mne_data)
logger.info("Using default location ~/mne_data for %s..." % name)
path = op.join(os.getenv("_MNE_FAKE_HOME_DIR", op.expanduser("~")), "mne_data")
if not op.exists(path):
logger.info("Creating ~/mne_data")
try:
os.mkdir(path)
except OSError:
raise OSError(
"User does not have write permissions "
"at '%s', try giving the path as an "
"argument to data_path() where user has "
"write permissions, for ex:data_path"
"('/home/xyz/me2/')" % (path)
)
return path
|
def _get_path(path, key, name):
"""Get a dataset path."""
# 1. Input
if path is not None:
if not isinstance(path, str):
raise ValueError("path must be a string or None")
return path
# 2. get_config(key)
# 3. get_config('MNE_DATA')
path = get_config(key, get_config("MNE_DATA"))
if path is not None:
return path
# 4. ~/mne_data (but use a fake home during testing so we don't
# unnecessarily create ~/mne_data)
logger.info("Using default location ~/mne_data for %s..." % name)
path = op.join(os.getenv("_MNE_FAKE_HOME_DIR", op.expanduser("~")), "mne_data")
if not op.exists(path):
logger.info("Creating ~/mne_data")
try:
os.mkdir(path)
except OSError:
raise OSError(
"User does not have write permissions "
"at '%s', try giving the path as an "
"argument to data_path() where user has "
"write permissions, for ex:data_path"
"('/home/xyz/me2/')" % (path)
)
return path
|
https://github.com/mne-tools/mne-python/issues/8764
|
Downloading archive MNE-sample-data-processed.tar.gz to /Users/clemens/mne_data
Downloading https://files.osf.io/v1/resources/rxvq7/providers/osfstorage/59c0e26f9ad5a1025c4ab159?version=5&action=download&direct (1.54 GB)
0%| | Downloading : 0.00/1.54G [00:00<?, ?B/s]Error while fetching file https://osf.io/86qa2/download?version=5. Dataset fetching aborted.
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/IPython/core/interactiveshell.py", line 3418, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-2-9a4a5c6c7639>", line 1, in <module>
runfile('/Users/clemens/Library/Application Support/JetBrains/PyCharm2020.3/scratches/scratch.py', wdir='/Users/clemens/Library/Application Support/JetBrains/PyCharm2020.3/scratches')
File "/Applications/PyCharm.app/Contents/plugins/python/helpers/pydev/_pydev_bundle/pydev_umd.py", line 197, in runfile
pydev_imports.execfile(filename, global_vars, local_vars) # execute the script
File "/Applications/PyCharm.app/Contents/plugins/python/helpers/pydev/_pydev_imps/_pydev_execfile.py", line 18, in execfile
exec(compile(contents+"\n", file, 'exec'), glob, loc)
File "/Users/clemens/Library/Application Support/JetBrains/PyCharm2020.3/scratches/scratch.py", line 5, in <module>
data_path = sample.data_path()
File "<decorator-gen-461>", line 24, in data_path
File "/Users/clemens/Repositories/mne-python/mne/datasets/sample/sample.py", line 19, in data_path
return _data_path(path=path, force_update=force_update,
File "/Users/clemens/Repositories/mne-python/mne/datasets/utils.py", line 418, in _data_path
remove_archive, full = _download(path, u, an, h)
File "/Users/clemens/Repositories/mne-python/mne/datasets/utils.py", line 475, in _download
_fetch_file(url, full_name, print_destination=False,
File "<decorator-gen-3>", line 24, in _fetch_file
File "/Users/clemens/Repositories/mne-python/mne/utils/fetching.py", line 117, in _fetch_file
_get_http(url, temp_file_name, initial_size, timeout, verbose_bool)
File "/Users/clemens/Repositories/mne-python/mne/utils/fetching.py", line 55, in _get_http
with open(temp_file_name, mode) as local_file:
FileNotFoundError: [Errno 2] No such file or directory: '/Users/clemens/mne_data/MNE-sample-data-processed.tar.gz.part'
|
FileNotFoundError
|
def __init__(self, input_fname, eog=(), preload=False, uint16_codec=None, verbose=None): # noqa: D102
eeg = _check_load_mat(input_fname, uint16_codec)
if eeg.trials != 1:
raise TypeError(
"The number of trials is %d. It must be 1 for raw"
" files. Please use `mne.io.read_epochs_eeglab` if"
" the .set file contains epochs." % eeg.trials
)
last_samps = [eeg.pnts - 1]
info, eeg_montage, _ = _get_info(eeg, eog=eog)
# read the data
if isinstance(eeg.data, str):
data_fname = _check_fname(input_fname, eeg.data)
logger.info("Reading %s" % data_fname)
super(RawEEGLAB, self).__init__(
info,
preload,
filenames=[data_fname],
last_samps=last_samps,
orig_format="double",
verbose=verbose,
)
else:
if preload is False or isinstance(preload, str):
warn(
"Data will be preloaded. preload=False or a string "
"preload is not supported when the data is stored in "
"the .set file"
)
# can't be done in standard way with preload=True because of
# different reading path (.set file)
if eeg.nbchan == 1 and len(eeg.data.shape) == 1:
n_chan, n_times = [1, eeg.data.shape[0]]
else:
n_chan, n_times = eeg.data.shape
data = np.empty((n_chan, n_times), dtype=float)
data[:n_chan] = eeg.data
data *= CAL
super(RawEEGLAB, self).__init__(
info,
data,
filenames=[input_fname],
last_samps=last_samps,
orig_format="double",
verbose=verbose,
)
# create event_ch from annotations
annot = read_annotations(input_fname)
self.set_annotations(annot)
_check_boundary(annot, None)
_set_dig_montage_in_init(self, eeg_montage)
latencies = np.round(annot.onset * self.info["sfreq"])
_check_latencies(latencies)
|
def __init__(self, input_fname, eog=(), preload=False, uint16_codec=None, verbose=None): # noqa: D102
eeg = _check_load_mat(input_fname, uint16_codec)
if eeg.trials != 1:
raise TypeError(
"The number of trials is %d. It must be 1 for raw"
" files. Please use `mne.io.read_epochs_eeglab` if"
" the .set file contains epochs." % eeg.trials
)
last_samps = [eeg.pnts - 1]
info, eeg_montage, update_ch_names = _get_info(eeg, eog=eog)
# read the data
if isinstance(eeg.data, str):
data_fname = _check_fname(input_fname, eeg.data)
logger.info("Reading %s" % data_fname)
super(RawEEGLAB, self).__init__(
info,
preload,
filenames=[data_fname],
last_samps=last_samps,
orig_format="double",
verbose=verbose,
)
else:
if preload is False or isinstance(preload, str):
warn(
"Data will be preloaded. preload=False or a string "
"preload is not supported when the data is stored in "
"the .set file"
)
# can't be done in standard way with preload=True because of
# different reading path (.set file)
if eeg.nbchan == 1 and len(eeg.data.shape) == 1:
n_chan, n_times = [1, eeg.data.shape[0]]
else:
n_chan, n_times = eeg.data.shape
data = np.empty((n_chan, n_times), dtype=float)
data[:n_chan] = eeg.data
data *= CAL
super(RawEEGLAB, self).__init__(
info,
data,
filenames=[input_fname],
last_samps=last_samps,
orig_format="double",
verbose=verbose,
)
# create event_ch from annotations
annot = read_annotations(input_fname)
self.set_annotations(annot)
_check_boundary(annot, None)
_set_dig_montage_in_init(self, eeg_montage)
latencies = np.round(annot.onset * self.info["sfreq"])
_check_latencies(latencies)
|
https://github.com/mne-tools/mne-python/issues/8755
|
AssertionError Traceback (most recent call last)
<ipython-input-10-a6f5a4e0501a> in <module>
5 from mne import preprocessing
6
----> 7 ica = preprocessing.read_ica_eeglab('full_filepath')
8
9
~/opt/anaconda3/envs/mne/lib/python3.8/site-packages/mne/preprocessing/ica.py in read_ica_eeglab(fname)
2754
2755 n_ch = len(ica.ch_names)
-> 2756 assert eeg.icaweights.shape == (n_components, n_ch)
2757 # When PCA reduction is used in EEGLAB, runica returns
2758 # weights= weights*sphere*eigenvectors(:,1:ncomps)';
AssertionError:
|
AssertionError
|
def read_ica_eeglab(fname, *, verbose=None):
"""Load ICA information saved in an EEGLAB .set file.
Parameters
----------
fname : str
Complete path to a .set EEGLAB file that contains an ICA object.
%(verbose)s
Returns
-------
ica : instance of ICA
An ICA object based on the information contained in the input file.
"""
eeg = _check_load_mat(fname, None)
info, eeg_montage, _ = _get_info(eeg)
info.set_montage(eeg_montage)
pick_info(info, np.round(eeg["icachansind"]).astype(int) - 1, copy=False)
rank = eeg.icasphere.shape[0]
n_components = eeg.icaweights.shape[0]
ica = ICA(method="imported_eeglab", n_components=n_components)
ica.current_fit = "eeglab"
ica.ch_names = info["ch_names"]
ica.n_pca_components = None
ica.n_components_ = n_components
n_ch = len(ica.ch_names)
assert len(eeg.icachansind) == n_ch
ica.pre_whitener_ = np.ones((n_ch, 1))
ica.pca_mean_ = np.zeros(n_ch)
assert eeg.icasphere.shape[1] == n_ch
assert eeg.icaweights.shape == (n_components, rank)
# When PCA reduction is used in EEGLAB, runica returns
# weights= weights*sphere*eigenvectors(:,1:ncomps)';
# sphere = eye(urchans). When PCA reduction is not used, we have:
#
# eeg.icawinv == pinv(eeg.icaweights @ eeg.icasphere)
#
# So in either case, we can use SVD to get our square whitened
# weights matrix (u * s) and our PCA vectors (v) back:
use = eeg.icaweights @ eeg.icasphere
use_check = linalg.pinv(eeg.icawinv)
if not np.allclose(use, use_check, rtol=1e-6):
warn(
"Mismatch between icawinv and icaweights @ icasphere from EEGLAB "
"possibly due to ICA component removal, assuming icawinv is "
"correct"
)
use = use_check
u, s, v = _safe_svd(use, full_matrices=False)
ica.unmixing_matrix_ = u * s
ica.pca_components_ = v
ica.pca_explained_variance_ = s * s
ica.info = info
ica._update_mixing_matrix()
ica._update_ica_names()
return ica
|
def read_ica_eeglab(fname):
"""Load ICA information saved in an EEGLAB .set file.
Parameters
----------
fname : str
Complete path to a .set EEGLAB file that contains an ICA object.
Returns
-------
ica : instance of ICA
An ICA object based on the information contained in the input file.
"""
eeg = _check_load_mat(fname, None)
info = _get_info(eeg)[0]
pick_info(info, np.round(eeg["icachansind"]).astype(int) - 1, copy=False)
n_components = eeg.icaweights.shape[0]
ica = ICA(method="imported_eeglab", n_components=n_components)
ica.current_fit = "eeglab"
ica.ch_names = info["ch_names"]
ica.n_pca_components = None
ica.n_components_ = n_components
ica.pre_whitener_ = np.ones((len(eeg.icachansind), 1))
ica.pca_mean_ = np.zeros(len(eeg.icachansind))
n_ch = len(ica.ch_names)
assert eeg.icaweights.shape == (n_components, n_ch)
# When PCA reduction is used in EEGLAB, runica returns
# weights= weights*sphere*eigenvectors(:,1:ncomps)';
# sphere = eye(urchans). When PCA reduction is not used, we have:
#
# eeg.icawinv == pinv(eeg.icaweights @ eeg.icasphere)
#
# So in either case, we can use SVD to get our square whitened
# weights matrix (u * s) and our PCA vectors (v) back:
use = eeg.icaweights @ eeg.icasphere
u, s, v = _safe_svd(use, full_matrices=False)
ica.unmixing_matrix_ = u * s
ica.pca_components_ = v
ica.pca_explained_variance_ = s * s
ica._update_mixing_matrix()
return ica
|
https://github.com/mne-tools/mne-python/issues/8755
|
AssertionError Traceback (most recent call last)
<ipython-input-10-a6f5a4e0501a> in <module>
5 from mne import preprocessing
6
----> 7 ica = preprocessing.read_ica_eeglab('full_filepath')
8
9
~/opt/anaconda3/envs/mne/lib/python3.8/site-packages/mne/preprocessing/ica.py in read_ica_eeglab(fname)
2754
2755 n_ch = len(ica.ch_names)
-> 2756 assert eeg.icaweights.shape == (n_components, n_ch)
2757 # When PCA reduction is used in EEGLAB, runica returns
2758 # weights= weights*sphere*eigenvectors(:,1:ncomps)';
AssertionError:
|
AssertionError
|
def anonymize_info(info, daysback=None, keep_his=False, verbose=None):
"""Anonymize measurement information in place.
.. warning:: If ``info`` is part of an object like
:class:`raw.info <mne.io.Raw>`, you should directly use
the method :meth:`raw.anonymize() <mne.io.Raw.anonymize>`
to ensure that all parts of the data are anonymized and
stay synchronized (e.g.,
:class:`raw.annotations <mne.Annotations>`).
Parameters
----------
info : dict, instance of Info
Measurement information for the dataset.
%(anonymize_info_parameters)s
%(verbose)s
Returns
-------
info : instance of Info
The anonymized measurement information.
Notes
-----
%(anonymize_info_notes)s
"""
_validate_type(info, "info", "self")
default_anon_dos = datetime.datetime(
2000, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc
)
default_str = "mne_anonymize"
default_subject_id = 0
default_desc = "Anonymized using a time shift to preserve age at acquisition"
none_meas_date = info["meas_date"] is None
if none_meas_date:
warn(
"Input info has 'meas_date' set to None."
" Removing all information from time/date structures."
" *NOT* performing any time shifts"
)
info["meas_date"] = None
else:
# compute timeshift delta
if daysback is None:
delta_t = info["meas_date"] - default_anon_dos
else:
delta_t = datetime.timedelta(days=daysback)
# adjust meas_date
info["meas_date"] = info["meas_date"] - delta_t
# file_id and meas_id
for key in ("file_id", "meas_id"):
value = info.get(key)
if value is not None:
assert "msecs" not in value
if none_meas_date or ((value["secs"], value["usecs"]) == DATE_NONE):
# Don't try to shift backwards in time when no measurement
# date is available or when file_id is already a place holder
tmp = DATE_NONE
else:
tmp = _add_timedelta_to_stamp((value["secs"], value["usecs"]), -delta_t)
value["secs"] = tmp[0]
value["usecs"] = tmp[1]
# The following copy is needed for a test CTF dataset
# otherwise value['machid'][:] = 0 would suffice
_tmp = value["machid"].copy()
_tmp[:] = 0
value["machid"] = _tmp
# subject info
subject_info = info.get("subject_info")
if subject_info is not None:
if subject_info.get("id") is not None:
subject_info["id"] = default_subject_id
if keep_his:
logger.info("Not fully anonymizing info - keeping 'his_id'")
elif subject_info.get("his_id") is not None:
subject_info["his_id"] = str(default_subject_id)
for key in ("last_name", "first_name", "middle_name"):
if subject_info.get(key) is not None:
subject_info[key] = default_str
# anonymize the subject birthday
if none_meas_date:
subject_info.pop("birthday", None)
elif subject_info.get("birthday") is not None:
dob = datetime.datetime(
subject_info["birthday"][0],
subject_info["birthday"][1],
subject_info["birthday"][2],
)
dob -= delta_t
subject_info["birthday"] = dob.year, dob.month, dob.day
for key in ("weight", "height"):
if subject_info.get(key) is not None:
subject_info[key] = 0
info["experimenter"] = default_str
info["description"] = default_desc
if info["proj_id"] is not None:
info["proj_id"] = np.zeros_like(info["proj_id"])
if info["proj_name"] is not None:
info["proj_name"] = default_str
if info["utc_offset"] is not None:
info["utc_offset"] = None
proc_hist = info.get("proc_history")
if proc_hist is not None:
for record in proc_hist:
record["block_id"]["machid"][:] = 0
record["experimenter"] = default_str
if none_meas_date:
record["block_id"]["secs"] = DATE_NONE[0]
record["block_id"]["usecs"] = DATE_NONE[1]
record["date"] = DATE_NONE
else:
this_t0 = (record["block_id"]["secs"], record["block_id"]["usecs"])
this_t1 = _add_timedelta_to_stamp(this_t0, -delta_t)
record["block_id"]["secs"] = this_t1[0]
record["block_id"]["usecs"] = this_t1[1]
record["date"] = _add_timedelta_to_stamp(record["date"], -delta_t)
hi = info.get("helium_info")
if hi is not None:
if hi.get("orig_file_guid") is not None:
hi["orig_file_guid"] = default_str
if none_meas_date and hi.get("meas_date") is not None:
hi["meas_date"] = DATE_NONE
elif hi.get("meas_date") is not None:
hi["meas_date"] = _add_timedelta_to_stamp(hi["meas_date"], -delta_t)
di = info.get("device_info")
if di is not None:
for k in ("serial", "site"):
if di.get(k) is not None:
di[k] = default_str
err_mesg = (
"anonymize_info generated an inconsistent info object. Underlying Error:\n"
)
info._check_consistency(prepend_error=err_mesg)
err_mesg = (
"anonymize_info generated an inconsistent info object. "
"daysback parameter was too large. "
"Underlying Error:\n"
)
_check_dates(info, prepend_error=err_mesg)
return info
|
def anonymize_info(info, daysback=None, keep_his=False, verbose=None):
"""Anonymize measurement information in place.
.. warning:: If ``info`` is part of an object like
:class:`raw.info <mne.io.Raw>`, you should directly use
the method :meth:`raw.anonymize() <mne.io.Raw.anonymize>`
to ensure that all parts of the data are anonymized and
stay synchronized (e.g.,
:class:`raw.annotations <mne.Annotations>`).
Parameters
----------
info : dict, instance of Info
Measurement information for the dataset.
%(anonymize_info_parameters)s
%(verbose)s
Returns
-------
info : instance of Info
The anonymized measurement information.
Notes
-----
%(anonymize_info_notes)s
"""
_validate_type(info, "info", "self")
default_anon_dos = datetime.datetime(
2000, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc
)
default_str = "mne_anonymize"
default_subject_id = 0
default_desc = "Anonymized using a time shift to preserve age at acquisition"
none_meas_date = info["meas_date"] is None
if none_meas_date:
warn(
"Input info has 'meas_date' set to None."
" Removing all information from time/date structures."
" *NOT* performing any time shifts"
)
info["meas_date"] = None
else:
# compute timeshift delta
if daysback is None:
delta_t = info["meas_date"] - default_anon_dos
else:
delta_t = datetime.timedelta(days=daysback)
# adjust meas_date
info["meas_date"] = info["meas_date"] - delta_t
# file_id and meas_id
for key in ("file_id", "meas_id"):
value = info.get(key)
if value is not None:
assert "msecs" not in value
if none_meas_date:
tmp = DATE_NONE
else:
tmp = _add_timedelta_to_stamp((value["secs"], value["usecs"]), -delta_t)
value["secs"] = tmp[0]
value["usecs"] = tmp[1]
# The following copy is needed for a test CTF dataset
# otherwise value['machid'][:] = 0 would suffice
_tmp = value["machid"].copy()
_tmp[:] = 0
value["machid"] = _tmp
# subject info
subject_info = info.get("subject_info")
if subject_info is not None:
if subject_info.get("id") is not None:
subject_info["id"] = default_subject_id
if keep_his:
logger.info("Not fully anonymizing info - keeping 'his_id'")
elif subject_info.get("his_id") is not None:
subject_info["his_id"] = str(default_subject_id)
for key in ("last_name", "first_name", "middle_name"):
if subject_info.get(key) is not None:
subject_info[key] = default_str
# anonymize the subject birthday
if none_meas_date:
subject_info.pop("birthday", None)
elif subject_info.get("birthday") is not None:
dob = datetime.datetime(
subject_info["birthday"][0],
subject_info["birthday"][1],
subject_info["birthday"][2],
)
dob -= delta_t
subject_info["birthday"] = dob.year, dob.month, dob.day
for key in ("weight", "height"):
if subject_info.get(key) is not None:
subject_info[key] = 0
info["experimenter"] = default_str
info["description"] = default_desc
if info["proj_id"] is not None:
info["proj_id"] = np.zeros_like(info["proj_id"])
if info["proj_name"] is not None:
info["proj_name"] = default_str
if info["utc_offset"] is not None:
info["utc_offset"] = None
proc_hist = info.get("proc_history")
if proc_hist is not None:
for record in proc_hist:
record["block_id"]["machid"][:] = 0
record["experimenter"] = default_str
if none_meas_date:
record["block_id"]["secs"] = DATE_NONE[0]
record["block_id"]["usecs"] = DATE_NONE[1]
record["date"] = DATE_NONE
else:
this_t0 = (record["block_id"]["secs"], record["block_id"]["usecs"])
this_t1 = _add_timedelta_to_stamp(this_t0, -delta_t)
record["block_id"]["secs"] = this_t1[0]
record["block_id"]["usecs"] = this_t1[1]
record["date"] = _add_timedelta_to_stamp(record["date"], -delta_t)
hi = info.get("helium_info")
if hi is not None:
if hi.get("orig_file_guid") is not None:
hi["orig_file_guid"] = default_str
if none_meas_date and hi.get("meas_date") is not None:
hi["meas_date"] = DATE_NONE
elif hi.get("meas_date") is not None:
hi["meas_date"] = _add_timedelta_to_stamp(hi["meas_date"], -delta_t)
di = info.get("device_info")
if di is not None:
for k in ("serial", "site"):
if di.get(k) is not None:
di[k] = default_str
err_mesg = (
"anonymize_info generated an inconsistent info object. Underlying Error:\n"
)
info._check_consistency(prepend_error=err_mesg)
err_mesg = (
"anonymize_info generated an inconsistent info object. "
"daysback parameter was too large."
"Underlying Error:\n"
)
_check_dates(info, prepend_error=err_mesg)
return info
|
https://github.com/mne-tools/mne-python/issues/8661
|
Original file_id
{'version': 65537, 'machid': array([ 1082367, 1060765696]), 'secs': 1038942072, 'usecs': 190548}
Updated file_id
{'version': 65540, 'machid': array([808661043, 808661043]), 'secs': 0, 'usecs': 2147483647}
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-127-0f07c80ed58b> in <module>
22 # Anonymize
23 daysback = (raw2.info['meas_date'].date() - date(1924, 1, 1)).days
---> 24 raw2.anonymize(daysback=daysback)
<decorator-gen-35> in anonymize(self, daysback, keep_his, verbose)
E:\Evgenii_Kalenkovich\programs\miniminiconda3\envs\syntax_tagging\lib\site-packages\mne\channels\channels.py in anonymize(self, daysback, keep_his, verbose)
617 """
618 anonymize_info(self.info, daysback=daysback, keep_his=keep_his,
--> 619 verbose=verbose)
620 self.set_meas_date(self.info['meas_date']) # unify annot update
621 return self
<decorator-gen-30> in anonymize_info(info, daysback, keep_his, verbose)
E:\Evgenii_Kalenkovich\programs\miniminiconda3\envs\syntax_tagging\lib\site-packages\mne\io\meas_info.py in anonymize_info(info, daysback, keep_his, verbose)
2264 'daysback parameter was too large.'
2265 'Underlying Error:\n')
-> 2266 _check_dates(info, prepend_error=err_mesg)
2267
2268 return info
E:\Evgenii_Kalenkovich\programs\miniminiconda3\envs\syntax_tagging\lib\site-packages\mne\io\meas_info.py in _check_dates(info, prepend_error)
1478 np.iinfo('>i4').min,
1479 np.iinfo('>i4').max,
-> 1480 value[key_2]),)
1481
1482 meas_date = info.get('meas_date')
RuntimeError: anonymize_info generated an inconsistent info object. daysback parameter was too large.Underlying Error:
info[file_id][secs] must be between "-2147483648" and "2147483647", got "-2490564253"
|
RuntimeError
|
def set_bipolar_reference(
inst,
anode,
cathode,
ch_name=None,
ch_info=None,
drop_refs=True,
copy=True,
verbose=None,
):
"""Re-reference selected channels using a bipolar referencing scheme.
A bipolar reference takes the difference between two channels (the anode
minus the cathode) and adds it as a new virtual channel. The original
channels will be dropped.
Multiple anodes and cathodes can be specified, in which case multiple
virtual channels will be created. The 1st anode will be subtracted from the
1st cathode, the 2nd anode from the 2nd cathode, etc.
By default, the virtual channels will be annotated with channel info of
the anodes, their locations set to (0, 0, 0) and coil types set to
EEG_BIPOLAR.
Parameters
----------
inst : instance of Raw | Epochs | Evoked
Data containing the unreferenced channels.
anode : str | list of str
The name(s) of the channel(s) to use as anode in the bipolar reference.
cathode : str | list of str
The name(s) of the channel(s) to use as cathode in the bipolar
reference.
ch_name : str | list of str | None
The channel name(s) for the virtual channel(s) containing the resulting
signal. By default, bipolar channels are named after the anode and
cathode, but it is recommended to supply a more meaningful name.
ch_info : dict | list of dict | None
This parameter can be used to supply a dictionary (or a dictionary for
each bipolar channel) containing channel information to merge in,
overwriting the default values. Defaults to None.
drop_refs : bool
Whether to drop the anode/cathode channels from the instance.
copy : bool
Whether to operate on a copy of the data (True) or modify it in-place
(False). Defaults to True.
%(verbose)s
Returns
-------
inst : instance of Raw | Epochs | Evoked
Data with the specified channels re-referenced.
See Also
--------
set_eeg_reference : Convenience function for creating an EEG reference.
Notes
-----
1. If the anodes contain any EEG channels, this function removes
any pre-existing average reference projections.
2. During source localization, the EEG signal should have an average
reference.
3. The data must be preloaded.
.. versionadded:: 0.9.0
"""
_check_can_reref(inst)
if not isinstance(anode, list):
anode = [anode]
if not isinstance(cathode, list):
cathode = [cathode]
if len(anode) != len(cathode):
raise ValueError(
"Number of anodes (got %d) must equal the number "
"of cathodes (got %d)." % (len(anode), len(cathode))
)
if ch_name is None:
ch_name = ["%s-%s" % ac for ac in zip(anode, cathode)]
elif not isinstance(ch_name, list):
ch_name = [ch_name]
if len(ch_name) != len(anode):
raise ValueError(
"Number of channel names must equal the number of "
"anodes/cathodes (got %d)." % len(ch_name)
)
# Check for duplicate channel names (it is allowed to give the name of the
# anode or cathode channel, as they will be replaced).
for ch, a, c in zip(ch_name, anode, cathode):
if ch not in [a, c] and ch in inst.ch_names:
raise ValueError(
'There is already a channel named "%s", please '
"specify a different name for the bipolar "
"channel using the ch_name parameter." % ch
)
if ch_info is None:
ch_info = [{} for _ in anode]
elif not isinstance(ch_info, list):
ch_info = [ch_info]
if len(ch_info) != len(anode):
raise ValueError(
"Number of channel info dictionaries must equal the "
"number of anodes/cathodes."
)
# Merge specified and anode channel information dictionaries
new_chs = []
for ci, (an, ch) in enumerate(zip(anode, ch_info)):
_check_ch_keys(ch, ci, name="ch_info", check_min=False)
an_idx = inst.ch_names.index(an)
this_chs = deepcopy(inst.info["chs"][an_idx])
# Set channel location and coil type
this_chs["loc"] = np.zeros(12)
this_chs["coil_type"] = FIFF.FIFFV_COIL_EEG_BIPOLAR
this_chs.update(ch)
new_chs.append(this_chs)
if copy:
inst = inst.copy()
for i, (an, ca, name, chs) in enumerate(zip(anode, cathode, ch_name, new_chs)):
if an in anode[i + 1 :] or an in cathode[i + 1 :] or not drop_refs:
# Make a copy of the channel if it's still needed later
# otherwise it's modified inplace
_copy_channel(inst, an, "TMP")
an = "TMP"
_apply_reference(inst, [ca], [an]) # ensures preloaded
an_idx = inst.ch_names.index(an)
inst.info["chs"][an_idx] = chs
inst.info["chs"][an_idx]["ch_name"] = name
logger.info('Bipolar channel added as "%s".' % name)
inst.info._update_redundant()
if getattr(inst, "picks", None) is not None:
del inst.picks # picks cannot be tracked anymore
# Drop remaining channels.
if drop_refs:
drop_channels = list((set(anode) | set(cathode)) & set(inst.ch_names))
inst.drop_channels(drop_channels)
return inst
|
def set_bipolar_reference(
inst,
anode,
cathode,
ch_name=None,
ch_info=None,
drop_refs=True,
copy=True,
verbose=None,
):
"""Re-reference selected channels using a bipolar referencing scheme.
A bipolar reference takes the difference between two channels (the anode
minus the cathode) and adds it as a new virtual channel. The original
channels will be dropped.
Multiple anodes and cathodes can be specified, in which case multiple
virtual channels will be created. The 1st anode will be subtracted from the
1st cathode, the 2nd anode from the 2nd cathode, etc.
By default, the virtual channels will be annotated with channel info of
the anodes, their locations set to (0, 0, 0) and coil types set to
EEG_BIPOLAR.
Parameters
----------
inst : instance of Raw | Epochs | Evoked
Data containing the unreferenced channels.
anode : str | list of str
The name(s) of the channel(s) to use as anode in the bipolar reference.
cathode : str | list of str
The name(s) of the channel(s) to use as cathode in the bipolar
reference.
ch_name : str | list of str | None
The channel name(s) for the virtual channel(s) containing the resulting
signal. By default, bipolar channels are named after the anode and
cathode, but it is recommended to supply a more meaningful name.
ch_info : dict | list of dict | None
This parameter can be used to supply a dictionary (or a dictionary for
each bipolar channel) containing channel information to merge in,
overwriting the default values. Defaults to None.
drop_refs : bool
Whether to drop the anode/cathode channels from the instance.
copy : bool
Whether to operate on a copy of the data (True) or modify it in-place
(False). Defaults to True.
%(verbose)s
Returns
-------
inst : instance of Raw | Epochs | Evoked
Data with the specified channels re-referenced.
See Also
--------
set_eeg_reference : Convenience function for creating an EEG reference.
Notes
-----
1. If the anodes contain any EEG channels, this function removes
any pre-existing average reference projections.
2. During source localization, the EEG signal should have an average
reference.
3. The data must be preloaded.
.. versionadded:: 0.9.0
"""
_check_can_reref(inst)
if not isinstance(anode, list):
anode = [anode]
if not isinstance(cathode, list):
cathode = [cathode]
if len(anode) != len(cathode):
raise ValueError(
"Number of anodes (got %d) must equal the number "
"of cathodes (got %d)." % (len(anode), len(cathode))
)
if ch_name is None:
ch_name = ["%s-%s" % ac for ac in zip(anode, cathode)]
elif not isinstance(ch_name, list):
ch_name = [ch_name]
if len(ch_name) != len(anode):
raise ValueError(
"Number of channel names must equal the number of "
"anodes/cathodes (got %d)." % len(ch_name)
)
# Check for duplicate channel names (it is allowed to give the name of the
# anode or cathode channel, as they will be replaced).
for ch, a, c in zip(ch_name, anode, cathode):
if ch not in [a, c] and ch in inst.ch_names:
raise ValueError(
'There is already a channel named "%s", please '
"specify a different name for the bipolar "
"channel using the ch_name parameter." % ch
)
if ch_info is None:
ch_info = [{} for _ in anode]
elif not isinstance(ch_info, list):
ch_info = [ch_info]
if len(ch_info) != len(anode):
raise ValueError(
"Number of channel info dictionaries must equal the "
"number of anodes/cathodes."
)
# Merge specified and anode channel information dictionaries
new_chs = []
for ci, (an, ch) in enumerate(zip(anode, ch_info)):
_check_ch_keys(ch, ci, name="ch_info", check_min=False)
an_idx = inst.ch_names.index(an)
this_chs = deepcopy(inst.info["chs"][an_idx])
# Set channel location and coil type
this_chs["loc"] = np.zeros(12)
this_chs["coil_type"] = FIFF.FIFFV_COIL_EEG_BIPOLAR
this_chs.update(ch)
new_chs.append(this_chs)
if copy:
inst = inst.copy()
for i, (an, ca, name, chs) in enumerate(zip(anode, cathode, ch_name, new_chs)):
if an in anode[i + 1 :] or an in cathode[i + 1 :] or not drop_refs:
# Make a copy of the channel if it's still needed later
# otherwise it's modified inplace
_copy_channel(inst, an, "TMP")
an = "TMP"
_apply_reference(inst, [ca], [an]) # ensures preloaded
an_idx = inst.ch_names.index(an)
inst.info["chs"][an_idx] = chs
inst.info["chs"][an_idx]["ch_name"] = name
logger.info('Bipolar channel added as "%s".' % name)
inst.info._update_redundant()
# Drop remaining channels.
if drop_refs:
drop_channels = list((set(anode) | set(cathode)) & set(inst.ch_names))
inst.drop_channels(drop_channels)
return inst
|
https://github.com/mne-tools/mne-python/issues/8726
|
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
~/Documents/github_repos/mne-python/tutorials/epochs/plot_10_epochs_overview.py in <module>
23 evoked_bipolar1 = mne.set_bipolar_reference(evoked, anode=anode, cathode=cathode)
24
---> 25 epochs_bipolar = mne.set_bipolar_reference(epochs, anode=anode, cathode=cathode)
26 evoked_bipolar2 = epochs_bipolar.average()
27
<decorator-gen-232> in set_bipolar_reference(inst, anode, cathode, ch_name, ch_info, drop_refs, copy, verbose)
~/Documents/github_repos/mne-python/mne/io/reference.py in set_bipolar_reference(inst, anode, cathode, ch_name, ch_info, drop_refs, copy, verbose)
504 if drop_refs:
505 drop_channels = list((set(anode) | set(cathode)) & set(inst.ch_names))
--> 506 inst.drop_channels(drop_channels)
507
508 return inst
~/Documents/github_repos/mne-python/mne/channels/channels.py in drop_channels(self, ch_names)
894 if ch in self.ch_names]
895 idx = np.setdiff1d(np.arange(len(self.ch_names)), bad_idx)
--> 896 return self._pick_drop_channels(idx)
897
898 def _pick_drop_channels(self, idx):
~/Documents/github_repos/mne-python/mne/channels/channels.py in _pick_drop_channels(self, idx)
909
910 if getattr(self, 'picks', None) is not None:
--> 911 self.picks = self.picks[idx]
912
913 if getattr(self, '_read_picks', None) is not None:
IndexError: index 10 is out of bounds for axis 0 with size 10
|
IndexError
|
def _plot_stc(
stc,
subject,
surface,
hemi,
colormap,
time_label,
smoothing_steps,
subjects_dir,
views,
clim,
figure,
initial_time,
time_unit,
background,
time_viewer,
colorbar,
transparent,
brain_alpha,
overlay_alpha,
vector_alpha,
cortex,
foreground,
size,
scale_factor,
show_traces,
src,
volume_options,
view_layout,
add_data_kwargs,
):
from .backends.renderer import _get_3d_backend
from ..source_estimate import _BaseVolSourceEstimate
vec = stc._data_ndim == 3
subjects_dir = get_subjects_dir(subjects_dir=subjects_dir, raise_error=True)
subject = _check_subject(stc.subject, subject, True)
backend = _get_3d_backend()
del _get_3d_backend
using_mayavi = backend == "mayavi"
if using_mayavi:
from surfer import Brain
_require_version("surfer", "stc.plot", "0.9")
else: # PyVista
from ._brain import Brain
views = _check_views(surface, views, hemi, stc, backend)
_check_option("hemi", hemi, ["lh", "rh", "split", "both"])
_check_option("view_layout", view_layout, ("vertical", "horizontal"))
time_label, times = _handle_time(time_label, time_unit, stc.times)
# convert control points to locations in colormap
use = stc.magnitude().data if vec else stc.data
mapdata = _process_clim(clim, colormap, transparent, use, allow_pos_lims=not vec)
volume = _check_volume(stc, src, surface, backend)
# XXX we should only need to do this for PySurfer/Mayavi, the PyVista
# plotter should be smart enough to do this separation in the cmap-to-ctab
# conversion. But this will need to be another refactoring that will
# hopefully restore this line:
#
# if using_mayavi:
_separate_map(mapdata)
colormap = mapdata["colormap"]
diverging = "pos_lims" in mapdata["clim"]
scale_pts = mapdata["clim"]["pos_lims" if diverging else "lims"]
transparent = mapdata["transparent"]
del mapdata
if hemi in ["both", "split"]:
hemis = ["lh", "rh"]
else:
hemis = [hemi]
if overlay_alpha is None:
overlay_alpha = brain_alpha
if overlay_alpha == 0:
smoothing_steps = 1 # Disable smoothing to save time.
title = subject if len(hemis) > 1 else "%s - %s" % (subject, hemis[0])
kwargs = {
"subject_id": subject,
"hemi": hemi,
"surf": surface,
"title": title,
"cortex": cortex,
"size": size,
"background": background,
"foreground": foreground,
"figure": figure,
"subjects_dir": subjects_dir,
"views": views,
"alpha": brain_alpha,
}
if backend in ["pyvista", "notebook"]:
kwargs["show"] = False
kwargs["view_layout"] = view_layout
else:
kwargs.update(_check_pysurfer_antialias(Brain))
if view_layout != "vertical":
raise ValueError(
'view_layout must be "vertical" when using the mayavi backend'
)
with warnings.catch_warnings(record=True): # traits warnings
brain = Brain(**kwargs)
del kwargs
if scale_factor is None:
# Configure the glyphs scale directly
width = np.mean(
[
np.ptp(brain.geo[hemi].coords[:, 1])
for hemi in hemis
if hemi in brain.geo
]
)
scale_factor = 0.025 * width / scale_pts[-1]
if transparent is None:
transparent = True
sd_kwargs = dict(transparent=transparent, verbose=False)
center = 0.0 if diverging else None
kwargs = {
"array": stc,
"colormap": colormap,
"smoothing_steps": smoothing_steps,
"time": times,
"time_label": time_label,
"alpha": overlay_alpha,
"colorbar": colorbar,
"vector_alpha": vector_alpha,
"scale_factor": scale_factor,
"verbose": False,
"initial_time": initial_time,
"transparent": transparent,
"center": center,
"fmin": scale_pts[0],
"fmid": scale_pts[1],
"fmax": scale_pts[2],
"clim": clim,
"src": src,
"volume_options": volume_options,
"verbose": False,
}
for hemi in hemis:
if isinstance(stc, _BaseVolSourceEstimate): # no surf data
break
vertices = stc.vertices[0 if hemi == "lh" else 1]
if len(vertices) == 0: # no surf data for the given hemi
continue # no data
use_kwargs = kwargs.copy()
use_kwargs.update(hemi=hemi)
if using_mayavi:
del use_kwargs["clim"], use_kwargs["src"]
del use_kwargs["volume_options"]
use_kwargs.update(
min=use_kwargs.pop("fmin"),
mid=use_kwargs.pop("fmid"),
max=use_kwargs.pop("fmax"),
array=getattr(stc, hemi + "_data"),
vertices=vertices,
)
if add_data_kwargs is not None:
use_kwargs.update(add_data_kwargs)
with warnings.catch_warnings(record=True): # traits warnings
brain.add_data(**use_kwargs)
if using_mayavi:
brain.scale_data_colormap(
fmin=scale_pts[0], fmid=scale_pts[1], fmax=scale_pts[2], **sd_kwargs
)
if volume:
use_kwargs = kwargs.copy()
use_kwargs.update(hemi="vol")
brain.add_data(**use_kwargs)
del kwargs
need_peeling = brain_alpha < 1.0 and sys.platform != "darwin" and vec
if using_mayavi:
for hemi in hemis:
for b in brain._brain_list:
for layer in b["brain"].data.values():
glyphs = layer["glyphs"]
if glyphs is None:
continue
glyphs.glyph.glyph.scale_factor = scale_factor
glyphs.glyph.glyph.clamping = False
glyphs.glyph.glyph.range = (0.0, 1.0)
# depth peeling patch
if need_peeling:
for ff in brain._figures:
for f in ff:
if f.scene is not None and sys.platform != "darwin":
f.scene.renderer.use_depth_peeling = True
elif need_peeling:
brain.enable_depth_peeling()
# time_viewer and show_traces
_check_option("time_viewer", time_viewer, (True, False, "auto"))
_validate_type(show_traces, (str, bool, "numeric"), "show_traces")
if isinstance(show_traces, str):
_check_option(
"show_traces", show_traces, ("auto", "separate"), extra="when a string"
)
if time_viewer == "auto":
time_viewer = not using_mayavi
if show_traces == "auto":
show_traces = (
not using_mayavi
and time_viewer
and brain._times is not None
and len(brain._times) > 1
)
if show_traces and not time_viewer:
raise ValueError("show_traces cannot be used when time_viewer=False")
if using_mayavi and show_traces:
raise NotImplementedError(
"show_traces=True is not available for the mayavi 3d backend."
)
if time_viewer:
if using_mayavi:
from surfer import TimeViewer
TimeViewer(brain)
else: # PyVista
brain.setup_time_viewer(time_viewer=time_viewer, show_traces=show_traces)
else:
if not using_mayavi:
brain.show()
return brain
|
def _plot_stc(
stc,
subject,
surface,
hemi,
colormap,
time_label,
smoothing_steps,
subjects_dir,
views,
clim,
figure,
initial_time,
time_unit,
background,
time_viewer,
colorbar,
transparent,
brain_alpha,
overlay_alpha,
vector_alpha,
cortex,
foreground,
size,
scale_factor,
show_traces,
src,
volume_options,
view_layout,
add_data_kwargs,
):
from .backends.renderer import _get_3d_backend
from ..source_estimate import _BaseVolSourceEstimate
vec = stc._data_ndim == 3
subjects_dir = get_subjects_dir(subjects_dir=subjects_dir, raise_error=True)
subject = _check_subject(stc.subject, subject, True)
backend = _get_3d_backend()
del _get_3d_backend
using_mayavi = backend == "mayavi"
if using_mayavi:
from surfer import Brain
_require_version("surfer", "stc.plot", "0.9")
else: # PyVista
from ._brain import Brain
views = _check_views(surface, views, hemi, stc, backend)
_check_option("hemi", hemi, ["lh", "rh", "split", "both"])
_check_option("view_layout", view_layout, ("vertical", "horizontal"))
time_label, times = _handle_time(time_label, time_unit, stc.times)
# convert control points to locations in colormap
use = stc.magnitude().data if vec else stc.data
mapdata = _process_clim(clim, colormap, transparent, use, allow_pos_lims=not vec)
volume = _check_volume(stc, src, surface, backend)
# XXX we should only need to do this for PySurfer/Mayavi, the PyVista
# plotter should be smart enough to do this separation in the cmap-to-ctab
# conversion. But this will need to be another refactoring that will
# hopefully restore this line:
#
# if using_mayavi:
_separate_map(mapdata)
colormap = mapdata["colormap"]
diverging = "pos_lims" in mapdata["clim"]
scale_pts = mapdata["clim"]["pos_lims" if diverging else "lims"]
transparent = mapdata["transparent"]
del mapdata
if hemi in ["both", "split"]:
hemis = ["lh", "rh"]
else:
hemis = [hemi]
if overlay_alpha is None:
overlay_alpha = brain_alpha
if overlay_alpha == 0:
smoothing_steps = 1 # Disable smoothing to save time.
title = subject if len(hemis) > 1 else "%s - %s" % (subject, hemis[0])
kwargs = {
"subject_id": subject,
"hemi": hemi,
"surf": surface,
"title": title,
"cortex": cortex,
"size": size,
"background": background,
"foreground": foreground,
"figure": figure,
"subjects_dir": subjects_dir,
"views": views,
"alpha": brain_alpha,
}
if backend in ["pyvista", "notebook"]:
kwargs["show"] = False
kwargs["view_layout"] = view_layout
else:
kwargs.update(_check_pysurfer_antialias(Brain))
if view_layout != "vertical":
raise ValueError(
'view_layout must be "vertical" when using the mayavi backend'
)
with warnings.catch_warnings(record=True): # traits warnings
brain = Brain(**kwargs)
del kwargs
if scale_factor is None:
# Configure the glyphs scale directly
width = np.mean(
[
np.ptp(brain.geo[hemi].coords[:, 1])
for hemi in hemis
if hemi in brain.geo
]
)
scale_factor = 0.025 * width / scale_pts[-1]
if transparent is None:
transparent = True
sd_kwargs = dict(transparent=transparent, verbose=False)
center = 0.0 if diverging else None
kwargs = {
"array": stc,
"colormap": colormap,
"smoothing_steps": smoothing_steps,
"time": times,
"time_label": time_label,
"alpha": overlay_alpha,
"colorbar": colorbar,
"vector_alpha": vector_alpha,
"scale_factor": scale_factor,
"verbose": False,
"initial_time": initial_time,
"transparent": transparent,
"center": center,
"fmin": scale_pts[0],
"fmid": scale_pts[1],
"fmax": scale_pts[2],
"clim": clim,
"src": src,
"volume_options": volume_options,
"verbose": False,
}
for hi, hemi in enumerate(hemis):
if isinstance(stc, _BaseVolSourceEstimate): # no surf data
break
vertices = stc.vertices[hi]
if len(stc.vertices[hi]) == 0: # no surf data for the given hemi
continue # no data
use_kwargs = kwargs.copy()
use_kwargs.update(hemi=hemi)
if using_mayavi:
del use_kwargs["clim"], use_kwargs["src"]
del use_kwargs["volume_options"]
use_kwargs.update(
min=use_kwargs.pop("fmin"),
mid=use_kwargs.pop("fmid"),
max=use_kwargs.pop("fmax"),
array=getattr(stc, hemi + "_data"),
vertices=vertices,
)
if add_data_kwargs is not None:
use_kwargs.update(add_data_kwargs)
with warnings.catch_warnings(record=True): # traits warnings
brain.add_data(**use_kwargs)
if using_mayavi:
brain.scale_data_colormap(
fmin=scale_pts[0], fmid=scale_pts[1], fmax=scale_pts[2], **sd_kwargs
)
if volume:
use_kwargs = kwargs.copy()
use_kwargs.update(hemi="vol")
brain.add_data(**use_kwargs)
del kwargs
need_peeling = brain_alpha < 1.0 and sys.platform != "darwin" and vec
if using_mayavi:
for hemi in hemis:
for b in brain._brain_list:
for layer in b["brain"].data.values():
glyphs = layer["glyphs"]
if glyphs is None:
continue
glyphs.glyph.glyph.scale_factor = scale_factor
glyphs.glyph.glyph.clamping = False
glyphs.glyph.glyph.range = (0.0, 1.0)
# depth peeling patch
if need_peeling:
for ff in brain._figures:
for f in ff:
if f.scene is not None and sys.platform != "darwin":
f.scene.renderer.use_depth_peeling = True
elif need_peeling:
brain.enable_depth_peeling()
# time_viewer and show_traces
_check_option("time_viewer", time_viewer, (True, False, "auto"))
_validate_type(show_traces, (str, bool, "numeric"), "show_traces")
if isinstance(show_traces, str):
_check_option(
"show_traces", show_traces, ("auto", "separate"), extra="when a string"
)
if time_viewer == "auto":
time_viewer = not using_mayavi
if show_traces == "auto":
show_traces = (
not using_mayavi
and time_viewer
and brain._times is not None
and len(brain._times) > 1
)
if show_traces and not time_viewer:
raise ValueError("show_traces cannot be used when time_viewer=False")
if using_mayavi and show_traces:
raise NotImplementedError(
"show_traces=True is not available for the mayavi 3d backend."
)
if time_viewer:
if using_mayavi:
from surfer import TimeViewer
TimeViewer(brain)
else: # PyVista
brain.setup_time_viewer(time_viewer=time_viewer, show_traces=show_traces)
else:
if not using_mayavi:
brain.show()
return brain
|
https://github.com/mne-tools/mne-python/issues/8609
|
Traceback (most recent call last):
File "/home/circleci/project/examples/inverse/plot_time_frequency_mixed_norm_inverse.py", line 126, in <module>
brain = stc.plot('sample', 'inflated', 'rh', views='medial',
File "/home/circleci/project/mne/source_estimate.py", line 653, in plot
brain = plot_source_estimates(
File "<decorator-gen-139>", line 24, in plot_source_estimates
File "/home/circleci/project/mne/viz/_3d.py", line 1794, in plot_source_estimates
return _plot_stc(
File "/home/circleci/project/mne/viz/_3d.py", line 1984, in _plot_stc
brain.setup_time_viewer(time_viewer=time_viewer,
File "/home/circleci/project/mne/viz/_brain/_brain.py", line 416, in setup_time_viewer
self._configure_sliders()
File "/home/circleci/project/mne/viz/_brain/_brain.py", line 719, in _configure_sliders
value=self._data['smoothing_steps'],
KeyError: 'smoothing_steps'
|
KeyError
|
def _read_annotations_eeglab(eeg, uint16_codec=None):
r"""Create Annotations from EEGLAB file.
This function reads the event attribute from the EEGLAB
structure and makes an :class:`mne.Annotations` object.
Parameters
----------
eeg : object | str
'EEG' struct or the path to the (EEGLAB) .set file.
uint16_codec : str | None
If your \*.set file contains non-ascii characters, sometimes reading
it may fail and give rise to error message stating that "buffer is
too small". ``uint16_codec`` allows to specify what codec (for example:
'latin1' or 'utf-8') should be used when reading character arrays and
can therefore help you solve this problem.
Returns
-------
annotations : instance of Annotations
The annotations present in the file.
"""
if isinstance(eeg, str):
eeg = _check_load_mat(eeg, uint16_codec=uint16_codec)
if not hasattr(eeg, "event"):
events = []
elif isinstance(eeg.event, dict) and np.array(eeg.event["latency"]).ndim > 0:
events = _dol_to_lod(eeg.event)
elif not isinstance(eeg.event, (np.ndarray, list)):
events = [eeg.event]
else:
events = eeg.event
events = _bunchify(events)
description = [str(event.type) for event in events]
onset = [event.latency - 1 for event in events]
duration = np.zeros(len(onset))
if len(events) > 0 and hasattr(events[0], "duration"):
for idx, event in enumerate(events):
# empty duration fields are read as empty arrays
is_empty_array = (
isinstance(event.duration, np.ndarray) and len(event.duration) == 0
)
duration[idx] = np.nan if is_empty_array else event.duration
return Annotations(
onset=np.array(onset) / eeg.srate,
duration=duration / eeg.srate,
description=description,
orig_time=None,
)
|
def _read_annotations_eeglab(eeg, uint16_codec=None):
r"""Create Annotations from EEGLAB file.
This function reads the event attribute from the EEGLAB
structure and makes an :class:`mne.Annotations` object.
Parameters
----------
eeg : object | str
'EEG' struct or the path to the (EEGLAB) .set file.
uint16_codec : str | None
If your \*.set file contains non-ascii characters, sometimes reading
it may fail and give rise to error message stating that "buffer is
too small". ``uint16_codec`` allows to specify what codec (for example:
'latin1' or 'utf-8') should be used when reading character arrays and
can therefore help you solve this problem.
Returns
-------
annotations : instance of Annotations
The annotations present in the file.
"""
if isinstance(eeg, str):
eeg = _check_load_mat(eeg, uint16_codec=uint16_codec)
if not hasattr(eeg, "event"):
events = []
elif isinstance(eeg.event, dict) and np.array(eeg.event["latency"]).ndim > 0:
events = _dol_to_lod(eeg.event)
elif not isinstance(eeg.event, (np.ndarray, list)):
events = [eeg.event]
else:
events = eeg.event
events = _bunchify(events)
description = [str(event.type) for event in events]
onset = [event.latency - 1 for event in events]
duration = np.zeros(len(onset))
if len(events) > 0 and hasattr(events[0], "duration"):
duration[:] = [event.duration for event in events]
return Annotations(
onset=np.array(onset) / eeg.srate,
duration=duration / eeg.srate,
description=description,
orig_time=None,
)
|
https://github.com/mne-tools/mne-python/issues/8383
|
TypeError Traceback (most recent call last)
TypeError: only size-1 arrays can be converted to Python scalars
The above exception was the direct cause of the following exception:
ValueError Traceback (most recent call last)
<ipython-input-20-49d5d8853521> in <module>
----> 1 mne.io.read_raw_eeglab(os.path.join(folder_with_data, files_all[0]))
~.conda\envs\mne\lib\site-packages\mne\io\eeglab\eeglab.py in read_raw_eeglab(input_fname, eog, preload, uint16_codec, verbose)
219 .. versionadded:: 0.11.0
220 """
--> 221 return RawEEGLAB(input_fname=input_fname, preload=preload,
222 eog=eog, verbose=verbose, uint16_codec=uint16_codec)
223
<decorator-gen-197> in init(self, input_fname, eog, preload, uint16_codec, verbose)
~.conda\envs\mne\lib\site-packages\mne\io\eeglab\eeglab.py in init(self, input_fname, eog, preload, uint16_codec, verbose)
353
354 # create event_ch from annotations
--> 355 annot = read_annotations(input_fname)
356 self.set_annotations(annot)
357 _check_boundary(annot, None)
~.conda\envs\mne\lib\site-packages\mne\annotations.py in read_annotations(fname, sfreq, uint16_codec)
665
666 elif name.endswith('set'):
--> 667 annotations = _read_annotations_eeglab(fname,
668 uint16_codec=uint16_codec)
669
~.conda\envs\mne\lib\site-packages\mne\io\eeglab\eeglab.py in _read_annotations_eeglab(eeg, uint16_codec)
611 duration = np.zeros(len(onset))
612 if len(events) > 0 and hasattr(events[0], 'duration'):
--> 613 duration[:] = [event.duration for event in events]
614
615 return Annotations(onset=np.array(onset) / eeg.srate,
ValueError: setting an array element with a sequence.
|
TypeError
|
def _data_path(
path=None,
force_update=False,
update_path=True,
download=True,
name=None,
check_version=False,
return_version=False,
archive_name=None,
):
"""Aux function."""
key = {
"fake": "MNE_DATASETS_FAKE_PATH",
"misc": "MNE_DATASETS_MISC_PATH",
"sample": "MNE_DATASETS_SAMPLE_PATH",
"spm": "MNE_DATASETS_SPM_FACE_PATH",
"somato": "MNE_DATASETS_SOMATO_PATH",
"brainstorm": "MNE_DATASETS_BRAINSTORM_PATH",
"testing": "MNE_DATASETS_TESTING_PATH",
"multimodal": "MNE_DATASETS_MULTIMODAL_PATH",
"fnirs_motor": "MNE_DATASETS_FNIRS_MOTOR_PATH",
"opm": "MNE_DATASETS_OPM_PATH",
"visual_92_categories": "MNE_DATASETS_VISUAL_92_CATEGORIES_PATH",
"kiloword": "MNE_DATASETS_KILOWORD_PATH",
"mtrf": "MNE_DATASETS_MTRF_PATH",
"fieldtrip_cmc": "MNE_DATASETS_FIELDTRIP_CMC_PATH",
"phantom_4dbti": "MNE_DATASETS_PHANTOM_4DBTI_PATH",
"limo": "MNE_DATASETS_LIMO_PATH",
"refmeg_noise": "MNE_DATASETS_REFMEG_NOISE_PATH",
}[name]
path = _get_path(path, key, name)
# To update the testing or misc dataset, push commits, then make a new
# release on GitHub. Then update the "releases" variable:
releases = dict(testing="0.108", misc="0.6")
# And also update the "md5_hashes['testing']" variable below.
# To update any other dataset, update the data archive itself (upload
# an updated version) and update the md5 hash.
# try to match url->archive_name->folder_name
urls = dict( # the URLs to use
brainstorm=dict(
bst_auditory="https://osf.io/5t9n8/download?version=1",
bst_phantom_ctf="https://osf.io/sxr8y/download?version=1",
bst_phantom_elekta="https://osf.io/dpcku/download?version=1",
bst_raw="https://osf.io/9675n/download?version=2",
bst_resting="https://osf.io/m7bd3/download?version=3",
),
fake="https://github.com/mne-tools/mne-testing-data/raw/master/"
"datasets/foo.tgz",
misc="https://codeload.github.com/mne-tools/mne-misc-data/"
"tar.gz/%s" % releases["misc"],
sample="https://osf.io/86qa2/download?version=5",
somato="https://osf.io/tp4sg/download?version=7",
spm="https://osf.io/je4s8/download?version=2",
testing="https://codeload.github.com/mne-tools/mne-testing-data/"
"tar.gz/%s" % releases["testing"],
multimodal="https://ndownloader.figshare.com/files/5999598",
fnirs_motor="https://osf.io/dj3eh/download?version=1",
opm="https://osf.io/p6ae7/download?version=2",
visual_92_categories=[
"https://osf.io/8ejrs/download?version=1",
"https://osf.io/t4yjp/download?version=1",
],
mtrf="https://osf.io/h85s2/download?version=1",
kiloword="https://osf.io/qkvf9/download?version=1",
fieldtrip_cmc="https://osf.io/j9b6s/download?version=1",
phantom_4dbti="https://osf.io/v2brw/download?version=2",
refmeg_noise="https://osf.io/drt6v/download?version=1",
)
# filename of the resulting downloaded archive (only needed if the URL
# name does not match resulting filename)
archive_names = dict(
fieldtrip_cmc="SubjectCMC.zip",
kiloword="MNE-kiloword-data.tar.gz",
misc="mne-misc-data-%s.tar.gz" % releases["misc"],
mtrf="mTRF_1.5.zip",
multimodal="MNE-multimodal-data.tar.gz",
fnirs_motor="MNE-fNIRS-motor-data.tgz",
opm="MNE-OPM-data.tar.gz",
sample="MNE-sample-data-processed.tar.gz",
somato="MNE-somato-data.tar.gz",
spm="MNE-spm-face.tar.gz",
testing="mne-testing-data-%s.tar.gz" % releases["testing"],
visual_92_categories=[
"MNE-visual_92_categories-data-part1.tar.gz",
"MNE-visual_92_categories-data-part2.tar.gz",
],
phantom_4dbti="MNE-phantom-4DBTi.zip",
refmeg_noise="sample_reference_MEG_noise-raw.zip",
)
# original folder names that get extracted (only needed if the
# archive does not extract the right folder name; e.g., usually GitHub)
folder_origs = dict( # not listed means None (no need to move)
misc="mne-misc-data-%s" % releases["misc"],
testing="mne-testing-data-%s" % releases["testing"],
)
# finally, where we want them to extract to (only needed if the folder name
# is not the same as the last bit of the archive name without the file
# extension)
folder_names = dict(
brainstorm="MNE-brainstorm-data",
fake="foo",
misc="MNE-misc-data",
mtrf="mTRF_1.5",
sample="MNE-sample-data",
testing="MNE-testing-data",
visual_92_categories="MNE-visual_92_categories-data",
fieldtrip_cmc="MNE-fieldtrip_cmc-data",
phantom_4dbti="MNE-phantom-4DBTi",
refmeg_noise="MNE-refmeg-noise-data",
)
md5_hashes = dict(
brainstorm=dict(
bst_auditory="fa371a889a5688258896bfa29dd1700b",
bst_phantom_ctf="80819cb7f5b92d1a5289db3fb6acb33c",
bst_phantom_elekta="1badccbe17998d18cc373526e86a7aaf",
bst_raw="fa2efaaec3f3d462b319bc24898f440c",
bst_resting="70fc7bf9c3b97c4f2eab6260ee4a0430",
),
fake="3194e9f7b46039bb050a74f3e1ae9908",
misc="e00808c3b05123059e2cf49ff276e919",
sample="12b75d1cb7df9dfb4ad73ed82f61094f",
somato="32fd2f6c8c7eb0784a1de6435273c48b",
spm="9f43f67150e3b694b523a21eb929ea75",
testing="57a21229ebb11e3c470b5d0fdb80642f",
multimodal="26ec847ae9ab80f58f204d09e2c08367",
fnirs_motor="c4935d19ddab35422a69f3326a01fef8",
opm="370ad1dcfd5c47e029e692c85358a374",
visual_92_categories=[
"74f50bbeb65740903eadc229c9fa759f",
"203410a98afc9df9ae8ba9f933370e20",
],
kiloword="3a124170795abbd2e48aae8727e719a8",
mtrf="273a390ebbc48da2c3184b01a82e4636",
fieldtrip_cmc="6f9fd6520f9a66e20994423808d2528c",
phantom_4dbti="938a601440f3ffa780d20a17bae039ff",
refmeg_noise="779fecd890d98b73a4832e717d7c7c45",
)
assert set(md5_hashes.keys()) == set(urls.keys())
url = urls[name]
hash_ = md5_hashes[name]
folder_orig = folder_origs.get(name, None)
if name == "brainstorm":
assert archive_name is not None
url = [url[archive_name.split(".")[0]]]
folder_path = [op.join(path, folder_names[name], archive_name.split(".")[0])]
hash_ = [hash_[archive_name.split(".")[0]]]
archive_name = [archive_name]
else:
url = [url] if not isinstance(url, list) else url
hash_ = [hash_] if not isinstance(hash_, list) else hash_
archive_name = archive_names.get(name)
if archive_name is None:
archive_name = [u.split("/")[-1] for u in url]
if not isinstance(archive_name, list):
archive_name = [archive_name]
folder_path = [
op.join(path, folder_names.get(name, a.split(".")[0])) for a in archive_name
]
if not isinstance(folder_orig, list):
folder_orig = [folder_orig] * len(url)
folder_path = [op.abspath(f) for f in folder_path]
assert hash_ is not None
assert all(isinstance(x, list) for x in (url, archive_name, hash_, folder_path))
assert len(url) == len(archive_name) == len(hash_) == len(folder_path)
logger.debug("URL: %s" % (url,))
logger.debug("archive_name: %s" % (archive_name,))
logger.debug("hash: %s" % (hash_,))
logger.debug("folder_path: %s" % (folder_path,))
need_download = any(not op.exists(f) for f in folder_path)
if need_download and not download:
return ""
if need_download or force_update:
logger.debug(
"Downloading: need_download=%s, force_update=%s"
% (need_download, force_update)
)
for f in folder_path:
logger.debug(" Exists: %s: %s" % (f, op.exists(f)))
if name == "brainstorm":
if "--accept-brainstorm-license" in sys.argv:
answer = "y"
else:
answer = input("%sAgree (y/[n])? " % _bst_license_text)
if answer.lower() != "y":
raise RuntimeError("You must agree to the license to use this dataset")
assert len(url) == len(hash_)
assert len(url) == len(archive_name)
assert len(url) == len(folder_orig)
assert len(url) == len(folder_path)
assert len(url) > 0
# 1. Get all the archives
full_name = list()
for u, an, h, fo in zip(url, archive_name, hash_, folder_orig):
remove_archive, full = _download(path, u, an, h)
full_name.append(full)
del archive_name
# 2. Extract all of the files
remove_dir = True
for u, fp, an, h, fo in zip(url, folder_path, full_name, hash_, folder_orig):
_extract(path, name, fp, an, fo, remove_dir)
remove_dir = False # only do on first iteration
# 3. Remove all of the archives
if remove_archive:
for an in full_name:
os.remove(op.join(path, an))
logger.info("Successfully extracted to: %s" % folder_path)
_do_path_update(path, update_path, key, name)
path = folder_path[0]
# compare the version of the dataset and mne
data_version = _dataset_version(path, name)
# 0.7 < 0.7.git should be False, therefore strip
if check_version and (
LooseVersion(data_version) < LooseVersion(mne_version.strip(".git"))
):
warn(
"The {name} dataset (version {current}) is older than "
"mne-python (version {newest}). If the examples fail, "
"you may need to update the {name} dataset by using "
"mne.datasets.{name}.data_path(force_update=True)".format(
name=name, current=data_version, newest=mne_version
)
)
return (path, data_version) if return_version else path
|
def _data_path(
path=None,
force_update=False,
update_path=True,
download=True,
name=None,
check_version=False,
return_version=False,
archive_name=None,
):
"""Aux function."""
key = {
"fake": "MNE_DATASETS_FAKE_PATH",
"misc": "MNE_DATASETS_MISC_PATH",
"sample": "MNE_DATASETS_SAMPLE_PATH",
"spm": "MNE_DATASETS_SPM_FACE_PATH",
"somato": "MNE_DATASETS_SOMATO_PATH",
"brainstorm": "MNE_DATASETS_BRAINSTORM_PATH",
"testing": "MNE_DATASETS_TESTING_PATH",
"multimodal": "MNE_DATASETS_MULTIMODAL_PATH",
"fnirs_motor": "MNE_DATASETS_FNIRS_MOTOR_PATH",
"opm": "MNE_DATASETS_OPM_PATH",
"visual_92_categories": "MNE_DATASETS_VISUAL_92_CATEGORIES_PATH",
"kiloword": "MNE_DATASETS_KILOWORD_PATH",
"mtrf": "MNE_DATASETS_MTRF_PATH",
"fieldtrip_cmc": "MNE_DATASETS_FIELDTRIP_CMC_PATH",
"phantom_4dbti": "MNE_DATASETS_PHANTOM_4DBTI_PATH",
"limo": "MNE_DATASETS_LIMO_PATH",
"refmeg_noise": "MNE_DATASETS_REFMEG_NOISE_PATH",
}[name]
path = _get_path(path, key, name)
# To update the testing or misc dataset, push commits, then make a new
# release on GitHub. Then update the "releases" variable:
releases = dict(testing="0.106", misc="0.6")
# And also update the "md5_hashes['testing']" variable below.
# To update any other dataset, update the data archive itself (upload
# an updated version) and update the md5 hash.
# try to match url->archive_name->folder_name
urls = dict( # the URLs to use
brainstorm=dict(
bst_auditory="https://osf.io/5t9n8/download?version=1",
bst_phantom_ctf="https://osf.io/sxr8y/download?version=1",
bst_phantom_elekta="https://osf.io/dpcku/download?version=1",
bst_raw="https://osf.io/9675n/download?version=2",
bst_resting="https://osf.io/m7bd3/download?version=3",
),
fake="https://github.com/mne-tools/mne-testing-data/raw/master/"
"datasets/foo.tgz",
misc="https://codeload.github.com/mne-tools/mne-misc-data/"
"tar.gz/%s" % releases["misc"],
sample="https://osf.io/86qa2/download?version=5",
somato="https://osf.io/tp4sg/download?version=7",
spm="https://osf.io/je4s8/download?version=2",
testing="https://codeload.github.com/mne-tools/mne-testing-data/"
"tar.gz/%s" % releases["testing"],
multimodal="https://ndownloader.figshare.com/files/5999598",
fnirs_motor="https://osf.io/dj3eh/download?version=1",
opm="https://osf.io/p6ae7/download?version=2",
visual_92_categories=[
"https://osf.io/8ejrs/download?version=1",
"https://osf.io/t4yjp/download?version=1",
],
mtrf="https://osf.io/h85s2/download?version=1",
kiloword="https://osf.io/qkvf9/download?version=1",
fieldtrip_cmc="https://osf.io/j9b6s/download?version=1",
phantom_4dbti="https://osf.io/v2brw/download?version=2",
refmeg_noise="https://osf.io/drt6v/download?version=1",
)
# filename of the resulting downloaded archive (only needed if the URL
# name does not match resulting filename)
archive_names = dict(
fieldtrip_cmc="SubjectCMC.zip",
kiloword="MNE-kiloword-data.tar.gz",
misc="mne-misc-data-%s.tar.gz" % releases["misc"],
mtrf="mTRF_1.5.zip",
multimodal="MNE-multimodal-data.tar.gz",
fnirs_motor="MNE-fNIRS-motor-data.tgz",
opm="MNE-OPM-data.tar.gz",
sample="MNE-sample-data-processed.tar.gz",
somato="MNE-somato-data.tar.gz",
spm="MNE-spm-face.tar.gz",
testing="mne-testing-data-%s.tar.gz" % releases["testing"],
visual_92_categories=[
"MNE-visual_92_categories-data-part1.tar.gz",
"MNE-visual_92_categories-data-part2.tar.gz",
],
phantom_4dbti="MNE-phantom-4DBTi.zip",
refmeg_noise="sample_reference_MEG_noise-raw.zip",
)
# original folder names that get extracted (only needed if the
# archive does not extract the right folder name; e.g., usually GitHub)
folder_origs = dict( # not listed means None (no need to move)
misc="mne-misc-data-%s" % releases["misc"],
testing="mne-testing-data-%s" % releases["testing"],
)
# finally, where we want them to extract to (only needed if the folder name
# is not the same as the last bit of the archive name without the file
# extension)
folder_names = dict(
brainstorm="MNE-brainstorm-data",
fake="foo",
misc="MNE-misc-data",
mtrf="mTRF_1.5",
sample="MNE-sample-data",
testing="MNE-testing-data",
visual_92_categories="MNE-visual_92_categories-data",
fieldtrip_cmc="MNE-fieldtrip_cmc-data",
phantom_4dbti="MNE-phantom-4DBTi",
refmeg_noise="MNE-refmeg-noise-data",
)
md5_hashes = dict(
brainstorm=dict(
bst_auditory="fa371a889a5688258896bfa29dd1700b",
bst_phantom_ctf="80819cb7f5b92d1a5289db3fb6acb33c",
bst_phantom_elekta="1badccbe17998d18cc373526e86a7aaf",
bst_raw="fa2efaaec3f3d462b319bc24898f440c",
bst_resting="70fc7bf9c3b97c4f2eab6260ee4a0430",
),
fake="3194e9f7b46039bb050a74f3e1ae9908",
misc="e00808c3b05123059e2cf49ff276e919",
sample="12b75d1cb7df9dfb4ad73ed82f61094f",
somato="32fd2f6c8c7eb0784a1de6435273c48b",
spm="9f43f67150e3b694b523a21eb929ea75",
testing="d67eff9e1089f15b69f88931dbbf35df",
multimodal="26ec847ae9ab80f58f204d09e2c08367",
fnirs_motor="c4935d19ddab35422a69f3326a01fef8",
opm="370ad1dcfd5c47e029e692c85358a374",
visual_92_categories=[
"74f50bbeb65740903eadc229c9fa759f",
"203410a98afc9df9ae8ba9f933370e20",
],
kiloword="3a124170795abbd2e48aae8727e719a8",
mtrf="273a390ebbc48da2c3184b01a82e4636",
fieldtrip_cmc="6f9fd6520f9a66e20994423808d2528c",
phantom_4dbti="938a601440f3ffa780d20a17bae039ff",
refmeg_noise="779fecd890d98b73a4832e717d7c7c45",
)
assert set(md5_hashes.keys()) == set(urls.keys())
url = urls[name]
hash_ = md5_hashes[name]
folder_orig = folder_origs.get(name, None)
if name == "brainstorm":
assert archive_name is not None
url = [url[archive_name.split(".")[0]]]
folder_path = [op.join(path, folder_names[name], archive_name.split(".")[0])]
hash_ = [hash_[archive_name.split(".")[0]]]
archive_name = [archive_name]
else:
url = [url] if not isinstance(url, list) else url
hash_ = [hash_] if not isinstance(hash_, list) else hash_
archive_name = archive_names.get(name)
if archive_name is None:
archive_name = [u.split("/")[-1] for u in url]
if not isinstance(archive_name, list):
archive_name = [archive_name]
folder_path = [
op.join(path, folder_names.get(name, a.split(".")[0])) for a in archive_name
]
if not isinstance(folder_orig, list):
folder_orig = [folder_orig] * len(url)
folder_path = [op.abspath(f) for f in folder_path]
assert hash_ is not None
assert all(isinstance(x, list) for x in (url, archive_name, hash_, folder_path))
assert len(url) == len(archive_name) == len(hash_) == len(folder_path)
logger.debug("URL: %s" % (url,))
logger.debug("archive_name: %s" % (archive_name,))
logger.debug("hash: %s" % (hash_,))
logger.debug("folder_path: %s" % (folder_path,))
need_download = any(not op.exists(f) for f in folder_path)
if need_download and not download:
return ""
if need_download or force_update:
logger.debug(
"Downloading: need_download=%s, force_update=%s"
% (need_download, force_update)
)
for f in folder_path:
logger.debug(" Exists: %s: %s" % (f, op.exists(f)))
if name == "brainstorm":
if "--accept-brainstorm-license" in sys.argv:
answer = "y"
else:
answer = input("%sAgree (y/[n])? " % _bst_license_text)
if answer.lower() != "y":
raise RuntimeError("You must agree to the license to use this dataset")
assert len(url) == len(hash_)
assert len(url) == len(archive_name)
assert len(url) == len(folder_orig)
assert len(url) == len(folder_path)
assert len(url) > 0
# 1. Get all the archives
full_name = list()
for u, an, h, fo in zip(url, archive_name, hash_, folder_orig):
remove_archive, full = _download(path, u, an, h)
full_name.append(full)
del archive_name
# 2. Extract all of the files
remove_dir = True
for u, fp, an, h, fo in zip(url, folder_path, full_name, hash_, folder_orig):
_extract(path, name, fp, an, fo, remove_dir)
remove_dir = False # only do on first iteration
# 3. Remove all of the archives
if remove_archive:
for an in full_name:
os.remove(op.join(path, an))
logger.info("Successfully extracted to: %s" % folder_path)
_do_path_update(path, update_path, key, name)
path = folder_path[0]
# compare the version of the dataset and mne
data_version = _dataset_version(path, name)
# 0.7 < 0.7.git should be False, therefore strip
if check_version and (
LooseVersion(data_version) < LooseVersion(mne_version.strip(".git"))
):
warn(
"The {name} dataset (version {current}) is older than "
"mne-python (version {newest}). If the examples fail, "
"you may need to update the {name} dataset by using "
"mne.datasets.{name}.data_path(force_update=True)".format(
name=name, current=data_version, newest=mne_version
)
)
return (path, data_version) if return_version else path
|
https://github.com/mne-tools/mne-python/issues/8391
|
Data loaded, with preload=False: C:\read_curry example data\PH005_baseline.dat, 70 channels X 76850 points
Leaving device<->head transform as None (no landmarks found)
Reading 0 ... 76849 = 0.000 ... 307.396 secs...
Traceback (most recent call last):
File "c:\Users\todfl\.vscode\extensions\ms-python.python-2020.2.64397\pythonFiles\ptvsd_launcher.py", line 48, in <module>
main(ptvsdArgs)
File "c:\Users\todfl\.vscode\extensions\ms-python.python-2020.2.64397\pythonFiles\lib\python\old_ptvsd\ptvsd\__main__.py", line 432, in main
run()
File "c:\Users\todfl\.vscode\extensions\ms-python.python-2020.2.64397\pythonFiles\lib\python\old_ptvsd\ptvsd\__main__.py", line 316, in run_file
runpy.run_path(target, run_name='__main__')
File "C:\ProgramData\Anaconda3\lib\runpy.py", line 265, in run_path
return _run_module_code(code, init_globals, run_name,
File "C:\ProgramData\Anaconda3\lib\runpy.py", line 97, in _run_module_code
_run_code(code, mod_globals, init_globals,
File "C:\ProgramData\Anaconda3\lib\runpy.py", line 87, in _run_code
File "c:\Users\todfl\Documents\Decker\EEG Processing\EEG Processing Code\read_raw_curry error MWE.py", line 7, in <module>
raw = mne.io.read_raw_curry(data_path,preload=True,verbose=True)
File "<decorator-gen-218>", line 20, in read_raw_curry
File "C:\ProgramData\Anaconda3\lib\site-packages\mne\io\curry\curry.py", line 406, in read_raw_curry
return RawCurry(fname, preload, verbose)
File "<decorator-gen-219>", line 20, in __init__
File "C:\ProgramData\Anaconda3\lib\site-packages\mne\io\curry\curry.py", line 439, in __init__
super(RawCurry, self).__init__(
File "<decorator-gen-160>", line 20, in __init__
File "C:\ProgramData\Anaconda3\lib\site-packages\mne\io\base.py", line 247, in __init__
self._preload_data(preload)
File "<decorator-gen-164>", line 20, in _preload_data
File "C:\ProgramData\Anaconda3\lib\site-packages\mne\io\base.py", line 528, in _preload_data
self._data = self._read_segment(
File "<decorator-gen-162>", line 20, in _read_segment
File "C:\ProgramData\Anaconda3\lib\site-packages\mne\io\base.py", line 412, in _read_segment
_ReadSegmentFileProtector(self)._read_segment_file(
File "C:\ProgramData\Anaconda3\lib\site-packages\mne\io\base.py", line 1816, in _read_segment_file
return self.__raw.__class__._read_segment_file(
File "C:\ProgramData\Anaconda3\lib\site-packages\mne\io\curry\curry.py", line 469, in _read_segment_file
_read_segments_file(
File "C:\ProgramData\Anaconda3\lib\site-packages\mne\io\utils.py", line 221, in _read_segments_file
raise RuntimeError('Incorrect number of samples (%s != %s), '
RuntimeError: Incorrect number of samples (5302650 != 5379500), please report this error to MNE-Python developers
|
RuntimeError
|
def _read_curry_parameters(fname):
"""Extract Curry params from a Curry info file."""
_msg_match = (
"The sampling frequency and the time steps extracted from "
"the parameter file do not match."
)
_msg_invalid = "sfreq must be greater than 0. Got sfreq = {0}"
var_names = [
"NumSamples",
"SampleFreqHz",
"DataFormat",
"SampleTimeUsec",
"NumChannels",
"StartYear",
"StartMonth",
"StartDay",
"StartHour",
"StartMin",
"StartSec",
"StartMillisec",
"NUM_SAMPLES",
"SAMPLE_FREQ_HZ",
"DATA_FORMAT",
"SAMPLE_TIME_USEC",
"NUM_CHANNELS",
"START_YEAR",
"START_MONTH",
"START_DAY",
"START_HOUR",
"START_MIN",
"START_SEC",
"START_MILLISEC",
]
param_dict = dict()
unit_dict = dict()
with open(fname) as fid:
for line in iter(fid):
if any(var_name in line for var_name in var_names):
key, val = line.replace(" ", "").replace("\n", "").split("=")
param_dict[key.lower().replace("_", "")] = val
for type in CHANTYPES:
if "DEVICE_PARAMETERS" + CHANTYPES[type] + " START" in line:
data_unit = next(fid)
unit_dict[type] = (
data_unit.replace(" ", "").replace("\n", "").split("=")[-1]
)
# look for CHAN_IN_FILE sections, which may or may not exist; issue #8391
types = ["meg", "eeg", "misc"]
chanidx_in_file = _read_curry_lines(
fname, ["CHAN_IN_FILE" + CHANTYPES[key] for key in types]
)
n_samples = int(param_dict["numsamples"])
sfreq = float(param_dict["samplefreqhz"])
time_step = float(param_dict["sampletimeusec"]) * 1e-6
is_ascii = param_dict["dataformat"] == "ASCII"
n_channels = int(param_dict["numchannels"])
try:
dt_start = datetime(
int(param_dict["startyear"]),
int(param_dict["startmonth"]),
int(param_dict["startday"]),
int(param_dict["starthour"]),
int(param_dict["startmin"]),
int(param_dict["startsec"]),
int(param_dict["startmillisec"]) * 1000,
timezone.utc,
)
# Note that the time zone information is not stored in the Curry info
# file, and it seems the start time info is in the local timezone
# of the acquisition system (which is unknown); therefore, just set
# the timezone to be UTC. If the user knows otherwise, they can
# change it later. (Some Curry files might include StartOffsetUTCMin,
# but its presence is unpredictable, so we won't rely on it.)
except (ValueError, KeyError):
dt_start = None # if missing keywords or illegal values, don't set
if time_step == 0:
true_sfreq = sfreq
elif sfreq == 0:
true_sfreq = 1 / time_step
elif not np.isclose(sfreq, 1 / time_step):
raise ValueError(_msg_match)
else: # they're equal and != 0
true_sfreq = sfreq
if true_sfreq <= 0:
raise ValueError(_msg_invalid.format(true_sfreq))
return CurryParameters(
n_samples,
true_sfreq,
is_ascii,
unit_dict,
n_channels,
dt_start,
chanidx_in_file,
)
|
def _read_curry_parameters(fname):
"""Extract Curry params from a Curry info file."""
_msg_match = (
"The sampling frequency and the time steps extracted from "
"the parameter file do not match."
)
_msg_invalid = "sfreq must be greater than 0. Got sfreq = {0}"
var_names = [
"NumSamples",
"SampleFreqHz",
"DataFormat",
"SampleTimeUsec",
"NUM_SAMPLES",
"SAMPLE_FREQ_HZ",
"DATA_FORMAT",
"SAMPLE_TIME_USEC",
]
param_dict = dict()
unit_dict = dict()
with open(fname) as fid:
for line in iter(fid):
if any(var_name in line for var_name in var_names):
key, val = line.replace(" ", "").replace("\n", "").split("=")
param_dict[key.lower().replace("_", "")] = val
for type in CHANTYPES:
if "DEVICE_PARAMETERS" + CHANTYPES[type] + " START" in line:
data_unit = next(fid)
unit_dict[type] = (
data_unit.replace(" ", "").replace("\n", "").split("=")[-1]
)
n_samples = int(param_dict["numsamples"])
sfreq = float(param_dict["samplefreqhz"])
time_step = float(param_dict["sampletimeusec"]) * 1e-6
is_ascii = param_dict["dataformat"] == "ASCII"
if time_step == 0:
true_sfreq = sfreq
elif sfreq == 0:
true_sfreq = 1 / time_step
elif not np.isclose(sfreq, 1 / time_step):
raise ValueError(_msg_match)
else: # they're equal and != 0
true_sfreq = sfreq
if true_sfreq <= 0:
raise ValueError(_msg_invalid.format(true_sfreq))
return CurryParameters(n_samples, true_sfreq, is_ascii, unit_dict)
|
https://github.com/mne-tools/mne-python/issues/8391
|
Data loaded, with preload=False: C:\read_curry example data\PH005_baseline.dat, 70 channels X 76850 points
Leaving device<->head transform as None (no landmarks found)
Reading 0 ... 76849 = 0.000 ... 307.396 secs...
Traceback (most recent call last):
File "c:\Users\todfl\.vscode\extensions\ms-python.python-2020.2.64397\pythonFiles\ptvsd_launcher.py", line 48, in <module>
main(ptvsdArgs)
File "c:\Users\todfl\.vscode\extensions\ms-python.python-2020.2.64397\pythonFiles\lib\python\old_ptvsd\ptvsd\__main__.py", line 432, in main
run()
File "c:\Users\todfl\.vscode\extensions\ms-python.python-2020.2.64397\pythonFiles\lib\python\old_ptvsd\ptvsd\__main__.py", line 316, in run_file
runpy.run_path(target, run_name='__main__')
File "C:\ProgramData\Anaconda3\lib\runpy.py", line 265, in run_path
return _run_module_code(code, init_globals, run_name,
File "C:\ProgramData\Anaconda3\lib\runpy.py", line 97, in _run_module_code
_run_code(code, mod_globals, init_globals,
File "C:\ProgramData\Anaconda3\lib\runpy.py", line 87, in _run_code
File "c:\Users\todfl\Documents\Decker\EEG Processing\EEG Processing Code\read_raw_curry error MWE.py", line 7, in <module>
raw = mne.io.read_raw_curry(data_path,preload=True,verbose=True)
File "<decorator-gen-218>", line 20, in read_raw_curry
File "C:\ProgramData\Anaconda3\lib\site-packages\mne\io\curry\curry.py", line 406, in read_raw_curry
return RawCurry(fname, preload, verbose)
File "<decorator-gen-219>", line 20, in __init__
File "C:\ProgramData\Anaconda3\lib\site-packages\mne\io\curry\curry.py", line 439, in __init__
super(RawCurry, self).__init__(
File "<decorator-gen-160>", line 20, in __init__
File "C:\ProgramData\Anaconda3\lib\site-packages\mne\io\base.py", line 247, in __init__
self._preload_data(preload)
File "<decorator-gen-164>", line 20, in _preload_data
File "C:\ProgramData\Anaconda3\lib\site-packages\mne\io\base.py", line 528, in _preload_data
self._data = self._read_segment(
File "<decorator-gen-162>", line 20, in _read_segment
File "C:\ProgramData\Anaconda3\lib\site-packages\mne\io\base.py", line 412, in _read_segment
_ReadSegmentFileProtector(self)._read_segment_file(
File "C:\ProgramData\Anaconda3\lib\site-packages\mne\io\base.py", line 1816, in _read_segment_file
return self.__raw.__class__._read_segment_file(
File "C:\ProgramData\Anaconda3\lib\site-packages\mne\io\curry\curry.py", line 469, in _read_segment_file
_read_segments_file(
File "C:\ProgramData\Anaconda3\lib\site-packages\mne\io\utils.py", line 221, in _read_segments_file
raise RuntimeError('Incorrect number of samples (%s != %s), '
RuntimeError: Incorrect number of samples (5302650 != 5379500), please report this error to MNE-Python developers
|
RuntimeError
|
def _read_curry_info(curry_paths):
"""Extract info from curry parameter files."""
curry_params = _read_curry_parameters(curry_paths["info"])
R = np.eye(4)
R[[0, 1], [0, 1]] = -1 # rotate 180 deg
# shift down and back
# (chosen by eyeballing to make the CTF helmet look roughly correct)
R[:3, 3] = [0.0, -0.015, -0.12]
curry_dev_dev_t = Transform("ctf_meg", "meg", R)
# read labels from label files
label_fname = curry_paths["labels"]
types = ["meg", "eeg", "misc"]
labels = _read_curry_lines(
label_fname, ["LABELS" + CHANTYPES[key] for key in types]
)
sensors = _read_curry_lines(
label_fname, ["SENSORS" + CHANTYPES[key] for key in types]
)
normals = _read_curry_lines(
label_fname, ["NORMALS" + CHANTYPES[key] for key in types]
)
assert len(labels) == len(sensors) == len(normals)
all_chans = list()
for key in ["meg", "eeg", "misc"]:
chanidx_is_explicit = (
len(curry_params.chanidx_in_file["CHAN_IN_FILE" + CHANTYPES[key]]) > 0
) # channel index
# position in the datafile may or may not be explicitly declared,
# based on the CHAN_IN_FILE section in info file
for ind, chan in enumerate(labels["LABELS" + CHANTYPES[key]]):
chanidx = len(all_chans) + 1 # by default, just assume the
# channel index in the datafile is in order of the channel
# names as we found them in the labels file
if chanidx_is_explicit: # but, if explicitly declared, use
# that index number
chanidx = int(
curry_params.chanidx_in_file["CHAN_IN_FILE" + CHANTYPES[key]][ind]
)
if chanidx <= 0: # if chanidx was explicitly declared to be ' 0',
# it means the channel is not actually saved in the data file
# (e.g. the "Ref" channel), so don't add it to our list.
# Git issue #8391
continue
ch = {
"ch_name": chan,
"unit": curry_params.unit_dict[key],
"kind": FIFFV_CHANTYPES[key],
"coil_type": FIFFV_COILTYPES[key],
"ch_idx": chanidx,
}
if key == "eeg":
loc = np.array(sensors["SENSORS" + CHANTYPES[key]][ind], float)
# XXX just the sensor, where is ref (next 3)?
assert loc.shape == (3,)
loc /= 1000.0 # to meters
loc = np.concatenate([loc, np.zeros(9)])
ch["loc"] = loc
# XXX need to check/ensure this
ch["coord_frame"] = FIFF.FIFFV_COORD_HEAD
elif key == "meg":
pos = np.array(sensors["SENSORS" + CHANTYPES[key]][ind], float)
pos /= 1000.0 # to meters
pos = pos[:3] # just the inner coil
pos = apply_trans(curry_dev_dev_t, pos)
nn = np.array(normals["NORMALS" + CHANTYPES[key]][ind], float)
assert np.isclose(np.linalg.norm(nn), 1.0, atol=1e-4)
nn /= np.linalg.norm(nn)
nn = apply_trans(curry_dev_dev_t, nn, move=False)
trans = np.eye(4)
trans[:3, 3] = pos
trans[:3, :3] = _normal_orth(nn).T
ch["loc"] = _coil_trans_to_loc(trans)
ch["coord_frame"] = FIFF.FIFFV_COORD_DEVICE
all_chans.append(ch)
ch_count = len(all_chans)
assert ch_count == curry_params.n_chans # ensure that we have assembled
# the same number of channels as declared in the info (.DAP) file in the
# DATA_PARAMETERS section. Git issue #8391
# sort the channels to assure they are in the order that matches how
# recorded in the datafile. In general they most likely are already in
# the correct order, but if the channel index in the data file was
# explicitly declared we might as well use it.
all_chans = sorted(all_chans, key=lambda ch: ch["ch_idx"])
ch_names = [chan["ch_name"] for chan in all_chans]
info = create_info(ch_names, curry_params.sfreq)
info["meas_date"] = curry_params.dt_start # for Git issue #8398
_make_trans_dig(curry_paths, info, curry_dev_dev_t)
for ind, ch_dict in enumerate(info["chs"]):
all_chans[ind].pop("ch_idx")
ch_dict.update(all_chans[ind])
assert ch_dict["loc"].shape == (12,)
ch_dict["unit"] = SI_UNITS[all_chans[ind]["unit"][1]]
ch_dict["cal"] = SI_UNIT_SCALE[all_chans[ind]["unit"][0]]
return info, curry_params.n_samples, curry_params.is_ascii
|
def _read_curry_info(curry_paths):
"""Extract info from curry parameter files."""
curry_params = _read_curry_parameters(curry_paths["info"])
R = np.eye(4)
R[[0, 1], [0, 1]] = -1 # rotate 180 deg
# shift down and back
# (chosen by eyeballing to make the CTF helmet look roughly correct)
R[:3, 3] = [0.0, -0.015, -0.12]
curry_dev_dev_t = Transform("ctf_meg", "meg", R)
# read labels from label files
label_fname = curry_paths["labels"]
types = ["meg", "eeg", "misc"]
labels = _read_curry_lines(
label_fname, ["LABELS" + CHANTYPES[key] for key in types]
)
sensors = _read_curry_lines(
label_fname, ["SENSORS" + CHANTYPES[key] for key in types]
)
normals = _read_curry_lines(
label_fname, ["NORMALS" + CHANTYPES[key] for key in types]
)
assert len(labels) == len(sensors) == len(normals)
all_chans = list()
for key in ["meg", "eeg", "misc"]:
for ind, chan in enumerate(labels["LABELS" + CHANTYPES[key]]):
ch = {
"ch_name": chan,
"unit": curry_params.unit_dict[key],
"kind": FIFFV_CHANTYPES[key],
"coil_type": FIFFV_COILTYPES[key],
}
if key == "eeg":
loc = np.array(sensors["SENSORS" + CHANTYPES[key]][ind], float)
# XXX just the sensor, where is ref (next 3)?
assert loc.shape == (3,)
loc /= 1000.0 # to meters
loc = np.concatenate([loc, np.zeros(9)])
ch["loc"] = loc
# XXX need to check/ensure this
ch["coord_frame"] = FIFF.FIFFV_COORD_HEAD
elif key == "meg":
pos = np.array(sensors["SENSORS" + CHANTYPES[key]][ind], float)
pos /= 1000.0 # to meters
pos = pos[:3] # just the inner coil
pos = apply_trans(curry_dev_dev_t, pos)
nn = np.array(normals["NORMALS" + CHANTYPES[key]][ind], float)
assert np.isclose(np.linalg.norm(nn), 1.0, atol=1e-4)
nn /= np.linalg.norm(nn)
nn = apply_trans(curry_dev_dev_t, nn, move=False)
trans = np.eye(4)
trans[:3, 3] = pos
trans[:3, :3] = _normal_orth(nn).T
ch["loc"] = _coil_trans_to_loc(trans)
ch["coord_frame"] = FIFF.FIFFV_COORD_DEVICE
all_chans.append(ch)
ch_names = [chan["ch_name"] for chan in all_chans]
info = create_info(ch_names, curry_params.sfreq)
_make_trans_dig(curry_paths, info, curry_dev_dev_t)
for ind, ch_dict in enumerate(info["chs"]):
ch_dict.update(all_chans[ind])
assert ch_dict["loc"].shape == (12,)
ch_dict["unit"] = SI_UNITS[all_chans[ind]["unit"][1]]
ch_dict["cal"] = SI_UNIT_SCALE[all_chans[ind]["unit"][0]]
return info, curry_params.n_samples, curry_params.is_ascii
|
https://github.com/mne-tools/mne-python/issues/8391
|
Data loaded, with preload=False: C:\read_curry example data\PH005_baseline.dat, 70 channels X 76850 points
Leaving device<->head transform as None (no landmarks found)
Reading 0 ... 76849 = 0.000 ... 307.396 secs...
Traceback (most recent call last):
File "c:\Users\todfl\.vscode\extensions\ms-python.python-2020.2.64397\pythonFiles\ptvsd_launcher.py", line 48, in <module>
main(ptvsdArgs)
File "c:\Users\todfl\.vscode\extensions\ms-python.python-2020.2.64397\pythonFiles\lib\python\old_ptvsd\ptvsd\__main__.py", line 432, in main
run()
File "c:\Users\todfl\.vscode\extensions\ms-python.python-2020.2.64397\pythonFiles\lib\python\old_ptvsd\ptvsd\__main__.py", line 316, in run_file
runpy.run_path(target, run_name='__main__')
File "C:\ProgramData\Anaconda3\lib\runpy.py", line 265, in run_path
return _run_module_code(code, init_globals, run_name,
File "C:\ProgramData\Anaconda3\lib\runpy.py", line 97, in _run_module_code
_run_code(code, mod_globals, init_globals,
File "C:\ProgramData\Anaconda3\lib\runpy.py", line 87, in _run_code
File "c:\Users\todfl\Documents\Decker\EEG Processing\EEG Processing Code\read_raw_curry error MWE.py", line 7, in <module>
raw = mne.io.read_raw_curry(data_path,preload=True,verbose=True)
File "<decorator-gen-218>", line 20, in read_raw_curry
File "C:\ProgramData\Anaconda3\lib\site-packages\mne\io\curry\curry.py", line 406, in read_raw_curry
return RawCurry(fname, preload, verbose)
File "<decorator-gen-219>", line 20, in __init__
File "C:\ProgramData\Anaconda3\lib\site-packages\mne\io\curry\curry.py", line 439, in __init__
super(RawCurry, self).__init__(
File "<decorator-gen-160>", line 20, in __init__
File "C:\ProgramData\Anaconda3\lib\site-packages\mne\io\base.py", line 247, in __init__
self._preload_data(preload)
File "<decorator-gen-164>", line 20, in _preload_data
File "C:\ProgramData\Anaconda3\lib\site-packages\mne\io\base.py", line 528, in _preload_data
self._data = self._read_segment(
File "<decorator-gen-162>", line 20, in _read_segment
File "C:\ProgramData\Anaconda3\lib\site-packages\mne\io\base.py", line 412, in _read_segment
_ReadSegmentFileProtector(self)._read_segment_file(
File "C:\ProgramData\Anaconda3\lib\site-packages\mne\io\base.py", line 1816, in _read_segment_file
return self.__raw.__class__._read_segment_file(
File "C:\ProgramData\Anaconda3\lib\site-packages\mne\io\curry\curry.py", line 469, in _read_segment_file
_read_segments_file(
File "C:\ProgramData\Anaconda3\lib\site-packages\mne\io\utils.py", line 221, in _read_segments_file
raise RuntimeError('Incorrect number of samples (%s != %s), '
RuntimeError: Incorrect number of samples (5302650 != 5379500), please report this error to MNE-Python developers
|
RuntimeError
|
def parse_folder(
self,
data_path,
pattern="*.fif",
n_jobs=1,
mri_decim=2,
sort_sections=True,
on_error="warn",
image_format=None,
render_bem=True,
verbose=None,
):
r"""Render all the files in the folder.
Parameters
----------
data_path : str
Path to the folder containing data whose HTML report will be
created.
pattern : str | list of str
Filename pattern(s) to include in the report.
Example: [\*raw.fif, \*ave.fif] will include Raw as well as Evoked
files.
%(n_jobs)s
mri_decim : int
Use this decimation factor for generating MRI/BEM images
(since it can be time consuming).
sort_sections : bool
If True, sort sections in the order: raw -> events -> epochs
-> evoked -> covariance -> trans -> mri -> forward -> inverse.
on_error : str
What to do if a file cannot be rendered. Can be 'ignore',
'warn' (default), or 'raise'.
image_format : str | None
The image format to be used for the report, can be 'png' or 'svd'.
None (default) will use the default specified during Report
class construction.
.. versionadded:: 0.15
render_bem : bool
If True (default), try to render the BEM.
.. versionadded:: 0.16
%(verbose_meth)s
"""
_validate_type(data_path, "path-like", "data_path")
data_path = str(data_path)
image_format = _check_image_format(self, image_format)
_check_option("on_error", on_error, ["ignore", "warn", "raise"])
self._sort = sort_sections
n_jobs = check_n_jobs(n_jobs)
self.data_path = data_path
if self.title is None:
self.title = "MNE Report for ...%s" % self.data_path[-20:]
if not isinstance(pattern, (list, tuple)):
pattern = [pattern]
# iterate through the possible patterns
fnames = list()
for p in pattern:
fnames.extend(sorted(_recursive_search(self.data_path, p)))
if self.info_fname is not None:
info = read_info(self.info_fname, verbose=False)
sfreq = info["sfreq"]
else:
# only warn if relevant
if any(_endswith(fname, "cov") for fname in fnames):
warn("`info_fname` not provided. Cannot render -cov.fif(.gz) files.")
if any(_endswith(fname, "trans") for fname in fnames):
warn("`info_fname` not provided. Cannot render -trans.fif(.gz) files.")
if any(_endswith(fname, "proj") for fname in fnames):
warn("`info_fname` not provided. Cannot render -proj.fif(.gz) files.")
info, sfreq = None, None
cov = None
if self.cov_fname is not None:
cov = read_cov(self.cov_fname)
baseline = self.baseline
# render plots in parallel; check that n_jobs <= # of files
logger.info(
"Iterating over %s potential files (this may take some time)" % len(fnames)
)
use_jobs = min(n_jobs, max(1, len(fnames)))
parallel, p_fun, _ = parallel_func(_iterate_files, use_jobs)
r = parallel(
p_fun(
self,
fname,
info,
cov,
baseline,
sfreq,
on_error,
image_format,
self.data_path,
)
for fname in np.array_split(fnames, use_jobs)
)
htmls, report_fnames, report_sectionlabels = zip(*r)
# combine results from n_jobs discarding plots not rendered
self.html = [html for html in sum(htmls, []) if html is not None]
self.fnames = [fname for fname in sum(report_fnames, []) if fname is not None]
self._sectionlabels = [
slabel for slabel in sum(report_sectionlabels, []) if slabel is not None
]
# find unique section labels
self.sections = sorted(set(self._sectionlabels))
self._sectionvars = dict(zip(self.sections, self.sections))
# render mri
if render_bem:
if self.subjects_dir is not None and self.subject is not None:
logger.info("Rendering BEM")
self.fnames.append("bem")
self.add_bem_to_section(
self.subject,
decim=mri_decim,
n_jobs=n_jobs,
subjects_dir=self.subjects_dir,
)
else:
warn(
"`subjects_dir` and `subject` not provided. Cannot "
"render MRI and -trans.fif(.gz) files."
)
|
def parse_folder(
self,
data_path,
pattern="*.fif",
n_jobs=1,
mri_decim=2,
sort_sections=True,
on_error="warn",
image_format=None,
render_bem=True,
verbose=None,
):
r"""Render all the files in the folder.
Parameters
----------
data_path : str
Path to the folder containing data whose HTML report will be
created.
pattern : str | list of str
Filename pattern(s) to include in the report.
Example: [\*raw.fif, \*ave.fif] will include Raw as well as Evoked
files.
%(n_jobs)s
mri_decim : int
Use this decimation factor for generating MRI/BEM images
(since it can be time consuming).
sort_sections : bool
If True, sort sections in the order: raw -> events -> epochs
-> evoked -> covariance -> trans -> mri -> forward -> inverse.
on_error : str
What to do if a file cannot be rendered. Can be 'ignore',
'warn' (default), or 'raise'.
image_format : str | None
The image format to be used for the report, can be 'png' or 'svd'.
None (default) will use the default specified during Report
class construction.
.. versionadded:: 0.15
render_bem : bool
If True (default), try to render the BEM.
.. versionadded:: 0.16
%(verbose_meth)s
"""
image_format = _check_image_format(self, image_format)
_check_option("on_error", on_error, ["ignore", "warn", "raise"])
self._sort = sort_sections
n_jobs = check_n_jobs(n_jobs)
self.data_path = data_path
if self.title is None:
self.title = "MNE Report for ...%s" % self.data_path[-20:]
if not isinstance(pattern, (list, tuple)):
pattern = [pattern]
# iterate through the possible patterns
fnames = list()
for p in pattern:
fnames.extend(sorted(_recursive_search(self.data_path, p)))
if self.info_fname is not None:
info = read_info(self.info_fname, verbose=False)
sfreq = info["sfreq"]
else:
# only warn if relevant
if any(_endswith(fname, "cov") for fname in fnames):
warn("`info_fname` not provided. Cannot render -cov.fif(.gz) files.")
if any(_endswith(fname, "trans") for fname in fnames):
warn("`info_fname` not provided. Cannot render -trans.fif(.gz) files.")
if any(_endswith(fname, "proj") for fname in fnames):
warn("`info_fname` not provided. Cannot render -proj.fif(.gz) files.")
info, sfreq = None, None
cov = None
if self.cov_fname is not None:
cov = read_cov(self.cov_fname)
baseline = self.baseline
# render plots in parallel; check that n_jobs <= # of files
logger.info(
"Iterating over %s potential files (this may take some time)" % len(fnames)
)
use_jobs = min(n_jobs, max(1, len(fnames)))
parallel, p_fun, _ = parallel_func(_iterate_files, use_jobs)
r = parallel(
p_fun(
self,
fname,
info,
cov,
baseline,
sfreq,
on_error,
image_format,
self.data_path,
)
for fname in np.array_split(fnames, use_jobs)
)
htmls, report_fnames, report_sectionlabels = zip(*r)
# combine results from n_jobs discarding plots not rendered
self.html = [html for html in sum(htmls, []) if html is not None]
self.fnames = [fname for fname in sum(report_fnames, []) if fname is not None]
self._sectionlabels = [
slabel for slabel in sum(report_sectionlabels, []) if slabel is not None
]
# find unique section labels
self.sections = sorted(set(self._sectionlabels))
self._sectionvars = dict(zip(self.sections, self.sections))
# render mri
if render_bem:
if self.subjects_dir is not None and self.subject is not None:
logger.info("Rendering BEM")
self.fnames.append("bem")
self.add_bem_to_section(
self.subject,
decim=mri_decim,
n_jobs=n_jobs,
subjects_dir=self.subjects_dir,
)
else:
warn(
"`subjects_dir` and `subject` not provided. Cannot "
"render MRI and -trans.fif(.gz) files."
)
|
https://github.com/mne-tools/mne-python/issues/8168
|
Embedding : jquery.js
Embedding : jquery-ui.min.js
Embedding : bootstrap.min.js
Embedding : jquery-ui.min.css
Embedding : bootstrap.min.css
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
Untitled-1 in
5 p = pathlib.Path('foo')
6 report = mne.Report()
----> 7 report.parse_folder(p)
in parse_folder(self, data_path, pattern, n_jobs, mri_decim, sort_sections, on_error, image_format, render_bem, verbose)
~/Development/mne-python/mne/report.py in parse_folder(self, data_path, pattern, n_jobs, mri_decim, sort_sections, on_error, image_format, render_bem, verbose)
1445
1446 if self.title is None:
-> 1447 self.title = 'MNE Report for ...%s' % self.data_path[-20:]
1448
1449 if not isinstance(pattern, (list, tuple)):
TypeError: 'PosixPath' object is not subscriptable
|
TypeError
|
def _plot_lines(
data,
info,
picks,
fig,
axes,
spatial_colors,
unit,
units,
scalings,
hline,
gfp,
types,
zorder,
xlim,
ylim,
times,
bad_ch_idx,
titles,
ch_types_used,
selectable,
psd,
line_alpha,
nave,
time_unit,
sphere,
):
"""Plot data as butterfly plot."""
from matplotlib import patheffects, pyplot as plt
from matplotlib.widgets import SpanSelector
assert len(axes) == len(ch_types_used)
texts = list()
idxs = list()
lines = list()
sphere = _check_sphere(sphere, info)
path_effects = [patheffects.withStroke(linewidth=2, foreground="w", alpha=0.75)]
gfp_path_effects = [patheffects.withStroke(linewidth=5, foreground="w", alpha=0.75)]
if selectable:
selectables = np.ones(len(ch_types_used), dtype=bool)
for type_idx, this_type in enumerate(ch_types_used):
idx = picks[types == this_type]
if len(idx) < 2 or (this_type == "grad" and len(idx) < 4):
# prevent unnecessary warnings for e.g. EOG
if this_type in _DATA_CH_TYPES_SPLIT:
logger.info(
"Need more than one channel to make "
"topography for %s. Disabling interactivity." % (this_type,)
)
selectables[type_idx] = False
if selectable:
# Parameters for butterfly interactive plots
params = dict(
axes=axes,
texts=texts,
lines=lines,
ch_names=info["ch_names"],
idxs=idxs,
need_draw=False,
path_effects=path_effects,
)
fig.canvas.mpl_connect("pick_event", partial(_butterfly_onpick, params=params))
fig.canvas.mpl_connect(
"button_press_event", partial(_butterfly_on_button_press, params=params)
)
for ai, (ax, this_type) in enumerate(zip(axes, ch_types_used)):
line_list = list() # 'line_list' contains the lines for this axes
if unit is False:
this_scaling = 1.0
ch_unit = "NA" # no unit
else:
this_scaling = 1.0 if scalings is None else scalings[this_type]
ch_unit = units[this_type]
idx = list(picks[types == this_type])
idxs.append(idx)
if len(idx) > 0:
# Set amplitude scaling
D = this_scaling * data[idx, :]
_check_if_nan(D)
gfp_only = isinstance(gfp, str) and gfp == "only"
if not gfp_only:
chs = [info["chs"][i] for i in idx]
locs3d = np.array([ch["loc"][:3] for ch in chs])
if spatial_colors is True and not _check_ch_locs(chs):
warn("Channel locations not available. Disabling spatial colors.")
spatial_colors = selectable = False
if spatial_colors is True and len(idx) != 1:
x, y, z = locs3d.T
colors = _rgb(x, y, z)
_handle_spatial_colors(
colors, info, idx, this_type, psd, ax, sphere
)
else:
if isinstance(spatial_colors, (tuple, str)):
col = [spatial_colors]
else:
col = ["k"]
colors = col * len(idx)
for i in bad_ch_idx:
if i in idx:
colors[idx.index(i)] = "r"
if zorder == "std":
# find the channels with the least activity
# to map them in front of the more active ones
z_ord = D.std(axis=1).argsort()
elif zorder == "unsorted":
z_ord = list(range(D.shape[0]))
elif not callable(zorder):
error = '`zorder` must be a function, "std" or "unsorted", not {0}.'
raise TypeError(error.format(type(zorder)))
else:
z_ord = zorder(D)
# plot channels
for ch_idx, z in enumerate(z_ord):
line_list.append(
ax.plot(
times,
D[ch_idx],
picker=3.0,
zorder=z + 1 if spatial_colors is True else 1,
color=colors[ch_idx],
alpha=line_alpha,
linewidth=0.5,
)[0]
)
if gfp: # 'only' or boolean True
gfp_color = 3 * (0.0,) if spatial_colors is True else (0.0, 1.0, 0.0)
this_gfp = np.sqrt((D * D).mean(axis=0))
this_ylim = (
ax.get_ylim()
if (ylim is None or this_type not in ylim.keys())
else ylim[this_type]
)
if gfp_only:
y_offset = 0.0
else:
y_offset = this_ylim[0]
this_gfp += y_offset
ax.fill_between(
times,
y_offset,
this_gfp,
color="none",
facecolor=gfp_color,
zorder=1,
alpha=0.2,
)
line_list.append(
ax.plot(
times, this_gfp, color=gfp_color, zorder=3, alpha=line_alpha
)[0]
)
ax.text(
times[0] + 0.01 * (times[-1] - times[0]),
this_gfp[0] + 0.05 * np.diff(ax.get_ylim())[0],
"GFP",
zorder=4,
color=gfp_color,
path_effects=gfp_path_effects,
)
for ii, line in zip(idx, line_list):
if ii in bad_ch_idx:
line.set_zorder(2)
if spatial_colors is True:
line.set_linestyle("--")
ax.set_ylabel(ch_unit)
# for old matplotlib, we actually need this to have a bounding
# box (!), so we have to put some valid text here, change
# alpha and path effects later
texts.append(
ax.text(
0,
0,
"blank",
zorder=3,
verticalalignment="baseline",
horizontalalignment="left",
fontweight="bold",
alpha=0,
clip_on=True,
)
)
if xlim is not None:
if xlim == "tight":
xlim = (times[0], times[-1])
ax.set_xlim(xlim)
if ylim is not None and this_type in ylim:
ax.set_ylim(ylim[this_type])
ax.set(
title=r"%s (%d channel%s)" % (titles[this_type], len(D), _pl(len(D)))
)
if ai == 0:
_add_nave(ax, nave)
if hline is not None:
for h in hline:
c = "grey" if spatial_colors is True else "r"
ax.axhline(h, linestyle="--", linewidth=2, color=c)
lines.append(line_list)
if selectable:
for ax in np.array(axes)[selectables]:
if len(ax.lines) == 1:
continue
text = ax.annotate(
"Loading...",
xy=(0.01, 0.1),
xycoords="axes fraction",
fontsize=20,
color="green",
zorder=3,
)
text.set_visible(False)
callback_onselect = partial(
_line_plot_onselect,
ch_types=ch_types_used,
info=info,
data=data,
times=times,
text=text,
psd=psd,
time_unit=time_unit,
sphere=sphere,
)
blit = False if plt.get_backend() == "MacOSX" else True
minspan = 0 if len(times) < 2 else times[1] - times[0]
ax._span_selector = SpanSelector(
ax,
callback_onselect,
"horizontal",
minspan=minspan,
useblit=blit,
rectprops=dict(alpha=0.5, facecolor="red"),
)
|
def _plot_lines(
data,
info,
picks,
fig,
axes,
spatial_colors,
unit,
units,
scalings,
hline,
gfp,
types,
zorder,
xlim,
ylim,
times,
bad_ch_idx,
titles,
ch_types_used,
selectable,
psd,
line_alpha,
nave,
time_unit,
sphere,
):
"""Plot data as butterfly plot."""
from matplotlib import patheffects, pyplot as plt
from matplotlib.widgets import SpanSelector
assert len(axes) == len(ch_types_used)
texts = list()
idxs = list()
lines = list()
sphere = _check_sphere(sphere, info)
path_effects = [patheffects.withStroke(linewidth=2, foreground="w", alpha=0.75)]
gfp_path_effects = [patheffects.withStroke(linewidth=5, foreground="w", alpha=0.75)]
if selectable:
selectables = np.ones(len(ch_types_used), dtype=bool)
for type_idx, this_type in enumerate(ch_types_used):
idx = picks[types == this_type]
if len(idx) < 2 or (this_type == "grad" and len(idx) < 4):
# prevent unnecessary warnings for e.g. EOG
if this_type in _DATA_CH_TYPES_SPLIT:
logger.info(
"Need more than one channel to make "
"topography for %s. Disabling interactivity." % (this_type,)
)
selectables[type_idx] = False
if selectable:
# Parameters for butterfly interactive plots
params = dict(
axes=axes,
texts=texts,
lines=lines,
ch_names=info["ch_names"],
idxs=idxs,
need_draw=False,
path_effects=path_effects,
)
fig.canvas.mpl_connect("pick_event", partial(_butterfly_onpick, params=params))
fig.canvas.mpl_connect(
"button_press_event", partial(_butterfly_on_button_press, params=params)
)
for ai, (ax, this_type) in enumerate(zip(axes, ch_types_used)):
line_list = list() # 'line_list' contains the lines for this axes
if unit is False:
this_scaling = 1.0
ch_unit = "NA" # no unit
else:
this_scaling = 1.0 if scalings is None else scalings[this_type]
ch_unit = units[this_type]
idx = list(picks[types == this_type])
idxs.append(idx)
if len(idx) > 0:
# Set amplitude scaling
D = this_scaling * data[idx, :]
_check_if_nan(D)
gfp_only = isinstance(gfp, str) and gfp == "only"
if not gfp_only:
chs = [info["chs"][i] for i in idx]
locs3d = np.array([ch["loc"][:3] for ch in chs])
if spatial_colors is True and not _check_ch_locs(chs):
warn("Channel locations not available. Disabling spatial colors.")
spatial_colors = selectable = False
if spatial_colors is True and len(idx) != 1:
x, y, z = locs3d.T
colors = _rgb(x, y, z)
_handle_spatial_colors(
colors, info, idx, this_type, psd, ax, sphere
)
else:
if isinstance(spatial_colors, (tuple, str)):
col = [spatial_colors]
else:
col = ["k"]
colors = col * len(idx)
for i in bad_ch_idx:
if i in idx:
colors[idx.index(i)] = "r"
if zorder == "std":
# find the channels with the least activity
# to map them in front of the more active ones
z_ord = D.std(axis=1).argsort()
elif zorder == "unsorted":
z_ord = list(range(D.shape[0]))
elif not callable(zorder):
error = '`zorder` must be a function, "std" or "unsorted", not {0}.'
raise TypeError(error.format(type(zorder)))
else:
z_ord = zorder(D)
# plot channels
for ch_idx, z in enumerate(z_ord):
line_list.append(
ax.plot(
times,
D[ch_idx],
picker=3.0,
zorder=z + 1 if spatial_colors is True else 1,
color=colors[ch_idx],
alpha=line_alpha,
linewidth=0.5,
)[0]
)
if gfp: # 'only' or boolean True
gfp_color = 3 * (0.0,) if spatial_colors is True else (0.0, 1.0, 0.0)
this_gfp = np.sqrt((D * D).mean(axis=0))
this_ylim = (
ax.get_ylim()
if (ylim is None or this_type not in ylim.keys())
else ylim[this_type]
)
if gfp_only:
y_offset = 0.0
else:
y_offset = this_ylim[0]
this_gfp += y_offset
ax.fill_between(
times,
y_offset,
this_gfp,
color="none",
facecolor=gfp_color,
zorder=1,
alpha=0.2,
)
line_list.append(
ax.plot(
times, this_gfp, color=gfp_color, zorder=3, alpha=line_alpha
)[0]
)
ax.text(
times[0] + 0.01 * (times[-1] - times[0]),
this_gfp[0] + 0.05 * np.diff(ax.get_ylim())[0],
"GFP",
zorder=4,
color=gfp_color,
path_effects=gfp_path_effects,
)
for ii, line in zip(idx, line_list):
if ii in bad_ch_idx:
line.set_zorder(2)
if spatial_colors is True:
line.set_linestyle("--")
ax.set_ylabel(ch_unit)
# for old matplotlib, we actually need this to have a bounding
# box (!), so we have to put some valid text here, change
# alpha and path effects later
texts.append(
ax.text(
0,
0,
"blank",
zorder=3,
verticalalignment="baseline",
horizontalalignment="left",
fontweight="bold",
alpha=0,
)
)
if xlim is not None:
if xlim == "tight":
xlim = (times[0], times[-1])
ax.set_xlim(xlim)
if ylim is not None and this_type in ylim:
ax.set_ylim(ylim[this_type])
ax.set(
title=r"%s (%d channel%s)" % (titles[this_type], len(D), _pl(len(D)))
)
if ai == 0:
_add_nave(ax, nave)
if hline is not None:
for h in hline:
c = "grey" if spatial_colors is True else "r"
ax.axhline(h, linestyle="--", linewidth=2, color=c)
lines.append(line_list)
if selectable:
for ax in np.array(axes)[selectables]:
if len(ax.lines) == 1:
continue
text = ax.annotate(
"Loading...",
xy=(0.01, 0.1),
xycoords="axes fraction",
fontsize=20,
color="green",
zorder=3,
)
text.set_visible(False)
callback_onselect = partial(
_line_plot_onselect,
ch_types=ch_types_used,
info=info,
data=data,
times=times,
text=text,
psd=psd,
time_unit=time_unit,
sphere=sphere,
)
blit = False if plt.get_backend() == "MacOSX" else True
minspan = 0 if len(times) < 2 else times[1] - times[0]
ax._span_selector = SpanSelector(
ax,
callback_onselect,
"horizontal",
minspan=minspan,
useblit=blit,
rectprops=dict(alpha=0.5, facecolor="red"),
)
|
https://github.com/mne-tools/mne-python/issues/6968
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
~/anaconda3/lib/python3.7/site-packages/IPython/core/formatters.py in __call__(self, obj)
339 pass
340 else:
--> 341 return printer(obj)
342 # Finally look for special method names
343 method = get_real_method(obj, self.print_method)
~/anaconda3/lib/python3.7/site-packages/IPython/core/pylabtools.py in <lambda>(fig)
242
243 if 'png' in formats:
--> 244 png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))
245 if 'retina' in formats or 'png2x' in formats:
246 png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))
~/anaconda3/lib/python3.7/site-packages/IPython/core/pylabtools.py in print_figure(fig, fmt, bbox_inches, **kwargs)
126
127 bytes_io = BytesIO()
--> 128 fig.canvas.print_figure(bytes_io, **kw)
129 data = bytes_io.getvalue()
130 if fmt == 'svg':
~/anaconda3/lib/python3.7/site-packages/matplotlib/backend_bases.py in print_figure(self, filename, dpi, facecolor, edgecolor, orientation, format, bbox_inches, **kwargs)
2080 orientation=orientation,
2081 bbox_inches_restore=_bbox_inches_restore,
-> 2082 **kwargs)
2083 finally:
2084 if bbox_inches and restore_bbox:
~/anaconda3/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in print_png(self, filename_or_obj, metadata, pil_kwargs, *args, **kwargs)
525
526 else:
--> 527 FigureCanvasAgg.draw(self)
528 renderer = self.get_renderer()
529 with cbook._setattr_cm(renderer, dpi=self.figure.dpi), \
~/anaconda3/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in draw(self)
384 Draw the figure using the renderer.
385 """
--> 386 self.renderer = self.get_renderer(cleared=True)
387 with RendererAgg.lock:
388 self.figure.draw(self.renderer)
~/anaconda3/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in get_renderer(self, cleared)
397 and getattr(self, "_lastKey", None) == key)
398 if not reuse_renderer:
--> 399 self.renderer = RendererAgg(w, h, self.figure.dpi)
400 self._lastKey = key
401 elif cleared:
~/anaconda3/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in __init__(self, width, height, dpi)
84 self.width = width
85 self.height = height
---> 86 self._renderer = _RendererAgg(int(width), int(height), dpi)
87 self._filter_renderers = []
88
ValueError: Image size of 115104x291 pixels is too large. It must be less than 2^16 in each direction.
<Figure size 432x288 with 6 Axes>
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
~/anaconda3/lib/python3.7/site-packages/IPython/core/formatters.py in __call__(self, obj)
339 pass
340 else:
--> 341 return printer(obj)
342 # Finally look for special method names
343 method = get_real_method(obj, self.print_method)
~/anaconda3/lib/python3.7/site-packages/IPython/core/pylabtools.py in <lambda>(fig)
242
243 if 'png' in formats:
--> 244 png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))
245 if 'retina' in formats or 'png2x' in formats:
246 png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))
~/anaconda3/lib/python3.7/site-packages/IPython/core/pylabtools.py in print_figure(fig, fmt, bbox_inches, **kwargs)
126
127 bytes_io = BytesIO()
--> 128 fig.canvas.print_figure(bytes_io, **kw)
129 data = bytes_io.getvalue()
130 if fmt == 'svg':
~/anaconda3/lib/python3.7/site-packages/matplotlib/backend_bases.py in print_figure(self, filename, dpi, facecolor, edgecolor, orientation, format, bbox_inches, **kwargs)
2080 orientation=orientation,
2081 bbox_inches_restore=_bbox_inches_restore,
-> 2082 **kwargs)
2083 finally:
2084 if bbox_inches and restore_bbox:
~/anaconda3/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in print_png(self, filename_or_obj, metadata, pil_kwargs, *args, **kwargs)
525
526 else:
--> 527 FigureCanvasAgg.draw(self)
528 renderer = self.get_renderer()
529 with cbook._setattr_cm(renderer, dpi=self.figure.dpi), \
~/anaconda3/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in draw(self)
384 Draw the figure using the renderer.
385 """
--> 386 self.renderer = self.get_renderer(cleared=True)
387 with RendererAgg.lock:
388 self.figure.draw(self.renderer)
~/anaconda3/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in get_renderer(self, cleared)
397 and getattr(self, "_lastKey", None) == key)
398 if not reuse_renderer:
--> 399 self.renderer = RendererAgg(w, h, self.figure.dpi)
400 self._lastKey = key
401 elif cleared:
~/anaconda3/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in __init__(self, width, height, dpi)
84 self.width = width
85 self.height = height
---> 86 self._renderer = _RendererAgg(int(width), int(height), dpi)
87 self._filter_renderers = []
88
ValueError: Image size of 115104x291 pixels is too large. It must be less than 2^16 in each direction.
|
ValueError
|
def _set_psd_plot_params(info, proj, picks, ax, area_mode):
"""Set PSD plot params."""
import matplotlib.pyplot as plt
_check_option("area_mode", area_mode, [None, "std", "range"])
picks = _picks_to_idx(info, picks)
# XXX this could be refactored more with e.g., plot_evoked
# XXX when it's refactored, Report._render_raw will need to be updated
titles = _handle_default("titles", None)
units = _handle_default("units", None)
scalings = _handle_default("scalings", None)
picks_list = list()
titles_list = list()
units_list = list()
scalings_list = list()
for name in _DATA_CH_TYPES_SPLIT:
kwargs = dict(meg=False, ref_meg=False, exclude=[])
if name in ("mag", "grad"):
kwargs["meg"] = name
elif name in ("fnirs_raw", "fnirs_od", "hbo", "hbr"):
kwargs["fnirs"] = name
else:
kwargs[name] = True
these_picks = pick_types(info, **kwargs)
these_picks = np.intersect1d(these_picks, picks)
if len(these_picks) > 0:
picks_list.append(these_picks)
titles_list.append(titles[name])
units_list.append(units[name])
scalings_list.append(scalings[name])
if len(picks_list) == 0:
raise RuntimeError("No data channels found")
if ax is not None:
if isinstance(ax, plt.Axes):
ax = [ax]
if len(ax) != len(picks_list):
raise ValueError(
"For this dataset with picks=None %s axes "
"must be supplied, got %s" % (len(picks_list), len(ax))
)
ax_list = ax
del picks
fig = None
if ax is None:
fig, ax_list = plt.subplots(len(picks_list), 1, sharex=True, squeeze=False)
ax_list = list(ax_list[:, 0])
make_label = True
else:
fig = ax_list[0].get_figure()
make_label = len(ax_list) == len(fig.axes)
return (
fig,
picks_list,
titles_list,
units_list,
scalings_list,
ax_list,
make_label,
)
|
def _set_psd_plot_params(info, proj, picks, ax, area_mode):
"""Set PSD plot params."""
import matplotlib.pyplot as plt
_check_option("area_mode", area_mode, [None, "std", "range"])
picks = _picks_to_idx(info, picks)
# XXX this could be refactored more with e.g., plot_evoked
# XXX when it's refactored, Report._render_raw will need to be updated
titles = _handle_default("titles", None)
units = _handle_default("units", None)
scalings = _handle_default("scalings", None)
picks_list = list()
titles_list = list()
units_list = list()
scalings_list = list()
for name in _DATA_CH_TYPES_SPLIT:
kwargs = dict(meg=False, ref_meg=False, exclude=[])
if name in ("mag", "grad"):
kwargs["meg"] = name
elif name in ("fnirs_raw", "fnirs_od", "hbo", "hbr"):
kwargs["fnirs"] = name
else:
kwargs[name] = True
these_picks = pick_types(info, **kwargs)
these_picks = np.intersect1d(these_picks, picks)
if len(these_picks) > 0:
picks_list.append(these_picks)
titles_list.append(titles[name])
units_list.append(units[name])
scalings_list.append(scalings[name])
if len(picks_list) == 0:
raise RuntimeError("No data channels found")
if ax is not None:
if isinstance(ax, plt.Axes):
ax = [ax]
if len(ax) != len(picks_list):
raise ValueError(
"For this dataset with picks=None %s axes "
"must be supplied, got %s" % (len(picks_list), len(ax))
)
ax_list = ax
del picks
fig = None
if ax is None:
fig = plt.figure()
ax_list = list()
for ii in range(len(picks_list)):
# Make x-axes change together
if ii > 0:
ax_list.append(
plt.subplot(len(picks_list), 1, ii + 1, sharex=ax_list[0])
)
else:
ax_list.append(plt.subplot(len(picks_list), 1, ii + 1))
make_label = True
else:
fig = ax_list[0].get_figure()
make_label = len(ax_list) == len(fig.axes)
return (
fig,
picks_list,
titles_list,
units_list,
scalings_list,
ax_list,
make_label,
)
|
https://github.com/mne-tools/mne-python/issues/6968
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
~/anaconda3/lib/python3.7/site-packages/IPython/core/formatters.py in __call__(self, obj)
339 pass
340 else:
--> 341 return printer(obj)
342 # Finally look for special method names
343 method = get_real_method(obj, self.print_method)
~/anaconda3/lib/python3.7/site-packages/IPython/core/pylabtools.py in <lambda>(fig)
242
243 if 'png' in formats:
--> 244 png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))
245 if 'retina' in formats or 'png2x' in formats:
246 png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))
~/anaconda3/lib/python3.7/site-packages/IPython/core/pylabtools.py in print_figure(fig, fmt, bbox_inches, **kwargs)
126
127 bytes_io = BytesIO()
--> 128 fig.canvas.print_figure(bytes_io, **kw)
129 data = bytes_io.getvalue()
130 if fmt == 'svg':
~/anaconda3/lib/python3.7/site-packages/matplotlib/backend_bases.py in print_figure(self, filename, dpi, facecolor, edgecolor, orientation, format, bbox_inches, **kwargs)
2080 orientation=orientation,
2081 bbox_inches_restore=_bbox_inches_restore,
-> 2082 **kwargs)
2083 finally:
2084 if bbox_inches and restore_bbox:
~/anaconda3/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in print_png(self, filename_or_obj, metadata, pil_kwargs, *args, **kwargs)
525
526 else:
--> 527 FigureCanvasAgg.draw(self)
528 renderer = self.get_renderer()
529 with cbook._setattr_cm(renderer, dpi=self.figure.dpi), \
~/anaconda3/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in draw(self)
384 Draw the figure using the renderer.
385 """
--> 386 self.renderer = self.get_renderer(cleared=True)
387 with RendererAgg.lock:
388 self.figure.draw(self.renderer)
~/anaconda3/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in get_renderer(self, cleared)
397 and getattr(self, "_lastKey", None) == key)
398 if not reuse_renderer:
--> 399 self.renderer = RendererAgg(w, h, self.figure.dpi)
400 self._lastKey = key
401 elif cleared:
~/anaconda3/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in __init__(self, width, height, dpi)
84 self.width = width
85 self.height = height
---> 86 self._renderer = _RendererAgg(int(width), int(height), dpi)
87 self._filter_renderers = []
88
ValueError: Image size of 115104x291 pixels is too large. It must be less than 2^16 in each direction.
<Figure size 432x288 with 6 Axes>
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
~/anaconda3/lib/python3.7/site-packages/IPython/core/formatters.py in __call__(self, obj)
339 pass
340 else:
--> 341 return printer(obj)
342 # Finally look for special method names
343 method = get_real_method(obj, self.print_method)
~/anaconda3/lib/python3.7/site-packages/IPython/core/pylabtools.py in <lambda>(fig)
242
243 if 'png' in formats:
--> 244 png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))
245 if 'retina' in formats or 'png2x' in formats:
246 png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))
~/anaconda3/lib/python3.7/site-packages/IPython/core/pylabtools.py in print_figure(fig, fmt, bbox_inches, **kwargs)
126
127 bytes_io = BytesIO()
--> 128 fig.canvas.print_figure(bytes_io, **kw)
129 data = bytes_io.getvalue()
130 if fmt == 'svg':
~/anaconda3/lib/python3.7/site-packages/matplotlib/backend_bases.py in print_figure(self, filename, dpi, facecolor, edgecolor, orientation, format, bbox_inches, **kwargs)
2080 orientation=orientation,
2081 bbox_inches_restore=_bbox_inches_restore,
-> 2082 **kwargs)
2083 finally:
2084 if bbox_inches and restore_bbox:
~/anaconda3/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in print_png(self, filename_or_obj, metadata, pil_kwargs, *args, **kwargs)
525
526 else:
--> 527 FigureCanvasAgg.draw(self)
528 renderer = self.get_renderer()
529 with cbook._setattr_cm(renderer, dpi=self.figure.dpi), \
~/anaconda3/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in draw(self)
384 Draw the figure using the renderer.
385 """
--> 386 self.renderer = self.get_renderer(cleared=True)
387 with RendererAgg.lock:
388 self.figure.draw(self.renderer)
~/anaconda3/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in get_renderer(self, cleared)
397 and getattr(self, "_lastKey", None) == key)
398 if not reuse_renderer:
--> 399 self.renderer = RendererAgg(w, h, self.figure.dpi)
400 self._lastKey = key
401 elif cleared:
~/anaconda3/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in __init__(self, width, height, dpi)
84 self.width = width
85 self.height = height
---> 86 self._renderer = _RendererAgg(int(width), int(height), dpi)
87 self._filter_renderers = []
88
ValueError: Image size of 115104x291 pixels is too large. It must be less than 2^16 in each direction.
|
ValueError
|
def _plot_psd(
inst,
fig,
freqs,
psd_list,
picks_list,
titles_list,
units_list,
scalings_list,
ax_list,
make_label,
color,
area_mode,
area_alpha,
dB,
estimate,
average,
spatial_colors,
xscale,
line_alpha,
sphere,
):
# helper function for plot_raw_psd and plot_epochs_psd
from matplotlib.ticker import ScalarFormatter
from .evoked import _plot_lines
sphere = _check_sphere(sphere, inst.info)
_check_option("xscale", xscale, ("log", "linear"))
for key, ls in zip(["lowpass", "highpass", "line_freq"], ["--", "--", "-."]):
if inst.info[key] is not None:
for ax in ax_list:
ax.axvline(
inst.info[key],
color="k",
linestyle=ls,
alpha=0.25,
linewidth=2,
zorder=2,
)
if line_alpha is None:
line_alpha = 1.0 if average else 0.75
line_alpha = float(line_alpha)
ylabels = list()
for ii, (psd, picks, title, ax, scalings, units) in enumerate(
zip(psd_list, picks_list, titles_list, ax_list, scalings_list, units_list)
):
ylabel = _convert_psds(
psd, dB, estimate, scalings, units, [inst.ch_names[pi] for pi in picks]
)
ylabels.append(ylabel)
del ylabel
if average:
# mean across channels
psd_mean = np.mean(psd, axis=0)
if area_mode == "std":
# std across channels
psd_std = np.std(psd, axis=0)
hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)
elif area_mode == "range":
hyp_limits = (np.min(psd, axis=0), np.max(psd, axis=0))
else: # area_mode is None
hyp_limits = None
ax.plot(freqs, psd_mean, color=color, alpha=line_alpha, linewidth=0.5)
if hyp_limits is not None:
ax.fill_between(
freqs,
hyp_limits[0],
y2=hyp_limits[1],
color=color,
alpha=area_alpha,
)
if not average:
picks = np.concatenate(picks_list)
psd_list = np.concatenate(psd_list)
types = np.array([channel_type(inst.info, idx) for idx in picks])
# Needed because the data do not match the info anymore.
info = create_info([inst.ch_names[p] for p in picks], inst.info["sfreq"], types)
info["chs"] = [inst.info["chs"][p] for p in picks]
valid_channel_types = [
"mag",
"grad",
"eeg",
"csd",
"seeg",
"eog",
"ecg",
"emg",
"dipole",
"gof",
"bio",
"ecog",
"hbo",
"hbr",
"misc",
"fnirs_raw",
"fnirs_od",
]
ch_types_used = list()
for this_type in valid_channel_types:
if this_type in types:
ch_types_used.append(this_type)
assert len(ch_types_used) == len(ax_list)
unit = ""
units = {t: yl for t, yl in zip(ch_types_used, ylabels)}
titles = {c: t for c, t in zip(ch_types_used, titles_list)}
picks = np.arange(len(psd_list))
if not spatial_colors:
spatial_colors = color
_plot_lines(
psd_list,
info,
picks,
fig,
ax_list,
spatial_colors,
unit,
units=units,
scalings=None,
hline=None,
gfp=False,
types=types,
zorder="std",
xlim=(freqs[0], freqs[-1]),
ylim=None,
times=freqs,
bad_ch_idx=[],
titles=titles,
ch_types_used=ch_types_used,
selectable=True,
psd=True,
line_alpha=line_alpha,
nave=None,
time_unit="ms",
sphere=sphere,
)
for ii, ax in enumerate(ax_list):
ax.grid(True, linestyle=":")
if xscale == "log":
ax.set(xscale="log")
ax.set(xlim=[freqs[1] if freqs[0] == 0 else freqs[0], freqs[-1]])
ax.get_xaxis().set_major_formatter(ScalarFormatter())
else: # xscale == 'linear'
ax.set(xlim=(freqs[0], freqs[-1]))
if make_label:
if ii == len(picks_list) - 1:
ax.set_xlabel("Frequency (Hz)")
ax.set(ylabel=ylabels[ii], title=titles_list[ii])
if make_label:
fig.subplots_adjust(
left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.3, hspace=0.5
)
return fig
|
def _plot_psd(
inst,
fig,
freqs,
psd_list,
picks_list,
titles_list,
units_list,
scalings_list,
ax_list,
make_label,
color,
area_mode,
area_alpha,
dB,
estimate,
average,
spatial_colors,
xscale,
line_alpha,
sphere,
):
# helper function for plot_raw_psd and plot_epochs_psd
from matplotlib.ticker import ScalarFormatter
from .evoked import _plot_lines
sphere = _check_sphere(sphere, inst.info)
for key, ls in zip(["lowpass", "highpass", "line_freq"], ["--", "--", "-."]):
if inst.info[key] is not None:
for ax in ax_list:
ax.axvline(
inst.info[key],
color="k",
linestyle=ls,
alpha=0.25,
linewidth=2,
zorder=2,
)
if line_alpha is None:
line_alpha = 1.0 if average else 0.75
line_alpha = float(line_alpha)
ylabels = list()
for ii, (psd, picks, title, ax, scalings, units) in enumerate(
zip(psd_list, picks_list, titles_list, ax_list, scalings_list, units_list)
):
ylabel = _convert_psds(
psd, dB, estimate, scalings, units, [inst.ch_names[pi] for pi in picks]
)
ylabels.append(ylabel)
del ylabel
if average:
# mean across channels
psd_mean = np.mean(psd, axis=0)
if area_mode == "std":
# std across channels
psd_std = np.std(psd, axis=0)
hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)
elif area_mode == "range":
hyp_limits = (np.min(psd, axis=0), np.max(psd, axis=0))
else: # area_mode is None
hyp_limits = None
ax.plot(freqs, psd_mean, color=color, alpha=line_alpha, linewidth=0.5)
if hyp_limits is not None:
ax.fill_between(
freqs,
hyp_limits[0],
y2=hyp_limits[1],
color=color,
alpha=area_alpha,
)
if not average:
picks = np.concatenate(picks_list)
psd_list = np.concatenate(psd_list)
types = np.array([channel_type(inst.info, idx) for idx in picks])
# Needed because the data do not match the info anymore.
info = create_info([inst.ch_names[p] for p in picks], inst.info["sfreq"], types)
info["chs"] = [inst.info["chs"][p] for p in picks]
valid_channel_types = [
"mag",
"grad",
"eeg",
"csd",
"seeg",
"eog",
"ecg",
"emg",
"dipole",
"gof",
"bio",
"ecog",
"hbo",
"hbr",
"misc",
"fnirs_raw",
"fnirs_od",
]
ch_types_used = list()
for this_type in valid_channel_types:
if this_type in types:
ch_types_used.append(this_type)
assert len(ch_types_used) == len(ax_list)
unit = ""
units = {t: yl for t, yl in zip(ch_types_used, ylabels)}
titles = {c: t for c, t in zip(ch_types_used, titles_list)}
picks = np.arange(len(psd_list))
if not spatial_colors:
spatial_colors = color
_plot_lines(
psd_list,
info,
picks,
fig,
ax_list,
spatial_colors,
unit,
units=units,
scalings=None,
hline=None,
gfp=False,
types=types,
zorder="std",
xlim=(freqs[0], freqs[-1]),
ylim=None,
times=freqs,
bad_ch_idx=[],
titles=titles,
ch_types_used=ch_types_used,
selectable=True,
psd=True,
line_alpha=line_alpha,
nave=None,
time_unit="ms",
sphere=sphere,
)
for ii, ax in enumerate(ax_list):
ax.grid(True, linestyle=":")
if xscale == "log":
ax.set(xscale="log")
ax.set(xlim=[freqs[1] if freqs[0] == 0 else freqs[0], freqs[-1]])
ax.get_xaxis().set_major_formatter(ScalarFormatter())
else:
ax.set(xlim=(freqs[0], freqs[-1]))
if make_label:
if ii == len(picks_list) - 1:
ax.set_xlabel("Frequency (Hz)")
ax.set(ylabel=ylabels[ii], title=titles_list[ii])
if make_label:
fig.subplots_adjust(
left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.3, hspace=0.5
)
return fig
|
https://github.com/mne-tools/mne-python/issues/6968
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
~/anaconda3/lib/python3.7/site-packages/IPython/core/formatters.py in __call__(self, obj)
339 pass
340 else:
--> 341 return printer(obj)
342 # Finally look for special method names
343 method = get_real_method(obj, self.print_method)
~/anaconda3/lib/python3.7/site-packages/IPython/core/pylabtools.py in <lambda>(fig)
242
243 if 'png' in formats:
--> 244 png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))
245 if 'retina' in formats or 'png2x' in formats:
246 png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))
~/anaconda3/lib/python3.7/site-packages/IPython/core/pylabtools.py in print_figure(fig, fmt, bbox_inches, **kwargs)
126
127 bytes_io = BytesIO()
--> 128 fig.canvas.print_figure(bytes_io, **kw)
129 data = bytes_io.getvalue()
130 if fmt == 'svg':
~/anaconda3/lib/python3.7/site-packages/matplotlib/backend_bases.py in print_figure(self, filename, dpi, facecolor, edgecolor, orientation, format, bbox_inches, **kwargs)
2080 orientation=orientation,
2081 bbox_inches_restore=_bbox_inches_restore,
-> 2082 **kwargs)
2083 finally:
2084 if bbox_inches and restore_bbox:
~/anaconda3/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in print_png(self, filename_or_obj, metadata, pil_kwargs, *args, **kwargs)
525
526 else:
--> 527 FigureCanvasAgg.draw(self)
528 renderer = self.get_renderer()
529 with cbook._setattr_cm(renderer, dpi=self.figure.dpi), \
~/anaconda3/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in draw(self)
384 Draw the figure using the renderer.
385 """
--> 386 self.renderer = self.get_renderer(cleared=True)
387 with RendererAgg.lock:
388 self.figure.draw(self.renderer)
~/anaconda3/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in get_renderer(self, cleared)
397 and getattr(self, "_lastKey", None) == key)
398 if not reuse_renderer:
--> 399 self.renderer = RendererAgg(w, h, self.figure.dpi)
400 self._lastKey = key
401 elif cleared:
~/anaconda3/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in __init__(self, width, height, dpi)
84 self.width = width
85 self.height = height
---> 86 self._renderer = _RendererAgg(int(width), int(height), dpi)
87 self._filter_renderers = []
88
ValueError: Image size of 115104x291 pixels is too large. It must be less than 2^16 in each direction.
<Figure size 432x288 with 6 Axes>
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
~/anaconda3/lib/python3.7/site-packages/IPython/core/formatters.py in __call__(self, obj)
339 pass
340 else:
--> 341 return printer(obj)
342 # Finally look for special method names
343 method = get_real_method(obj, self.print_method)
~/anaconda3/lib/python3.7/site-packages/IPython/core/pylabtools.py in <lambda>(fig)
242
243 if 'png' in formats:
--> 244 png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))
245 if 'retina' in formats or 'png2x' in formats:
246 png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))
~/anaconda3/lib/python3.7/site-packages/IPython/core/pylabtools.py in print_figure(fig, fmt, bbox_inches, **kwargs)
126
127 bytes_io = BytesIO()
--> 128 fig.canvas.print_figure(bytes_io, **kw)
129 data = bytes_io.getvalue()
130 if fmt == 'svg':
~/anaconda3/lib/python3.7/site-packages/matplotlib/backend_bases.py in print_figure(self, filename, dpi, facecolor, edgecolor, orientation, format, bbox_inches, **kwargs)
2080 orientation=orientation,
2081 bbox_inches_restore=_bbox_inches_restore,
-> 2082 **kwargs)
2083 finally:
2084 if bbox_inches and restore_bbox:
~/anaconda3/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in print_png(self, filename_or_obj, metadata, pil_kwargs, *args, **kwargs)
525
526 else:
--> 527 FigureCanvasAgg.draw(self)
528 renderer = self.get_renderer()
529 with cbook._setattr_cm(renderer, dpi=self.figure.dpi), \
~/anaconda3/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in draw(self)
384 Draw the figure using the renderer.
385 """
--> 386 self.renderer = self.get_renderer(cleared=True)
387 with RendererAgg.lock:
388 self.figure.draw(self.renderer)
~/anaconda3/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in get_renderer(self, cleared)
397 and getattr(self, "_lastKey", None) == key)
398 if not reuse_renderer:
--> 399 self.renderer = RendererAgg(w, h, self.figure.dpi)
400 self._lastKey = key
401 elif cleared:
~/anaconda3/lib/python3.7/site-packages/matplotlib/backends/backend_agg.py in __init__(self, width, height, dpi)
84 self.width = width
85 self.height = height
---> 86 self._renderer = _RendererAgg(int(width), int(height), dpi)
87 self._filter_renderers = []
88
ValueError: Image size of 115104x291 pixels is too large. It must be less than 2^16 in each direction.
|
ValueError
|
def _fig_to_img(fig, image_format="png", scale=None, **kwargs):
"""Plot figure and create a binary image."""
# fig can be ndarray, mpl Figure, Mayavi Figure, or callable that produces
# a mpl Figure
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
if isinstance(fig, np.ndarray):
fig = _ndarray_to_fig(fig)
elif callable(fig):
plt.close("all")
fig = fig(**kwargs)
elif not isinstance(fig, Figure):
mlab = None
try:
mlab = _import_mlab()
# on some systems importing Mayavi raises SystemExit (!)
except Exception:
is_mayavi = False
else:
import mayavi
is_mayavi = isinstance(fig, mayavi.core.scene.Scene)
if not is_mayavi:
raise TypeError(
"Each fig must be a matplotlib Figure, mayavi "
"Scene, or NumPy ndarray, got %s (type %s)" % (fig, type(fig))
)
if fig.scene is not None:
img = mlab.screenshot(figure=fig)
else: # Testing mode
img = np.zeros((2, 2, 3))
mlab.close(fig)
fig = _ndarray_to_fig(img)
output = BytesIO()
if scale is not None:
_scale_mpl_figure(fig, scale)
logger.debug(
"Saving figure %s with dpi %s" % (fig.get_size_inches(), fig.get_dpi())
)
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore") # incompatible axes
fig.savefig(
output, format=image_format, dpi=fig.get_dpi(), bbox_to_inches="tight"
)
plt.close(fig)
output = output.getvalue()
return (
output.decode("utf-8")
if image_format == "svg"
else base64.b64encode(output).decode("ascii")
)
|
def _fig_to_img(fig, image_format="png", scale=None, **kwargs):
"""Plot figure and create a binary image."""
# fig can be ndarray, mpl Figure, Mayavi Figure, or callable that produces
# a mpl Figure
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
if isinstance(fig, np.ndarray):
fig = _ndarray_to_fig(fig)
elif callable(fig):
plt.close("all")
fig = fig(**kwargs)
elif not isinstance(fig, Figure):
mlab = None
try:
mlab = _import_mlab()
# on some systems importing Mayavi raises SystemExit (!)
except Exception as e:
warn(
"Could not import mayavi (%r). Trying to render"
"`mayavi.core.api.Scene` figure instances"
" will throw an error." % (e,)
)
if fig.scene is not None:
img = mlab.screenshot(figure=fig)
else: # Testing mode
img = np.zeros((2, 2, 3))
mlab.close(fig)
fig = _ndarray_to_fig(img)
output = BytesIO()
if scale is not None:
_scale_mpl_figure(fig, scale)
logger.debug(
"Saving figure %s with dpi %s" % (fig.get_size_inches(), fig.get_dpi())
)
with warnings.catch_warnings(record=True):
warnings.simplefilter("ignore") # incompatible axes
fig.savefig(
output, format=image_format, dpi=fig.get_dpi(), bbox_to_inches="tight"
)
plt.close(fig)
output = output.getvalue()
return (
output.decode("utf-8")
if image_format == "svg"
else base64.b64encode(output).decode("ascii")
)
|
https://github.com/mne-tools/mne-python/issues/6128
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-5-a4d487ddeb86> in <module>()
3 rep = Report()
4 fig = plt.plot([1, 2], [1, 2])
----> 5 rep.add_figs_to_section(fig, 'my caption')
~/Desktop/projects/github_repos/mne-python/mne/report.py in add_figs_to_section(self, figs, captions, section, scale, image_format, comments, replace)
1078 img_klass = self._sectionvars[section]
1079
-> 1080 img = _fig_to_img(fig, image_format, scale)
1081 html = image_template.substitute(img=img, id=global_id,
1082 div_klass=div_klass,
~/Desktop/projects/github_repos/mne-python/mne/report.py in _fig_to_img(fig, image_format, scale, **kwargs)
74 '`mayavi.core.api.Scene` figure instances'
75 ' will throw an error.' % (e,))
---> 76 if fig.scene is not None:
77 img = mlab.screenshot(figure=fig)
78 else: # Testing mode
AttributeError: 'Line2D' object has no attribute 'scene'
|
AttributeError
|
def _plot_lines(
data,
info,
picks,
fig,
axes,
spatial_colors,
unit,
units,
scalings,
hline,
gfp,
types,
zorder,
xlim,
ylim,
times,
bad_ch_idx,
titles,
ch_types_used,
selectable,
psd,
line_alpha,
):
"""Plot data as butterfly plot."""
from matplotlib import patheffects
from matplotlib.widgets import SpanSelector
assert len(axes) == len(ch_types_used)
texts = list()
idxs = list()
lines = list()
path_effects = [patheffects.withStroke(linewidth=2, foreground="w", alpha=0.75)]
gfp_path_effects = [patheffects.withStroke(linewidth=5, foreground="w", alpha=0.75)]
if selectable:
selectables = np.ones(len(ch_types_used), dtype=bool)
for type_idx, this_type in enumerate(ch_types_used):
idx = picks[types == this_type]
if len(idx) < 2 or (this_type == "grad" and len(idx) < 4):
# prevent unnecessary warnings for e.g. EOG
if this_type in _DATA_CH_TYPES_SPLIT:
logger.info(
"Need more than one channel to make "
"topography for %s. Disabling interactivity." % (this_type,)
)
selectables[type_idx] = False
if selectable:
# Parameters for butterfly interactive plots
params = dict(
axes=axes,
texts=texts,
lines=lines,
ch_names=info["ch_names"],
idxs=idxs,
need_draw=False,
path_effects=path_effects,
)
fig.canvas.mpl_connect("pick_event", partial(_butterfly_onpick, params=params))
fig.canvas.mpl_connect(
"button_press_event", partial(_butterfly_on_button_press, params=params)
)
for ax, this_type in zip(axes, ch_types_used):
line_list = list() # 'line_list' contains the lines for this axes
if unit is False:
this_scaling = 1.0
ch_unit = "NA" # no unit
else:
this_scaling = 1.0 if scalings is None else scalings[this_type]
ch_unit = units[this_type]
idx = list(picks[types == this_type])
idxs.append(idx)
if len(idx) > 0:
# Set amplitude scaling
D = this_scaling * data[idx, :]
gfp_only = isinstance(gfp, string_types) and gfp == "only"
if not gfp_only:
chs = [info["chs"][i] for i in idx]
locs3d = np.array([ch["loc"][:3] for ch in chs])
if spatial_colors is True and (locs3d == 0).all():
warn("Channel locations not available. Disabling spatial colors.")
spatial_colors = selectable = False
if spatial_colors is True and len(idx) != 1:
x, y, z = locs3d.T
colors = _rgb(x, y, z)
_handle_spatial_colors(colors, info, idx, this_type, psd, ax)
else:
if isinstance(spatial_colors, (tuple, string_types)):
col = [spatial_colors]
else:
col = ["k"]
colors = col * len(idx)
for i in bad_ch_idx:
if i in idx:
colors[idx.index(i)] = "r"
if zorder == "std":
# find the channels with the least activity
# to map them in front of the more active ones
z_ord = D.std(axis=1).argsort()
elif zorder == "unsorted":
z_ord = list(range(D.shape[0]))
elif not callable(zorder):
error = '`zorder` must be a function, "std" or "unsorted", not {0}.'
raise TypeError(error.format(type(zorder)))
else:
z_ord = zorder(D)
# plot channels
for ch_idx, z in enumerate(z_ord):
line_list.append(
ax.plot(
times,
D[ch_idx],
picker=3.0,
zorder=z + 1 if spatial_colors is True else 1,
color=colors[ch_idx],
alpha=line_alpha,
linewidth=0.5,
)[0]
)
if gfp: # 'only' or boolean True
gfp_color = 3 * (0.0,) if spatial_colors is True else (0.0, 1.0, 0.0)
this_gfp = np.sqrt((D * D).mean(axis=0))
this_ylim = (
ax.get_ylim()
if (ylim is None or this_type not in ylim.keys())
else ylim[this_type]
)
if gfp_only:
y_offset = 0.0
else:
y_offset = this_ylim[0]
this_gfp += y_offset
ax.fill_between(
times,
y_offset,
this_gfp,
color="none",
facecolor=gfp_color,
zorder=1,
alpha=0.2,
)
line_list.append(
ax.plot(
times, this_gfp, color=gfp_color, zorder=3, alpha=line_alpha
)[0]
)
ax.text(
times[0] + 0.01 * (times[-1] - times[0]),
this_gfp[0] + 0.05 * np.diff(ax.get_ylim())[0],
"GFP",
zorder=4,
color=gfp_color,
path_effects=gfp_path_effects,
)
for ii, line in zip(idx, line_list):
if ii in bad_ch_idx:
line.set_zorder(2)
if spatial_colors is True:
line.set_linestyle("--")
ax.set_ylabel(ch_unit)
# for old matplotlib, we actually need this to have a bounding
# box (!), so we have to put some valid text here, change
# alpha and path effects later
texts.append(
ax.text(
0,
0,
"blank",
zorder=3,
verticalalignment="baseline",
horizontalalignment="left",
fontweight="bold",
alpha=0,
)
)
if xlim is not None:
if xlim == "tight":
xlim = (times[0], times[-1])
ax.set_xlim(xlim)
if ylim is not None and this_type in ylim:
ax.set_ylim(ylim[this_type])
ax.set_title(titles[this_type] + " (%d channel%s)" % (len(D), _pl(D)))
if hline is not None:
for h in hline:
c = "grey" if spatial_colors is True else "r"
ax.axhline(h, linestyle="--", linewidth=2, color=c)
lines.append(line_list)
if selectable:
import matplotlib.pyplot as plt
for ax in np.array(axes)[selectables]:
if len(ax.lines) == 1:
continue
text = ax.annotate(
"Loading...",
xy=(0.01, 0.1),
xycoords="axes fraction",
fontsize=20,
color="green",
zorder=3,
)
text.set_visible(False)
callback_onselect = partial(
_line_plot_onselect,
ch_types=ch_types_used,
info=info,
data=data,
times=times,
text=text,
psd=psd,
)
blit = False if plt.get_backend() == "MacOSX" else True
minspan = 0 if len(times) < 2 else times[1] - times[0]
ax._span_selector = SpanSelector(
ax,
callback_onselect,
"horizontal",
minspan=minspan,
useblit=blit,
rectprops=dict(alpha=0.5, facecolor="red"),
)
|
def _plot_lines(
data,
info,
picks,
fig,
axes,
spatial_colors,
unit,
units,
scalings,
hline,
gfp,
types,
zorder,
xlim,
ylim,
times,
bad_ch_idx,
titles,
ch_types_used,
selectable,
psd,
line_alpha,
):
"""Plot data as butterfly plot."""
from matplotlib import patheffects
from matplotlib.widgets import SpanSelector
texts = list()
idxs = list()
lines = list()
path_effects = [patheffects.withStroke(linewidth=2, foreground="w", alpha=0.75)]
gfp_path_effects = [patheffects.withStroke(linewidth=5, foreground="w", alpha=0.75)]
if selectable:
selectables = np.ones(len(ch_types_used), dtype=bool)
for type_idx, this_type in enumerate(ch_types_used):
idx = picks[types == this_type]
if len(idx) < 2 or (this_type == "grad" and len(idx) < 4):
# prevent unnecessary warnings for e.g. EOG
if this_type in _DATA_CH_TYPES_SPLIT:
logger.info(
"Need more than one channel to make "
"topography for %s. Disabling interactivity." % (this_type,)
)
selectables[type_idx] = False
if selectable:
# Parameters for butterfly interactive plots
params = dict(
axes=axes,
texts=texts,
lines=lines,
ch_names=info["ch_names"],
idxs=idxs,
need_draw=False,
path_effects=path_effects,
)
fig.canvas.mpl_connect("pick_event", partial(_butterfly_onpick, params=params))
fig.canvas.mpl_connect(
"button_press_event", partial(_butterfly_on_button_press, params=params)
)
for ax, this_type in zip(axes, ch_types_used):
line_list = list() # 'line_list' contains the lines for this axes
if unit is False:
this_scaling = 1.0
ch_unit = "NA" # no unit
else:
this_scaling = 1.0 if scalings is None else scalings[this_type]
ch_unit = units[this_type]
idx = list(picks[types == this_type])
idxs.append(idx)
if len(idx) > 0:
# Set amplitude scaling
D = this_scaling * data[idx, :]
gfp_only = isinstance(gfp, string_types) and gfp == "only"
if not gfp_only:
chs = [info["chs"][i] for i in idx]
locs3d = np.array([ch["loc"][:3] for ch in chs])
if spatial_colors is True and (locs3d == 0).all():
warn("Channel locations not available. Disabling spatial colors.")
spatial_colors = selectable = False
if spatial_colors is True and len(idx) != 1:
x, y, z = locs3d.T
colors = _rgb(x, y, z)
_handle_spatial_colors(colors, info, idx, this_type, psd, ax)
else:
if isinstance(spatial_colors, (tuple, string_types)):
col = [spatial_colors]
else:
col = ["k"]
colors = col * len(idx)
for i in bad_ch_idx:
if i in idx:
colors[idx.index(i)] = "r"
if zorder == "std":
# find the channels with the least activity
# to map them in front of the more active ones
z_ord = D.std(axis=1).argsort()
elif zorder == "unsorted":
z_ord = list(range(D.shape[0]))
elif not callable(zorder):
error = '`zorder` must be a function, "std" or "unsorted", not {0}.'
raise TypeError(error.format(type(zorder)))
else:
z_ord = zorder(D)
# plot channels
for ch_idx, z in enumerate(z_ord):
line_list.append(
ax.plot(
times,
D[ch_idx],
picker=3.0,
zorder=z + 1 if spatial_colors is True else 1,
color=colors[ch_idx],
alpha=line_alpha,
linewidth=0.5,
)[0]
)
if gfp: # 'only' or boolean True
gfp_color = 3 * (0.0,) if spatial_colors is True else (0.0, 1.0, 0.0)
this_gfp = np.sqrt((D * D).mean(axis=0))
this_ylim = (
ax.get_ylim()
if (ylim is None or this_type not in ylim.keys())
else ylim[this_type]
)
if gfp_only:
y_offset = 0.0
else:
y_offset = this_ylim[0]
this_gfp += y_offset
ax.fill_between(
times,
y_offset,
this_gfp,
color="none",
facecolor=gfp_color,
zorder=1,
alpha=0.2,
)
line_list.append(
ax.plot(
times, this_gfp, color=gfp_color, zorder=3, alpha=line_alpha
)[0]
)
ax.text(
times[0] + 0.01 * (times[-1] - times[0]),
this_gfp[0] + 0.05 * np.diff(ax.get_ylim())[0],
"GFP",
zorder=4,
color=gfp_color,
path_effects=gfp_path_effects,
)
for ii, line in zip(idx, line_list):
if ii in bad_ch_idx:
line.set_zorder(2)
if spatial_colors is True:
line.set_linestyle("--")
ax.set_ylabel(ch_unit)
# for old matplotlib, we actually need this to have a bounding
# box (!), so we have to put some valid text here, change
# alpha and path effects later
texts.append(
ax.text(
0,
0,
"blank",
zorder=3,
verticalalignment="baseline",
horizontalalignment="left",
fontweight="bold",
alpha=0,
)
)
if xlim is not None:
if xlim == "tight":
xlim = (times[0], times[-1])
ax.set_xlim(xlim)
if ylim is not None and this_type in ylim:
ax.set_ylim(ylim[this_type])
ax.set_title(titles[this_type] + " (%d channel%s)" % (len(D), _pl(D)))
if hline is not None:
for h in hline:
c = "grey" if spatial_colors is True else "r"
ax.axhline(h, linestyle="--", linewidth=2, color=c)
lines.append(line_list)
if selectable:
import matplotlib.pyplot as plt
for ax in np.array(axes)[selectables]:
if len(ax.lines) == 1:
continue
text = ax.annotate(
"Loading...",
xy=(0.01, 0.1),
xycoords="axes fraction",
fontsize=20,
color="green",
zorder=3,
)
text.set_visible(False)
callback_onselect = partial(
_line_plot_onselect,
ch_types=ch_types_used,
info=info,
data=data,
times=times,
text=text,
psd=psd,
)
blit = False if plt.get_backend() == "MacOSX" else True
minspan = 0 if len(times) < 2 else times[1] - times[0]
ax._span_selector = SpanSelector(
ax,
callback_onselect,
"horizontal",
minspan=minspan,
useblit=blit,
rectprops=dict(alpha=0.5, facecolor="red"),
)
|
https://github.com/mne-tools/mne-python/issues/5046
|
/miniconda3/envs/mne/lib/python2.7/site-packages/mne/viz/evoked.py:433: VisibleDeprecationWarning: boolean index did not match indexed array along dimension 0; dimension is 1 but corresponding boolean dimension is 2
for ax in np.array(axes)[selectables]:
Traceback (most recent call last):
File "psd_bug.py", line 10, in <module>
ax = raw.plot_psd(picks=picks, dB=False, show=True, average=False)
File "<string>", line 2, in plot_psd
File "/home/erik/miniconda3/envs/mne/lib/python2.7/site-packages/mne/utils.py", line 728, in verbose
return function(*args, **kwargs)
File "/home/erik/miniconda3/envs/mne/lib/python2.7/site-packages/mne/io/base.py", line 1779, in plot_psd
xscale=xscale, reject_by_annotation=reject_by_annotation)
File "<string>", line 2, in plot_raw_psd
File "/home/erik/miniconda3/envs/mne/lib/python2.7/site-packages/mne/utils.py", line 728, in verbose
return function(*args, **kwargs)
File "/home/erik/miniconda3/envs/mne/lib/python2.7/site-packages/mne/viz/raw.py", line 812, in plot_raw_psd
line_alpha=line_alpha)
File "/home/erik/miniconda3/envs/mne/lib/python2.7/site-packages/mne/viz/evoked.py", line 433, in _plot_lines
for ax in np.array(axes)[selectables]:
IndexError: index 1 is out of bounds for axis 1 with size 1
|
IndexError
|
def plot_compare_evokeds(
evokeds,
picks=None,
gfp=False,
colors=None,
linestyles=["-"],
styles=None,
cmap=None,
vlines="auto",
ci=0.95,
truncate_yaxis=False,
truncate_xaxis=True,
ylim=dict(),
invert_y=False,
show_sensors=None,
show_legend=True,
split_legend=False,
axes=None,
title=None,
show=True,
):
"""Plot evoked time courses for one or more conditions and/or channels.
Parameters
----------
evokeds : instance of mne.Evoked | list | dict
If a single Evoked instance, it is plotted as a time series.
If a dict whose values are Evoked objects, the contents are plotted as
single time series each and the keys are used as condition labels.
If a list of Evokeds, the contents are plotted with indices as labels.
If a [dict/list] of lists, the unweighted mean is plotted as a time
series and the parametric confidence interval is plotted as a shaded
area. All instances must have the same shape - channel numbers, time
points etc.
If dict, keys must be of type str.
picks : None | int | list of int
If int or list of int, the indices of the sensors to average and plot.
If multiple channel types are selected, one figure will be returned for
each channel type.
If the selected channels are gradiometers, the signal from
corresponding (gradiometer) pairs will be combined.
If None, it defaults to all data channels, in which case the global
field power will be plotted for all channel type available.
gfp : bool
If True, the channel type wise GFP is plotted.
If `picks` is an empty list (default), this is set to True.
colors : list | dict | None
If a list, will be sequentially used for line colors.
If a dict, can map evoked keys or '/'-separated (HED) tags to
conditions.
For example, if `evokeds` is a dict with the keys "Aud/L", "Aud/R",
"Vis/L", "Vis/R", `colors` can be `dict(Aud='r', Vis='b')` to map both
Aud/L and Aud/R to the color red and both Visual conditions to blue.
If None (default), a sequence of desaturated colors is used.
If `cmap` is None, `colors` will indicate how each condition is
colored with reference to its position on the colormap - see `cmap`
below. In that case, the values of colors must be either integers,
in which case they will be mapped to colors in rank order; or floats
between 0 and 1, in which case they will be mapped to percentiles of
the colormap.
linestyles : list | dict
If a list, will be sequentially and repeatedly used for evoked plot
linestyles.
If a dict, can map the `evoked` keys or '/'-separated (HED) tags to
conditions.
For example, if evokeds is a dict with the keys "Aud/L", "Aud/R",
"Vis/L", "Vis/R", `linestyles` can be `dict(L='--', R='-')` to map both
Aud/L and Vis/L to dashed lines and both Right-side conditions to
straight lines.
styles : dict | None
If a dict, keys must map to evoked keys or conditions, and values must
be a dict of legal inputs to `matplotlib.pyplot.plot`. These
parameters will be passed to the line plot call of the corresponding
condition, overriding defaults.
E.g., if evokeds is a dict with the keys "Aud/L", "Aud/R",
"Vis/L", "Vis/R", `styles` can be `{"Aud/L": {"linewidth": 1}}` to set
the linewidth for "Aud/L" to 1. Note that HED ('/'-separated) tags are
not supported.
cmap : None | str | tuple
If not None, plot evoked activity with colors from a color gradient
(indicated by a str referencing a matplotlib colormap - e.g., "viridis"
or "Reds").
If ``evokeds`` is a list and ``colors`` is `None`, the color will
depend on the list position. If ``colors`` is a list, it must contain
integers where the list positions correspond to ``evokeds``, and the
value corresponds to the position on the colorbar.
If ``evokeds`` is a dict, ``colors`` should be a dict mapping from
(potentially HED-style) condition tags to numbers corresponding to
positions on the colorbar - rank order for integers, or floats for
percentiles. E.g., ::
evokeds={"cond1/A": ev1, "cond2/A": ev2, "cond3/A": ev3, "B": ev4},
cmap='viridis', colors=dict(cond1=1 cond2=2, cond3=3),
linestyles={"A": "-", "B": ":"}
If ``cmap`` is a tuple of length 2, the first item must be
a string which will become the colorbar label, and the second one
must indicate a colormap, e.g. ::
cmap=('conds', 'viridis'), colors=dict(cond1=1 cond2=2, cond3=3),
vlines : "auto" | list of float
A list in seconds at which to plot dashed vertical lines.
If "auto" and the supplied data includes 0, it is set to [0.]
and a vertical bar is plotted at time 0.
ci : float | callable | None | bool
If not None and ``evokeds`` is a [list/dict] of lists, a shaded
confidence interval is drawn around the individual time series. If
float, a percentile bootstrap method is used to estimate the confidence
interval and this value determines the CI width. E.g., if this value is
.95 (the default), the 95% confidence interval is drawn. If a callable,
it must take as its single argument an array (observations x times) and
return the upper and lower confidence bands.
If None or False, no confidence band is plotted.
If True, the 95% confidence interval is drawn.
truncate_yaxis : bool | str
If True, the left y axis spine is truncated to reduce visual clutter.
If 'max_ticks', the spine is truncated at the minimum and maximum
ticks. Else, it is truncated to half the max absolute value, rounded to
.25. Defaults to False.
truncate_xaxis : bool
If True, the x axis is truncated to span from the first to the last.
xtick. Defaults to True.
ylim : dict | None
ylim for plots (after scaling has been applied). e.g.
ylim = dict(eeg=[-20, 20])
Valid keys are eeg, mag, grad, misc. If None, the ylim parameter
for each channel equals the pyplot default.
invert_y : bool
If True, negative values are plotted up (as is sometimes done
for ERPs out of tradition). Defaults to False.
show_sensors: bool | int | str | None
If not False, channel locations are plotted on a small head circle.
If int or str, the position of the axes (forwarded to
``mpl_toolkits.axes_grid1.inset_locator.inset_axes``).
If None, defaults to True if ``gfp`` is False, else to False.
show_legend : bool | str | int
If not False, show a legend. If int or str, it is the position of the
legend axes (forwarded to
``mpl_toolkits.axes_grid1.inset_locator.inset_axes``).
split_legend : bool
If True, the legend shows color and linestyle separately; `colors` must
not be None. Defaults to True if ``cmap`` is not None, else defaults to
False.
axes : None | `matplotlib.axes.Axes` instance | list of `axes`
What axes to plot to. If None, a new axes is created.
When plotting multiple channel types, can also be a list of axes, one
per channel type.
title : None | str
If str, will be plotted as figure title. If None, the channel names
will be shown.
show : bool
If True, show the figure.
Returns
-------
fig : Figure | list of Figures
The figure(s) in which the plot is drawn. When plotting multiple
channel types, a list of figures, one for each channel type is
returned.
Notes
-----
When multiple channels are passed, this function combines them all, to
get one time course for each condition. If gfp is True it combines
channels using global field power (GFP) computation, else it is taking
a plain mean.
This function is useful for comparing multiple ER[P/F]s - e.g., for
multiple conditions - at a specific location.
It can plot:
- a simple :class:`mne.Evoked` object,
- a list or dict of :class:`mne.Evoked` objects (e.g., for multiple
conditions),
- a list or dict of lists of :class:`mne.Evoked` (e.g., for multiple
subjects in multiple conditions).
In the last case, it can show a confidence interval (across e.g. subjects)
using parametric or bootstrap estimation.
When ``picks`` includes more than one planar gradiometer, the planar
gradiometers are combined with RMSE. For example data from a
VectorView system with 204 gradiometers will be transformed to
102 channels.
"""
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
evokeds, colors = _format_evokeds_colors(evokeds, cmap, colors)
conditions = sorted(list(evokeds.keys()))
# check ci parameter
if ci is None:
ci = False
if ci is True:
ci = 0.95
elif ci is not False and not (isinstance(ci, np.float) or callable(ci)):
raise TypeError("ci must be None, bool, float or callable, got %s" % type(ci))
# get and set a few limits and variables (times, channels, units)
one_evoked = evokeds[conditions[0]][0]
times = one_evoked.times
info = one_evoked.info
tmin, tmax = times[0], times[-1]
if vlines == "auto" and (tmin < 0 and tmax > 0):
vlines = [0.0]
if not isinstance(vlines, (list, tuple)):
raise TypeError("vlines must be a list or tuple, not %s" % type(vlines))
if isinstance(picks, Integral):
picks = [picks]
elif picks is None:
logger.info("No picks, plotting the GFP ...")
gfp = True
picks = _pick_data_channels(info)
if not isinstance(picks, (list, np.ndarray)):
raise TypeError(
"picks should be a list or np.array of integers. Got %s." % type(picks)
)
if len(picks) == 0:
raise ValueError(
"No valid channels were found to plot the GFP. "
+ "Use 'picks' instead to select them manually."
)
if ylim is None:
ylim = dict()
# deal with picks: infer indices and names
if gfp is True:
if show_sensors is None:
show_sensors = False # don't show sensors for GFP
ch_names = ["Global Field Power"]
if len(picks) < 2:
raise ValueError(
"A GFP with less than 2 channels doesn't work, "
"please pick more than %d channels." % len(picks)
)
else:
if show_sensors is None:
show_sensors = True # show sensors when not doing GFP
ch_names = [one_evoked.ch_names[pick] for pick in picks]
picks_by_types = channel_indices_by_type(info, picks)
# keep only channel types for which there is a channel:
ch_types = [t for t in picks_by_types if len(picks_by_types[t]) > 0]
# let's take care of axis and figs
if axes is not None:
if not isinstance(axes, list):
axes = [axes]
_validate_if_list_of_axes(axes, obligatory_len=len(ch_types))
else:
axes = [plt.subplots(figsize=(8, 6))[1] for _ in range(len(ch_types))]
if len(ch_types) > 1:
logger.info("Multiple channel types selected, returning one figure per type.")
figs = list()
for ii, t in enumerate(ch_types):
picks_ = picks_by_types[t]
title_ = "GFP, " + t if (title is None and gfp is True) else title
figs.append(
plot_compare_evokeds(
evokeds,
picks=picks_,
gfp=gfp,
colors=colors,
linestyles=linestyles,
styles=styles,
vlines=vlines,
ci=ci,
truncate_yaxis=truncate_yaxis,
ylim=ylim,
invert_y=invert_y,
axes=axes[ii],
title=title_,
show=show,
)
)
return figs
# From now on there is only 1 channel type
assert len(ch_types) == 1
ch_type = ch_types[0]
all_positive = gfp # True if not gfp, False if gfp
pos_picks = picks # keep locations to pick for plotting
if ch_type == "grad" and len(picks) > 1:
logger.info("Combining all planar gradiometers with RMSE.")
pos_picks, _ = _grad_pair_pick_and_name(one_evoked.info, picks)
pos_picks = pos_picks[::2]
all_positive = True
for cond, this_evokeds in evokeds.items():
evokeds[cond] = [_combine_grad(e, picks) for e in this_evokeds]
ch_names = evokeds[cond][0].ch_names
picks = range(len(ch_names))
del info
ymin, ymax = ylim.get(ch_type, [None, None])
scaling = _handle_default("scalings")[ch_type]
unit = _handle_default("units")[ch_type]
if (ymin is None) and all_positive:
ymin = 0.0 # 'grad' and GFP are plotted as all-positive
# if we have a dict/list of lists, we compute the grand average and the CI
_ci_fun = None
if ci is not False:
if callable(ci):
_ci_fun = ci
else:
from ..stats import _ci
_ci_fun = partial(_ci, ci=ci, method="bootstrap")
# calculate the CI
ci_dict, data_dict = dict(), dict()
for cond in conditions:
this_evokeds = evokeds[cond]
# this will fail if evokeds do not have the same structure
# (e.g. channel count)
data = [e.data[picks, :] * scaling for e in this_evokeds]
data = np.array(data)
if gfp:
data = np.sqrt(np.mean(data * data, axis=1))
else:
data = np.mean(data, axis=1) # average across channels
if _ci_fun is not None: # compute CI if requested:
ci_dict[cond] = _ci_fun(data)
# average across conditions:
data_dict[cond] = np.mean(data, axis=0)
del evokeds
# we now have dicts for data ('evokeds' - grand averaged Evoked's)
# and the CI ('ci_array') with cond name labels
# style the individual condition time series
# Styles (especially color and linestyle) are pulled from a dict 'styles'.
# This dict has one entry per condition. Its color and linestyle entries
# are pulled from the 'colors' and 'linestyles' dicts via '/'-tag matching
# unless they are overwritten by entries from a user-provided 'styles'.
# first, copy to avoid overwriting
styles = deepcopy(styles)
colors = deepcopy(colors)
linestyles = deepcopy(linestyles)
# second, check if input is valid
if isinstance(styles, dict):
for style_ in styles:
if style_ not in conditions:
raise ValueError(
"Could not map between 'styles' and "
"conditions. Condition "
+ style_
+ " was not found in the supplied data."
)
# third, color
# check: is color a list?
if (
colors is not None
and not isinstance(colors, string_types)
and not isinstance(colors, dict)
and len(colors) > 1
):
colors = dict(
(condition, color) for condition, color in zip(conditions, colors)
)
if cmap is not None:
if not isinstance(cmap, string_types) and len(cmap) == 2:
cmap_label, cmap = cmap
else:
cmap_label = ""
# dealing with a split legend
if split_legend is None:
split_legend = cmap is not None # default to True iff cmap is given
if split_legend is True:
if colors is None:
raise ValueError("If `split_legend` is True, `colors` must not be None.")
# mpl 1.3 requires us to split it like this. with recent mpl,
# we could use the label parameter of the Line2D
legend_lines, legend_labels = list(), list()
if cmap is None: # ... one set of lines for the colors
for color in sorted(colors.keys()):
line = mlines.Line2D([], [], linestyle="-", color=colors[color])
legend_lines.append(line)
legend_labels.append(color)
if len(list(linestyles)) > 1: # ... one set for the linestyle
for style, s in linestyles.items():
line = mlines.Line2D([], [], color="k", linestyle=s)
legend_lines.append(line)
legend_labels.append(style)
styles, the_colors, color_conds, color_order, colors_are_float = _setup_styles(
data_dict.keys(), styles, cmap, colors, linestyles
)
# We now have a 'styles' dict with one entry per condition, specifying at
# least color and linestyles.
(ax,) = axes
del axes
# the actual plot
any_negative, any_positive = False, False
for condition in conditions:
# plot the actual data ('d') as a line
d = data_dict[condition].T
ax.plot(
times, d, zorder=1000, label=condition, clip_on=False, **styles[condition]
)
if np.any(d > 0) or all_positive:
any_positive = True
if np.any(d < 0):
any_negative = True
# plot the confidence interval if available
if _ci_fun is not None:
ci_ = ci_dict[condition]
ax.fill_between(
times,
ci_[0].flatten(),
ci_[1].flatten(),
zorder=9,
color=styles[condition]["c"],
alpha=0.3,
clip_on=False,
)
# truncate the y axis
orig_ymin, orig_ymax = ax.get_ylim()
if not any_positive:
orig_ymax = 0
if not any_negative:
orig_ymin = 0
ax.set_ylim(
orig_ymin if ymin is None else ymin, orig_ymax if ymax is None else ymax
)
fraction = 2 if ax.get_ylim()[0] >= 0 else 3
if truncate_yaxis is not False:
_, ymax_bound = _truncate_yaxis(
ax,
ymin,
ymax,
orig_ymin,
orig_ymax,
fraction,
any_positive,
any_negative,
truncate_yaxis,
)
else:
if truncate_yaxis is True and ymin is not None and ymin > 0:
warn("ymin is all-positive, not truncating yaxis")
ymax_bound = ax.get_ylim()[-1]
title = _set_title_multiple_electrodes(
title, "average" if gfp is False else "gfp", ch_names
)
ax.set_title(title)
current_ymin = ax.get_ylim()[0]
# plot v lines
if invert_y is True and current_ymin < 0:
upper_v, lower_v = -ymax_bound, ax.get_ylim()[-1]
else:
upper_v, lower_v = ax.get_ylim()[0], ymax_bound
ax.vlines(
vlines, upper_v, lower_v, linestyles="--", colors="k", linewidth=1.0, zorder=1
)
_setup_ax_spines(ax, vlines, tmin, tmax, invert_y, ymax_bound, unit, truncate_xaxis)
# and now for 3 "legends" ..
# a head plot showing the sensors that are being plotted
if show_sensors:
if show_sensors is True:
ymin, ymax = np.abs(ax.get_ylim())
show_sensors = "lower right" if ymin > ymax else "upper right"
try:
pos = _auto_topomap_coords(
one_evoked.info, pos_picks, ignore_overlap=True, to_sphere=True
)
except ValueError:
warn(
"Cannot find channel coordinates in the supplied Evokeds. "
"Not showing channel locations."
)
else:
head_pos = {"center": (0, 0), "scale": (0.5, 0.5)}
pos, outlines = _check_outlines(pos, np.array([1, 1]), head_pos)
if not isinstance(show_sensors, (np.int, bool, str)):
raise TypeError(
"show_sensors must be numeric, str or bool, "
"not " + str(type(show_sensors))
)
show_sensors = _check_loc_legal(show_sensors, "show_sensors")
_plot_legend(
pos, ["k"] * len(picks), ax, list(), outlines, show_sensors, size=25
)
# the condition legend
if len(conditions) > 1 and show_legend is not False:
show_legend = _check_loc_legal(show_legend, "show_legend")
legend_params = dict(loc=show_legend, frameon=True)
if split_legend:
if len(legend_lines) > 1:
ax.legend(
legend_lines,
legend_labels, # see above: mpl 1.3
ncol=1 + (len(legend_lines) // 4),
**legend_params,
)
else:
ax.legend(ncol=1 + (len(conditions) // 5), **legend_params)
# the colormap, if `cmap` is provided
if split_legend and cmap is not None:
# plot the colorbar ... complicated cause we don't have a heatmap
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(ax)
ax_cb = divider.append_axes("right", size="5%", pad=0.05)
if colors_are_float:
ax_cb.imshow(
the_colors[:, np.newaxis, :], interpolation="none", aspect=0.05
)
color_ticks = np.array(list(set(colors.values()))) * 100
ax_cb.set_yticks(color_ticks)
ax_cb.set_yticklabels(color_ticks)
else:
ax_cb.imshow(the_colors[:, np.newaxis, :], interpolation="none")
ax_cb.set_yticks(np.arange(len(the_colors)))
ax_cb.set_yticklabels(np.array(color_conds)[color_order])
ax_cb.yaxis.tick_right()
ax_cb.set(xticks=(), ylabel=cmap_label)
plt_show(show)
return ax.figure
|
def plot_compare_evokeds(
evokeds,
picks=None,
gfp=False,
colors=None,
linestyles=["-"],
styles=None,
cmap=None,
vlines="auto",
ci=0.95,
truncate_yaxis=False,
truncate_xaxis=True,
ylim=dict(),
invert_y=False,
show_sensors=None,
show_legend=True,
split_legend=False,
axes=None,
title=None,
show=True,
):
"""Plot evoked time courses for one or more conditions and/or channels.
Parameters
----------
evokeds : instance of mne.Evoked | list | dict
If a single Evoked instance, it is plotted as a time series.
If a dict whose values are Evoked objects, the contents are plotted as
single time series each and the keys are used as condition labels.
If a list of Evokeds, the contents are plotted with indices as labels.
If a [dict/list] of lists, the unweighted mean is plotted as a time
series and the parametric confidence interval is plotted as a shaded
area. All instances must have the same shape - channel numbers, time
points etc.
If dict, keys must be of type str.
picks : None | int | list of int
If int or list of int, the indices of the sensors to average and plot.
If multiple channel types are selected, one figure will be returned for
each channel type.
If the selected channels are gradiometers, the signal from
corresponding (gradiometer) pairs will be combined.
If None, it defaults to all data channels, in which case the global
field power will be plotted for all channel type available.
gfp : bool
If True, the channel type wise GFP is plotted.
If `picks` is an empty list (default), this is set to True.
colors : list | dict | None
If a list, will be sequentially used for line colors.
If a dict, can map evoked keys or '/'-separated (HED) tags to
conditions.
For example, if `evokeds` is a dict with the keys "Aud/L", "Aud/R",
"Vis/L", "Vis/R", `colors` can be `dict(Aud='r', Vis='b')` to map both
Aud/L and Aud/R to the color red and both Visual conditions to blue.
If None (default), a sequence of desaturated colors is used.
If `cmap` is None, `colors` will indicate how each condition is
colored with reference to its position on the colormap - see `cmap`
below. In that case, the values of colors must be either integers,
in which case they will be mapped to colors in rank order; or floats
between 0 and 1, in which case they will be mapped to percentiles of
the colormap.
linestyles : list | dict
If a list, will be sequentially and repeatedly used for evoked plot
linestyles.
If a dict, can map the `evoked` keys or '/'-separated (HED) tags to
conditions.
For example, if evokeds is a dict with the keys "Aud/L", "Aud/R",
"Vis/L", "Vis/R", `linestyles` can be `dict(L='--', R='-')` to map both
Aud/L and Vis/L to dashed lines and both Right-side conditions to
straight lines.
styles : dict | None
If a dict, keys must map to evoked keys or conditions, and values must
be a dict of legal inputs to `matplotlib.pyplot.plot`. These
parameters will be passed to the line plot call of the corresponding
condition, overriding defaults.
E.g., if evokeds is a dict with the keys "Aud/L", "Aud/R",
"Vis/L", "Vis/R", `styles` can be `{"Aud/L": {"linewidth": 1}}` to set
the linewidth for "Aud/L" to 1. Note that HED ('/'-separated) tags are
not supported.
cmap : None | str | tuple
If not None, plot evoked activity with colors from a color gradient
(indicated by a str referencing a matplotlib colormap - e.g., "viridis"
or "Reds").
If ``evokeds`` is a list and ``colors`` is `None`, the color will
depend on the list position. If ``colors`` is a list, it must contain
integers where the list positions correspond to ``evokeds``, and the
value corresponds to the position on the colorbar.
If ``evokeds`` is a dict, ``colors`` should be a dict mapping from
(potentially HED-style) condition tags to numbers corresponding to
positions on the colorbar - rank order for integers, or floats for
percentiles. E.g., ::
evokeds={"cond1/A": ev1, "cond2/A": ev2, "cond3/A": ev3, "B": ev4},
cmap='viridis', colors=dict(cond1=1 cond2=2, cond3=3),
linestyles={"A": "-", "B": ":"}
If ``cmap`` is a tuple of length 2, the first item must be
a string which will become the colorbar label, and the second one
must indicate a colormap, e.g. ::
cmap=('conds', 'viridis'), colors=dict(cond1=1 cond2=2, cond3=3),
vlines : "auto" | list of float
A list in seconds at which to plot dashed vertical lines.
If "auto" and the supplied data includes 0, it is set to [0.]
and a vertical bar is plotted at time 0.
ci : float | callable | None | bool
If not None and ``evokeds`` is a [list/dict] of lists, a shaded
confidence interval is drawn around the individual time series. If
float, a percentile bootstrap method is used to estimate the confidence
interval and this value determines the CI width. E.g., if this value is
.95 (the default), the 95% confidence interval is drawn. If a callable,
it must take as its single argument an array (observations x times) and
return the upper and lower confidence bands.
If None or False, no confidence band is plotted.
If True, the 95% confidence interval is drawn.
truncate_yaxis : bool | str
If True, the left y axis spine is truncated to reduce visual clutter.
If 'max_ticks', the spine is truncated at the minimum and maximum
ticks. Else, it is truncated to half the max absolute value, rounded to
.25. Defaults to False.
truncate_xaxis : bool
If True, the x axis is truncated to span from the first to the last.
xtick. Defaults to True.
ylim : dict | None
ylim for plots (after scaling has been applied). e.g.
ylim = dict(eeg=[-20, 20])
Valid keys are eeg, mag, grad, misc. If None, the ylim parameter
for each channel equals the pyplot default.
invert_y : bool
If True, negative values are plotted up (as is sometimes done
for ERPs out of tradition). Defaults to False.
show_sensors: bool | int | str | None
If not False, channel locations are plotted on a small head circle.
If int or str, the position of the axes (forwarded to
``mpl_toolkits.axes_grid1.inset_locator.inset_axes``).
If None, defaults to True if ``gfp`` is False, else to False.
show_legend : bool | str | int
If not False, show a legend. If int or str, it is the position of the
legend axes (forwarded to
``mpl_toolkits.axes_grid1.inset_locator.inset_axes``).
split_legend : bool
If True, the legend shows color and linestyle separately; `colors` must
not be None. Defaults to True if ``cmap`` is not None, else defaults to
False.
axes : None | `matplotlib.axes.Axes` instance | list of `axes`
What axes to plot to. If None, a new axes is created.
When plotting multiple channel types, can also be a list of axes, one
per channel type.
title : None | str
If str, will be plotted as figure title. If None, the channel names
will be shown.
show : bool
If True, show the figure.
Returns
-------
fig : Figure | list of Figures
The figure(s) in which the plot is drawn. When plotting multiple
channel types, a list of figures, one for each channel type is
returned.
Notes
-----
When multiple channels are passed, this function combines them all, to
get one time course for each condition. If gfp is True it combines
channels using global field power (GFP) computation, else it is taking
a plain mean.
This function is useful for comparing multiple ER[P/F]s - e.g., for
multiple conditions - at a specific location.
It can plot:
- a simple :class:`mne.Evoked` object,
- a list or dict of :class:`mne.Evoked` objects (e.g., for multiple
conditions),
- a list or dict of lists of :class:`mne.Evoked` (e.g., for multiple
subjects in multiple conditions).
In the last case, it can show a confidence interval (across e.g. subjects)
using parametric or bootstrap estimation.
When ``picks`` includes more than one planar gradiometer, the planar
gradiometers are combined with RMSE. For example data from a
VectorView system with 204 gradiometers will be transformed to
102 channels.
"""
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
evokeds, colors = _format_evokeds_colors(evokeds, cmap, colors)
conditions = sorted(list(evokeds.keys()))
# check ci parameter
if ci is None:
ci = False
if ci is True:
ci = 0.95
elif ci is not False and not (isinstance(ci, np.float) or callable(ci)):
raise TypeError("ci must be None, bool, float or callable, got %s" % type(ci))
# get and set a few limits and variables (times, channels, units)
one_evoked = evokeds[conditions[0]][0]
times = one_evoked.times
info = one_evoked.info
tmin, tmax = times[0], times[-1]
if vlines == "auto" and (tmin < 0 and tmax > 0):
vlines = [0.0]
if not isinstance(vlines, (list, tuple)):
raise TypeError("vlines must be a list or tuple, not %s" % type(vlines))
if isinstance(picks, Integral):
picks = [picks]
elif picks is None:
logger.info("No picks, plotting the GFP ...")
gfp = True
picks = _pick_data_channels(info)
if not isinstance(picks, (list, np.ndarray)):
raise TypeError(
"picks should be a list or np.array of integers. Got %s." % type(picks)
)
if len(picks) == 0:
raise ValueError(
"No valid channels were found to plot the GFP. "
+ "Use 'picks' instead to select them manually."
)
if ylim is None:
ylim = dict()
# deal with picks: infer indices and names
if gfp is True:
if show_sensors is None:
show_sensors = False # don't show sensors for GFP
ch_names = ["Global Field Power"]
if len(picks) < 2:
raise ValueError(
"A GFP with less than 2 channels doesn't work, "
"please pick more than %d channels." % len(picks)
)
else:
if show_sensors is None:
show_sensors = True # show sensors when not doing GFP
ch_names = [one_evoked.ch_names[pick] for pick in picks]
picks_by_types = channel_indices_by_type(info, picks)
# keep only channel types for which there is a channel:
ch_types = [t for t in picks_by_types if len(picks_by_types[t]) > 0]
# let's take care of axis and figs
if axes is not None:
if not isinstance(axes, list):
axes = [axes]
_validate_if_list_of_axes(axes, obligatory_len=len(ch_types))
else:
axes = [plt.subplots(figsize=(8, 6))[1] for _ in range(len(ch_types))]
if len(ch_types) > 1:
logger.info("Multiple channel types selected, returning one figure per type.")
figs = list()
for ii, t in enumerate(ch_types):
picks_ = picks_by_types[t]
title_ = "GFP, " + t if (title is None and gfp is True) else title
figs.append(
plot_compare_evokeds(
evokeds,
picks=picks_,
gfp=gfp,
colors=colors,
linestyles=linestyles,
styles=styles,
vlines=vlines,
ci=ci,
truncate_yaxis=truncate_yaxis,
ylim=ylim,
invert_y=invert_y,
axes=axes[ii],
title=title_,
show=show,
)
)
return figs
# From now on there is only 1 channel type
assert len(ch_types) == 1
ch_type = ch_types[0]
all_positive = gfp # True if not gfp, False if gfp
pos_picks = picks # keep locations to pick for plotting
if ch_type == "grad" and len(picks) > 1:
logger.info("Combining all planar gradiometers with RMSE.")
pos_picks, _ = _grad_pair_pick_and_name(one_evoked.info, picks)
pos_picks = pos_picks[::2]
all_positive = True
for cond, this_evokeds in evokeds.items():
evokeds[cond] = [_combine_grad(e, picks) for e in this_evokeds]
ch_names = evokeds[cond][0].ch_names
picks = range(len(ch_names))
del info
ymin, ymax = ylim.get(ch_type, [None, None])
scaling = _handle_default("scalings")[ch_type]
unit = _handle_default("units")[ch_type]
if (ymin is None) and all_positive:
ymin = 0.0 # 'grad' and GFP are plotted as all-positive
# if we have a dict/list of lists, we compute the grand average and the CI
_ci_fun = None
if ci is not False:
if callable(ci):
_ci_fun = ci
else:
from ..stats import _ci
_ci_fun = partial(_ci, ci=ci, method="bootstrap")
# calculate the CI
ci_dict, data_dict = dict(), dict()
for cond in conditions:
this_evokeds = evokeds[cond]
# this will fail if evokeds do not have the same structure
# (e.g. channel count)
data = [e.data[picks, :] * scaling for e in this_evokeds]
data = np.array(data)
if gfp:
data = np.sqrt(np.mean(data * data, axis=1))
else:
data = np.mean(data, axis=1) # average across channels
if _ci_fun is not None: # compute CI if requested:
ci_dict[cond] = _ci_fun(data)
# average across conditions:
data_dict[cond] = np.mean(data, axis=0)
del evokeds
# we now have dicts for data ('evokeds' - grand averaged Evoked's)
# and the CI ('ci_array') with cond name labels
# style the individual condition time series
# Styles (especially color and linestyle) are pulled from a dict 'styles'.
# This dict has one entry per condition. Its color and linestyle entries
# are pulled from the 'colors' and 'linestyles' dicts via '/'-tag matching
# unless they are overwritten by entries from a user-provided 'styles'.
# first, copy to avoid overwriting
styles = deepcopy(styles)
colors = deepcopy(colors)
linestyles = deepcopy(linestyles)
# second, check if input is valid
if isinstance(styles, dict):
for style_ in styles:
if style_ not in conditions:
raise ValueError(
"Could not map between 'styles' and "
"conditions. Condition "
+ style_
+ " was not found in the supplied data."
)
# third, color
# check: is color a list?
if (
colors is not None
and not isinstance(colors, string_types)
and not isinstance(colors, dict)
and len(colors) > 1
):
colors = dict(
(condition, color) for condition, color in zip(conditions, colors)
)
if cmap is not None:
if not isinstance(cmap, string_types) and len(cmap) == 2:
cmap_label, cmap = cmap
else:
cmap_label = ""
# dealing with a split legend
if split_legend is None:
split_legend = cmap is not None # default to True iff cmap is given
if split_legend is True:
if colors is None:
raise ValueError("If `split_legend` is True, `colors` must not be None.")
# mpl 1.3 requires us to split it like this. with recent mpl,
# we could use the label parameter of the Line2D
legend_lines, legend_labels = list(), list()
if cmap is None: # ... one set of lines for the colors
for color in sorted(colors.keys()):
line = mlines.Line2D([], [], linestyle="-", color=colors[color])
legend_lines.append(line)
legend_labels.append(color)
if len(list(linestyles)) > 1: # ... one set for the linestyle
for style, s in linestyles.items():
line = mlines.Line2D([], [], color="k", linestyle=s)
legend_lines.append(line)
legend_labels.append(style)
styles, the_colors, color_conds, color_order, colors_are_float = _setup_styles(
data_dict.keys(), styles, cmap, colors, linestyles
)
# We now have a 'styles' dict with one entry per condition, specifying at
# least color and linestyles.
(ax,) = axes
del axes
# the actual plot
any_negative, any_positive = False, False
for condition in conditions:
# plot the actual data ('d') as a line
d = data_dict[condition].T
ax.plot(
times, d, zorder=1000, label=condition, clip_on=False, **styles[condition]
)
if np.any(d > 0) or all_positive:
any_positive = True
if np.any(d < 0):
any_negative = True
# plot the confidence interval if available
if _ci_fun is not None:
ci_ = ci_dict[condition]
ax.fill_between(
times,
ci_[0].flatten(),
ci_[1].flatten(),
zorder=9,
color=styles[condition]["c"],
alpha=0.3,
clip_on=False,
)
# truncate the y axis
orig_ymin, orig_ymax = ax.get_ylim()
if not any_positive:
orig_ymax = 0
if not any_negative:
orig_ymin = 0
ax.set_ylim(
orig_ymin if ymin is None else ymin, orig_ymax if ymax is None else ymax
)
fraction = 2 if ax.get_ylim()[0] >= 0 else 3
if truncate_yaxis is not False:
_, ymax_bound = _truncate_yaxis(
ax,
ymin,
ymax,
orig_ymin,
orig_ymax,
fraction,
any_positive,
any_negative,
truncate_yaxis,
)
else:
if truncate_yaxis is True and ymin is not None and ymin > 0:
warn("ymin is all-positive, not truncating yaxis")
ymax_bound = ax.get_ylim()[-1]
title = _set_title_multiple_electrodes(
title, "average" if gfp is False else "gfp", ch_names
)
ax.set_title(title)
current_ymin = ax.get_ylim()[0]
# plot v lines
if invert_y is True and current_ymin < 0:
upper_v, lower_v = -ymax_bound, ax.get_ylim()[-1]
else:
upper_v, lower_v = ax.get_ylim()[0], ymax_bound
ax.vlines(
vlines, upper_v, lower_v, linestyles="--", colors="k", linewidth=1.0, zorder=1
)
_setup_ax_spines(ax, vlines, tmin, tmax, invert_y, ymax_bound, unit, truncate_xaxis)
# and now for 3 "legends" ..
# a head plot showing the sensors that are being plotted
if show_sensors:
if show_sensors is True:
ymin, ymax = np.abs(ax.get_ylim())
show_sensors = "lower right" if ymin > ymax else "upper right"
try:
pos = _auto_topomap_coords(
one_evoked.info, pos_picks, ignore_overlap=True, to_sphere=True
)
except ValueError:
warn(
"Cannot find channel coordinates in the supplied Evokeds. "
"Not showing channel locations."
)
else:
head_pos = {"center": (0, 0), "scale": (0.5, 0.5)}
pos, outlines = _check_outlines(pos, np.array([1, 1]), head_pos)
if not isinstance(show_sensors, (np.int, bool, str)):
raise TypeError(
"show_sensors must be numeric, str or bool, "
"not " + str(type(show_sensors))
)
show_sensors = _check_loc_legal(show_sensors, "show_sensors")
_plot_legend(
pos, ["k" for _ in picks], ax, list(), outlines, show_sensors, size=25
)
# the condition legend
if len(conditions) > 1 and show_legend is not False:
show_legend = _check_loc_legal(show_legend, "show_legend")
legend_params = dict(loc=show_legend, frameon=True)
if split_legend:
if len(legend_lines) > 1:
ax.legend(
legend_lines,
legend_labels, # see above: mpl 1.3
ncol=1 + (len(legend_lines) // 4),
**legend_params,
)
else:
ax.legend(ncol=1 + (len(conditions) // 5), **legend_params)
# the colormap, if `cmap` is provided
if split_legend and cmap is not None:
# plot the colorbar ... complicated cause we don't have a heatmap
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(ax)
ax_cb = divider.append_axes("right", size="5%", pad=0.05)
if colors_are_float:
ax_cb.imshow(
the_colors[:, np.newaxis, :], interpolation="none", aspect=0.05
)
color_ticks = np.array(list(set(colors.values()))) * 100
ax_cb.set_yticks(color_ticks)
ax_cb.set_yticklabels(color_ticks)
else:
ax_cb.imshow(the_colors[:, np.newaxis, :], interpolation="none")
ax_cb.set_yticks(np.arange(len(the_colors)))
ax_cb.set_yticklabels(np.array(color_conds)[color_order])
ax_cb.yaxis.tick_right()
ax_cb.set(xticks=(), ylabel=cmap_label)
plt_show(show)
return ax.figure
|
https://github.com/mne-tools/mne-python/issues/5046
|
/miniconda3/envs/mne/lib/python2.7/site-packages/mne/viz/evoked.py:433: VisibleDeprecationWarning: boolean index did not match indexed array along dimension 0; dimension is 1 but corresponding boolean dimension is 2
for ax in np.array(axes)[selectables]:
Traceback (most recent call last):
File "psd_bug.py", line 10, in <module>
ax = raw.plot_psd(picks=picks, dB=False, show=True, average=False)
File "<string>", line 2, in plot_psd
File "/home/erik/miniconda3/envs/mne/lib/python2.7/site-packages/mne/utils.py", line 728, in verbose
return function(*args, **kwargs)
File "/home/erik/miniconda3/envs/mne/lib/python2.7/site-packages/mne/io/base.py", line 1779, in plot_psd
xscale=xscale, reject_by_annotation=reject_by_annotation)
File "<string>", line 2, in plot_raw_psd
File "/home/erik/miniconda3/envs/mne/lib/python2.7/site-packages/mne/utils.py", line 728, in verbose
return function(*args, **kwargs)
File "/home/erik/miniconda3/envs/mne/lib/python2.7/site-packages/mne/viz/raw.py", line 812, in plot_raw_psd
line_alpha=line_alpha)
File "/home/erik/miniconda3/envs/mne/lib/python2.7/site-packages/mne/viz/evoked.py", line 433, in _plot_lines
for ax in np.array(axes)[selectables]:
IndexError: index 1 is out of bounds for axis 1 with size 1
|
IndexError
|
def _set_psd_plot_params(info, proj, picks, ax, area_mode):
"""Set PSD plot params."""
import matplotlib.pyplot as plt
if area_mode not in [None, "std", "range"]:
raise ValueError('"area_mode" must be "std", "range", or None')
# XXX this could be refactored more with e.g., plot_evoked
megs = ["mag", "grad", False, False, False]
eegs = [False, False, True, False, False]
seegs = [False, False, False, True, False]
ecogs = [False, False, False, False, True]
names = ["mag", "grad", "eeg", "seeg", "ecog"]
titles = _handle_default("titles", None)
units = _handle_default("units", None)
scalings = _handle_default("scalings", None)
picks_list = list()
titles_list = list()
units_list = list()
scalings_list = list()
for meg, eeg, seeg, ecog, name in zip(megs, eegs, seegs, ecogs, names):
these_picks = pick_types(
info, meg=meg, eeg=eeg, seeg=seeg, ecog=ecog, ref_meg=False
)
if picks is not None:
these_picks = np.intersect1d(these_picks, picks)
if len(these_picks) > 0:
picks_list.append(these_picks)
titles_list.append(titles[name])
units_list.append(units[name])
scalings_list.append(scalings[name])
if len(picks_list) == 0:
raise RuntimeError("No data channels found")
if ax is not None:
if isinstance(ax, plt.Axes):
ax = [ax]
if len(ax) != len(picks_list):
raise ValueError(
"For this dataset with picks=None %s axes "
"must be supplied, got %s" % (len(picks_list), len(ax))
)
ax_list = ax
del picks
make_label = False
fig = None
if ax is None:
fig = plt.figure()
ax_list = list()
for ii in range(len(picks_list)):
# Make x-axes change together
if ii > 0:
ax_list.append(
plt.subplot(len(picks_list), 1, ii + 1, sharex=ax_list[0])
)
else:
ax_list.append(plt.subplot(len(picks_list), 1, ii + 1))
make_label = True
else:
fig = ax_list[0].get_figure()
return (
fig,
picks_list,
titles_list,
units_list,
scalings_list,
ax_list,
make_label,
)
|
def _set_psd_plot_params(info, proj, picks, ax, area_mode):
"""Set PSD plot params."""
import matplotlib.pyplot as plt
if area_mode not in [None, "std", "range"]:
raise ValueError('"area_mode" must be "std", "range", or None')
if picks is None:
# XXX this could be refactored more with e.g., plot_evoked
megs = ["mag", "grad", False, False, False]
eegs = [False, False, True, False, False]
seegs = [False, False, False, True, False]
ecogs = [False, False, False, False, True]
names = ["mag", "grad", "eeg", "seeg", "ecog"]
titles = _handle_default("titles", None)
units = _handle_default("units", None)
scalings = _handle_default("scalings", None)
picks_list = list()
titles_list = list()
units_list = list()
scalings_list = list()
for meg, eeg, seeg, ecog, name in zip(megs, eegs, seegs, ecogs, names):
picks = pick_types(
info, meg=meg, eeg=eeg, seeg=seeg, ecog=ecog, ref_meg=False
)
if len(picks) > 0:
picks_list.append(picks)
titles_list.append(titles[name])
units_list.append(units[name])
scalings_list.append(scalings[name])
if len(picks_list) == 0:
raise RuntimeError("No data channels found")
if ax is not None:
if isinstance(ax, plt.Axes):
ax = [ax]
if len(ax) != len(picks_list):
raise ValueError(
"For this dataset with picks=None %s axes "
"must be supplied, got %s" % (len(picks_list), len(ax))
)
ax_list = ax
else:
picks_list = [picks]
titles_list = ["Selected channels"]
units_list = ["amplitude"]
scalings_list = [1.0]
ax_list = [ax]
make_label = False
fig = None
if ax is None:
fig = plt.figure()
ax_list = list()
for ii in range(len(picks_list)):
# Make x-axes change together
if ii > 0:
ax_list.append(
plt.subplot(len(picks_list), 1, ii + 1, sharex=ax_list[0])
)
else:
ax_list.append(plt.subplot(len(picks_list), 1, ii + 1))
make_label = True
else:
fig = ax_list[0].get_figure()
return (
fig,
picks_list,
titles_list,
units_list,
scalings_list,
ax_list,
make_label,
)
|
https://github.com/mne-tools/mne-python/issues/5046
|
/miniconda3/envs/mne/lib/python2.7/site-packages/mne/viz/evoked.py:433: VisibleDeprecationWarning: boolean index did not match indexed array along dimension 0; dimension is 1 but corresponding boolean dimension is 2
for ax in np.array(axes)[selectables]:
Traceback (most recent call last):
File "psd_bug.py", line 10, in <module>
ax = raw.plot_psd(picks=picks, dB=False, show=True, average=False)
File "<string>", line 2, in plot_psd
File "/home/erik/miniconda3/envs/mne/lib/python2.7/site-packages/mne/utils.py", line 728, in verbose
return function(*args, **kwargs)
File "/home/erik/miniconda3/envs/mne/lib/python2.7/site-packages/mne/io/base.py", line 1779, in plot_psd
xscale=xscale, reject_by_annotation=reject_by_annotation)
File "<string>", line 2, in plot_raw_psd
File "/home/erik/miniconda3/envs/mne/lib/python2.7/site-packages/mne/utils.py", line 728, in verbose
return function(*args, **kwargs)
File "/home/erik/miniconda3/envs/mne/lib/python2.7/site-packages/mne/viz/raw.py", line 812, in plot_raw_psd
line_alpha=line_alpha)
File "/home/erik/miniconda3/envs/mne/lib/python2.7/site-packages/mne/viz/evoked.py", line 433, in _plot_lines
for ax in np.array(axes)[selectables]:
IndexError: index 1 is out of bounds for axis 1 with size 1
|
IndexError
|
def plot_raw_psd(
raw,
tmin=0.0,
tmax=np.inf,
fmin=0,
fmax=np.inf,
proj=False,
n_fft=None,
picks=None,
ax=None,
color="black",
area_mode="std",
area_alpha=0.33,
n_overlap=0,
dB=True,
estimate="auto",
average=False,
show=True,
n_jobs=1,
line_alpha=None,
spatial_colors=None,
xscale="linear",
reject_by_annotation=True,
verbose=None,
):
"""Plot the power spectral density across channels.
Different channel types are drawn in sub-plots. When the data has been
processed with a bandpass, lowpass or highpass filter, dashed lines
indicate the boundaries of the filter (--). The line noise frequency is
also indicated with a dashed line (-.).
Parameters
----------
raw : instance of io.Raw
The raw instance to use.
tmin : float
Start time for calculations.
tmax : float
End time for calculations.
fmin : float
Start frequency to consider.
fmax : float
End frequency to consider.
proj : bool
Apply projection.
n_fft : int | None
Number of points to use in Welch FFT calculations.
Default is None, which uses the minimum of 2048 and the
number of time points.
picks : array-like of int | None
List of channels to use. Cannot be None if `ax` is supplied. If both
`picks` and `ax` are None, separate subplots will be created for
each standard channel type (`mag`, `grad`, and `eeg`).
ax : instance of matplotlib Axes | None
Axes to plot into. If None, axes will be created.
color : str | tuple
A matplotlib-compatible color to use. Has no effect when
spatial_colors=True.
area_mode : str | None
Mode for plotting area. If 'std', the mean +/- 1 STD (across channels)
will be plotted. If 'range', the min and max (across channels) will be
plotted. Bad channels will be excluded from these calculations.
If None, no area will be plotted. If average=False, no area is plotted.
area_alpha : float
Alpha for the area.
n_overlap : int
The number of points of overlap between blocks. The default value
is 0 (no overlap).
dB : bool
Plot Power Spectral Density (PSD), in units (amplitude**2/Hz (dB)) if
``dB=True``, and ``estimate='power'`` or ``estimate='auto'``. Plot PSD
in units (amplitude**2/Hz) if ``dB=False`` and,
``estimate='power'``. Plot Amplitude Spectral Density (ASD), in units
(amplitude/sqrt(Hz)), if ``dB=False`` and ``estimate='amplitude'`` or
``estimate='auto'``. Plot ASD, in units (amplitude/sqrt(Hz) (db)), if
``dB=True`` and ``estimate='amplitude'``.
estimate : str, {'auto', 'power', 'amplitude'}
Can be "power" for power spectral density (PSD), "amplitude" for
amplitude spectrum density (ASD), or "auto" (default), which uses
"power" when dB is True and "amplitude" otherwise.
average : bool
If False (default), the PSDs of all channels is displayed. No averaging
is done and parameters area_mode and area_alpha are ignored. When
False, it is possible to paint an area (hold left mouse button and
drag) to plot a topomap.
show : bool
Show figure if True.
n_jobs : int
Number of jobs to run in parallel.
line_alpha : float | None
Alpha for the PSD line. Can be None (default) to use 1.0 when
``average=True`` and 0.1 when ``average=False``.
spatial_colors : bool
Whether to use spatial colors. Only used when ``average=False``.
xscale : str
Can be 'linear' (default) or 'log'.
reject_by_annotation : bool
Whether to omit bad segments from the data while computing the
PSD. If True, annotated segments with a description that starts
with 'bad' are omitted. Has no effect if ``inst`` is an Epochs or
Evoked object. Defaults to True.
.. versionadded:: 0.15.0
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
fig : instance of matplotlib figure
Figure with frequency spectra of the data channels.
"""
from matplotlib.ticker import ScalarFormatter
if average and spatial_colors:
raise ValueError("Average and spatial_colors cannot be enabled simultaneously.")
if spatial_colors is None:
spatial_colors = False if average else True
fig, picks_list, titles_list, units_list, scalings_list, ax_list, make_label = (
_set_psd_plot_params(raw.info, proj, picks, ax, area_mode)
)
del ax
if line_alpha is None:
line_alpha = 1.0 if average else 0.75
line_alpha = float(line_alpha)
psd_list = list()
ylabels = list()
if n_fft is None:
tmax = raw.times[-1] if not np.isfinite(tmax) else tmax
n_fft = min(np.diff(raw.time_as_index([tmin, tmax]))[0] + 1, 2048)
for ii, picks in enumerate(picks_list):
ax = ax_list[ii]
psds, freqs = psd_welch(
raw,
tmin=tmin,
tmax=tmax,
picks=picks,
fmin=fmin,
fmax=fmax,
proj=proj,
n_fft=n_fft,
n_overlap=n_overlap,
n_jobs=n_jobs,
reject_by_annotation=reject_by_annotation,
)
ylabel = _convert_psds(
psds,
dB,
estimate,
scalings_list[ii],
units_list[ii],
[raw.ch_names[pi] for pi in picks],
)
if average:
psd_mean = np.mean(psds, axis=0)
if area_mode == "std":
psd_std = np.std(psds, axis=0)
hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)
elif area_mode == "range":
hyp_limits = (np.min(psds, axis=0), np.max(psds, axis=0))
else: # area_mode is None
hyp_limits = None
ax.plot(freqs, psd_mean, color=color, alpha=line_alpha, linewidth=0.5)
if hyp_limits is not None:
ax.fill_between(
freqs,
hyp_limits[0],
y2=hyp_limits[1],
color=color,
alpha=area_alpha,
)
else:
psd_list.append(psds)
if make_label:
if ii == len(picks_list) - 1:
ax.set_xlabel("Frequency (Hz)")
ax.set_ylabel(ylabel)
ax.set_title(titles_list[ii])
ax.set_xlim(freqs[0], freqs[-1])
ylabels.append(ylabel)
for key, ls in zip(["lowpass", "highpass", "line_freq"], ["--", "--", "-."]):
if raw.info[key] is not None:
for ax in ax_list:
ax.axvline(
raw.info[key],
color="k",
linestyle=ls,
alpha=0.25,
linewidth=2,
zorder=2,
)
if not average:
picks = np.concatenate(picks_list)
psd_list = np.concatenate(psd_list)
types = np.array([channel_type(raw.info, idx) for idx in picks])
# Needed because the data does not match the info anymore.
info = create_info([raw.ch_names[p] for p in picks], raw.info["sfreq"], types)
info["chs"] = [raw.info["chs"][p] for p in picks]
valid_channel_types = [
"mag",
"grad",
"eeg",
"seeg",
"eog",
"ecg",
"emg",
"dipole",
"gof",
"bio",
"ecog",
"hbo",
"hbr",
"misc",
]
ch_types_used = list()
for this_type in valid_channel_types:
if this_type in types:
ch_types_used.append(this_type)
assert len(ch_types_used) == len(ax_list)
unit = ""
units = {t: yl for t, yl in zip(ch_types_used, ylabels)}
titles = {c: t for c, t in zip(ch_types_used, titles_list)}
picks = np.arange(len(psd_list))
if not spatial_colors:
spatial_colors = color
_plot_lines(
psd_list,
info,
picks,
fig,
ax_list,
spatial_colors,
unit,
units=units,
scalings=None,
hline=None,
gfp=False,
types=types,
zorder="std",
xlim=(freqs[0], freqs[-1]),
ylim=None,
times=freqs,
bad_ch_idx=[],
titles=titles,
ch_types_used=ch_types_used,
selectable=True,
psd=True,
line_alpha=line_alpha,
)
for ax in ax_list:
ax.grid(True, linestyle=":")
if xscale == "log":
ax.set(xscale="log")
ax.set(xlim=[freqs[1] if freqs[0] == 0 else freqs[0], freqs[-1]])
ax.get_xaxis().set_major_formatter(ScalarFormatter())
if make_label:
tight_layout(pad=0.1, h_pad=0.1, w_pad=0.1, fig=fig)
plt_show(show)
return fig
|
def plot_raw_psd(
raw,
tmin=0.0,
tmax=np.inf,
fmin=0,
fmax=np.inf,
proj=False,
n_fft=None,
picks=None,
ax=None,
color="black",
area_mode="std",
area_alpha=0.33,
n_overlap=0,
dB=True,
estimate="auto",
average=False,
show=True,
n_jobs=1,
line_alpha=None,
spatial_colors=None,
xscale="linear",
reject_by_annotation=True,
verbose=None,
):
"""Plot the power spectral density across channels.
Different channel types are drawn in sub-plots. When the data has been
processed with a bandpass, lowpass or highpass filter, dashed lines
indicate the boundaries of the filter (--). The line noise frequency is
also indicated with a dashed line (-.).
Parameters
----------
raw : instance of io.Raw
The raw instance to use.
tmin : float
Start time for calculations.
tmax : float
End time for calculations.
fmin : float
Start frequency to consider.
fmax : float
End frequency to consider.
proj : bool
Apply projection.
n_fft : int | None
Number of points to use in Welch FFT calculations.
Default is None, which uses the minimum of 2048 and the
number of time points.
picks : array-like of int | None
List of channels to use. Cannot be None if `ax` is supplied. If both
`picks` and `ax` are None, separate subplots will be created for
each standard channel type (`mag`, `grad`, and `eeg`).
ax : instance of matplotlib Axes | None
Axes to plot into. If None, axes will be created.
color : str | tuple
A matplotlib-compatible color to use. Has no effect when
spatial_colors=True.
area_mode : str | None
Mode for plotting area. If 'std', the mean +/- 1 STD (across channels)
will be plotted. If 'range', the min and max (across channels) will be
plotted. Bad channels will be excluded from these calculations.
If None, no area will be plotted. If average=False, no area is plotted.
area_alpha : float
Alpha for the area.
n_overlap : int
The number of points of overlap between blocks. The default value
is 0 (no overlap).
dB : bool
Plot Power Spectral Density (PSD), in units (amplitude**2/Hz (dB)) if
``dB=True``, and ``estimate='power'`` or ``estimate='auto'``. Plot PSD
in units (amplitude**2/Hz) if ``dB=False`` and,
``estimate='power'``. Plot Amplitude Spectral Density (ASD), in units
(amplitude/sqrt(Hz)), if ``dB=False`` and ``estimate='amplitude'`` or
``estimate='auto'``. Plot ASD, in units (amplitude/sqrt(Hz) (db)), if
``dB=True`` and ``estimate='amplitude'``.
estimate : str, {'auto', 'power', 'amplitude'}
Can be "power" for power spectral density (PSD), "amplitude" for
amplitude spectrum density (ASD), or "auto" (default), which uses
"power" when dB is True and "amplitude" otherwise.
average : bool
If False (default), the PSDs of all channels is displayed. No averaging
is done and parameters area_mode and area_alpha are ignored. When
False, it is possible to paint an area (hold left mouse button and
drag) to plot a topomap.
show : bool
Show figure if True.
n_jobs : int
Number of jobs to run in parallel.
line_alpha : float | None
Alpha for the PSD line. Can be None (default) to use 1.0 when
``average=True`` and 0.1 when ``average=False``.
spatial_colors : bool
Whether to use spatial colors. Only used when ``average=False``.
xscale : str
Can be 'linear' (default) or 'log'.
reject_by_annotation : bool
Whether to omit bad segments from the data while computing the
PSD. If True, annotated segments with a description that starts
with 'bad' are omitted. Has no effect if ``inst`` is an Epochs or
Evoked object. Defaults to True.
.. versionadded:: 0.15.0
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
fig : instance of matplotlib figure
Figure with frequency spectra of the data channels.
"""
from matplotlib.ticker import ScalarFormatter
if average and spatial_colors:
raise ValueError("Average and spatial_colors cannot be enabled simultaneously.")
if spatial_colors is None:
spatial_colors = False if average else True
fig, picks_list, titles_list, units_list, scalings_list, ax_list, make_label = (
_set_psd_plot_params(raw.info, proj, picks, ax, area_mode)
)
del ax
if line_alpha is None:
line_alpha = 1.0 if average else 0.75
line_alpha = float(line_alpha)
psd_list = list()
ylabels = list()
if n_fft is None:
tmax = raw.times[-1] if not np.isfinite(tmax) else tmax
n_fft = min(np.diff(raw.time_as_index([tmin, tmax]))[0] + 1, 2048)
for ii, picks in enumerate(picks_list):
ax = ax_list[ii]
psds, freqs = psd_welch(
raw,
tmin=tmin,
tmax=tmax,
picks=picks,
fmin=fmin,
fmax=fmax,
proj=proj,
n_fft=n_fft,
n_overlap=n_overlap,
n_jobs=n_jobs,
reject_by_annotation=reject_by_annotation,
)
ylabel = _convert_psds(
psds,
dB,
estimate,
scalings_list[ii],
units_list[ii],
[raw.ch_names[pi] for pi in picks],
)
if average:
psd_mean = np.mean(psds, axis=0)
if area_mode == "std":
psd_std = np.std(psds, axis=0)
hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)
elif area_mode == "range":
hyp_limits = (np.min(psds, axis=0), np.max(psds, axis=0))
else: # area_mode is None
hyp_limits = None
ax.plot(freqs, psd_mean, color=color, alpha=line_alpha, linewidth=0.5)
if hyp_limits is not None:
ax.fill_between(
freqs,
hyp_limits[0],
y2=hyp_limits[1],
color=color,
alpha=area_alpha,
)
else:
psd_list.append(psds)
if make_label:
if ii == len(picks_list) - 1:
ax.set_xlabel("Frequency (Hz)")
ax.set_ylabel(ylabel)
ax.set_title(titles_list[ii])
ax.set_xlim(freqs[0], freqs[-1])
ylabels.append(ylabel)
for key, ls in zip(["lowpass", "highpass", "line_freq"], ["--", "--", "-."]):
if raw.info[key] is not None:
for ax in ax_list:
ax.axvline(
raw.info[key],
color="k",
linestyle=ls,
alpha=0.25,
linewidth=2,
zorder=2,
)
if not average:
picks = np.concatenate(picks_list)
psd_list = np.concatenate(psd_list)
types = np.array([channel_type(raw.info, idx) for idx in picks])
# Needed because the data does not match the info anymore.
info = create_info([raw.ch_names[p] for p in picks], raw.info["sfreq"], types)
info["chs"] = [raw.info["chs"][p] for p in picks]
valid_channel_types = [
"mag",
"grad",
"eeg",
"seeg",
"eog",
"ecg",
"emg",
"dipole",
"gof",
"bio",
"ecog",
"hbo",
"hbr",
"misc",
]
ch_types_used = list()
for this_type in valid_channel_types:
if this_type in types:
ch_types_used.append(this_type)
unit = ""
units = {t: yl for t, yl in zip(ch_types_used, ylabels)}
titles = {c: t for c, t in zip(ch_types_used, titles_list)}
picks = np.arange(len(psd_list))
if not spatial_colors:
spatial_colors = color
_plot_lines(
psd_list,
info,
picks,
fig,
ax_list,
spatial_colors,
unit,
units=units,
scalings=None,
hline=None,
gfp=False,
types=types,
zorder="std",
xlim=(freqs[0], freqs[-1]),
ylim=None,
times=freqs,
bad_ch_idx=[],
titles=titles,
ch_types_used=ch_types_used,
selectable=True,
psd=True,
line_alpha=line_alpha,
)
for ax in ax_list:
ax.grid(True, linestyle=":")
if xscale == "log":
ax.set(xscale="log")
ax.set(xlim=[freqs[1] if freqs[0] == 0 else freqs[0], freqs[-1]])
ax.get_xaxis().set_major_formatter(ScalarFormatter())
if make_label:
tight_layout(pad=0.1, h_pad=0.1, w_pad=0.1, fig=fig)
plt_show(show)
return fig
|
https://github.com/mne-tools/mne-python/issues/5046
|
/miniconda3/envs/mne/lib/python2.7/site-packages/mne/viz/evoked.py:433: VisibleDeprecationWarning: boolean index did not match indexed array along dimension 0; dimension is 1 but corresponding boolean dimension is 2
for ax in np.array(axes)[selectables]:
Traceback (most recent call last):
File "psd_bug.py", line 10, in <module>
ax = raw.plot_psd(picks=picks, dB=False, show=True, average=False)
File "<string>", line 2, in plot_psd
File "/home/erik/miniconda3/envs/mne/lib/python2.7/site-packages/mne/utils.py", line 728, in verbose
return function(*args, **kwargs)
File "/home/erik/miniconda3/envs/mne/lib/python2.7/site-packages/mne/io/base.py", line 1779, in plot_psd
xscale=xscale, reject_by_annotation=reject_by_annotation)
File "<string>", line 2, in plot_raw_psd
File "/home/erik/miniconda3/envs/mne/lib/python2.7/site-packages/mne/utils.py", line 728, in verbose
return function(*args, **kwargs)
File "/home/erik/miniconda3/envs/mne/lib/python2.7/site-packages/mne/viz/raw.py", line 812, in plot_raw_psd
line_alpha=line_alpha)
File "/home/erik/miniconda3/envs/mne/lib/python2.7/site-packages/mne/viz/evoked.py", line 433, in _plot_lines
for ax in np.array(axes)[selectables]:
IndexError: index 1 is out of bounds for axis 1 with size 1
|
IndexError
|
def restrict_forward_to_stc(fwd, stc):
"""Restrict forward operator to active sources in a source estimate.
Parameters
----------
fwd : Forward
Forward operator.
stc : SourceEstimate
Source estimate.
Returns
-------
fwd_out : dict
Restricted forward operator.
See Also
--------
restrict_forward_to_label
"""
fwd_out = deepcopy(fwd)
src_sel = _stc_src_sel(fwd["src"], stc)
fwd_out["source_rr"] = fwd["source_rr"][src_sel]
fwd_out["nsource"] = len(src_sel)
if is_fixed_orient(fwd):
idx = src_sel
else:
idx = (3 * src_sel[:, None] + np.arange(3)).ravel()
fwd_out["source_nn"] = fwd["source_nn"][idx]
fwd_out["sol"]["data"] = fwd["sol"]["data"][:, idx]
fwd_out["sol"]["ncol"] = len(idx)
for i in range(2):
fwd_out["src"][i]["vertno"] = stc.vertices[i]
fwd_out["src"][i]["nuse"] = len(stc.vertices[i])
fwd_out["src"][i]["inuse"] = fwd["src"][i]["inuse"].copy()
fwd_out["src"][i]["inuse"].fill(0)
fwd_out["src"][i]["inuse"][stc.vertices[i]] = 1
fwd_out["src"][i]["use_tris"] = np.array([[]], int)
fwd_out["src"][i]["nuse_tri"] = np.array([0])
return fwd_out
|
def restrict_forward_to_stc(fwd, stc):
"""Restrict forward operator to active sources in a source estimate.
Parameters
----------
fwd : Forward
Forward operator.
stc : SourceEstimate
Source estimate.
Returns
-------
fwd_out : dict
Restricted forward operator.
See Also
--------
restrict_forward_to_label
"""
fwd_out = deepcopy(fwd)
src_sel = _stc_src_sel(fwd["src"], stc)
fwd_out["source_rr"] = fwd["source_rr"][src_sel]
fwd_out["nsource"] = len(src_sel)
if is_fixed_orient(fwd):
idx = src_sel
else:
idx = (3 * src_sel[:, None] + np.arange(3)).ravel()
fwd_out["source_nn"] = fwd["source_nn"][idx]
fwd_out["sol"]["data"] = fwd["sol"]["data"][:, idx]
fwd_out["sol"]["ncol"] = len(idx)
for i in range(2):
fwd_out["src"][i]["vertno"] = stc.vertices[i]
fwd_out["src"][i]["nuse"] = len(stc.vertices[i])
fwd_out["src"][i]["inuse"] = fwd["src"][i]["inuse"].copy()
fwd_out["src"][i]["inuse"].fill(0)
fwd_out["src"][i]["inuse"][stc.vertices[i]] = 1
fwd_out["src"][i]["use_tris"] = np.array([], int)
fwd_out["src"][i]["nuse_tri"] = np.array([0])
return fwd_out
|
https://github.com/mne-tools/mne-python/issues/4459
|
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-300-d1587ecf7da4> in <module>()
4 stc = mne.read_source_estimate(path + '/MEG/sample/sample_audvis-meg')
5 fwd_restricted = mne.forward.restrict_forward_to_stc(fwd, stc)
----> 6 mne.write_forward_solution('restricted-fwd.fif', fwd_r, overwrite=True)
/data/marijn/mne-python/mne/forward/forward.pyc in write_forward_solution(fname, fwd, overwrite, verbose)
/data/marijn/mne-python/mne/utils.pyc in verbose(function, *args, **kwargs)
724 with use_log_level(verbose_level):
725 return function(*args, **kwargs)
--> 726 return function(*args, **kwargs)
727
728
/data/marijn/mne-python/mne/forward/forward.pyc in write_forward_solution(fname, fwd, overwrite, verbose)
732 # Write the source spaces (again)
733 #
--> 734 _write_source_spaces_to_fid(fid, src)
735 n_vert = sum([ss['nuse'] for ss in src])
736 n_col = fwd['sol']['data'].shape[1]
/data/marijn/mne-python/mne/source_space.pyc in _write_source_spaces_to_fid(fid, src, verbose)
/data/marijn/mne-python/mne/utils.pyc in verbose(function, *args, **kwargs)
724 with use_log_level(verbose_level):
725 return function(*args, **kwargs)
--> 726 return function(*args, **kwargs)
727
728
/data/marijn/mne-python/mne/source_space.pyc in _write_source_spaces_to_fid(fid, src, verbose)
926 logger.info(' Write a source space...')
927 start_block(fid, FIFF.FIFFB_MNE_SOURCE_SPACE)
--> 928 _write_one_source_space(fid, s, verbose)
929 end_block(fid, FIFF.FIFFB_MNE_SOURCE_SPACE)
930 logger.info(' [done]')
/data/marijn/mne-python/mne/source_space.pyc in _write_one_source_space(fid, this, verbose)
1016 write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI, this['nuse_tri'])
1017 write_int_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES,
-> 1018 this['use_tris'] + 1)
1019
1020 if this['type'] == 'vol':
/data/marijn/mne-python/mne/io/write.pyc in write_int_matrix(fid, kind, mat)
172
173 dims = np.empty(3, dtype=np.int32)
--> 174 dims[0] = mat.shape[1]
175 dims[1] = mat.shape[0]
176 dims[2] = 2
IndexError: tuple index out of range
|
IndexError
|
def restrict_forward_to_label(fwd, labels):
"""Restrict forward operator to labels.
Parameters
----------
fwd : Forward
Forward operator.
labels : label object | list
Label object or list of label objects.
Returns
-------
fwd_out : dict
Restricted forward operator.
See Also
--------
restrict_forward_to_stc
"""
message = "labels must be instance of Label or a list of Label."
vertices = [np.array([], int), np.array([], int)]
if not isinstance(labels, list):
labels = [labels]
# Get vertices separately of each hemisphere from all label
for label in labels:
if not isinstance(label, Label):
raise TypeError(message + " Instead received %s" % type(label))
i = 0 if label.hemi == "lh" else 1
vertices[i] = np.append(vertices[i], label.vertices)
# Remove duplicates and sort
vertices = [np.unique(vert_hemi) for vert_hemi in vertices]
fwd_out = deepcopy(fwd)
fwd_out["source_rr"] = np.zeros((0, 3))
fwd_out["nsource"] = 0
fwd_out["source_nn"] = np.zeros((0, 3))
fwd_out["sol"]["data"] = np.zeros((fwd["sol"]["data"].shape[0], 0))
fwd_out["sol"]["ncol"] = 0
nuse_lh = fwd["src"][0]["nuse"]
for i in range(2):
fwd_out["src"][i]["vertno"] = np.array([], int)
fwd_out["src"][i]["nuse"] = 0
fwd_out["src"][i]["inuse"] = fwd["src"][i]["inuse"].copy()
fwd_out["src"][i]["inuse"].fill(0)
fwd_out["src"][i]["use_tris"] = np.array([[]], int)
fwd_out["src"][i]["nuse_tri"] = np.array([0])
# src_sel is idx to cols in fwd that are in any label per hemi
src_sel = np.intersect1d(fwd["src"][i]["vertno"], vertices[i])
src_sel = np.searchsorted(fwd["src"][i]["vertno"], src_sel)
# Reconstruct each src
vertno = fwd["src"][i]["vertno"][src_sel]
fwd_out["src"][i]["inuse"][vertno] = 1
fwd_out["src"][i]["nuse"] += len(vertno)
fwd_out["src"][i]["vertno"] = np.where(fwd_out["src"][i]["inuse"])[0]
# Reconstruct part of fwd that is not sol data
src_sel += i * nuse_lh # Add column shift to right hemi
fwd_out["source_rr"] = np.vstack(
[fwd_out["source_rr"], fwd["source_rr"][src_sel]]
)
fwd_out["nsource"] += len(src_sel)
if is_fixed_orient(fwd):
idx = src_sel
else:
idx = (3 * src_sel[:, None] + np.arange(3)).ravel()
fwd_out["source_nn"] = np.vstack([fwd_out["source_nn"], fwd["source_nn"][idx]])
fwd_out["sol"]["data"] = np.hstack(
[fwd_out["sol"]["data"], fwd["sol"]["data"][:, idx]]
)
fwd_out["sol"]["ncol"] += len(idx)
return fwd_out
|
def restrict_forward_to_label(fwd, labels):
"""Restrict forward operator to labels.
Parameters
----------
fwd : Forward
Forward operator.
labels : label object | list
Label object or list of label objects.
Returns
-------
fwd_out : dict
Restricted forward operator.
See Also
--------
restrict_forward_to_stc
"""
message = "labels must be instance of Label or a list of Label."
vertices = [np.array([], int), np.array([], int)]
if not isinstance(labels, list):
labels = [labels]
# Get vertices separately of each hemisphere from all label
for label in labels:
if not isinstance(label, Label):
raise TypeError(message + " Instead received %s" % type(label))
i = 0 if label.hemi == "lh" else 1
vertices[i] = np.append(vertices[i], label.vertices)
# Remove duplicates and sort
vertices = [np.unique(vert_hemi) for vert_hemi in vertices]
fwd_out = deepcopy(fwd)
fwd_out["source_rr"] = np.zeros((0, 3))
fwd_out["nsource"] = 0
fwd_out["source_nn"] = np.zeros((0, 3))
fwd_out["sol"]["data"] = np.zeros((fwd["sol"]["data"].shape[0], 0))
fwd_out["sol"]["ncol"] = 0
nuse_lh = fwd["src"][0]["nuse"]
for i in range(2):
fwd_out["src"][i]["vertno"] = np.array([], int)
fwd_out["src"][i]["nuse"] = 0
fwd_out["src"][i]["inuse"] = fwd["src"][i]["inuse"].copy()
fwd_out["src"][i]["inuse"].fill(0)
fwd_out["src"][i]["use_tris"] = np.array([], int)
fwd_out["src"][i]["nuse_tri"] = np.array([0])
# src_sel is idx to cols in fwd that are in any label per hemi
src_sel = np.intersect1d(fwd["src"][i]["vertno"], vertices[i])
src_sel = np.searchsorted(fwd["src"][i]["vertno"], src_sel)
# Reconstruct each src
vertno = fwd["src"][i]["vertno"][src_sel]
fwd_out["src"][i]["inuse"][vertno] = 1
fwd_out["src"][i]["nuse"] += len(vertno)
fwd_out["src"][i]["vertno"] = np.where(fwd_out["src"][i]["inuse"])[0]
# Reconstruct part of fwd that is not sol data
src_sel += i * nuse_lh # Add column shift to right hemi
fwd_out["source_rr"] = np.vstack(
[fwd_out["source_rr"], fwd["source_rr"][src_sel]]
)
fwd_out["nsource"] += len(src_sel)
if is_fixed_orient(fwd):
idx = src_sel
else:
idx = (3 * src_sel[:, None] + np.arange(3)).ravel()
fwd_out["source_nn"] = np.vstack([fwd_out["source_nn"], fwd["source_nn"][idx]])
fwd_out["sol"]["data"] = np.hstack(
[fwd_out["sol"]["data"], fwd["sol"]["data"][:, idx]]
)
fwd_out["sol"]["ncol"] += len(idx)
return fwd_out
|
https://github.com/mne-tools/mne-python/issues/4459
|
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-300-d1587ecf7da4> in <module>()
4 stc = mne.read_source_estimate(path + '/MEG/sample/sample_audvis-meg')
5 fwd_restricted = mne.forward.restrict_forward_to_stc(fwd, stc)
----> 6 mne.write_forward_solution('restricted-fwd.fif', fwd_r, overwrite=True)
/data/marijn/mne-python/mne/forward/forward.pyc in write_forward_solution(fname, fwd, overwrite, verbose)
/data/marijn/mne-python/mne/utils.pyc in verbose(function, *args, **kwargs)
724 with use_log_level(verbose_level):
725 return function(*args, **kwargs)
--> 726 return function(*args, **kwargs)
727
728
/data/marijn/mne-python/mne/forward/forward.pyc in write_forward_solution(fname, fwd, overwrite, verbose)
732 # Write the source spaces (again)
733 #
--> 734 _write_source_spaces_to_fid(fid, src)
735 n_vert = sum([ss['nuse'] for ss in src])
736 n_col = fwd['sol']['data'].shape[1]
/data/marijn/mne-python/mne/source_space.pyc in _write_source_spaces_to_fid(fid, src, verbose)
/data/marijn/mne-python/mne/utils.pyc in verbose(function, *args, **kwargs)
724 with use_log_level(verbose_level):
725 return function(*args, **kwargs)
--> 726 return function(*args, **kwargs)
727
728
/data/marijn/mne-python/mne/source_space.pyc in _write_source_spaces_to_fid(fid, src, verbose)
926 logger.info(' Write a source space...')
927 start_block(fid, FIFF.FIFFB_MNE_SOURCE_SPACE)
--> 928 _write_one_source_space(fid, s, verbose)
929 end_block(fid, FIFF.FIFFB_MNE_SOURCE_SPACE)
930 logger.info(' [done]')
/data/marijn/mne-python/mne/source_space.pyc in _write_one_source_space(fid, this, verbose)
1016 write_int(fid, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE_TRI, this['nuse_tri'])
1017 write_int_matrix(fid, FIFF.FIFF_MNE_SOURCE_SPACE_USE_TRIANGLES,
-> 1018 this['use_tris'] + 1)
1019
1020 if this['type'] == 'vol':
/data/marijn/mne-python/mne/io/write.pyc in write_int_matrix(fid, kind, mat)
172
173 dims = np.empty(3, dtype=np.int32)
--> 174 dims[0] = mat.shape[1]
175 dims[1] = mat.shape[0]
176 dims[2] = 2
IndexError: tuple index out of range
|
IndexError
|
def _plot_update_evoked_topomap(params, bools):
"""Update topomaps."""
projs = [
proj for ii, proj in enumerate(params["projs"]) if ii in np.where(bools)[0]
]
params["proj_bools"] = bools
new_evoked = params["evoked"].copy()
new_evoked.info["projs"] = []
new_evoked.add_proj(projs)
new_evoked.apply_proj()
data = new_evoked.data[:, params["time_idx"]] * params["scale"]
if params["merge_grads"]:
from ..channels.layout import _merge_grad_data
data = _merge_grad_data(data)
image_mask = params["image_mask"]
pos_x, pos_y = np.asarray(params["pos"])[:, :2].T
xi = np.linspace(pos_x.min(), pos_x.max(), params["res"])
yi = np.linspace(pos_y.min(), pos_y.max(), params["res"])
Xi, Yi = np.meshgrid(xi, yi)
for ii, im in enumerate(params["images"]):
Zi = _griddata(pos_x, pos_y, data[:, ii], Xi, Yi)
Zi[~image_mask] = np.nan
im.set_data(Zi)
for cont in params["contours"]:
cont.set_array(np.c_[Xi, Yi, Zi])
params["fig"].canvas.draw()
|
def _plot_update_evoked_topomap(params, bools):
"""Update topomaps."""
projs = [
proj for ii, proj in enumerate(params["projs"]) if ii in np.where(bools)[0]
]
params["proj_bools"] = bools
new_evoked = params["evoked"].copy()
new_evoked.info["projs"] = []
new_evoked.add_proj(projs)
new_evoked.apply_proj()
data = (
new_evoked.data[np.ix_(params["picks"], params["time_idx"])] * params["scale"]
)
if params["merge_grads"]:
from ..channels.layout import _merge_grad_data
data = _merge_grad_data(data)
image_mask = params["image_mask"]
pos_x, pos_y = np.asarray(params["pos"])[:, :2].T
xi = np.linspace(pos_x.min(), pos_x.max(), params["res"])
yi = np.linspace(pos_y.min(), pos_y.max(), params["res"])
Xi, Yi = np.meshgrid(xi, yi)
for ii, im in enumerate(params["images"]):
Zi = _griddata(pos_x, pos_y, data[:, ii], Xi, Yi)
Zi[~image_mask] = np.nan
im.set_data(Zi)
for cont in params["contours"]:
cont.set_array(np.c_[Xi, Yi, Zi])
params["fig"].canvas.draw()
|
https://github.com/mne-tools/mne-python/issues/4054
|
Traceback (most recent call last):
File "/home/mainak/anaconda2/lib/python2.7/site-packages/matplotlib/backends/t
guiEvent=event)
File "/home/mainak/anaconda2/lib/python2.7/site-packages/matplotlib/backend_bt
self.callbacks.process(s, mouseevent)
File "/home/mainak/anaconda2/lib/python2.7/site-packages/matplotlib/cbook.py"s
proxy(*args, **kwargs)
File "/home/mainak/anaconda2/lib/python2.7/site-packages/matplotlib/cbook.py"_
return mtd(*args, **kwargs)
File "/home/mainak/anaconda2/lib/python2.7/site-packages/matplotlib/widgets.pd
self.set_active(i)
File "/home/mainak/anaconda2/lib/python2.7/site-packages/matplotlib/widgets.pe
func(self.labels[index].get_text())
File "mne/viz/utils.py", line 257, in _toggle_proj
params['plot_update_proj_callback'](params, bools)
File "mne/viz/topomap.py", line 103, in _plot_update_evoked_topomap
params['time_idx'])] * params['scale']
IndexError: index 104 is out of bounds for axis 0 with size 102
|
IndexError
|
def _pair_grad_sensors(
info, layout=None, topomap_coords=True, exclude="bads", raise_error=True
):
"""Find the picks for pairing grad channels
Parameters
----------
info : instance of Info
An info dictionary containing channel information.
layout : Layout | None
The layout if available. Defaults to None.
topomap_coords : bool
Return the coordinates for a topomap plot along with the picks. If
False, only picks are returned. Defaults to True.
exclude : list of str | str
List of channels to exclude. If empty do not exclude any (default).
If 'bads', exclude channels in info['bads']. Defaults to 'bads'.
raise_error : bool
Whether to raise an error when no pairs are found. If False, raises a
warning.
Returns
-------
picks : array of int
Picks for the grad channels, ordered in pairs.
coords : array, shape = (n_grad_channels, 3)
Coordinates for a topomap plot (optional, only returned if
topomap_coords == True).
"""
# find all complete pairs of grad channels
pairs = defaultdict(list)
grad_picks = pick_types(info, meg="grad", ref_meg=False, exclude=exclude)
for i in grad_picks:
ch = info["chs"][i]
name = ch["ch_name"]
if name.startswith("MEG"):
if name.endswith(("2", "3")):
key = name[-4:-1]
pairs[key].append(ch)
pairs = [p for p in pairs.values() if len(p) == 2]
if len(pairs) == 0:
if raise_error:
raise ValueError("No 'grad' channel pairs found.")
else:
warn("No 'grad' channel pairs found.")
return list()
# find the picks corresponding to the grad channels
grad_chs = sum(pairs, [])
ch_names = info["ch_names"]
picks = [ch_names.index(c["ch_name"]) for c in grad_chs]
if topomap_coords:
shape = (len(pairs), 2, -1)
coords = _find_topomap_coords(info, picks, layout).reshape(shape).mean(axis=1)
return picks, coords
else:
return picks
|
def _pair_grad_sensors(info, layout=None, topomap_coords=True, exclude="bads"):
"""Find the picks for pairing grad channels
Parameters
----------
info : instance of Info
An info dictionary containing channel information.
layout : Layout | None
The layout if available. Defaults to None.
topomap_coords : bool
Return the coordinates for a topomap plot along with the picks. If
False, only picks are returned. Defaults to True.
exclude : list of str | str
List of channels to exclude. If empty do not exclude any (default).
If 'bads', exclude channels in info['bads']. Defaults to 'bads'.
Returns
-------
picks : array of int
Picks for the grad channels, ordered in pairs.
coords : array, shape = (n_grad_channels, 3)
Coordinates for a topomap plot (optional, only returned if
topomap_coords == True).
"""
# find all complete pairs of grad channels
pairs = defaultdict(list)
grad_picks = pick_types(info, meg="grad", ref_meg=False, exclude=exclude)
for i in grad_picks:
ch = info["chs"][i]
name = ch["ch_name"]
if name.startswith("MEG"):
if name.endswith(("2", "3")):
key = name[-4:-1]
pairs[key].append(ch)
pairs = [p for p in pairs.values() if len(p) == 2]
if len(pairs) == 0:
raise ValueError("No 'grad' channel pairs found.")
# find the picks corresponding to the grad channels
grad_chs = sum(pairs, [])
ch_names = info["ch_names"]
picks = [ch_names.index(c["ch_name"]) for c in grad_chs]
if topomap_coords:
shape = (len(pairs), 2, -1)
coords = _find_topomap_coords(info, picks, layout).reshape(shape).mean(axis=1)
return picks, coords
else:
return picks
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def plot_patterns(
self,
info,
times=None,
ch_type=None,
layout=None,
vmin=None,
vmax=None,
cmap="RdBu_r",
sensors=True,
colorbar=True,
scale=None,
scale_time=1e3,
unit="a.u.",
res=64,
size=1,
cbar_fmt="%3.1f",
name_format="%01d ms",
proj=False,
show=True,
show_names=False,
title=None,
mask=None,
mask_params=None,
outlines="head",
contours=6,
image_interp="bilinear",
average=None,
head_pos=None,
):
"""
Plot topographic patterns of the linear model.
The patterns explain how the measured data was generated
from the neural sources (a.k.a. the forward model).
Parameters
----------
info : instance of Info
Info dictionary of the epochs used to fit the linear model.
If not possible, consider using ``create_info``.
times : float | array of floats | None.
The time point(s) to plot. If None, the number of ``axes``
determines the amount of time point(s). If ``axes`` is also None,
10 topographies will be shown with a regular time spacing between
the first and last time instant.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
layout : None | Layout
Layout instance specifying sensor positions (does not need to be
specified for Neuromag data). If possible, the correct layout file
is inferred from the data; if no appropriate layout file was found
the layout is automatically generated from the sensor locations.
vmin : float | callable
The value specfying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
The value specfying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap
to use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging
the colorbar with left and right mouse button. Left mouse button
moves the scale up and down and right mouse button adjusts the
range. Hitting space bar resets the range. Up and down arrows can
be used to change the colormap. If None, 'Reds' is used for all
positive data, otherwise defaults to 'RdBu_r'. If 'interactive',
translates to (None, True). Defaults to 'RdBu_r'.
.. warning:: Interactive mode works smoothly only for a small
amount of topomaps.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True,
a circle will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scale : dict | float | None
Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
for grad and 1e15 for mag.
scale_time : float | None
Scale the time labels. Defaults to 1e3.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
name_format : str
String format for topomap values. Defaults to "%03f ms"
proj : bool | 'interactive'
If true SSP projections are applied before display.
If 'interactive', a check box for reversible selection
of SSP projection vectors will be show.
show : bool
Show figure if True.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw.
If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used.
All matplotlib options are accepted.
average : float | None
The time window around a given time to be used for averaging
(seconds). For example, 0.01 would translate into window that
starts 5 ms before and ends 5 ms after a given time point.
Defaults to None, which means no averaging.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head
should be relative to the electrode locations.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from .. import EvokedArray
if times is None:
tmin = 0
times = "auto"
else:
tmin = times[0]
# create an evoked
patterns = EvokedArray(self.patterns_.reshape(info["nchan"], -1), info, tmin=tmin)
# the call plot_topomap
return patterns.plot_topomap(
times=times,
ch_type=ch_type,
layout=layout,
vmin=vmin,
vmax=vmax,
cmap=cmap,
colorbar=colorbar,
res=res,
cbar_fmt=cbar_fmt,
sensors=sensors,
scale=scale,
scale_time=scale_time,
time_format=name_format,
size=size,
show_names=show_names,
unit=unit,
mask_params=mask_params,
mask=mask,
outlines=outlines,
contours=contours,
title=title,
image_interp=image_interp,
show=show,
head_pos=head_pos,
)
|
def plot_patterns(
self,
info,
times=None,
ch_type=None,
layout=None,
vmin=None,
vmax=None,
cmap="RdBu_r",
sensors=True,
colorbar=True,
scale=None,
scale_time=1e3,
unit="a.u.",
res=64,
size=1,
cbar_fmt="%3.1f",
name_format="%01d ms",
proj=False,
show=True,
show_names=False,
title=None,
mask=None,
mask_params=None,
outlines="head",
contours=6,
image_interp="bilinear",
average=None,
head_pos=None,
):
"""
Plot topographic patterns of the linear model.
The patterns explain how the measured data was generated
from the neural sources (a.k.a. the forward model).
Parameters
----------
info : instance of Info
Info dictionary of the epochs used to fit the linear model.
If not possible, consider using ``create_info``.
times : float | array of floats | None.
The time point(s) to plot. If None, the number of ``axes``
determines the amount of time point(s). If ``axes`` is also None,
10 topographies will be shown with a regular time spacing between
the first and last time instant.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
layout : None | Layout
Layout instance specifying sensor positions (does not need to be
specified for Neuromag data). If possible, the correct layout file
is inferred from the data; if no appropriate layout file was found
the layout is automatically generated from the sensor locations.
vmin : float | callable
The value specfying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
The value specfying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap
Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
'Reds'.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True,
a circle will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scale : dict | float | None
Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
for grad and 1e15 for mag.
scale_time : float | None
Scale the time labels. Defaults to 1e3.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
name_format : str
String format for topomap values. Defaults to "%03f ms"
proj : bool | 'interactive'
If true SSP projections are applied before display.
If 'interactive', a check box for reversible selection
of SSP projection vectors will be show.
show : bool
Show figure if True.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw.
If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used.
All matplotlib options are accepted.
average : float | None
The time window around a given time to be used for averaging
(seconds). For example, 0.01 would translate into window that
starts 5 ms before and ends 5 ms after a given time point.
Defaults to None, which means no averaging.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head
should be relative to the electrode locations.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from .. import EvokedArray
if times is None:
tmin = 0
times = "auto"
else:
tmin = times[0]
# create an evoked
patterns = EvokedArray(self.patterns_.reshape(info["nchan"], -1), info, tmin=tmin)
# the call plot_topomap
return patterns.plot_topomap(
times=times,
ch_type=ch_type,
layout=layout,
vmin=vmin,
vmax=vmax,
cmap=cmap,
colorbar=colorbar,
res=res,
cbar_fmt=cbar_fmt,
sensors=sensors,
scale=scale,
scale_time=scale_time,
time_format=name_format,
size=size,
show_names=show_names,
unit=unit,
mask_params=mask_params,
mask=mask,
outlines=outlines,
contours=contours,
title=title,
image_interp=image_interp,
show=show,
head_pos=head_pos,
)
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def plot_filters(
self,
info,
times=None,
ch_type=None,
layout=None,
vmin=None,
vmax=None,
cmap="RdBu_r",
sensors=True,
colorbar=True,
scale=None,
scale_time=1e3,
unit="a.u.",
res=64,
size=1,
cbar_fmt="%3.1f",
name_format="%01d ms",
proj=False,
show=True,
show_names=False,
title=None,
mask=None,
mask_params=None,
outlines="head",
contours=6,
image_interp="bilinear",
average=None,
head_pos=None,
):
"""
Plot topographic filters of the linear model.
The filters are used to extract discriminant neural sources from
the measured data (a.k.a. the backward model).
Parameters
----------
info : instance of Info
Info dictionary of the epochs used to fit the linear model.
If not possible, consider using ``create_info``.
times : float | array of floats | None.
The time point(s) to plot. If None, the number of ``axes``
determines the amount of time point(s). If ``axes`` is also None,
10 topographies will be shown with a regular time spacing between
the first and last time instant.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
layout : None | Layout
Layout instance specifying sensor positions (does not need to be
specified for Neuromag data). If possible, the correct layout file
is inferred from the data; if no appropriate layout file was found
the layout is automatically generated from the sensor locations.
vmin : float | callable
The value specfying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
The value specfying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap
to use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging
the colorbar with left and right mouse button. Left mouse button
moves the scale up and down and right mouse button adjusts the
range. Hitting space bar resets the range. Up and down arrows can
be used to change the colormap. If None, 'Reds' is used for all
positive data, otherwise defaults to 'RdBu_r'. If 'interactive',
translates to (None, True). Defaults to 'RdBu_r'.
.. warning:: Interactive mode works smoothly only for a small
amount of topomaps.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True,
a circle will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scale : dict | float | None
Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
for grad and 1e15 for mag.
scale_time : float | None
Scale the time labels. Defaults to 1e3.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
name_format : str
String format for topomap values. Defaults to "%03f ms"
proj : bool | 'interactive'
If true SSP projections are applied before display.
If 'interactive', a check box for reversible selection
of SSP projection vectors will be show.
show : bool
Show figure if True.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw.
If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used.
All matplotlib options are accepted.
average : float | None
The time window around a given time to be used for averaging
(seconds). For example, 0.01 would translate into window that
starts 5 ms before and ends 5 ms after a given time point.
Defaults to None, which means no averaging.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head
should be relative to the electrode locations.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from .. import EvokedArray
if times is None:
tmin = 0
times = "auto"
else:
tmin = times[0]
# create an evoked
filters = EvokedArray(self.filters_.T.reshape(info["nchan"], -1), info, tmin=tmin)
# the call plot_topomap
return filters.plot_topomap(
times=times,
ch_type=ch_type,
layout=layout,
vmin=vmin,
vmax=vmax,
cmap=cmap,
colorbar=colorbar,
res=res,
cbar_fmt=cbar_fmt,
sensors=sensors,
scale=scale,
scale_time=scale_time,
time_format=name_format,
size=size,
show_names=show_names,
unit=unit,
mask_params=mask_params,
mask=mask,
outlines=outlines,
contours=contours,
title=title,
image_interp=image_interp,
show=show,
head_pos=head_pos,
)
|
def plot_filters(
self,
info,
times=None,
ch_type=None,
layout=None,
vmin=None,
vmax=None,
cmap="RdBu_r",
sensors=True,
colorbar=True,
scale=None,
scale_time=1e3,
unit="a.u.",
res=64,
size=1,
cbar_fmt="%3.1f",
name_format="%01d ms",
proj=False,
show=True,
show_names=False,
title=None,
mask=None,
mask_params=None,
outlines="head",
contours=6,
image_interp="bilinear",
average=None,
head_pos=None,
):
"""
Plot topographic filters of the linear model.
The filters are used to extract discriminant neural sources from
the measured data (a.k.a. the backward model).
Parameters
----------
info : instance of Info
Info dictionary of the epochs used to fit the linear model.
If not possible, consider using ``create_info``.
times : float | array of floats | None.
The time point(s) to plot. If None, the number of ``axes``
determines the amount of time point(s). If ``axes`` is also None,
10 topographies will be shown with a regular time spacing between
the first and last time instant.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
layout : None | Layout
Layout instance specifying sensor positions (does not need to be
specified for Neuromag data). If possible, the correct layout file
is inferred from the data; if no appropriate layout file was found
the layout is automatically generated from the sensor locations.
vmin : float | callable
The value specfying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
The value specfying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap
Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
'Reds'.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True,
a circle will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scale : dict | float | None
Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
for grad and 1e15 for mag.
scale_time : float | None
Scale the time labels. Defaults to 1e3.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
name_format : str
String format for topomap values. Defaults to "%03f ms"
proj : bool | 'interactive'
If true SSP projections are applied before display.
If 'interactive', a check box for reversible selection
of SSP projection vectors will be show.
show : bool
Show figure if True.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw.
If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used.
All matplotlib options are accepted.
average : float | None
The time window around a given time to be used for averaging
(seconds). For example, 0.01 would translate into window that
starts 5 ms before and ends 5 ms after a given time point.
Defaults to None, which means no averaging.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head
should be relative to the electrode locations.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from .. import EvokedArray
if times is None:
tmin = 0
times = "auto"
else:
tmin = times[0]
# create an evoked
filters = EvokedArray(self.filters_.T.reshape(info["nchan"], -1), info, tmin=tmin)
# the call plot_topomap
return filters.plot_topomap(
times=times,
ch_type=ch_type,
layout=layout,
vmin=vmin,
vmax=vmax,
cmap=cmap,
colorbar=colorbar,
res=res,
cbar_fmt=cbar_fmt,
sensors=sensors,
scale=scale,
scale_time=scale_time,
time_format=name_format,
size=size,
show_names=show_names,
unit=unit,
mask_params=mask_params,
mask=mask,
outlines=outlines,
contours=contours,
title=title,
image_interp=image_interp,
show=show,
head_pos=head_pos,
)
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def plot_patterns(
self,
info,
components=None,
ch_type=None,
layout=None,
vmin=None,
vmax=None,
cmap="RdBu_r",
sensors=True,
colorbar=True,
scale=None,
scale_time=1,
unit=None,
res=64,
size=1,
cbar_fmt="%3.1f",
name_format="CSP%01d",
proj=False,
show=True,
show_names=False,
title=None,
mask=None,
mask_params=None,
outlines="head",
contours=6,
image_interp="bilinear",
average=None,
head_pos=None,
):
"""Plot topographic patterns of CSP components.
The CSP patterns explain how the measured data was generated
from the neural sources (a.k.a. the forward model).
Parameters
----------
info : instance of Info
Info dictionary of the epochs used to fit CSP.
If not possible, consider using ``create_info``.
components : float | array of floats | None.
The CSP patterns to plot. If None, n_components will be shown.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
layout : None | Layout
Layout instance specifying sensor positions (does not need to be
specified for Neuromag data). If possible, the correct layout file
is inferred from the data; if no appropriate layout file was found
the layout is automatically generated from the sensor locations.
vmin : float | callable
The value specfying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
The value specfying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap
to use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging
the colorbar with left and right mouse button. Left mouse button
moves the scale up and down and right mouse button adjusts the
range. Hitting space bar resets the range. Up and down arrows can
be used to change the colormap. If None, 'Reds' is used for all
positive data, otherwise defaults to 'RdBu_r'. If 'interactive',
translates to (None, True). Defaults to 'RdBu_r'.
.. warning:: Interactive mode works smoothly only for a small
amount of topomaps.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True,
a circle will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scale : dict | float | None
Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
for grad and 1e15 for mag.
scale_time : float | None
Scale the time labels. Defaults to 1.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
name_format : str
String format for topomap values. Defaults to "CSP%01d"
proj : bool | 'interactive'
If true SSP projections are applied before display.
If 'interactive', a check box for reversible selection
of SSP projection vectors will be show.
show : bool
Show figure if True.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw.
If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used.
All matplotlib options are accepted.
average : float | None
The time window around a given time to be used for averaging
(seconds). For example, 0.01 would translate into window that
starts 5 ms before and ends 5 ms after a given time point.
Defaults to None, which means no averaging.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head
should be relative to the electrode locations.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from .. import EvokedArray
if components is None:
components = np.arange(self.n_components)
# set sampling frequency to have 1 component per time point
info = cp.deepcopy(info)
info["sfreq"] = 1.0
# create an evoked
patterns = EvokedArray(self.patterns_.T, info, tmin=0)
# the call plot_topomap
return patterns.plot_topomap(
times=components,
ch_type=ch_type,
layout=layout,
vmin=vmin,
vmax=vmax,
cmap=cmap,
colorbar=colorbar,
res=res,
cbar_fmt=cbar_fmt,
sensors=sensors,
scale=1,
scale_time=1,
unit="a.u.",
time_format=name_format,
size=size,
show_names=show_names,
mask_params=mask_params,
mask=mask,
outlines=outlines,
contours=contours,
image_interp=image_interp,
show=show,
head_pos=head_pos,
)
|
def plot_patterns(
self,
info,
components=None,
ch_type=None,
layout=None,
vmin=None,
vmax=None,
cmap="RdBu_r",
sensors=True,
colorbar=True,
scale=None,
scale_time=1,
unit=None,
res=64,
size=1,
cbar_fmt="%3.1f",
name_format="CSP%01d",
proj=False,
show=True,
show_names=False,
title=None,
mask=None,
mask_params=None,
outlines="head",
contours=6,
image_interp="bilinear",
average=None,
head_pos=None,
):
"""Plot topographic patterns of CSP components.
The CSP patterns explain how the measured data was generated
from the neural sources (a.k.a. the forward model).
Parameters
----------
info : instance of Info
Info dictionary of the epochs used to fit CSP.
If not possible, consider using ``create_info``.
components : float | array of floats | None.
The CSP patterns to plot. If None, n_components will be shown.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
layout : None | Layout
Layout instance specifying sensor positions (does not need to be
specified for Neuromag data). If possible, the correct layout file
is inferred from the data; if no appropriate layout file was found
the layout is automatically generated from the sensor locations.
vmin : float | callable
The value specfying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
The value specfying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap
Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
'Reds'.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True,
a circle will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scale : dict | float | None
Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
for grad and 1e15 for mag.
scale_time : float | None
Scale the time labels. Defaults to 1.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
name_format : str
String format for topomap values. Defaults to "CSP%01d"
proj : bool | 'interactive'
If true SSP projections are applied before display.
If 'interactive', a check box for reversible selection
of SSP projection vectors will be show.
show : bool
Show figure if True.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw.
If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used.
All matplotlib options are accepted.
average : float | None
The time window around a given time to be used for averaging
(seconds). For example, 0.01 would translate into window that
starts 5 ms before and ends 5 ms after a given time point.
Defaults to None, which means no averaging.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head
should be relative to the electrode locations.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from .. import EvokedArray
if components is None:
components = np.arange(self.n_components)
# set sampling frequency to have 1 component per time point
info = cp.deepcopy(info)
info["sfreq"] = 1.0
# create an evoked
patterns = EvokedArray(self.patterns_.T, info, tmin=0)
# the call plot_topomap
return patterns.plot_topomap(
times=components,
ch_type=ch_type,
layout=layout,
vmin=vmin,
vmax=vmax,
cmap=cmap,
colorbar=colorbar,
res=res,
cbar_fmt=cbar_fmt,
sensors=sensors,
scale=1,
scale_time=1,
unit="a.u.",
time_format=name_format,
size=size,
show_names=show_names,
mask_params=mask_params,
mask=mask,
outlines=outlines,
contours=contours,
image_interp=image_interp,
show=show,
head_pos=head_pos,
)
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def plot_filters(
self,
info,
components=None,
ch_type=None,
layout=None,
vmin=None,
vmax=None,
cmap="RdBu_r",
sensors=True,
colorbar=True,
scale=None,
scale_time=1,
unit=None,
res=64,
size=1,
cbar_fmt="%3.1f",
name_format="CSP%01d",
proj=False,
show=True,
show_names=False,
title=None,
mask=None,
mask_params=None,
outlines="head",
contours=6,
image_interp="bilinear",
average=None,
head_pos=None,
):
"""Plot topographic filters of CSP components.
The CSP filters are used to extract discriminant neural sources from
the measured data (a.k.a. the backward model).
Parameters
----------
info : instance of Info
Info dictionary of the epochs used to fit CSP.
If not possible, consider using ``create_info``.
components : float | array of floats | None.
The CSP patterns to plot. If None, n_components will be shown.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
layout : None | Layout
Layout instance specifying sensor positions (does not need to be
specified for Neuromag data). If possible, the correct layout file
is inferred from the data; if no appropriate layout file was found
the layout is automatically generated from the sensor locations.
vmin : float | callable
The value specfying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
The value specfying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap
to use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging
the colorbar with left and right mouse button. Left mouse button
moves the scale up and down and right mouse button adjusts the
range. Hitting space bar resets the range. Up and down arrows can
be used to change the colormap. If None, 'Reds' is used for all
positive data, otherwise defaults to 'RdBu_r'. If 'interactive',
translates to (None, True). Defaults to 'RdBu_r'.
.. warning:: Interactive mode works smoothly only for a small
amount of topomaps.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True,
a circle will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scale : dict | float | None
Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
for grad and 1e15 for mag.
scale_time : float | None
Scale the time labels. Defaults to 1.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
name_format : str
String format for topomap values. Defaults to "CSP%01d"
proj : bool | 'interactive'
If true SSP projections are applied before display.
If 'interactive', a check box for reversible selection
of SSP projection vectors will be show.
show : bool
Show figure if True.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw.
If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used.
All matplotlib options are accepted.
average : float | None
The time window around a given time to be used for averaging
(seconds). For example, 0.01 would translate into window that
starts 5 ms before and ends 5 ms after a given time point.
Defaults to None, which means no averaging.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head
should be relative to the electrode locations.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from .. import EvokedArray
if components is None:
components = np.arange(self.n_components)
# set sampling frequency to have 1 component per time point
info = cp.deepcopy(info)
info["sfreq"] = 1.0
# create an evoked
filters = EvokedArray(self.filters_, info, tmin=0)
# the call plot_topomap
return filters.plot_topomap(
times=components,
ch_type=ch_type,
layout=layout,
vmin=vmin,
vmax=vmax,
cmap=cmap,
colorbar=colorbar,
res=res,
cbar_fmt=cbar_fmt,
sensors=sensors,
scale=1,
scale_time=1,
unit="a.u.",
time_format=name_format,
size=size,
show_names=show_names,
mask_params=mask_params,
mask=mask,
outlines=outlines,
contours=contours,
image_interp=image_interp,
show=show,
head_pos=head_pos,
)
|
def plot_filters(
self,
info,
components=None,
ch_type=None,
layout=None,
vmin=None,
vmax=None,
cmap="RdBu_r",
sensors=True,
colorbar=True,
scale=None,
scale_time=1,
unit=None,
res=64,
size=1,
cbar_fmt="%3.1f",
name_format="CSP%01d",
proj=False,
show=True,
show_names=False,
title=None,
mask=None,
mask_params=None,
outlines="head",
contours=6,
image_interp="bilinear",
average=None,
head_pos=None,
):
"""Plot topographic filters of CSP components.
The CSP filters are used to extract discriminant neural sources from
the measured data (a.k.a. the backward model).
Parameters
----------
info : instance of Info
Info dictionary of the epochs used to fit CSP.
If not possible, consider using ``create_info``.
components : float | array of floats | None.
The CSP patterns to plot. If None, n_components will be shown.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
layout : None | Layout
Layout instance specifying sensor positions (does not need to be
specified for Neuromag data). If possible, the correct layout file
is inferred from the data; if no appropriate layout file was found
the layout is automatically generated from the sensor locations.
vmin : float | callable
The value specfying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
The value specfying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap
Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
'Reds'.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True,
a circle will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scale : dict | float | None
Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
for grad and 1e15 for mag.
scale_time : float | None
Scale the time labels. Defaults to 1.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
name_format : str
String format for topomap values. Defaults to "CSP%01d"
proj : bool | 'interactive'
If true SSP projections are applied before display.
If 'interactive', a check box for reversible selection
of SSP projection vectors will be show.
show : bool
Show figure if True.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw.
If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used.
All matplotlib options are accepted.
average : float | None
The time window around a given time to be used for averaging
(seconds). For example, 0.01 would translate into window that
starts 5 ms before and ends 5 ms after a given time point.
Defaults to None, which means no averaging.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head
should be relative to the electrode locations.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from .. import EvokedArray
if components is None:
components = np.arange(self.n_components)
# set sampling frequency to have 1 component per time point
info = cp.deepcopy(info)
info["sfreq"] = 1.0
# create an evoked
filters = EvokedArray(self.filters_, info, tmin=0)
# the call plot_topomap
return filters.plot_topomap(
times=components,
ch_type=ch_type,
layout=layout,
vmin=vmin,
vmax=vmax,
cmap=cmap,
colorbar=colorbar,
res=res,
cbar_fmt=cbar_fmt,
sensors=sensors,
scale=1,
scale_time=1,
unit="a.u.",
time_format=name_format,
size=size,
show_names=show_names,
mask_params=mask_params,
mask=mask,
outlines=outlines,
contours=contours,
image_interp=image_interp,
show=show,
head_pos=head_pos,
)
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def plot_psd_topomap(
self,
bands=None,
vmin=None,
vmax=None,
proj=False,
bandwidth=None,
adaptive=False,
low_bias=True,
normalization="length",
ch_type=None,
layout=None,
cmap="RdBu_r",
agg_fun=None,
dB=True,
n_jobs=1,
normalize=False,
cbar_fmt="%0.3f",
outlines="head",
show=True,
verbose=None,
):
"""Plot the topomap of the power spectral density across epochs
Parameters
----------
bands : list of tuple | None
The lower and upper frequency and the name for that band. If None,
(default) expands to:
bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
(12, 30, 'Beta'), (30, 45, 'Gamma')]
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the
output equals vmax(data). Defaults to None.
proj : bool
Apply projection.
bandwidth : float
The bandwidth of the multi taper windowing function in Hz.
The default value is a window half-bandwidth of 4 Hz.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90% spectral concentration within
bandwidth.
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
ch_type : {None, 'mag', 'grad', 'planar1', 'planar2', 'eeg'}
The channel type to plot. For 'grad', the gradiometers are
collected in
pairs and the RMS for each pair is plotted. If None, defaults to
'mag' if MEG data are present and to 'eeg' if only EEG data are
present.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout
file is inferred from the data; if no appropriate layout file was
found, the layout is automatically generated from the sensor
locations.
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap
to use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging
the colorbar with left and right mouse button. Left mouse button
moves the scale up and down and right mouse button adjusts the
range. Hitting space bar resets the range. Up and down arrows can
be used to change the colormap. If None (default), 'Reds' is used
for all positive data, otherwise defaults to 'RdBu_r'. If
'interactive', translates to (None, True).
agg_fun : callable
The function used to aggregate over frequencies.
Defaults to np.sum. if normalize is True, else np.mean.
dB : bool
If True, transform data to decibels (with ``10 * np.log10(data)``)
following the application of `agg_fun`. Only valid if normalize
is False.
n_jobs : int
Number of jobs to run in parallel.
normalize : bool
If True, each band will be divided by the total power. Defaults to
False.
cbar_fmt : str
The colorbar format. Defaults to '%0.3f'.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
show : bool
Show figure if True.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
return plot_epochs_psd_topomap(
self,
bands=bands,
vmin=vmin,
vmax=vmax,
proj=proj,
bandwidth=bandwidth,
adaptive=adaptive,
low_bias=low_bias,
normalization=normalization,
ch_type=ch_type,
layout=layout,
cmap=cmap,
agg_fun=agg_fun,
dB=dB,
n_jobs=n_jobs,
normalize=normalize,
cbar_fmt=cbar_fmt,
outlines=outlines,
show=show,
verbose=None,
)
|
def plot_psd_topomap(
self,
bands=None,
vmin=None,
vmax=None,
proj=False,
bandwidth=None,
adaptive=False,
low_bias=True,
normalization="length",
ch_type=None,
layout=None,
cmap="RdBu_r",
agg_fun=None,
dB=True,
n_jobs=1,
normalize=False,
cbar_fmt="%0.3f",
outlines="head",
show=True,
verbose=None,
):
"""Plot the topomap of the power spectral density across epochs
Parameters
----------
bands : list of tuple | None
The lower and upper frequency and the name for that band. If None,
(default) expands to:
bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
(12, 30, 'Beta'), (30, 45, 'Gamma')]
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the
output equals vmax(data). Defaults to None.
proj : bool
Apply projection.
bandwidth : float
The bandwidth of the multi taper windowing function in Hz.
The default value is a window half-bandwidth of 4 Hz.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90% spectral concentration within
bandwidth.
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
ch_type : {None, 'mag', 'grad', 'planar1', 'planar2', 'eeg'}
The channel type to plot. For 'grad', the gradiometers are
collected in
pairs and the RMS for each pair is plotted. If None, defaults to
'mag' if MEG data are present and to 'eeg' if only EEG data are
present.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout
file is inferred from the data; if no appropriate layout file was
found, the layout is automatically generated from the sensor
locations.
cmap : matplotlib colormap
Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
'Reds'.
agg_fun : callable
The function used to aggregate over frequencies.
Defaults to np.sum. if normalize is True, else np.mean.
dB : bool
If True, transform data to decibels (with ``10 * np.log10(data)``)
following the application of `agg_fun`. Only valid if normalize
is False.
n_jobs : int
Number of jobs to run in parallel.
normalize : bool
If True, each band will be divided by the total power. Defaults to
False.
cbar_fmt : str
The colorbar format. Defaults to '%0.3f'.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
show : bool
Show figure if True.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
return plot_epochs_psd_topomap(
self,
bands=bands,
vmin=vmin,
vmax=vmax,
proj=proj,
bandwidth=bandwidth,
adaptive=adaptive,
low_bias=low_bias,
normalization=normalization,
ch_type=ch_type,
layout=layout,
cmap=cmap,
agg_fun=agg_fun,
dB=dB,
n_jobs=n_jobs,
normalize=normalize,
cbar_fmt=cbar_fmt,
outlines=outlines,
show=show,
verbose=None,
)
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def plot_image(
self,
picks=None,
sigma=0.0,
vmin=None,
vmax=None,
colorbar=True,
order=None,
show=True,
units=None,
scalings=None,
cmap="RdBu_r",
fig=None,
overlay_times=None,
):
"""Plot Event Related Potential / Fields image
Parameters
----------
picks : int | array-like of int | None
The indices of the channels to consider. If None, the first
five good channels are plotted.
sigma : float
The standard deviation of the Gaussian smoothing to apply along
the epoch axis to apply in the image. If 0., no smoothing is
applied.
vmin : float
The min value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
vmax : float
The max value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
colorbar : bool
Display or not a colorbar.
order : None | array of int | callable
If not None, order is used to reorder the epochs on the y-axis
of the image. If it's an array of int it should be of length
the number of good epochs. If it's a callable the arguments
passed are the times vector and the data as 2d array
(data.shape[1] == len(times).
show : bool
Show figure if True.
units : dict | None
The units of the channel types used for axes lables. If None,
defaults to `units=dict(eeg='uV', grad='fT/cm', mag='fT')`.
scalings : dict | None
The scalings of the channel types to be applied for plotting.
If None, defaults to `scalings=dict(eeg=1e6, grad=1e13, mag=1e15,
eog=1e6)`.
cmap : matplotlib colormap | (colormap, bool) | 'interactive'
Colormap. If tuple, the first value indicates the colormap to use
and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging
the colorbar with left and right mouse button. Left mouse button
moves the scale up and down and right mouse button adjusts the
range. Hitting space bar resets the scale. Up and down arrows can
be used to change the colormap. If 'interactive', translates to
('RdBu_r', True). Defaults to 'RdBu_r'.
fig : matplotlib figure | None
Figure instance to draw the image to. Figure must contain two
axes for drawing the single trials and evoked responses. If
None a new figure is created. Defaults to None.
overlay_times : array-like, shape (n_epochs,) | None
If not None the parameter is interpreted as time instants in
seconds and is added to the image. It is typically useful to
display reaction times. Note that it is defined with respect
to the order of epochs such that overlay_times[0] corresponds
to epochs[0].
Returns
-------
figs : list of matplotlib figures
One figure per channel displayed.
"""
return plot_epochs_image(
self,
picks=picks,
sigma=sigma,
vmin=vmin,
vmax=vmax,
colorbar=colorbar,
order=order,
show=show,
units=units,
scalings=scalings,
cmap=cmap,
fig=fig,
overlay_times=overlay_times,
)
|
def plot_image(
self,
picks=None,
sigma=0.0,
vmin=None,
vmax=None,
colorbar=True,
order=None,
show=True,
units=None,
scalings=None,
cmap="RdBu_r",
fig=None,
overlay_times=None,
):
"""Plot Event Related Potential / Fields image
Parameters
----------
picks : int | array-like of int | None
The indices of the channels to consider. If None, the first
five good channels are plotted.
sigma : float
The standard deviation of the Gaussian smoothing to apply along
the epoch axis to apply in the image. If 0., no smoothing is
applied.
vmin : float
The min value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
vmax : float
The max value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
colorbar : bool
Display or not a colorbar.
order : None | array of int | callable
If not None, order is used to reorder the epochs on the y-axis
of the image. If it's an array of int it should be of length
the number of good epochs. If it's a callable the arguments
passed are the times vector and the data as 2d array
(data.shape[1] == len(times).
show : bool
Show figure if True.
units : dict | None
The units of the channel types used for axes lables. If None,
defaults to `units=dict(eeg='uV', grad='fT/cm', mag='fT')`.
scalings : dict | None
The scalings of the channel types to be applied for plotting.
If None, defaults to `scalings=dict(eeg=1e6, grad=1e13, mag=1e15,
eog=1e6)`.
cmap : matplotlib colormap
Colormap.
fig : matplotlib figure | None
Figure instance to draw the image to. Figure must contain two
axes for drawing the single trials and evoked responses. If
None a new figure is created. Defaults to None.
overlay_times : array-like, shape (n_epochs,) | None
If not None the parameter is interpreted as time instants in
seconds and is added to the image. It is typically useful to
display reaction times. Note that it is defined with respect
to the order of epochs such that overlay_times[0] corresponds
to epochs[0].
Returns
-------
figs : list of matplotlib figures
One figure per channel displayed.
"""
return plot_epochs_image(
self,
picks=picks,
sigma=sigma,
vmin=vmin,
vmax=vmax,
colorbar=colorbar,
order=order,
show=show,
units=units,
scalings=scalings,
cmap=cmap,
fig=fig,
overlay_times=overlay_times,
)
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def plot_image(
self,
picks=None,
exclude="bads",
unit=True,
show=True,
clim=None,
xlim="tight",
proj=False,
units=None,
scalings=None,
titles=None,
axes=None,
cmap="RdBu_r",
):
"""Plot evoked data as images
Parameters
----------
picks : array-like of int | None
The indices of channels to plot. If None show all.
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded.
unit : bool
Scale plot with channel (SI) unit.
show : bool
Call pyplot.show() at the end or not.
clim : dict
clim for images. e.g. clim = dict(eeg=[-200e-6, 200e6])
Valid keys are eeg, mag, grad
xlim : 'tight' | tuple | None
xlim for plots.
proj : bool | 'interactive'
If true SSP projections are applied before display. If
'interactive', a check box for reversible selection of SSP
projection vectors will be shown.
units : dict | None
The units of the channel types used for axes lables. If None,
defaults to `dict(eeg='uV', grad='fT/cm', mag='fT')`.
scalings : dict | None
The scalings of the channel types to be applied for plotting.
If None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
titles : dict | None
The titles associated with the channels. If None, defaults to
`dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')`.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channel types. If instance of
Axes, there must be only one channel type plotted.
cmap : matplotlib colormap | (colormap, bool) | 'interactive'
Colormap. If tuple, the first value indicates the colormap to use
and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging
the colorbar with left and right mouse button. Left mouse button
moves the scale up and down and right mouse button adjusts the
range. Hitting space bar resets the scale. Up and down arrows can
be used to change the colormap. If 'interactive', translates to
('RdBu_r', True). Defaults to 'RdBu_r'.
Returns
-------
fig : instance of matplotlib.figure.Figure
Figure containing the images.
"""
return plot_evoked_image(
self,
picks=picks,
exclude=exclude,
unit=unit,
show=show,
clim=clim,
proj=proj,
xlim=xlim,
units=units,
scalings=scalings,
titles=titles,
axes=axes,
cmap=cmap,
)
|
def plot_image(
self,
picks=None,
exclude="bads",
unit=True,
show=True,
clim=None,
xlim="tight",
proj=False,
units=None,
scalings=None,
titles=None,
axes=None,
cmap="RdBu_r",
):
"""Plot evoked data as images
Parameters
----------
picks : array-like of int | None
The indices of channels to plot. If None show all.
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded.
unit : bool
Scale plot with channel (SI) unit.
show : bool
Call pyplot.show() at the end or not.
clim : dict
clim for images. e.g. clim = dict(eeg=[-200e-6, 200e6])
Valid keys are eeg, mag, grad
xlim : 'tight' | tuple | None
xlim for plots.
proj : bool | 'interactive'
If true SSP projections are applied before display. If
'interactive', a check box for reversible selection of SSP
projection vectors will be shown.
units : dict | None
The units of the channel types used for axes lables. If None,
defaults to `dict(eeg='uV', grad='fT/cm', mag='fT')`.
scalings : dict | None
The scalings of the channel types to be applied for plotting.
If None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
titles : dict | None
The titles associated with the channels. If None, defaults to
`dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')`.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channel types. If instance of
Axes, there must be only one channel type plotted.
cmap : matplotlib colormap
Colormap.
Returns
-------
fig : instance of matplotlib.figure.Figure
Figure containing the images.
"""
return plot_evoked_image(
self,
picks=picks,
exclude=exclude,
unit=unit,
show=show,
clim=clim,
proj=proj,
xlim=xlim,
units=units,
scalings=scalings,
titles=titles,
axes=axes,
cmap=cmap,
)
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def plot_topomap(
self,
times="auto",
ch_type=None,
layout=None,
vmin=None,
vmax=None,
cmap=None,
sensors=True,
colorbar=True,
scale=None,
scale_time=1e3,
unit=None,
res=64,
size=1,
cbar_fmt="%3.1f",
time_format="%01d ms",
proj=False,
show=True,
show_names=False,
title=None,
mask=None,
mask_params=None,
outlines="head",
contours=6,
image_interp="bilinear",
average=None,
head_pos=None,
axes=None,
):
"""Plot topographic maps of specific time points
Parameters
----------
times : float | array of floats | "auto" | "peaks".
The time point(s) to plot. If "auto", the number of ``axes``
determines the amount of time point(s). If ``axes`` is also None,
10 topographies will be shown with a regular time spacing between
the first and last time instant. If "peaks", finds time points
automatically by checking for local maxima in Global Field Power.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are collec-
ted in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct
layout file is inferred from the data; if no appropriate layout
file was found, the layout is automatically generated from the
sensor locations.
vmin : float | callable
The value specfying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
The value specfying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.max(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap
to use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging
the colorbar with left and right mouse button. Left mouse button
moves the scale up and down and right mouse button adjusts the
range. Hitting space bar resets the range. Up and down arrows can
be used to change the colormap. If None (default), 'Reds' is used
for all positive data, otherwise defaults to 'RdBu_r'. If
'interactive', translates to (None, True).
.. warning:: Interactive mode works smoothly only for a small
amount of topomaps.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle
will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scale : dict | float | None
Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
for grad and 1e15 for mag.
scale_time : float | None
Scale the time labels. Defaults to 1e3 (ms).
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : scalar
Side length of the topomaps in inches (only applies when plotting
multiple topomaps at a time).
cbar_fmt : str
String format for colorbar values.
time_format : str
String format for topomap values. Defaults to ``"%01d ms"``.
proj : bool | 'interactive'
If true SSP projections are applied before display. If
'interactive', a check box for reversible selection of SSP
projection vectors will be shown.
show : bool
Call pyplot.show() at the end.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function
lambda x: x.replace('MEG ', ''). If `mask` is not None, only
significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals:
``dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)``.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw. If 0, no contours will be
drawn.
image_interp : str
The image interpolation to be used. All matplotlib options are
accepted.
average : float | None
The time window around a given time to be used for averaging
(seconds). For example, 0.01 would translate into window that
starts 5 ms before and ends 5 ms after a given time point.
Defaults to None, which means no averaging.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as ``times`` (unless ``times`` is None). If
instance of Axes, ``times`` must be a float or a list of one float.
Defaults to None.
Returns
-------
fig : instance of matplotlib.figure.Figure
Images of evoked responses at sensor locations
"""
return plot_evoked_topomap(
self,
times=times,
ch_type=ch_type,
layout=layout,
vmin=vmin,
vmax=vmax,
cmap=cmap,
sensors=sensors,
colorbar=colorbar,
scale=scale,
scale_time=scale_time,
unit=unit,
res=res,
proj=proj,
size=size,
cbar_fmt=cbar_fmt,
time_format=time_format,
show=show,
show_names=show_names,
title=title,
mask=mask,
mask_params=mask_params,
outlines=outlines,
contours=contours,
image_interp=image_interp,
average=average,
head_pos=head_pos,
axes=axes,
)
|
def plot_topomap(
self,
times="auto",
ch_type=None,
layout=None,
vmin=None,
vmax=None,
cmap=None,
sensors=True,
colorbar=True,
scale=None,
scale_time=1e3,
unit=None,
res=64,
size=1,
cbar_fmt="%3.1f",
time_format="%01d ms",
proj=False,
show=True,
show_names=False,
title=None,
mask=None,
mask_params=None,
outlines="head",
contours=6,
image_interp="bilinear",
average=None,
head_pos=None,
axes=None,
):
"""Plot topographic maps of specific time points
Parameters
----------
times : float | array of floats | "auto" | "peaks".
The time point(s) to plot. If "auto", the number of ``axes``
determines the amount of time point(s). If ``axes`` is also None,
10 topographies will be shown with a regular time spacing between
the first and last time instant. If "peaks", finds time points
automatically by checking for local maxima in Global Field Power.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are collec-
ted in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct
layout file is inferred from the data; if no appropriate layout
file was found, the layout is automatically generated from the
sensor locations.
vmin : float | callable
The value specfying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
The value specfying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.max(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap | None
Colormap to use. If None, 'Reds' is used for all positive data,
otherwise defaults to 'RdBu_r'.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle
will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scale : dict | float | None
Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
for grad and 1e15 for mag.
scale_time : float | None
Scale the time labels. Defaults to 1e3 (ms).
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : scalar
Side length of the topomaps in inches (only applies when plotting
multiple topomaps at a time).
cbar_fmt : str
String format for colorbar values.
time_format : str
String format for topomap values. Defaults to ``"%01d ms"``.
proj : bool | 'interactive'
If true SSP projections are applied before display. If
'interactive', a check box for reversible selection of SSP
projection vectors will be shown.
show : bool
Call pyplot.show() at the end.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function
lambda x: x.replace('MEG ', ''). If `mask` is not None, only
significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals:
``dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)``.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw. If 0, no contours will be
drawn.
image_interp : str
The image interpolation to be used. All matplotlib options are
accepted.
average : float | None
The time window around a given time to be used for averaging
(seconds). For example, 0.01 would translate into window that
starts 5 ms before and ends 5 ms after a given time point.
Defaults to None, which means no averaging.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as ``times`` (unless ``times`` is None). If
instance of Axes, ``times`` must be a float or a list of one float.
Defaults to None.
Returns
-------
fig : instance of matplotlib.figure.Figure
Images of evoked responses at sensor locations
"""
return plot_evoked_topomap(
self,
times=times,
ch_type=ch_type,
layout=layout,
vmin=vmin,
vmax=vmax,
cmap=cmap,
sensors=sensors,
colorbar=colorbar,
scale=scale,
scale_time=scale_time,
unit=unit,
res=res,
proj=proj,
size=size,
cbar_fmt=cbar_fmt,
time_format=time_format,
show=show,
show_names=show_names,
title=title,
mask=mask,
mask_params=mask_params,
outlines=outlines,
contours=contours,
image_interp=image_interp,
average=average,
head_pos=head_pos,
axes=axes,
)
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def plot_components(
self,
picks=None,
ch_type=None,
res=64,
layout=None,
vmin=None,
vmax=None,
cmap="RdBu_r",
sensors=True,
colorbar=False,
title=None,
show=True,
outlines="head",
contours=6,
image_interp="bilinear",
head_pos=None,
):
"""Project unmixing matrix on interpolated sensor topography.
Parameters
----------
picks : int | array-like | None
The indices of the sources to be plotted.
If None all are plotted in batches of 20.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
res : int
The resolution of the topomap image (n pixels along each side).
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
vmin : float | callable
The value specfying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
The value specfying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap
to use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging
the colorbar with left and right mouse button. Left mouse button
moves the scale up and down and right mouse button adjusts the
range. Hitting space bar resets the range. Up and down arrows can
be used to change the colormap. If None, 'Reds' is used for all
positive data, otherwise defaults to 'RdBu_r'. If 'interactive',
translates to (None, True). Defaults to 'RdBu_r'.
.. warning:: Interactive mode works smoothly only for a small
amount of topomaps.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle
will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
title : str | None
Title to use.
show : bool
Call pyplot.show() at the end.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw. If 0, no contours will
be drawn.
image_interp : str
The image interpolation to be used. All matplotlib options are
accepted.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
Returns
-------
fig : instance of matplotlib.pyplot.Figure
The figure object.
"""
return plot_ica_components(
self,
picks=picks,
ch_type=ch_type,
res=res,
layout=layout,
vmin=vmin,
vmax=vmax,
cmap=cmap,
sensors=sensors,
colorbar=colorbar,
title=title,
show=show,
outlines=outlines,
contours=contours,
image_interp=image_interp,
head_pos=head_pos,
)
|
def plot_components(
self,
picks=None,
ch_type=None,
res=64,
layout=None,
vmin=None,
vmax=None,
cmap="RdBu_r",
sensors=True,
colorbar=False,
title=None,
show=True,
outlines="head",
contours=6,
image_interp="bilinear",
head_pos=None,
):
"""Project unmixing matrix on interpolated sensor topography.
Parameters
----------
picks : int | array-like | None
The indices of the sources to be plotted.
If None all are plotted in batches of 20.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
res : int
The resolution of the topomap image (n pixels along each side).
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
vmin : float | callable
The value specfying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
The value specfying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap
Colormap.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle
will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
title : str | None
Title to use.
show : bool
Call pyplot.show() at the end.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw. If 0, no contours will
be drawn.
image_interp : str
The image interpolation to be used. All matplotlib options are
accepted.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
Returns
-------
fig : instance of matplotlib.pyplot.Figure
The figure object.
"""
return plot_ica_components(
self,
picks=picks,
ch_type=ch_type,
res=res,
layout=layout,
vmin=vmin,
vmax=vmax,
cmap=cmap,
sensors=sensors,
colorbar=colorbar,
title=title,
show=show,
outlines=outlines,
contours=contours,
image_interp=image_interp,
head_pos=head_pos,
)
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def plot(
self,
picks=None,
baseline=None,
mode="mean",
tmin=None,
tmax=None,
fmin=None,
fmax=None,
vmin=None,
vmax=None,
cmap="RdBu_r",
dB=False,
colorbar=True,
show=True,
title=None,
axes=None,
layout=None,
verbose=None,
):
"""Plot TFRs in a topography with images
Parameters
----------
picks : array-like of int | None
The indices of the channels to plot.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal ot (None, None) all the time
interval is used.
mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)).
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
The mininum value an the color scale. If vmin is None, the data
minimum value is used.
vmax : float | None
The maxinum value an the color scale. If vmax is None, the data
maximum value is used.
cmap : matplotlib colormap | 'interactive' | (colormap, bool)
The colormap to use. If tuple, the first value indicates the
colormap to use and the second value is a boolean defining
interactivity. In interactive mode the colors are adjustable by
clicking and dragging the colorbar with left and right mouse
button. Left mouse button moves the scale up and down and right
mouse button adjusts the range. Hitting space bar resets the range.
Up and down arrows can be used to change the colormap. If
'interactive', translates to ('RdBu_r', True). Defaults to
'RdBu_r'.
.. warning:: Interactive mode works smoothly only for a small
amount of images.
dB : bool
If True, 20*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot. For user defined axes,
the colorbar cannot be drawn. Defaults to True.
show : bool
Call pyplot.show() at the end.
title : str | None
String for title. Defaults to None (blank/no title).
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channels. If instance of Axes,
there must be only one channel plotted.
layout : Layout | None
Layout instance specifying sensor positions. Used for interactive
plotting of topographies on rectangle selection. If possible, the
correct layout is inferred from the data.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz.topo import _imshow_tfr
import matplotlib.pyplot as plt
times, freqs = self.times.copy(), self.freqs.copy()
info = self.info
data = self.data
n_picks = len(picks)
info, data, picks = _prepare_picks(info, data, picks)
data = data[picks]
data, times, freqs, vmin, vmax = _preproc_tfr(
data,
times,
freqs,
tmin,
tmax,
fmin,
fmax,
mode,
baseline,
vmin,
vmax,
dB,
info["sfreq"],
)
tmin, tmax = times[0], times[-1]
if isinstance(axes, plt.Axes):
axes = [axes]
if isinstance(axes, list) or isinstance(axes, np.ndarray):
if len(axes) != n_picks:
raise RuntimeError("There must be an axes for each picked channel.")
if cmap == "interactive":
cmap = ("RdBu_r", True)
elif not isinstance(cmap, tuple):
cmap = (cmap, True)
for idx in range(len(data)):
if axes is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = axes[idx]
fig = ax.get_figure()
onselect_callback = partial(
self._onselect, baseline=baseline, mode=mode, layout=layout
)
_imshow_tfr(
ax,
0,
tmin,
tmax,
vmin,
vmax,
onselect_callback,
ylim=None,
tfr=data[idx : idx + 1],
freq=freqs,
x_label="Time (ms)",
y_label="Frequency (Hz)",
colorbar=colorbar,
picker=False,
cmap=cmap,
)
if title:
fig.suptitle(title)
# Only draw 1 cbar. For interactive mode we pass the ref to cbar.
colorbar = ax.CB if cmap[1] else False
plt_show(show)
return fig
|
def plot(
self,
picks=None,
baseline=None,
mode="mean",
tmin=None,
tmax=None,
fmin=None,
fmax=None,
vmin=None,
vmax=None,
cmap="RdBu_r",
dB=False,
colorbar=True,
show=True,
title=None,
axes=None,
layout=None,
verbose=None,
):
"""Plot TFRs in a topography with images
Parameters
----------
picks : array-like of int | None
The indices of the channels to plot.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal ot (None, None) all the time
interval is used.
mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)).
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
The mininum value an the color scale. If vmin is None, the data
minimum value is used.
vmax : float | None
The maxinum value an the color scale. If vmax is None, the data
maximum value is used.
cmap : matplotlib colormap | str
The colormap to use. Defaults to 'RdBu_r'.
dB : bool
If True, 20*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot. For user defined axes,
the colorbar cannot be drawn. Defaults to True.
show : bool
Call pyplot.show() at the end.
title : str | None
String for title. Defaults to None (blank/no title).
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channels. If instance of Axes,
there must be only one channel plotted.
layout : Layout | None
Layout instance specifying sensor positions. Used for interactive
plotting of topographies on rectangle selection. If possible, the
correct layout is inferred from the data.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz.topo import _imshow_tfr
import matplotlib.pyplot as plt
times, freqs = self.times.copy(), self.freqs.copy()
info = self.info
data = self.data
n_picks = len(picks)
info, data, picks = _prepare_picks(info, data, picks)
data = data[picks]
data, times, freqs, vmin, vmax = _preproc_tfr(
data,
times,
freqs,
tmin,
tmax,
fmin,
fmax,
mode,
baseline,
vmin,
vmax,
dB,
info["sfreq"],
)
tmin, tmax = times[0], times[-1]
if isinstance(axes, plt.Axes):
axes = [axes]
if isinstance(axes, list) or isinstance(axes, np.ndarray):
if len(axes) != n_picks:
raise RuntimeError("There must be an axes for each picked channel.")
for idx in range(len(data)):
if axes is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = axes[idx]
fig = ax.get_figure()
onselect_callback = partial(
self._onselect, baseline=baseline, mode=mode, layout=layout
)
_imshow_tfr(
ax,
0,
tmin,
tmax,
vmin,
vmax,
onselect_callback,
ylim=None,
tfr=data[idx : idx + 1],
freq=freqs,
x_label="Time (ms)",
y_label="Frequency (Hz)",
colorbar=colorbar,
picker=False,
cmap=cmap,
)
if title:
fig.suptitle(title)
colorbar = False # only one colorbar for multiple axes
plt_show(show)
return fig
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def _onselect(self, eclick, erelease, baseline, mode, layout):
"""Callback function called by rubber band selector in channel tfr."""
import matplotlib.pyplot as plt
from ..viz import plot_tfr_topomap
if abs(eclick.x - erelease.x) < 0.1 or abs(eclick.y - erelease.y) < 0.1:
return
plt.ion() # turn interactive mode on
tmin = round(min(eclick.xdata, erelease.xdata) / 1000.0, 5) # ms to s
tmax = round(max(eclick.xdata, erelease.xdata) / 1000.0, 5)
fmin = round(min(eclick.ydata, erelease.ydata), 5) # Hz
fmax = round(max(eclick.ydata, erelease.ydata), 5)
tmin = min(self.times, key=lambda x: abs(x - tmin)) # find closest
tmax = min(self.times, key=lambda x: abs(x - tmax))
fmin = min(self.freqs, key=lambda x: abs(x - fmin))
fmax = min(self.freqs, key=lambda x: abs(x - fmax))
if tmin == tmax or fmin == fmax:
logger.info(
"The selected area is too small. Select a larger time-frequency window."
)
return
types = list()
if "eeg" in self:
types.append("eeg")
if "mag" in self:
types.append("mag")
if "grad" in self:
if (
len(_pair_grad_sensors(self.info, topomap_coords=False, raise_error=False))
>= 2
):
types.append("grad")
elif len(types) == 0:
return # Don't draw a figure for nothing.
fig = figure_nobar()
fig.suptitle(
"{0:.2f} s - {1:.2f} s, {2:.2f} Hz - {3:.2f} Hz".format(tmin, tmax, fmin, fmax),
y=0.04,
)
for idx, ch_type in enumerate(types):
ax = plt.subplot(1, len(types), idx + 1)
plot_tfr_topomap(
self,
ch_type=ch_type,
tmin=tmin,
tmax=tmax,
fmin=fmin,
fmax=fmax,
layout=layout,
baseline=baseline,
mode=mode,
cmap=None,
title=ch_type,
vmin=None,
vmax=None,
axes=ax,
)
|
def _onselect(self, eclick, erelease, baseline, mode, layout):
"""Callback function called by rubber band selector in channel tfr."""
import matplotlib.pyplot as plt
from ..viz import plot_tfr_topomap
if abs(eclick.x - erelease.x) < 0.1 or abs(eclick.y - erelease.y) < 0.1:
return
plt.ion() # turn interactive mode on
tmin = round(min(eclick.xdata, erelease.xdata) / 1000.0, 5) # ms to s
tmax = round(max(eclick.xdata, erelease.xdata) / 1000.0, 5)
fmin = round(min(eclick.ydata, erelease.ydata), 5) # Hz
fmax = round(max(eclick.ydata, erelease.ydata), 5)
tmin = min(self.times, key=lambda x: abs(x - tmin)) # find closest
tmax = min(self.times, key=lambda x: abs(x - tmax))
fmin = min(self.freqs, key=lambda x: abs(x - fmin))
fmax = min(self.freqs, key=lambda x: abs(x - fmax))
if tmin == tmax or fmin == fmax:
logger.info(
"The selected area is too small. Select a larger time-frequency window."
)
return
types = list()
if "eeg" in self:
types.append("eeg")
if "mag" in self:
types.append("mag")
if "grad" in self:
types.append("grad")
fig = figure_nobar()
fig.suptitle(
"{:.2f} s - {:.2f} s, {:.2f} Hz - {:.2f} Hz".format(tmin, tmax, fmin, fmax),
y=0.04,
)
for idx, ch_type in enumerate(types):
ax = plt.subplot(1, len(types), idx + 1)
plot_tfr_topomap(
self,
ch_type=ch_type,
tmin=tmin,
tmax=tmax,
fmin=fmin,
fmax=fmax,
layout=layout,
baseline=baseline,
mode=mode,
cmap=None,
title=ch_type,
vmin=None,
vmax=None,
axes=ax,
)
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def plot_topo(
self,
picks=None,
baseline=None,
mode="mean",
tmin=None,
tmax=None,
fmin=None,
fmax=None,
vmin=None,
vmax=None,
layout=None,
cmap="RdBu_r",
title=None,
dB=False,
colorbar=True,
layout_scale=0.945,
show=True,
border="none",
fig_facecolor="k",
font_color="w",
):
"""Plot TFRs in a topography with images
Parameters
----------
picks : array-like of int | None
The indices of the channels to plot. If None all available
channels are displayed.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal ot (None, None) all the time
interval is used.
mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)).
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
The mininum value an the color scale. If vmin is None, the data
minimum value is used.
vmax : float | None
The maxinum value an the color scale. If vmax is None, the data
maximum value is used.
layout : Layout | None
Layout instance specifying sensor positions. If possible, the
correct layout is inferred from the data.
cmap : matplotlib colormap | str
The colormap to use. Defaults to 'RdBu_r'.
title : str
Title of the figure.
dB : bool
If True, 20*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
show : bool
Call pyplot.show() at the end.
border : str
matplotlib borders style to be used for each sensor plot.
fig_facecolor : str | obj
The figure face color. Defaults to black.
font_color: str | obj
The color of tick labels in the colorbar. Defaults to white.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz.topo import _imshow_tfr, _plot_topo, _imshow_tfr_unified
times = self.times.copy()
freqs = self.freqs
data = self.data
info = self.info
info, data, picks = _prepare_picks(info, data, picks)
data = data[picks]
data, times, freqs, vmin, vmax = _preproc_tfr(
data,
times,
freqs,
tmin,
tmax,
fmin,
fmax,
mode,
baseline,
vmin,
vmax,
dB,
info["sfreq"],
)
if layout is None:
from mne import find_layout
layout = find_layout(self.info)
onselect_callback = partial(
self._onselect, baseline=baseline, mode=mode, layout=layout
)
click_fun = partial(
_imshow_tfr, tfr=data, freq=freqs, cmap=(cmap, True), onselect=onselect_callback
)
imshow = partial(
_imshow_tfr_unified, tfr=data, freq=freqs, cmap=cmap, onselect=onselect_callback
)
fig = _plot_topo(
info=info,
times=times,
show_func=imshow,
click_func=click_fun,
layout=layout,
colorbar=colorbar,
vmin=vmin,
vmax=vmax,
cmap=cmap,
layout_scale=layout_scale,
title=title,
border=border,
x_label="Time (ms)",
y_label="Frequency (Hz)",
fig_facecolor=fig_facecolor,
font_color=font_color,
unified=True,
img=True,
)
plt_show(show)
return fig
|
def plot_topo(
self,
picks=None,
baseline=None,
mode="mean",
tmin=None,
tmax=None,
fmin=None,
fmax=None,
vmin=None,
vmax=None,
layout=None,
cmap="RdBu_r",
title=None,
dB=False,
colorbar=True,
layout_scale=0.945,
show=True,
border="none",
fig_facecolor="k",
font_color="w",
):
"""Plot TFRs in a topography with images
Parameters
----------
picks : array-like of int | None
The indices of the channels to plot. If None all available
channels are displayed.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal ot (None, None) all the time
interval is used.
mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)).
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
The mininum value an the color scale. If vmin is None, the data
minimum value is used.
vmax : float | None
The maxinum value an the color scale. If vmax is None, the data
maximum value is used.
layout : Layout | None
Layout instance specifying sensor positions. If possible, the
correct layout is inferred from the data.
cmap : matplotlib colormap | str
The colormap to use. Defaults to 'RdBu_r'.
title : str
Title of the figure.
dB : bool
If True, 20*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
show : bool
Call pyplot.show() at the end.
border : str
matplotlib borders style to be used for each sensor plot.
fig_facecolor : str | obj
The figure face color. Defaults to black.
font_color: str | obj
The color of tick labels in the colorbar. Defaults to white.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz.topo import _imshow_tfr, _plot_topo, _imshow_tfr_unified
times = self.times.copy()
freqs = self.freqs
data = self.data
info = self.info
info, data, picks = _prepare_picks(info, data, picks)
data = data[picks]
data, times, freqs, vmin, vmax = _preproc_tfr(
data,
times,
freqs,
tmin,
tmax,
fmin,
fmax,
mode,
baseline,
vmin,
vmax,
dB,
info["sfreq"],
)
if layout is None:
from mne import find_layout
layout = find_layout(self.info)
onselect_callback = partial(
self._onselect, baseline=baseline, mode=mode, layout=layout
)
click_fun = partial(
_imshow_tfr, tfr=data, freq=freqs, cmap=cmap, onselect=onselect_callback
)
imshow = partial(
_imshow_tfr_unified, tfr=data, freq=freqs, cmap=cmap, onselect=onselect_callback
)
fig = _plot_topo(
info=info,
times=times,
show_func=imshow,
click_func=click_fun,
layout=layout,
colorbar=colorbar,
vmin=vmin,
vmax=vmax,
cmap=cmap,
layout_scale=layout_scale,
title=title,
border=border,
x_label="Time (ms)",
y_label="Frequency (Hz)",
fig_facecolor=fig_facecolor,
font_color=font_color,
unified=True,
img=True,
)
plt_show(show)
return fig
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def plot_topomap(
self,
tmin=None,
tmax=None,
fmin=None,
fmax=None,
ch_type=None,
baseline=None,
mode="mean",
layout=None,
vmin=None,
vmax=None,
cmap=None,
sensors=True,
colorbar=True,
unit=None,
res=64,
size=2,
cbar_fmt="%1.1e",
show_names=False,
title=None,
axes=None,
show=True,
outlines="head",
head_pos=None,
):
"""Plot topographic maps of time-frequency intervals of TFR data
Parameters
----------
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
If None, baseline no correction will be performed.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout
file is inferred from the data; if no appropriate layout file was
found, the layout is automatically generated from the sensor
locations.
vmin : float | callable | None
The value specifying the lower bound of the color range. If None,
and vmax is None, -vmax is used. Else np.min(data) or in case
data contains only positive values 0. If callable, the output
equals vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range. If None,
the maximum value is used. If callable, the output equals
vmax(data). Defaults to None.
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap
to use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging
the colorbar with left and right mouse button. Left mouse button
moves the scale up and down and right mouse button adjusts the
range. Hitting space bar resets the range. Up and down arrows can
be used to change the colormap. If None (default), 'Reds' is used
for all positive data, otherwise defaults to 'RdBu_r'. If
'interactive', translates to (None, True).
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle
will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
axes : instance of Axes | None
The axes to plot to. If None the axes is defined automatically.
show : bool
Call pyplot.show() at the end.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz import plot_tfr_topomap
return plot_tfr_topomap(
self,
tmin=tmin,
tmax=tmax,
fmin=fmin,
fmax=fmax,
ch_type=ch_type,
baseline=baseline,
mode=mode,
layout=layout,
vmin=vmin,
vmax=vmax,
cmap=cmap,
sensors=sensors,
colorbar=colorbar,
unit=unit,
res=res,
size=size,
cbar_fmt=cbar_fmt,
show_names=show_names,
title=title,
axes=axes,
show=show,
outlines=outlines,
head_pos=head_pos,
)
|
def plot_topomap(
self,
tmin=None,
tmax=None,
fmin=None,
fmax=None,
ch_type=None,
baseline=None,
mode="mean",
layout=None,
vmin=None,
vmax=None,
cmap=None,
sensors=True,
colorbar=True,
unit=None,
res=64,
size=2,
cbar_fmt="%1.1e",
show_names=False,
title=None,
axes=None,
show=True,
outlines="head",
head_pos=None,
):
"""Plot topographic maps of time-frequency intervals of TFR data
Parameters
----------
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then first available channel type from order given
above is used. Defaults to None.
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
If None, baseline no correction will be performed.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout
file is inferred from the data; if no appropriate layout file was
found, the layout is automatically generated from the sensor
locations.
vmin : float | callable | None
The value specifying the lower bound of the color range. If None,
and vmax is None, -vmax is used. Else np.min(data) or in case
data contains only positive values 0. If callable, the output
equals vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range. If None,
the maximum value is used. If callable, the output equals
vmax(data). Defaults to None.
cmap : matplotlib colormap | None
Colormap. If None and the plotted data is all positive, defaults to
'Reds'. If None and data contains also negative values, defaults to
'RdBu_r'. Defaults to None.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle
will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
axes : instance of Axes | None
The axes to plot to. If None the axes is defined automatically.
show : bool
Call pyplot.show() at the end.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz import plot_tfr_topomap
return plot_tfr_topomap(
self,
tmin=tmin,
tmax=tmax,
fmin=fmin,
fmax=fmax,
ch_type=ch_type,
baseline=baseline,
mode=mode,
layout=layout,
vmin=vmin,
vmax=vmax,
cmap=cmap,
sensors=sensors,
colorbar=colorbar,
unit=unit,
res=res,
size=size,
cbar_fmt=cbar_fmt,
show_names=show_names,
title=title,
axes=axes,
show=show,
outlines=outlines,
head_pos=head_pos,
)
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def plot_epochs_image(
epochs,
picks=None,
sigma=0.0,
vmin=None,
vmax=None,
colorbar=True,
order=None,
show=True,
units=None,
scalings=None,
cmap="RdBu_r",
fig=None,
axes=None,
overlay_times=None,
):
"""Plot Event Related Potential / Fields image
Parameters
----------
epochs : instance of Epochs
The epochs.
picks : int | array-like of int | None
The indices of the channels to consider. If None, the first
five good channels are plotted.
sigma : float
The standard deviation of the Gaussian smoothing to apply along
the epoch axis to apply in the image. If 0., no smoothing is applied.
vmin : float
The min value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
vmax : float
The max value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
colorbar : bool
Display or not a colorbar.
order : None | array of int | callable
If not None, order is used to reorder the epochs on the y-axis
of the image. If it's an array of int it should be of length
the number of good epochs. If it's a callable the arguments
passed are the times vector and the data as 2d array
(data.shape[1] == len(times).
show : bool
Show figure if True.
units : dict | None
The units of the channel types used for axes lables. If None,
defaults to `units=dict(eeg='uV', grad='fT/cm', mag='fT')`.
scalings : dict | None
The scalings of the channel types to be applied for plotting.
If None, defaults to `scalings=dict(eeg=1e6, grad=1e13, mag=1e15,
eog=1e6)`.
cmap : matplotlib colormap | (colormap, bool) | 'interactive'
Colormap. If tuple, the first value indicates the colormap to use and
the second value is a boolean defining interactivity. In interactive
mode the colors are adjustable by clicking and dragging the colorbar
with left and right mouse button. Left mouse button moves the scale up
and down and right mouse button adjusts the range. Hitting space bar
resets the scale. Up and down arrows can be used to change the
colormap. If 'interactive', translates to ('RdBu_r', True). Defaults to
'RdBu_r'.
fig : matplotlib figure | None
Figure instance to draw the image to. Figure must contain two axes for
drawing the single trials and evoked responses. If None a new figure is
created. Defaults to None.
axes : list of matplotlib axes | None
List of axes instances to draw the image, erp and colorbar to.
Must be of length three if colorbar is True (with the last list element
being the colorbar axes) or two if colorbar is False. If both fig and
axes are passed an error is raised. Defaults to None.
overlay_times : array-like, shape (n_epochs,) | None
If not None the parameter is interpreted as time instants in seconds
and is added to the image. It is typically useful to display reaction
times. Note that it is defined with respect to the order
of epochs such that overlay_times[0] corresponds to epochs[0].
Returns
-------
figs : lists of matplotlib figures
One figure per channel displayed.
"""
from scipy import ndimage
units = _handle_default("units", units)
scalings = _handle_default("scalings", scalings)
import matplotlib.pyplot as plt
if picks is None:
picks = pick_types(
epochs.info, meg=True, eeg=True, ref_meg=False, exclude="bads"
)[:5]
if set(units.keys()) != set(scalings.keys()):
raise ValueError("Scalings and units must have the same keys.")
picks = np.atleast_1d(picks)
if (fig is not None or axes is not None) and len(picks) > 1:
raise ValueError("Only single pick can be drawn to a figure.")
if axes is not None:
if fig is not None:
raise ValueError(
"Both figure and axes were passed, pleasedecide between the two."
)
from .utils import _validate_if_list_of_axes
oblig_len = 3 if colorbar else 2
_validate_if_list_of_axes(axes, obligatory_len=oblig_len)
ax1, ax2 = axes[:2]
# if axes were passed - we ignore fig param and get figure from axes
fig = ax1.get_figure()
if colorbar:
ax3 = axes[-1]
evoked = epochs.average(picks)
data = epochs.get_data()[:, picks, :]
scale_vmin = True if vmin is None else False
scale_vmax = True if vmax is None else False
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
if overlay_times is not None and len(overlay_times) != len(data):
raise ValueError(
"size of overlay_times parameter (%s) do not "
"match the number of epochs (%s)." % (len(overlay_times), len(data))
)
if overlay_times is not None:
overlay_times = np.array(overlay_times)
times_min = np.min(overlay_times)
times_max = np.max(overlay_times)
if (times_min < epochs.tmin) or (times_max > epochs.tmax):
warn(
"Some values in overlay_times fall outside of the epochs "
"time interval (between %s s and %s s)" % (epochs.tmin, epochs.tmax)
)
figs = list()
for i, (this_data, idx) in enumerate(zip(np.swapaxes(data, 0, 1), picks)):
if fig is None:
this_fig = plt.figure()
else:
this_fig = fig
figs.append(this_fig)
ch_type = channel_type(epochs.info, idx)
if ch_type not in scalings:
# We know it's not in either scalings or units since keys match
raise KeyError("%s type not in scalings and units" % ch_type)
this_data *= scalings[ch_type]
this_order = order
if callable(order):
this_order = order(epochs.times, this_data)
if this_order is not None and (len(this_order) != len(this_data)):
raise ValueError(
"size of order parameter (%s) does not "
"match the number of epochs (%s)." % (len(this_order), len(this_data))
)
this_overlay_times = None
if overlay_times is not None:
this_overlay_times = overlay_times
if this_order is not None:
this_order = np.asarray(this_order)
this_data = this_data[this_order]
if this_overlay_times is not None:
this_overlay_times = this_overlay_times[this_order]
if sigma > 0.0:
this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
plt.figure(this_fig.number)
if axes is None:
ax1 = plt.subplot2grid((3, 10), (0, 0), colspan=9, rowspan=2)
ax2 = plt.subplot2grid((3, 10), (2, 0), colspan=9, rowspan=1)
if colorbar:
ax3 = plt.subplot2grid((3, 10), (0, 9), colspan=1, rowspan=3)
this_vmin = vmin * scalings[ch_type] if scale_vmin else vmin
this_vmax = vmax * scalings[ch_type] if scale_vmax else vmax
if cmap == "interactive":
cmap = ("RdBu_r", True)
elif not isinstance(cmap, tuple):
cmap = (cmap, True)
im = ax1.imshow(
this_data,
extent=[1e3 * epochs.times[0], 1e3 * epochs.times[-1], 0, len(data)],
aspect="auto",
origin="lower",
interpolation="nearest",
vmin=this_vmin,
vmax=this_vmax,
cmap=cmap[0],
)
if this_overlay_times is not None:
plt.plot(
1e3 * this_overlay_times,
0.5 + np.arange(len(this_data)),
"k",
linewidth=2,
)
ax1.set_title(epochs.ch_names[idx])
ax1.set_ylabel("Epochs")
ax1.axis("auto")
ax1.axis("tight")
ax1.axvline(0, color="m", linewidth=3, linestyle="--")
evoked_data = scalings[ch_type] * evoked.data[i]
ax2.plot(1e3 * evoked.times, evoked_data)
ax2.set_xlabel("Time (ms)")
ax2.set_xlim([1e3 * evoked.times[0], 1e3 * evoked.times[-1]])
ax2.set_ylabel(units[ch_type])
evoked_vmin = min(evoked_data) * 1.1 if scale_vmin else vmin
evoked_vmax = max(evoked_data) * 1.1 if scale_vmax else vmax
if scale_vmin or scale_vmax:
evoked_vmax = max(np.abs([evoked_vmax, evoked_vmin]))
evoked_vmin = -evoked_vmax
ax2.set_ylim([evoked_vmin, evoked_vmax])
ax2.axvline(0, color="m", linewidth=3, linestyle="--")
if colorbar:
cbar = plt.colorbar(im, cax=ax3)
if cmap[1]:
ax1.CB = DraggableColorbar(cbar, im)
tight_layout(fig=this_fig)
plt_show(show)
return figs
|
def plot_epochs_image(
epochs,
picks=None,
sigma=0.0,
vmin=None,
vmax=None,
colorbar=True,
order=None,
show=True,
units=None,
scalings=None,
cmap="RdBu_r",
fig=None,
axes=None,
overlay_times=None,
):
"""Plot Event Related Potential / Fields image
Parameters
----------
epochs : instance of Epochs
The epochs.
picks : int | array-like of int | None
The indices of the channels to consider. If None, the first
five good channels are plotted.
sigma : float
The standard deviation of the Gaussian smoothing to apply along
the epoch axis to apply in the image. If 0., no smoothing is applied.
vmin : float
The min value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
vmax : float
The max value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
colorbar : bool
Display or not a colorbar.
order : None | array of int | callable
If not None, order is used to reorder the epochs on the y-axis
of the image. If it's an array of int it should be of length
the number of good epochs. If it's a callable the arguments
passed are the times vector and the data as 2d array
(data.shape[1] == len(times).
show : bool
Show figure if True.
units : dict | None
The units of the channel types used for axes lables. If None,
defaults to `units=dict(eeg='uV', grad='fT/cm', mag='fT')`.
scalings : dict | None
The scalings of the channel types to be applied for plotting.
If None, defaults to `scalings=dict(eeg=1e6, grad=1e13, mag=1e15,
eog=1e6)`.
cmap : matplotlib colormap
Colormap.
fig : matplotlib figure | None
Figure instance to draw the image to. Figure must contain two axes for
drawing the single trials and evoked responses. If None a new figure is
created. Defaults to None.
axes : list of matplotlib axes | None
List of axes instances to draw the image, erp and colorbar to.
Must be of length three if colorbar is True (with the last list element
being the colorbar axes) or two if colorbar is False. If both fig and
axes are passed an error is raised. Defaults to None.
overlay_times : array-like, shape (n_epochs,) | None
If not None the parameter is interpreted as time instants in seconds
and is added to the image. It is typically useful to display reaction
times. Note that it is defined with respect to the order
of epochs such that overlay_times[0] corresponds to epochs[0].
Returns
-------
figs : lists of matplotlib figures
One figure per channel displayed.
"""
from scipy import ndimage
units = _handle_default("units", units)
scalings = _handle_default("scalings", scalings)
import matplotlib.pyplot as plt
if picks is None:
picks = pick_types(
epochs.info, meg=True, eeg=True, ref_meg=False, exclude="bads"
)[:5]
if set(units.keys()) != set(scalings.keys()):
raise ValueError("Scalings and units must have the same keys.")
picks = np.atleast_1d(picks)
if (fig is not None or axes is not None) and len(picks) > 1:
raise ValueError("Only single pick can be drawn to a figure.")
if axes is not None:
if fig is not None:
raise ValueError(
"Both figure and axes were passed, pleasedecide between the two."
)
from .utils import _validate_if_list_of_axes
oblig_len = 3 if colorbar else 2
_validate_if_list_of_axes(axes, obligatory_len=oblig_len)
ax1, ax2 = axes[:2]
# if axes were passed - we ignore fig param and get figure from axes
fig = ax1.get_figure()
if colorbar:
ax3 = axes[-1]
evoked = epochs.average(picks)
data = epochs.get_data()[:, picks, :]
scale_vmin = True if vmin is None else False
scale_vmax = True if vmax is None else False
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
if overlay_times is not None and len(overlay_times) != len(data):
raise ValueError(
"size of overlay_times parameter (%s) do not "
"match the number of epochs (%s)." % (len(overlay_times), len(data))
)
if overlay_times is not None:
overlay_times = np.array(overlay_times)
times_min = np.min(overlay_times)
times_max = np.max(overlay_times)
if (times_min < epochs.tmin) or (times_max > epochs.tmax):
warn(
"Some values in overlay_times fall outside of the epochs "
"time interval (between %s s and %s s)" % (epochs.tmin, epochs.tmax)
)
figs = list()
for i, (this_data, idx) in enumerate(zip(np.swapaxes(data, 0, 1), picks)):
if fig is None:
this_fig = plt.figure()
else:
this_fig = fig
figs.append(this_fig)
ch_type = channel_type(epochs.info, idx)
if ch_type not in scalings:
# We know it's not in either scalings or units since keys match
raise KeyError("%s type not in scalings and units" % ch_type)
this_data *= scalings[ch_type]
this_order = order
if callable(order):
this_order = order(epochs.times, this_data)
if this_order is not None and (len(this_order) != len(this_data)):
raise ValueError(
"size of order parameter (%s) does not "
"match the number of epochs (%s)." % (len(this_order), len(this_data))
)
this_overlay_times = None
if overlay_times is not None:
this_overlay_times = overlay_times
if this_order is not None:
this_order = np.asarray(this_order)
this_data = this_data[this_order]
if this_overlay_times is not None:
this_overlay_times = this_overlay_times[this_order]
if sigma > 0.0:
this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
plt.figure(this_fig.number)
if axes is None:
ax1 = plt.subplot2grid((3, 10), (0, 0), colspan=9, rowspan=2)
ax2 = plt.subplot2grid((3, 10), (2, 0), colspan=9, rowspan=1)
if colorbar:
ax3 = plt.subplot2grid((3, 10), (0, 9), colspan=1, rowspan=3)
if scale_vmin:
vmin *= scalings[ch_type]
if scale_vmax:
vmax *= scalings[ch_type]
im = ax1.imshow(
this_data,
extent=[1e3 * epochs.times[0], 1e3 * epochs.times[-1], 0, len(data)],
aspect="auto",
origin="lower",
interpolation="nearest",
vmin=vmin,
vmax=vmax,
cmap=cmap,
)
if this_overlay_times is not None:
plt.plot(
1e3 * this_overlay_times,
0.5 + np.arange(len(this_data)),
"k",
linewidth=2,
)
ax1.set_title(epochs.ch_names[idx])
ax1.set_ylabel("Epochs")
ax1.axis("auto")
ax1.axis("tight")
ax1.axvline(0, color="m", linewidth=3, linestyle="--")
evoked_data = scalings[ch_type] * evoked.data[i]
ax2.plot(1e3 * evoked.times, evoked_data)
ax2.set_xlabel("Time (ms)")
ax2.set_xlim([1e3 * evoked.times[0], 1e3 * evoked.times[-1]])
ax2.set_ylabel(units[ch_type])
evoked_vmin = min(evoked_data) * 1.1 if scale_vmin else vmin
evoked_vmax = max(evoked_data) * 1.1 if scale_vmax else vmax
if scale_vmin or scale_vmax:
evoked_vmax = max(np.abs([evoked_vmax, evoked_vmin]))
evoked_vmin = -evoked_vmax
ax2.set_ylim([evoked_vmin, evoked_vmax])
ax2.axvline(0, color="m", linewidth=3, linestyle="--")
if colorbar:
plt.colorbar(im, cax=ax3)
tight_layout(fig=this_fig)
plt_show(show)
return figs
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def _butterfly_onselect(xmin, xmax, ch_types, evoked, text=None):
"""Function for drawing topomaps from the selected area."""
import matplotlib.pyplot as plt
ch_types = [type for type in ch_types if type in ("eeg", "grad", "mag")]
if (
"grad" in ch_types
and len(
_pair_grad_sensors(evoked.info, topomap_coords=False, raise_error=False)
)
< 2
):
ch_types.remove("grad")
if len(ch_types) == 0:
return
vert_lines = list()
if text is not None:
text.set_visible(True)
ax = text.axes
ylim = ax.get_ylim()
vert_lines.append(ax.plot([xmin, xmin], ylim, zorder=0, color="red"))
vert_lines.append(ax.plot([xmax, xmax], ylim, zorder=0, color="red"))
fill = ax.fill_betweenx(ylim, x1=xmin, x2=xmax, alpha=0.2, color="green")
evoked_fig = plt.gcf()
evoked_fig.canvas.draw()
evoked_fig.canvas.flush_events()
times = evoked.times
xmin *= 0.001
minidx = np.abs(times - xmin).argmin()
xmax *= 0.001
maxidx = np.abs(times - xmax).argmin()
fig, axarr = plt.subplots(
1, len(ch_types), squeeze=False, figsize=(3 * len(ch_types), 3)
)
for idx, ch_type in enumerate(ch_types):
picks, pos, merge_grads, _, ch_type = _prepare_topo_plot(
evoked, ch_type, layout=None
)
data = evoked.data[picks, minidx:maxidx]
if merge_grads:
from ..channels.layout import _merge_grad_data
data = _merge_grad_data(data)
title = "%s RMS" % ch_type
else:
title = ch_type
data = np.average(data, axis=1)
axarr[0][idx].set_title(title)
plot_topomap(data, pos, axes=axarr[0][idx], show=False)
fig.suptitle("Average over %.2fs - %.2fs" % (xmin, xmax), fontsize=15, y=0.1)
tight_layout(pad=2.0, fig=fig)
plt_show()
if text is not None:
text.set_visible(False)
close_callback = partial(_topo_closed, ax=ax, lines=vert_lines, fill=fill)
fig.canvas.mpl_connect("close_event", close_callback)
evoked_fig.canvas.draw()
evoked_fig.canvas.flush_events()
|
def _butterfly_onselect(xmin, xmax, ch_types, evoked, text=None):
"""Function for drawing topomaps from the selected area."""
import matplotlib.pyplot as plt
ch_types = [type for type in ch_types if type in ("eeg", "grad", "mag")]
vert_lines = list()
if text is not None:
text.set_visible(True)
ax = text.axes
ylim = ax.get_ylim()
vert_lines.append(ax.plot([xmin, xmin], ylim, zorder=0, color="red"))
vert_lines.append(ax.plot([xmax, xmax], ylim, zorder=0, color="red"))
fill = ax.fill_betweenx(ylim, x1=xmin, x2=xmax, alpha=0.2, color="green")
evoked_fig = plt.gcf()
evoked_fig.canvas.draw()
evoked_fig.canvas.flush_events()
times = evoked.times
xmin *= 0.001
minidx = np.abs(times - xmin).argmin()
xmax *= 0.001
maxidx = np.abs(times - xmax).argmin()
fig, axarr = plt.subplots(
1, len(ch_types), squeeze=False, figsize=(3 * len(ch_types), 3)
)
for idx, ch_type in enumerate(ch_types):
picks, pos, merge_grads, _, ch_type = _prepare_topo_plot(
evoked, ch_type, layout=None
)
data = evoked.data[picks, minidx:maxidx]
if merge_grads:
from ..channels.layout import _merge_grad_data
data = _merge_grad_data(data)
title = "%s RMS" % ch_type
else:
title = ch_type
data = np.average(data, axis=1)
axarr[0][idx].set_title(title)
plot_topomap(data, pos, axes=axarr[0][idx], show=False)
fig.suptitle("Average over %.2fs - %.2fs" % (xmin, xmax), fontsize=15, y=0.1)
tight_layout(pad=2.0, fig=fig)
plt_show()
if text is not None:
text.set_visible(False)
close_callback = partial(_topo_closed, ax=ax, lines=vert_lines, fill=fill)
fig.canvas.mpl_connect("close_event", close_callback)
evoked_fig.canvas.draw()
evoked_fig.canvas.flush_events()
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def _plot_evoked(
evoked,
picks,
exclude,
unit,
show,
ylim,
proj,
xlim,
hline,
units,
scalings,
titles,
axes,
plot_type,
cmap=None,
gfp=False,
window_title=None,
spatial_colors=False,
set_tight_layout=True,
selectable=True,
zorder="unsorted",
):
"""Aux function for plot_evoked and plot_evoked_image (cf. docstrings)
Extra param is:
plot_type : str, value ('butterfly' | 'image')
The type of graph to plot: 'butterfly' plots each channel as a line
(x axis: time, y axis: amplitude). 'image' plots a 2D image where
color depicts the amplitude of each channel at a given time point
(x axis: time, y axis: channel). In 'image' mode, the plot is not
interactive.
"""
import matplotlib.pyplot as plt
from matplotlib import patheffects
from matplotlib.widgets import SpanSelector
info = evoked.info
if axes is not None and proj == "interactive":
raise RuntimeError(
"Currently only single axis figures are supported"
" for interactive SSP selection."
)
if isinstance(gfp, string_types) and gfp != "only":
raise ValueError('gfp must be boolean or "only". Got %s' % gfp)
if cmap == "interactive":
cmap = (None, True)
elif not isinstance(cmap, tuple):
cmap = (cmap, True)
scalings = _handle_default("scalings", scalings)
titles = _handle_default("titles", titles)
units = _handle_default("units", units)
# Valid data types ordered for consistency
valid_channel_types = [
"eeg",
"grad",
"mag",
"seeg",
"eog",
"ecg",
"emg",
"dipole",
"gof",
"bio",
"ecog",
]
if picks is None:
picks = list(range(info["nchan"]))
bad_ch_idx = [
info["ch_names"].index(ch) for ch in info["bads"] if ch in info["ch_names"]
]
if len(exclude) > 0:
if isinstance(exclude, string_types) and exclude == "bads":
exclude = bad_ch_idx
elif isinstance(exclude, list) and all(
isinstance(ch, string_types) for ch in exclude
):
exclude = [info["ch_names"].index(ch) for ch in exclude]
else:
raise ValueError('exclude has to be a list of channel names or "bads"')
picks = list(set(picks).difference(exclude))
picks = np.array(picks)
types = np.array([channel_type(info, idx) for idx in picks])
n_channel_types = 0
ch_types_used = []
for t in valid_channel_types:
if t in types:
n_channel_types += 1
ch_types_used.append(t)
axes_init = axes # remember if axes were given as input
fig = None
if axes is None:
fig, axes = plt.subplots(n_channel_types, 1)
if isinstance(axes, plt.Axes):
axes = [axes]
elif isinstance(axes, np.ndarray):
axes = list(axes)
if axes_init is not None:
fig = axes[0].get_figure()
if window_title is not None:
fig.canvas.set_window_title(window_title)
if not len(axes) == n_channel_types:
raise ValueError(
"Number of axes (%g) must match number of channel "
"types (%d: %s)" % (len(axes), n_channel_types, sorted(ch_types_used))
)
# instead of projecting during each iteration let's use the mixin here.
if proj is True and evoked.proj is not True:
evoked = evoked.copy()
evoked.apply_proj()
times = 1e3 * evoked.times # time in milliseconds
texts = list()
idxs = list()
lines = list()
selectors = list() # for keeping reference to span_selectors
path_effects = [patheffects.withStroke(linewidth=2, foreground="w", alpha=0.75)]
gfp_path_effects = [patheffects.withStroke(linewidth=5, foreground="w", alpha=0.75)]
for ax, t in zip(axes, ch_types_used):
line_list = list() # 'line_list' contains the lines for this axes
ch_unit = units[t]
this_scaling = scalings[t]
if unit is False:
this_scaling = 1.0
ch_unit = "NA" # no unit
idx = list(picks[types == t])
idxs.append(idx)
if len(idx) > 0:
# Set amplitude scaling
D = this_scaling * evoked.data[idx, :]
# Parameters for butterfly interactive plots
if plot_type == "butterfly":
text = ax.annotate(
"Loading...",
xy=(0.01, 0.1),
xycoords="axes fraction",
fontsize=20,
color="green",
zorder=3,
)
text.set_visible(False)
if selectable:
callback_onselect = partial(
_butterfly_onselect,
ch_types=ch_types_used,
evoked=evoked,
text=text,
)
blit = False if plt.get_backend() == "MacOSX" else True
selectors.append(
SpanSelector(
ax,
callback_onselect,
"horizontal",
minspan=10,
useblit=blit,
rectprops=dict(alpha=0.5, facecolor="red"),
)
)
gfp_only = isinstance(gfp, string_types) and gfp == "only"
if not gfp_only:
if spatial_colors:
chs = [info["chs"][i] for i in idx]
locs3d = np.array([ch["loc"][:3] for ch in chs])
x, y, z = locs3d.T
colors = _rgb(info, x, y, z)
if t in ("meg", "mag", "grad", "eeg"):
layout = find_layout(info, ch_type=t, exclude=[])
else:
layout = find_layout(info, None, exclude=[])
# drop channels that are not in the data
used_nm = np.array(_clean_names(info["ch_names"]))[idx]
names = np.asarray(
[name for name in used_nm if name in layout.names]
)
name_idx = [layout.names.index(name) for name in names]
if len(name_idx) < len(chs):
warn(
"Could not find layout for all the channels. "
"Legend for spatial colors not drawn."
)
else:
# find indices for bads
bads = [
np.where(names == bad)[0][0]
for bad in info["bads"]
if bad in names
]
pos, outlines = _check_outlines(
layout.pos[:, :2], "skirt", None
)
pos = pos[name_idx]
_plot_legend(pos, colors, ax, bads, outlines)
else:
colors = ["k"] * len(idx)
for i in bad_ch_idx:
if i in idx:
colors[idx.index(i)] = "r"
if zorder == "std":
# find the channels with the least activity
# to map them in front of the more active ones
z_ord = D.std(axis=1).argsort()
elif zorder == "unsorted":
z_ord = list(range(D.shape[0]))
elif not callable(zorder):
error = (
'`zorder` must be a function, "std" or "unsorted", not {0}.'
)
raise TypeError(error.format(type(zorder)))
else:
z_ord = zorder(D)
# plot channels
for ch_idx, z in enumerate(z_ord):
line_list.append(
ax.plot(
times,
D[ch_idx],
picker=3.0,
zorder=1 + z,
color=colors[ch_idx],
)[0]
)
if gfp: # 'only' or boolean True
gfp_color = 3 * (0.0,) if spatial_colors else (0.0, 1.0, 0.0)
this_gfp = np.sqrt((D * D).mean(axis=0))
this_ylim = (
ax.get_ylim()
if (ylim is None or t not in ylim.keys())
else ylim[t]
)
if not gfp_only:
y_offset = this_ylim[0]
else:
y_offset = 0.0
this_gfp += y_offset
ax.fill_between(
times,
y_offset,
this_gfp,
color="none",
facecolor=gfp_color,
zorder=1,
alpha=0.25,
)
line_list.append(
ax.plot(times, this_gfp, color=gfp_color, zorder=3)[0]
)
ax.text(
times[0] + 0.01 * (times[-1] - times[0]),
this_gfp[0] + 0.05 * np.diff(ax.get_ylim())[0],
"GFP",
zorder=4,
color=gfp_color,
path_effects=gfp_path_effects,
)
for ii, line in zip(idx, line_list):
if ii in bad_ch_idx:
line.set_zorder(2)
if spatial_colors:
line.set_linestyle("--")
ax.set_ylabel("data (%s)" % ch_unit)
# for old matplotlib, we actually need this to have a bounding
# box (!), so we have to put some valid text here, change
# alpha and path effects later
texts.append(
ax.text(
0,
0,
"blank",
zorder=3,
verticalalignment="baseline",
horizontalalignment="left",
fontweight="bold",
alpha=0,
)
)
elif plot_type == "image":
im = ax.imshow(
D,
interpolation="nearest",
origin="lower",
extent=[times[0], times[-1], 0, D.shape[0]],
aspect="auto",
cmap=cmap[0],
)
cbar = plt.colorbar(im, ax=ax)
cbar.ax.set_title(ch_unit)
if cmap[1]:
ax.CB = DraggableColorbar(cbar, im)
ax.set_ylabel("channels (%s)" % "index")
else:
raise ValueError(
"plot_type has to be 'butterfly' or 'image'.Got %s." % plot_type
)
if xlim is not None:
if xlim == "tight":
xlim = (times[0], times[-1])
ax.set_xlim(xlim)
if ylim is not None and t in ylim:
if plot_type == "butterfly":
ax.set_ylim(ylim[t])
elif plot_type == "image":
im.set_clim(ylim[t])
ax.set_title(
titles[t] + " (%d channel%s)" % (len(D), "s" if len(D) > 1 else "")
)
ax.set_xlabel("time (ms)")
if (plot_type == "butterfly") and (hline is not None):
for h in hline:
c = "r" if not spatial_colors else "grey"
ax.axhline(h, linestyle="--", linewidth=2, color=c)
lines.append(line_list)
if plot_type == "butterfly":
params = dict(
axes=axes,
texts=texts,
lines=lines,
ch_names=info["ch_names"],
idxs=idxs,
need_draw=False,
path_effects=path_effects,
selectors=selectors,
)
fig.canvas.mpl_connect("pick_event", partial(_butterfly_onpick, params=params))
fig.canvas.mpl_connect(
"button_press_event", partial(_butterfly_on_button_press, params=params)
)
if axes_init is None:
plt.subplots_adjust(0.175, 0.08, 0.94, 0.94, 0.2, 0.63)
if proj == "interactive":
_check_delayed_ssp(evoked)
params = dict(
evoked=evoked,
fig=fig,
projs=info["projs"],
axes=axes,
types=types,
units=units,
scalings=scalings,
unit=unit,
ch_types_used=ch_types_used,
picks=picks,
plot_update_proj_callback=_plot_update_evoked,
plot_type=plot_type,
)
_draw_proj_checkbox(None, params)
plt_show(show)
fig.canvas.draw() # for axes plots update axes.
if set_tight_layout:
tight_layout(fig=fig)
return fig
|
def _plot_evoked(
evoked,
picks,
exclude,
unit,
show,
ylim,
proj,
xlim,
hline,
units,
scalings,
titles,
axes,
plot_type,
cmap=None,
gfp=False,
window_title=None,
spatial_colors=False,
set_tight_layout=True,
selectable=True,
zorder="unsorted",
):
"""Aux function for plot_evoked and plot_evoked_image (cf. docstrings)
Extra param is:
plot_type : str, value ('butterfly' | 'image')
The type of graph to plot: 'butterfly' plots each channel as a line
(x axis: time, y axis: amplitude). 'image' plots a 2D image where
color depicts the amplitude of each channel at a given time point
(x axis: time, y axis: channel). In 'image' mode, the plot is not
interactive.
"""
import matplotlib.pyplot as plt
from matplotlib import patheffects
from matplotlib.widgets import SpanSelector
info = evoked.info
if axes is not None and proj == "interactive":
raise RuntimeError(
"Currently only single axis figures are supported"
" for interactive SSP selection."
)
if isinstance(gfp, string_types) and gfp != "only":
raise ValueError('gfp must be boolean or "only". Got %s' % gfp)
scalings = _handle_default("scalings", scalings)
titles = _handle_default("titles", titles)
units = _handle_default("units", units)
# Valid data types ordered for consistency
valid_channel_types = [
"eeg",
"grad",
"mag",
"seeg",
"eog",
"ecg",
"emg",
"dipole",
"gof",
"bio",
"ecog",
]
if picks is None:
picks = list(range(info["nchan"]))
bad_ch_idx = [
info["ch_names"].index(ch) for ch in info["bads"] if ch in info["ch_names"]
]
if len(exclude) > 0:
if isinstance(exclude, string_types) and exclude == "bads":
exclude = bad_ch_idx
elif isinstance(exclude, list) and all(
isinstance(ch, string_types) for ch in exclude
):
exclude = [info["ch_names"].index(ch) for ch in exclude]
else:
raise ValueError('exclude has to be a list of channel names or "bads"')
picks = list(set(picks).difference(exclude))
picks = np.array(picks)
types = np.array([channel_type(info, idx) for idx in picks])
n_channel_types = 0
ch_types_used = []
for t in valid_channel_types:
if t in types:
n_channel_types += 1
ch_types_used.append(t)
axes_init = axes # remember if axes were given as input
fig = None
if axes is None:
fig, axes = plt.subplots(n_channel_types, 1)
if isinstance(axes, plt.Axes):
axes = [axes]
elif isinstance(axes, np.ndarray):
axes = list(axes)
if axes_init is not None:
fig = axes[0].get_figure()
if window_title is not None:
fig.canvas.set_window_title(window_title)
if not len(axes) == n_channel_types:
raise ValueError(
"Number of axes (%g) must match number of channel "
"types (%d: %s)" % (len(axes), n_channel_types, sorted(ch_types_used))
)
# instead of projecting during each iteration let's use the mixin here.
if proj is True and evoked.proj is not True:
evoked = evoked.copy()
evoked.apply_proj()
times = 1e3 * evoked.times # time in milliseconds
texts = list()
idxs = list()
lines = list()
selectors = list() # for keeping reference to span_selectors
path_effects = [patheffects.withStroke(linewidth=2, foreground="w", alpha=0.75)]
gfp_path_effects = [patheffects.withStroke(linewidth=5, foreground="w", alpha=0.75)]
for ax, t in zip(axes, ch_types_used):
line_list = list() # 'line_list' contains the lines for this axes
ch_unit = units[t]
this_scaling = scalings[t]
if unit is False:
this_scaling = 1.0
ch_unit = "NA" # no unit
idx = list(picks[types == t])
idxs.append(idx)
if len(idx) > 0:
# Set amplitude scaling
D = this_scaling * evoked.data[idx, :]
# Parameters for butterfly interactive plots
if plot_type == "butterfly":
text = ax.annotate(
"Loading...",
xy=(0.01, 0.1),
xycoords="axes fraction",
fontsize=20,
color="green",
zorder=3,
)
text.set_visible(False)
if selectable:
callback_onselect = partial(
_butterfly_onselect,
ch_types=ch_types_used,
evoked=evoked,
text=text,
)
blit = False if plt.get_backend() == "MacOSX" else True
selectors.append(
SpanSelector(
ax,
callback_onselect,
"horizontal",
minspan=10,
useblit=blit,
rectprops=dict(alpha=0.5, facecolor="red"),
)
)
gfp_only = isinstance(gfp, string_types) and gfp == "only"
if not gfp_only:
if spatial_colors:
chs = [info["chs"][i] for i in idx]
locs3d = np.array([ch["loc"][:3] for ch in chs])
x, y, z = locs3d.T
colors = _rgb(info, x, y, z)
if t in ("meg", "mag", "grad", "eeg"):
layout = find_layout(info, ch_type=t, exclude=[])
else:
layout = find_layout(info, None, exclude=[])
# drop channels that are not in the data
used_nm = np.array(_clean_names(info["ch_names"]))[idx]
names = np.asarray(
[name for name in used_nm if name in layout.names]
)
name_idx = [layout.names.index(name) for name in names]
if len(name_idx) < len(chs):
warn(
"Could not find layout for all the channels. "
"Legend for spatial colors not drawn."
)
else:
# find indices for bads
bads = [
np.where(names == bad)[0][0]
for bad in info["bads"]
if bad in names
]
pos, outlines = _check_outlines(
layout.pos[:, :2], "skirt", None
)
pos = pos[name_idx]
_plot_legend(pos, colors, ax, bads, outlines)
else:
colors = ["k"] * len(idx)
for i in bad_ch_idx:
if i in idx:
colors[idx.index(i)] = "r"
if zorder == "std":
# find the channels with the least activity
# to map them in front of the more active ones
z_ord = D.std(axis=1).argsort()
elif zorder == "unsorted":
z_ord = list(range(D.shape[0]))
elif not callable(zorder):
error = (
'`zorder` must be a function, "std" or "unsorted", not {0}.'
)
raise TypeError(error.format(type(zorder)))
else:
z_ord = zorder(D)
# plot channels
for ch_idx, z in enumerate(z_ord):
line_list.append(
ax.plot(
times,
D[ch_idx],
picker=3.0,
zorder=1 + z,
color=colors[ch_idx],
)[0]
)
if gfp: # 'only' or boolean True
gfp_color = 3 * (0.0,) if spatial_colors else (0.0, 1.0, 0.0)
this_gfp = np.sqrt((D * D).mean(axis=0))
this_ylim = (
ax.get_ylim()
if (ylim is None or t not in ylim.keys())
else ylim[t]
)
if not gfp_only:
y_offset = this_ylim[0]
else:
y_offset = 0.0
this_gfp += y_offset
ax.fill_between(
times,
y_offset,
this_gfp,
color="none",
facecolor=gfp_color,
zorder=1,
alpha=0.25,
)
line_list.append(
ax.plot(times, this_gfp, color=gfp_color, zorder=3)[0]
)
ax.text(
times[0] + 0.01 * (times[-1] - times[0]),
this_gfp[0] + 0.05 * np.diff(ax.get_ylim())[0],
"GFP",
zorder=4,
color=gfp_color,
path_effects=gfp_path_effects,
)
for ii, line in zip(idx, line_list):
if ii in bad_ch_idx:
line.set_zorder(2)
if spatial_colors:
line.set_linestyle("--")
ax.set_ylabel("data (%s)" % ch_unit)
# for old matplotlib, we actually need this to have a bounding
# box (!), so we have to put some valid text here, change
# alpha and path effects later
texts.append(
ax.text(
0,
0,
"blank",
zorder=3,
verticalalignment="baseline",
horizontalalignment="left",
fontweight="bold",
alpha=0,
)
)
elif plot_type == "image":
im = ax.imshow(
D,
interpolation="nearest",
origin="lower",
extent=[times[0], times[-1], 0, D.shape[0]],
aspect="auto",
cmap=cmap,
)
cbar = plt.colorbar(im, ax=ax)
cbar.ax.set_title(ch_unit)
ax.set_ylabel("channels (%s)" % "index")
else:
raise ValueError(
"plot_type has to be 'butterfly' or 'image'.Got %s." % plot_type
)
if xlim is not None:
if xlim == "tight":
xlim = (times[0], times[-1])
ax.set_xlim(xlim)
if ylim is not None and t in ylim:
if plot_type == "butterfly":
ax.set_ylim(ylim[t])
elif plot_type == "image":
im.set_clim(ylim[t])
ax.set_title(
titles[t] + " (%d channel%s)" % (len(D), "s" if len(D) > 1 else "")
)
ax.set_xlabel("time (ms)")
if (plot_type == "butterfly") and (hline is not None):
for h in hline:
c = "r" if not spatial_colors else "grey"
ax.axhline(h, linestyle="--", linewidth=2, color=c)
lines.append(line_list)
if plot_type == "butterfly":
params = dict(
axes=axes,
texts=texts,
lines=lines,
ch_names=info["ch_names"],
idxs=idxs,
need_draw=False,
path_effects=path_effects,
selectors=selectors,
)
fig.canvas.mpl_connect("pick_event", partial(_butterfly_onpick, params=params))
fig.canvas.mpl_connect(
"button_press_event", partial(_butterfly_on_button_press, params=params)
)
if axes_init is None:
plt.subplots_adjust(0.175, 0.08, 0.94, 0.94, 0.2, 0.63)
if proj == "interactive":
_check_delayed_ssp(evoked)
params = dict(
evoked=evoked,
fig=fig,
projs=info["projs"],
axes=axes,
types=types,
units=units,
scalings=scalings,
unit=unit,
ch_types_used=ch_types_used,
picks=picks,
plot_update_proj_callback=_plot_update_evoked,
plot_type=plot_type,
)
_draw_proj_checkbox(None, params)
plt_show(show)
fig.canvas.draw() # for axes plots update axes.
if set_tight_layout:
tight_layout(fig=fig)
return fig
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def plot_evoked_image(
evoked,
picks=None,
exclude="bads",
unit=True,
show=True,
clim=None,
xlim="tight",
proj=False,
units=None,
scalings=None,
titles=None,
axes=None,
cmap="RdBu_r",
):
"""Plot evoked data as images
Parameters
----------
evoked : instance of Evoked
The evoked data
picks : array-like of int | None
The indices of channels to plot. If None show all.
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded.
unit : bool
Scale plot with channel (SI) unit.
show : bool
Show figure if True.
clim : dict | None
clim for plots (after scaling has been applied). e.g.
clim = dict(eeg=[-20, 20])
Valid keys are eeg, mag, grad, misc. If None, the clim parameter
for each channel equals the pyplot default.
xlim : 'tight' | tuple | None
xlim for plots.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
units : dict | None
The units of the channel types used for axes lables. If None,
defaults to `dict(eeg='uV', grad='fT/cm', mag='fT')`.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If None,`
defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
titles : dict | None
The titles associated with the channels. If None, defaults to
`dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')`.
axes : instance of Axis | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channel types. If instance of
Axes, there must be only one channel type plotted.
cmap : matplotlib colormap | (colormap, bool) | 'interactive'
Colormap. If tuple, the first value indicates the colormap to use and
the second value is a boolean defining interactivity. In interactive
mode the colors are adjustable by clicking and dragging the colorbar
with left and right mouse button. Left mouse button moves the scale up
and down and right mouse button adjusts the range. Hitting space bar
resets the scale. Up and down arrows can be used to change the
colormap. If 'interactive', translates to ('RdBu_r', True). Defaults to
'RdBu_r'.
Returns
-------
fig : instance of matplotlib.figure.Figure
Figure containing the images.
"""
return _plot_evoked(
evoked=evoked,
picks=picks,
exclude=exclude,
unit=unit,
show=show,
ylim=clim,
proj=proj,
xlim=xlim,
hline=None,
units=units,
scalings=scalings,
titles=titles,
axes=axes,
plot_type="image",
cmap=cmap,
)
|
def plot_evoked_image(
evoked,
picks=None,
exclude="bads",
unit=True,
show=True,
clim=None,
xlim="tight",
proj=False,
units=None,
scalings=None,
titles=None,
axes=None,
cmap="RdBu_r",
):
"""Plot evoked data as images
Parameters
----------
evoked : instance of Evoked
The evoked data
picks : array-like of int | None
The indices of channels to plot. If None show all.
exclude : list of str | 'bads'
Channels names to exclude from being shown. If 'bads', the
bad channels are excluded.
unit : bool
Scale plot with channel (SI) unit.
show : bool
Show figure if True.
clim : dict | None
clim for plots (after scaling has been applied). e.g.
clim = dict(eeg=[-20, 20])
Valid keys are eeg, mag, grad, misc. If None, the clim parameter
for each channel equals the pyplot default.
xlim : 'tight' | tuple | None
xlim for plots.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
units : dict | None
The units of the channel types used for axes lables. If None,
defaults to `dict(eeg='uV', grad='fT/cm', mag='fT')`.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If None,`
defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
titles : dict | None
The titles associated with the channels. If None, defaults to
`dict(eeg='EEG', grad='Gradiometers', mag='Magnetometers')`.
axes : instance of Axis | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channel types. If instance of
Axes, there must be only one channel type plotted.
cmap : matplotlib colormap
Colormap.
Returns
-------
fig : instance of matplotlib.figure.Figure
Figure containing the images.
"""
return _plot_evoked(
evoked=evoked,
picks=picks,
exclude=exclude,
unit=unit,
show=show,
ylim=clim,
proj=proj,
xlim=xlim,
hline=None,
units=units,
scalings=scalings,
titles=titles,
axes=axes,
plot_type="image",
cmap=cmap,
)
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def _imshow_tfr(
ax,
ch_idx,
tmin,
tmax,
vmin,
vmax,
onselect,
ylim=None,
tfr=None,
freq=None,
vline=None,
x_label=None,
y_label=None,
colorbar=False,
picker=True,
cmap=("RdBu_r", True),
title=None,
hline=None,
):
"""Aux function to show time-freq map on topo"""
import matplotlib.pyplot as plt
from matplotlib.widgets import RectangleSelector
extent = (tmin, tmax, freq[0], freq[-1])
cmap, interactive_cmap = cmap
img = ax.imshow(
tfr[ch_idx],
extent=extent,
aspect="auto",
origin="lower",
vmin=vmin,
vmax=vmax,
picker=picker,
cmap=cmap,
)
if isinstance(ax, plt.Axes):
if x_label is not None:
ax.set_xlabel(x_label)
if y_label is not None:
ax.set_ylabel(y_label)
else:
if x_label is not None:
plt.xlabel(x_label)
if y_label is not None:
plt.ylabel(y_label)
if colorbar:
if isinstance(colorbar, DraggableColorbar):
cbar = colorbar.cbar # this happens with multiaxes case
else:
cbar = plt.colorbar(mappable=img)
if interactive_cmap:
ax.CB = DraggableColorbar(cbar, img)
if title:
plt.title(title)
if not isinstance(ax, plt.Axes):
ax = plt.gca()
ax.RS = RectangleSelector(ax, onselect=onselect) # reference must be kept
|
def _imshow_tfr(
ax,
ch_idx,
tmin,
tmax,
vmin,
vmax,
onselect,
ylim=None,
tfr=None,
freq=None,
vline=None,
x_label=None,
y_label=None,
colorbar=False,
picker=True,
cmap="RdBu_r",
title=None,
hline=None,
):
"""Aux function to show time-freq map on topo"""
import matplotlib.pyplot as plt
from matplotlib.widgets import RectangleSelector
extent = (tmin, tmax, freq[0], freq[-1])
img = ax.imshow(
tfr[ch_idx],
extent=extent,
aspect="auto",
origin="lower",
vmin=vmin,
vmax=vmax,
picker=picker,
cmap=cmap,
)
if isinstance(ax, plt.Axes):
if x_label is not None:
ax.set_xlabel(x_label)
if y_label is not None:
ax.set_ylabel(y_label)
else:
if x_label is not None:
plt.xlabel(x_label)
if y_label is not None:
plt.ylabel(y_label)
if colorbar:
plt.colorbar(mappable=img)
if title:
plt.title(title)
if not isinstance(ax, plt.Axes):
ax = plt.gca()
ax.RS = RectangleSelector(ax, onselect=onselect) # reference must be kept
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def plot_projs_topomap(
projs,
layout=None,
cmap=None,
sensors=True,
colorbar=False,
res=64,
size=1,
show=True,
outlines="head",
contours=6,
image_interp="bilinear",
axes=None,
):
"""Plot topographic maps of SSP projections
Parameters
----------
projs : list of Projection
The projections
layout : None | Layout | list of Layout
Layout instance specifying sensor positions (does not need to be
specified for Neuromag data). Or a list of Layout if projections
are from different sensor types.
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap to
use and the second value is a boolean defining interactivity. In
interactive mode (only works if ``colorbar=True``) the colors are
adjustable by clicking and dragging the colorbar with left and right
mouse button. Left mouse button moves the scale up and down and right
mouse button adjusts the range. Hitting space bar resets the range. Up
and down arrows can be used to change the colormap. If None (default),
'Reds' is used for all positive data, otherwise defaults to 'RdBu_r'.
If 'interactive', translates to (None, True).
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib plot
format string (e.g., 'r+' for red plusses). If True, a circle will be
used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
res : int
The resolution of the topomap image (n pixels along each side).
size : scalar
Side length of the topomaps in inches (only applies when plotting
multiple topomaps at a time).
show : bool
Show figure if True.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask, and the 'autoshrink' (bool) field will trigger
automated shrinking of the positions due to points outside the outline.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw. If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used. All matplotlib options are
accepted.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of projectors. If instance of Axes,
there must be only one projector. Defaults to None.
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
Notes
-----
.. versionadded:: 0.9.0
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
if layout is None:
from ..channels import read_layout
layout = read_layout("Vectorview-all")
if not isinstance(layout, list):
layout = [layout]
n_projs = len(projs)
nrows = math.floor(math.sqrt(n_projs))
ncols = math.ceil(n_projs / nrows)
if cmap == "interactive":
cmap = (None, True)
elif not isinstance(cmap, tuple):
cmap = (cmap, True)
if axes is None:
plt.figure()
axes = list()
for idx in range(len(projs)):
ax = plt.subplot(nrows, ncols, idx + 1)
axes.append(ax)
elif isinstance(axes, plt.Axes):
axes = [axes]
if len(axes) != len(projs):
raise RuntimeError("There must be an axes for each picked projector.")
for proj_idx, proj in enumerate(projs):
axes[proj_idx].set_title(proj["desc"][:10] + "...")
ch_names = _clean_names(proj["data"]["col_names"])
data = proj["data"]["data"].ravel()
idx = []
for l in layout:
is_vv = l.kind.startswith("Vectorview")
if is_vv:
from ..channels.layout import _pair_grad_sensors_from_ch_names
grad_pairs = _pair_grad_sensors_from_ch_names(ch_names)
if grad_pairs:
ch_names = [ch_names[i] for i in grad_pairs]
idx = [l.names.index(c) for c in ch_names if c in l.names]
if len(idx) == 0:
continue
pos = l.pos[idx]
if is_vv and grad_pairs:
from ..channels.layout import _merge_grad_data
shape = (len(idx) // 2, 2, -1)
pos = pos.reshape(shape).mean(axis=1)
data = _merge_grad_data(data[grad_pairs]).ravel()
break
if len(idx):
im = plot_topomap(
data,
pos[:, :2],
vmax=None,
cmap=cmap[0],
sensors=sensors,
res=res,
axes=axes[proj_idx],
outlines=outlines,
contours=contours,
image_interp=image_interp,
show=False,
)[0]
if colorbar:
divider = make_axes_locatable(axes[proj_idx])
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = plt.colorbar(im, cax=cax, cmap=cmap)
if cmap[1]:
axes[proj_idx].CB = DraggableColorbar(cbar, im)
else:
raise RuntimeError(
"Cannot find a proper layout for projection %s" % proj["desc"]
)
tight_layout(fig=axes[0].get_figure())
plt_show(show)
return axes[0].get_figure()
|
def plot_projs_topomap(
projs,
layout=None,
cmap=None,
sensors=True,
colorbar=False,
res=64,
size=1,
show=True,
outlines="head",
contours=6,
image_interp="bilinear",
axes=None,
):
"""Plot topographic maps of SSP projections
Parameters
----------
projs : list of Projection
The projections
layout : None | Layout | list of Layout
Layout instance specifying sensor positions (does not need to be
specified for Neuromag data). Or a list of Layout if projections
are from different sensor types.
cmap : matplotlib colormap | None
Colormap to use. If None, 'Reds' is used for all positive data,
otherwise defaults to 'RdBu_r'.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib plot
format string (e.g., 'r+' for red plusses). If True, a circle will be
used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
res : int
The resolution of the topomap image (n pixels along each side).
size : scalar
Side length of the topomaps in inches (only applies when plotting
multiple topomaps at a time).
show : bool
Show figure if True.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask, and the 'autoshrink' (bool) field will trigger
automated shrinking of the positions due to points outside the outline.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw. If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used. All matplotlib options are
accepted.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of projectors. If instance of Axes,
there must be only one projector. Defaults to None.
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
Notes
-----
.. versionadded:: 0.9.0
"""
import matplotlib.pyplot as plt
if layout is None:
from ..channels import read_layout
layout = read_layout("Vectorview-all")
if not isinstance(layout, list):
layout = [layout]
n_projs = len(projs)
nrows = math.floor(math.sqrt(n_projs))
ncols = math.ceil(n_projs / nrows)
if axes is None:
plt.figure()
axes = list()
for idx in range(len(projs)):
ax = plt.subplot(nrows, ncols, idx + 1)
axes.append(ax)
elif isinstance(axes, plt.Axes):
axes = [axes]
if len(axes) != len(projs):
raise RuntimeError("There must be an axes for each picked projector.")
for proj_idx, proj in enumerate(projs):
axes[proj_idx].set_title(proj["desc"][:10] + "...")
ch_names = _clean_names(proj["data"]["col_names"])
data = proj["data"]["data"].ravel()
idx = []
for l in layout:
is_vv = l.kind.startswith("Vectorview")
if is_vv:
from ..channels.layout import _pair_grad_sensors_from_ch_names
grad_pairs = _pair_grad_sensors_from_ch_names(ch_names)
if grad_pairs:
ch_names = [ch_names[i] for i in grad_pairs]
idx = [l.names.index(c) for c in ch_names if c in l.names]
if len(idx) == 0:
continue
pos = l.pos[idx]
if is_vv and grad_pairs:
from ..channels.layout import _merge_grad_data
shape = (len(idx) // 2, 2, -1)
pos = pos.reshape(shape).mean(axis=1)
data = _merge_grad_data(data[grad_pairs]).ravel()
break
if len(idx):
plot_topomap(
data,
pos[:, :2],
vmax=None,
cmap=cmap,
sensors=sensors,
res=res,
axes=axes[proj_idx],
outlines=outlines,
contours=contours,
image_interp=image_interp,
show=False,
)
if colorbar:
plt.colorbar()
else:
raise RuntimeError(
"Cannot find a proper layout for projection %s" % proj["desc"]
)
tight_layout(fig=axes[0].get_figure())
plt_show(show)
return axes[0].get_figure()
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def plot_ica_components(
ica,
picks=None,
ch_type=None,
res=64,
layout=None,
vmin=None,
vmax=None,
cmap="RdBu_r",
sensors=True,
colorbar=False,
title=None,
show=True,
outlines="head",
contours=6,
image_interp="bilinear",
head_pos=None,
):
"""Project unmixing matrix on interpolated sensor topogrpahy.
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA solution.
picks : int | array-like | None
The indices of the sources to be plotted.
If None all are plotted in batches of 20.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
res : int
The resolution of the topomap image (n pixels along each side).
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the output
equals vmax(data). Defaults to None.
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap to
use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging the
colorbar with left and right mouse button. Left mouse button moves the
scale up and down and right mouse button adjusts the range. Hitting
space bar resets the range. Up and down arrows can be used to change
the colormap. If None, 'Reds' is used for all positive data,
otherwise defaults to 'RdBu_r'. If 'interactive', translates to
(None, True). Defaults to 'RdBu_r'.
.. warning:: Interactive mode works smoothly only for a small amount
of topomaps.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle
will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
title : str | None
Title to use.
show : bool
Show figure if True.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask, and the 'autoshrink' (bool) field will trigger
automated shrinking of the positions due to points outside the outline.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw. If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used. All matplotlib options are
accepted.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
Returns
-------
fig : instance of matplotlib.pyplot.Figure or list
The figure object(s).
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid import make_axes_locatable
from ..channels import _get_ch_type
if picks is None: # plot components by sets of 20
ch_type = _get_ch_type(ica, ch_type)
n_components = ica.mixing_matrix_.shape[1]
p = 20
figs = []
for k in range(0, n_components, p):
picks = range(k, min(k + p, n_components))
fig = plot_ica_components(
ica,
picks=picks,
ch_type=ch_type,
res=res,
layout=layout,
vmax=vmax,
cmap=cmap,
sensors=sensors,
colorbar=colorbar,
title=title,
show=show,
outlines=outlines,
contours=contours,
image_interp=image_interp,
)
figs.append(fig)
return figs
elif np.isscalar(picks):
picks = [picks]
ch_type = _get_ch_type(ica, ch_type)
if cmap == "interactive":
cmap = ("RdBu_r", True)
elif not isinstance(cmap, tuple):
if len(picks) > 2:
warn(
"Disabling interactive colorbar for multiple axes. Turn "
"interactivity on explicitly by passing cmap as a tuple."
)
cmap = (cmap, False)
else:
cmap = (cmap, True)
data = np.dot(
ica.mixing_matrix_[:, picks].T, ica.pca_components_[: ica.n_components_]
)
if ica.info is None:
raise RuntimeError(
"The ICA's measurement info is missing. Please "
"fit the ICA or add the corresponding info object."
)
data_picks, pos, merge_grads, names, _ = _prepare_topo_plot(ica, ch_type, layout)
pos, outlines = _check_outlines(pos, outlines, head_pos)
if outlines not in (None, "head"):
image_mask, pos = _make_image_mask(outlines, pos, res)
else:
image_mask = None
data = np.atleast_2d(data)
data = data[:, data_picks]
# prepare data for iteration
fig, axes = _prepare_trellis(len(data), max_col=5)
if title is None:
title = "ICA components"
fig.suptitle(title)
if merge_grads:
from ..channels.layout import _merge_grad_data
for ii, data_, ax in zip(picks, data, axes):
ax.set_title("IC #%03d" % ii, fontsize=12)
data_ = _merge_grad_data(data_) if merge_grads else data_
vmin_, vmax_ = _setup_vmin_vmax(data_, vmin, vmax)
im = plot_topomap(
data_.flatten(),
pos,
vmin=vmin_,
vmax=vmax_,
res=res,
axes=ax,
cmap=cmap[0],
outlines=outlines,
image_mask=image_mask,
contours=contours,
image_interp=image_interp,
show=False,
)[0]
if colorbar:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = plt.colorbar(im, cax=cax, format="%3.2f", cmap=cmap)
cbar.ax.tick_params(labelsize=12)
cbar.set_ticks((vmin_, vmax_))
cbar.ax.set_title("AU", fontsize=10)
if cmap[1]:
ax.CB = DraggableColorbar(cbar, im)
_hide_frame(ax)
tight_layout(fig=fig)
fig.subplots_adjust(top=0.95)
fig.canvas.draw()
plt_show(show)
return fig
|
def plot_ica_components(
ica,
picks=None,
ch_type=None,
res=64,
layout=None,
vmin=None,
vmax=None,
cmap="RdBu_r",
sensors=True,
colorbar=False,
title=None,
show=True,
outlines="head",
contours=6,
image_interp="bilinear",
head_pos=None,
):
"""Project unmixing matrix on interpolated sensor topogrpahy.
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA solution.
picks : int | array-like | None
The indices of the sources to be plotted.
If None all are plotted in batches of 20.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
res : int
The resolution of the topomap image (n pixels along each side).
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the output
equals vmax(data). Defaults to None.
cmap : matplotlib colormap
Colormap.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle
will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
title : str | None
Title to use.
show : bool
Show figure if True.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask, and the 'autoshrink' (bool) field will trigger
automated shrinking of the positions due to points outside the outline.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw. If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used. All matplotlib options are
accepted.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
Returns
-------
fig : instance of matplotlib.pyplot.Figure or list
The figure object(s).
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid import make_axes_locatable
from ..channels import _get_ch_type
if picks is None: # plot components by sets of 20
ch_type = _get_ch_type(ica, ch_type)
n_components = ica.mixing_matrix_.shape[1]
p = 20
figs = []
for k in range(0, n_components, p):
picks = range(k, min(k + p, n_components))
fig = plot_ica_components(
ica,
picks=picks,
ch_type=ch_type,
res=res,
layout=layout,
vmax=vmax,
cmap=cmap,
sensors=sensors,
colorbar=colorbar,
title=title,
show=show,
outlines=outlines,
contours=contours,
image_interp=image_interp,
)
figs.append(fig)
return figs
elif np.isscalar(picks):
picks = [picks]
ch_type = _get_ch_type(ica, ch_type)
data = np.dot(
ica.mixing_matrix_[:, picks].T, ica.pca_components_[: ica.n_components_]
)
if ica.info is None:
raise RuntimeError(
"The ICA's measurement info is missing. Please "
"fit the ICA or add the corresponding info object."
)
data_picks, pos, merge_grads, names, _ = _prepare_topo_plot(ica, ch_type, layout)
pos, outlines = _check_outlines(pos, outlines, head_pos)
if outlines not in (None, "head"):
image_mask, pos = _make_image_mask(outlines, pos, res)
else:
image_mask = None
data = np.atleast_2d(data)
data = data[:, data_picks]
# prepare data for iteration
fig, axes = _prepare_trellis(len(data), max_col=5)
if title is None:
title = "ICA components"
fig.suptitle(title)
if merge_grads:
from ..channels.layout import _merge_grad_data
for ii, data_, ax in zip(picks, data, axes):
ax.set_title("IC #%03d" % ii, fontsize=12)
data_ = _merge_grad_data(data_) if merge_grads else data_
vmin_, vmax_ = _setup_vmin_vmax(data_, vmin, vmax)
im = plot_topomap(
data_.flatten(),
pos,
vmin=vmin_,
vmax=vmax_,
res=res,
axes=ax,
cmap=cmap,
outlines=outlines,
image_mask=image_mask,
contours=contours,
image_interp=image_interp,
show=False,
)[0]
if colorbar:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = plt.colorbar(im, cax=cax, format="%3.2f", cmap=cmap)
cbar.ax.tick_params(labelsize=12)
cbar.set_ticks((vmin_, vmax_))
cbar.ax.set_title("AU", fontsize=10)
_hide_frame(ax)
tight_layout(fig=fig)
fig.subplots_adjust(top=0.95)
fig.canvas.draw()
plt_show(show)
return fig
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def plot_tfr_topomap(
tfr,
tmin=None,
tmax=None,
fmin=None,
fmax=None,
ch_type=None,
baseline=None,
mode="mean",
layout=None,
vmin=None,
vmax=None,
cmap=None,
sensors=True,
colorbar=True,
unit=None,
res=64,
size=2,
cbar_fmt="%1.1e",
show_names=False,
title=None,
axes=None,
show=True,
outlines="head",
head_pos=None,
):
"""Plot topographic maps of specific time-frequency intervals of TFR data
Parameters
----------
tfr : AvereageTFR
The AvereageTFR object.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
If None, baseline no correction will be performed.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout
file is inferred from the data; if no appropriate layout file
was found, the layout is automatically generated from the sensor
locations.
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data) or in case
data contains only positive values 0. If callable, the output equals
vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range. If None, the
maximum value is used. If callable, the output equals vmax(data).
Defaults to None.
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap to
use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging the
colorbar with left and right mouse button. Left mouse button moves the
scale up and down and right mouse button adjusts the range. Hitting
space bar resets the range. Up and down arrows can be used to change
the colormap. If None (default), 'Reds' is used for all positive data,
otherwise defaults to 'RdBu_r'. If 'interactive', translates to
(None, True).
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle will
be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
unit : str | None
The unit of the channel type used for colorbar labels.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g., to
delete the prefix 'MEG ' from all channel names, pass the function
lambda x: x.replace('MEG ', ''). If `mask` is not None, only
significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
axes : instance of Axis | None
The axes to plot to. If None the axes is defined automatically.
show : bool
Show figure if True.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask, and the 'autoshrink' (bool) field will trigger
automated shrinking of the positions due to points outside the outline.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..channels import _get_ch_type
ch_type = _get_ch_type(tfr, ch_type)
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
picks, pos, merge_grads, names, _ = _prepare_topo_plot(tfr, ch_type, layout)
if not show_names:
names = None
data = tfr.data
data = rescale(data, tfr.times, baseline, mode, copy=True)
# crop time
itmin, itmax = None, None
idx = np.where(_time_mask(tfr.times, tmin, tmax))[0]
if tmin is not None:
itmin = idx[0]
if tmax is not None:
itmax = idx[-1] + 1
# crop freqs
ifmin, ifmax = None, None
idx = np.where(_time_mask(tfr.freqs, fmin, fmax))[0]
if fmin is not None:
ifmin = idx[0]
if fmax is not None:
ifmax = idx[-1] + 1
data = data[picks, ifmin:ifmax, itmin:itmax]
data = np.mean(np.mean(data, axis=2), axis=1)[:, np.newaxis]
if merge_grads:
from ..channels.layout import _merge_grad_data
data = _merge_grad_data(data)
norm = False if np.min(data) < 0 else True
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax, norm)
if cmap is None or cmap == "interactive":
cmap = ("Reds", True) if norm else ("RdBu_r", True)
elif not isinstance(cmap, tuple):
cmap = (cmap, True)
if axes is None:
fig = plt.figure()
ax = fig.gca()
else:
fig = axes.figure
ax = axes
_hide_frame(ax)
if title is not None:
ax.set_title(title)
fig_wrapper = list()
selection_callback = partial(
_onselect,
tfr=tfr,
pos=pos,
ch_type=ch_type,
itmin=itmin,
itmax=itmax,
ifmin=ifmin,
ifmax=ifmax,
cmap=cmap[0],
fig=fig_wrapper,
layout=layout,
)
im, _ = plot_topomap(
data[:, 0],
pos,
vmin=vmin,
vmax=vmax,
axes=ax,
cmap=cmap[0],
image_interp="bilinear",
contours=False,
names=names,
show_names=show_names,
show=False,
onselect=selection_callback,
)
if colorbar:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = plt.colorbar(im, cax=cax, format=cbar_fmt, cmap=cmap[0])
cbar.set_ticks((vmin, vmax))
cbar.ax.tick_params(labelsize=12)
cbar.ax.set_title("AU")
if cmap[1]:
ax.CB = DraggableColorbar(cbar, im)
plt_show(show)
return fig
|
def plot_tfr_topomap(
tfr,
tmin=None,
tmax=None,
fmin=None,
fmax=None,
ch_type=None,
baseline=None,
mode="mean",
layout=None,
vmin=None,
vmax=None,
cmap=None,
sensors=True,
colorbar=True,
unit=None,
res=64,
size=2,
cbar_fmt="%1.1e",
show_names=False,
title=None,
axes=None,
show=True,
outlines="head",
head_pos=None,
):
"""Plot topographic maps of specific time-frequency intervals of TFR data
Parameters
----------
tfr : AvereageTFR
The AvereageTFR object.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
If None, baseline no correction will be performed.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout
file is inferred from the data; if no appropriate layout file
was found, the layout is automatically generated from the sensor
locations.
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data) or in case
data contains only positive values 0. If callable, the output equals
vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range. If None, the
maximum value is used. If callable, the output equals vmax(data).
Defaults to None.
cmap : matplotlib colormap | None
Colormap. If None and the plotted data is all positive, defaults to
'Reds'. If None and data contains also negative values, defaults to
'RdBu_r'. Defaults to None.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle will
be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
unit : str | None
The unit of the channel type used for colorbar labels.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g., to
delete the prefix 'MEG ' from all channel names, pass the function
lambda x: x.replace('MEG ', ''). If `mask` is not None, only
significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
axes : instance of Axis | None
The axes to plot to. If None the axes is defined automatically.
show : bool
Show figure if True.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask, and the 'autoshrink' (bool) field will trigger
automated shrinking of the positions due to points outside the outline.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..channels import _get_ch_type
ch_type = _get_ch_type(tfr, ch_type)
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
picks, pos, merge_grads, names, _ = _prepare_topo_plot(tfr, ch_type, layout)
if not show_names:
names = None
data = tfr.data
data = rescale(data, tfr.times, baseline, mode, copy=True)
# crop time
itmin, itmax = None, None
idx = np.where(_time_mask(tfr.times, tmin, tmax))[0]
if tmin is not None:
itmin = idx[0]
if tmax is not None:
itmax = idx[-1] + 1
# crop freqs
ifmin, ifmax = None, None
idx = np.where(_time_mask(tfr.freqs, fmin, fmax))[0]
if fmin is not None:
ifmin = idx[0]
if fmax is not None:
ifmax = idx[-1] + 1
data = data[picks, ifmin:ifmax, itmin:itmax]
data = np.mean(np.mean(data, axis=2), axis=1)[:, np.newaxis]
if merge_grads:
from ..channels.layout import _merge_grad_data
data = _merge_grad_data(data)
norm = False if np.min(data) < 0 else True
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax, norm)
if cmap is None:
cmap = "Reds" if norm else "RdBu_r"
if axes is None:
fig = plt.figure()
ax = fig.gca()
else:
fig = axes.figure
ax = axes
_hide_frame(ax)
if title is not None:
ax.set_title(title)
fig_wrapper = list()
selection_callback = partial(
_onselect,
tfr=tfr,
pos=pos,
ch_type=ch_type,
itmin=itmin,
itmax=itmax,
ifmin=ifmin,
ifmax=ifmax,
cmap=cmap,
fig=fig_wrapper,
layout=layout,
)
im, _ = plot_topomap(
data[:, 0],
pos,
vmin=vmin,
vmax=vmax,
axes=ax,
cmap=cmap,
image_interp="bilinear",
contours=False,
names=names,
show_names=show_names,
show=False,
onselect=selection_callback,
)
if colorbar:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = plt.colorbar(im, cax=cax, format=cbar_fmt, cmap=cmap)
cbar.set_ticks((vmin, vmax))
cbar.ax.tick_params(labelsize=12)
cbar.ax.set_title("AU")
plt_show(show)
return fig
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def plot_evoked_topomap(
evoked,
times="auto",
ch_type=None,
layout=None,
vmin=None,
vmax=None,
cmap=None,
sensors=True,
colorbar=True,
scale=None,
scale_time=1e3,
unit=None,
res=64,
size=1,
cbar_fmt="%3.1f",
time_format="%01d ms",
proj=False,
show=True,
show_names=False,
title=None,
mask=None,
mask_params=None,
outlines="head",
contours=6,
image_interp="bilinear",
average=None,
head_pos=None,
axes=None,
):
"""Plot topographic maps of specific time points of evoked data
Parameters
----------
evoked : Evoked
The Evoked object.
times : float | array of floats | "auto" | "peaks".
The time point(s) to plot. If "auto", the number of ``axes`` determines
the amount of time point(s). If ``axes`` is also None, 10 topographies
will be shown with a regular time spacing between the first and last
time instant. If "peaks", finds time points automatically by checking
for local maxima in global field power.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are collected in
pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout file
is inferred from the data; if no appropriate layout file was found, the
layout is automatically generated from the sensor locations.
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the output
equals vmax(data). Defaults to None.
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap to
use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging the
colorbar with left and right mouse button. Left mouse button moves the
scale up and down and right mouse button adjusts the range. Hitting
space bar resets the range. Up and down arrows can be used to change
the colormap. If None (default), 'Reds' is used for all positive data,
otherwise defaults to 'RdBu_r'. If 'interactive', translates to
(None, True).
.. warning:: Interactive mode works smoothly only for a small amount
of topomaps.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib plot
format string (e.g., 'r+' for red plusses). If True, a circle will be
used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scale : dict | float | None
Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
for grad and 1e15 for mag.
scale_time : float | None
Scale the time labels. Defaults to 1e3 (ms).
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
time_format : str
String format for topomap values. Defaults to "%01d ms"
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be show.
show : bool
Show figure if True.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g., to
delete the prefix 'MEG ' from all channel names, pass the function
lambda x: x.replace('MEG ', ''). If `mask` is not None, only
significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask, and the 'autoshrink' (bool) field will trigger
automated shrinking of the positions due to points outside the outline.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw. If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used. All matplotlib options are
accepted.
average : float | None
The time window around a given time to be used for averaging (seconds).
For example, 0.01 would translate into window that starts 5 ms before
and ends 5 ms after a given time point. Defaults to None, which means
no averaging.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of the
same length as ``times`` (unless ``times`` is None). If instance of
Axes, ``times`` must be a float or a list of one float.
Defaults to None.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from ..channels import _get_ch_type
ch_type = _get_ch_type(evoked, ch_type)
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable # noqa
mask_params = _handle_default("mask_params", mask_params)
mask_params["markersize"] *= size / 2.0
mask_params["markeredgewidth"] *= size / 2.0
picks, pos, merge_grads, names, ch_type = _prepare_topo_plot(
evoked, ch_type, layout
)
# project before picks
if proj is True and evoked.proj is not True:
data = evoked.copy().apply_proj().data
else:
data = evoked.data
evoked = evoked.copy().pick_channels([evoked.ch_names[pick] for pick in picks])
if axes is not None:
if isinstance(axes, plt.Axes):
axes = [axes]
times = _process_times(evoked, times, n_peaks=len(axes))
else:
times = _process_times(evoked, times, n_peaks=None)
space = 1 / (2.0 * evoked.info["sfreq"])
if max(times) > max(evoked.times) + space or min(times) < min(evoked.times) - space:
raise ValueError(
"Times should be between {0:0.3f} and {1:0.3f}.".format(
evoked.times[0], evoked.times[-1]
)
)
n_times = len(times)
nax = n_times + bool(colorbar)
width = size * nax
height = size + max(0, 0.1 * (4 - size)) + bool(title) * 0.5
if axes is None:
plt.figure(figsize=(width, height))
axes = list()
for ax_idx in range(len(times)):
if colorbar: # Make room for the colorbar
axes.append(plt.subplot(1, n_times + 1, ax_idx + 1))
else:
axes.append(plt.subplot(1, n_times, ax_idx + 1))
elif colorbar:
warn(
"Colorbar is drawn to the rightmost column of the figure. Be "
"sure to provide enough space for it or turn it off with "
"colorbar=False."
)
if len(axes) != n_times:
raise RuntimeError("Axes and times must be equal in sizes.")
if ch_type.startswith("planar"):
key = "grad"
else:
key = ch_type
scale = _handle_default("scalings", scale)[key]
unit = _handle_default("units", unit)[key]
if not show_names:
names = None
w_frame = plt.rcParams["figure.subplot.wspace"] / (2 * nax)
top_frame = max((0.05 if title is None else 0.25), 0.2 / size)
fig = axes[0].get_figure()
fig.subplots_adjust(left=w_frame, right=1 - w_frame, bottom=0, top=1 - top_frame)
# find first index that's >= (to rounding error) to each time point
time_idx = [
np.where(
_time_mask(evoked.times, tmin=t, tmax=None, sfreq=evoked.info["sfreq"])
)[0][0]
for t in times
]
if average is None:
data = data[np.ix_(picks, time_idx)]
elif isinstance(average, float):
if not average > 0:
raise ValueError(
"The average parameter must be positive. You passed a negative value"
)
data_ = np.zeros((len(picks), len(time_idx)))
ave_time = float(average) / 2.0
iter_times = evoked.times[time_idx]
for ii, (idx, tmin_, tmax_) in enumerate(
zip(time_idx, iter_times - ave_time, iter_times + ave_time)
):
my_range = (tmin_ < evoked.times) & (evoked.times < tmax_)
data_[:, ii] = data[picks][:, my_range].mean(-1)
data = data_
else:
raise ValueError(
"The average parameter must be None or a float.Check your input."
)
data *= scale
if merge_grads:
from ..channels.layout import _merge_grad_data
data = _merge_grad_data(data)
images, contours_ = [], []
if mask is not None:
_picks = picks[:: 2 if ch_type not in ["mag", "eeg"] else 1]
mask_ = mask[np.ix_(_picks, time_idx)]
pos, outlines = _check_outlines(pos, outlines, head_pos)
if outlines is not None:
image_mask, pos = _make_image_mask(outlines, pos, res)
else:
image_mask = None
vlims = [
_setup_vmin_vmax(data[:, i], vmin, vmax, norm=merge_grads)
for i in range(len(times))
]
vmin = np.min(vlims)
vmax = np.max(vlims)
if cmap == "interactive":
cmap = (None, True)
elif not isinstance(cmap, tuple):
if len(times) > 2:
warn(
"Disabling interactive colorbar for multiple axes. Turn "
"interactivity on explicitly by passing cmap as a tuple."
)
cmap = (cmap, False)
else:
cmap = (cmap, True)
for idx, time in enumerate(times):
tp, cn = plot_topomap(
data[:, idx],
pos,
vmin=vmin,
vmax=vmax,
sensors=sensors,
res=res,
names=names,
show_names=show_names,
cmap=cmap[0],
mask=mask_[:, idx] if mask is not None else None,
mask_params=mask_params,
axes=axes[idx],
outlines=outlines,
image_mask=image_mask,
contours=contours,
image_interp=image_interp,
show=False,
)
images.append(tp)
if cn is not None:
contours_.append(cn)
if time_format is not None:
axes[idx].set_title(time_format % (time * scale_time))
if title is not None:
plt.suptitle(title, verticalalignment="top", size="x-large")
if colorbar:
# works both when fig axes pre-defined and when not
n_fig_axes = max(nax, len(fig.get_axes()))
cax = plt.subplot(1, n_fig_axes + 1, n_fig_axes + 1)
# resize the colorbar (by default the color fills the whole axes)
cpos = cax.get_position()
if size <= 1:
cpos.x0 = 1 - (0.7 + 0.1 / size) / n_fig_axes
cpos.x1 = cpos.x0 + 0.1 / n_fig_axes
cpos.y0 = 0.2
cpos.y1 = 0.7
cax.set_position(cpos)
if unit is not None:
cax.set_title(unit)
cbar = fig.colorbar(images[-1], ax=cax, cax=cax, format=cbar_fmt)
cbar.set_ticks([cbar.vmin, 0, cbar.vmax])
if cmap[1]:
for im in images:
im.axes.CB = DraggableColorbar(cbar, im)
if proj == "interactive":
_check_delayed_ssp(evoked)
params = dict(
evoked=evoked,
fig=fig,
projs=evoked.info["projs"],
picks=picks,
images=images,
contours=contours_,
time_idx=time_idx,
scale=scale,
merge_grads=merge_grads,
res=res,
pos=pos,
image_mask=image_mask,
plot_update_proj_callback=_plot_update_evoked_topomap,
)
_draw_proj_checkbox(None, params)
plt_show(show)
return fig
|
def plot_evoked_topomap(
evoked,
times="auto",
ch_type=None,
layout=None,
vmin=None,
vmax=None,
cmap=None,
sensors=True,
colorbar=True,
scale=None,
scale_time=1e3,
unit=None,
res=64,
size=1,
cbar_fmt="%3.1f",
time_format="%01d ms",
proj=False,
show=True,
show_names=False,
title=None,
mask=None,
mask_params=None,
outlines="head",
contours=6,
image_interp="bilinear",
average=None,
head_pos=None,
axes=None,
):
"""Plot topographic maps of specific time points of evoked data
Parameters
----------
evoked : Evoked
The Evoked object.
times : float | array of floats | "auto" | "peaks".
The time point(s) to plot. If "auto", the number of ``axes`` determines
the amount of time point(s). If ``axes`` is also None, 10 topographies
will be shown with a regular time spacing between the first and last
time instant. If "peaks", finds time points automatically by checking
for local maxima in global field power.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are collected in
pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout file
is inferred from the data; if no appropriate layout file was found, the
layout is automatically generated from the sensor locations.
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the output
equals vmax(data). Defaults to None.
cmap : matplotlib colormap | None
Colormap to use. If None, 'Reds' is used for all positive data,
otherwise defaults to 'RdBu_r'.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib plot
format string (e.g., 'r+' for red plusses). If True, a circle will be
used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scale : dict | float | None
Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
for grad and 1e15 for mag.
scale_time : float | None
Scale the time labels. Defaults to 1e3 (ms).
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
time_format : str
String format for topomap values. Defaults to "%01d ms"
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be show.
show : bool
Show figure if True.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g., to
delete the prefix 'MEG ' from all channel names, pass the function
lambda x: x.replace('MEG ', ''). If `mask` is not None, only
significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask, and the 'autoshrink' (bool) field will trigger
automated shrinking of the positions due to points outside the outline.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw. If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used. All matplotlib options are
accepted.
average : float | None
The time window around a given time to be used for averaging (seconds).
For example, 0.01 would translate into window that starts 5 ms before
and ends 5 ms after a given time point. Defaults to None, which means
no averaging.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of the
same length as ``times`` (unless ``times`` is None). If instance of
Axes, ``times`` must be a float or a list of one float.
Defaults to None.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from ..channels import _get_ch_type
ch_type = _get_ch_type(evoked, ch_type)
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable # noqa
mask_params = _handle_default("mask_params", mask_params)
mask_params["markersize"] *= size / 2.0
mask_params["markeredgewidth"] *= size / 2.0
picks, pos, merge_grads, names, ch_type = _prepare_topo_plot(
evoked, ch_type, layout
)
# project before picks
if proj is True and evoked.proj is not True:
data = evoked.copy().apply_proj().data
else:
data = evoked.data
evoked = evoked.copy().pick_channels([evoked.ch_names[pick] for pick in picks])
if axes is not None:
if isinstance(axes, plt.Axes):
axes = [axes]
times = _process_times(evoked, times, n_peaks=len(axes))
else:
times = _process_times(evoked, times, n_peaks=None)
space = 1 / (2.0 * evoked.info["sfreq"])
if max(times) > max(evoked.times) + space or min(times) < min(evoked.times) - space:
raise ValueError(
"Times should be between {0:0.3f} and {1:0.3f}.".format(
evoked.times[0], evoked.times[-1]
)
)
n_times = len(times)
nax = n_times + bool(colorbar)
width = size * nax
height = size + max(0, 0.1 * (4 - size)) + bool(title) * 0.5
if axes is None:
plt.figure(figsize=(width, height))
axes = list()
for ax_idx in range(len(times)):
if colorbar: # Make room for the colorbar
axes.append(plt.subplot(1, n_times + 1, ax_idx + 1))
else:
axes.append(plt.subplot(1, n_times, ax_idx + 1))
elif colorbar:
warn(
"Colorbar is drawn to the rightmost column of the figure. Be "
"sure to provide enough space for it or turn it off with "
"colorbar=False."
)
if len(axes) != n_times:
raise RuntimeError("Axes and times must be equal in sizes.")
if ch_type.startswith("planar"):
key = "grad"
else:
key = ch_type
scale = _handle_default("scalings", scale)[key]
unit = _handle_default("units", unit)[key]
if not show_names:
names = None
w_frame = plt.rcParams["figure.subplot.wspace"] / (2 * nax)
top_frame = max((0.05 if title is None else 0.25), 0.2 / size)
fig = axes[0].get_figure()
fig.subplots_adjust(left=w_frame, right=1 - w_frame, bottom=0, top=1 - top_frame)
# find first index that's >= (to rounding error) to each time point
time_idx = [
np.where(
_time_mask(evoked.times, tmin=t, tmax=None, sfreq=evoked.info["sfreq"])
)[0][0]
for t in times
]
if average is None:
data = data[np.ix_(picks, time_idx)]
elif isinstance(average, float):
if not average > 0:
raise ValueError(
"The average parameter must be positive. You passed a negative value"
)
data_ = np.zeros((len(picks), len(time_idx)))
ave_time = float(average) / 2.0
iter_times = evoked.times[time_idx]
for ii, (idx, tmin_, tmax_) in enumerate(
zip(time_idx, iter_times - ave_time, iter_times + ave_time)
):
my_range = (tmin_ < evoked.times) & (evoked.times < tmax_)
data_[:, ii] = data[picks][:, my_range].mean(-1)
data = data_
else:
raise ValueError(
"The average parameter must be None or a float.Check your input."
)
data *= scale
if merge_grads:
from ..channels.layout import _merge_grad_data
data = _merge_grad_data(data)
images, contours_ = [], []
if mask is not None:
_picks = picks[:: 2 if ch_type not in ["mag", "eeg"] else 1]
mask_ = mask[np.ix_(_picks, time_idx)]
pos, outlines = _check_outlines(pos, outlines, head_pos)
if outlines is not None:
image_mask, pos = _make_image_mask(outlines, pos, res)
else:
image_mask = None
vlims = [
_setup_vmin_vmax(data[:, i], vmin, vmax, norm=merge_grads)
for i in range(len(times))
]
vmin = np.min(vlims)
vmax = np.max(vlims)
for idx, time in enumerate(times):
tp, cn = plot_topomap(
data[:, idx],
pos,
vmin=vmin,
vmax=vmax,
sensors=sensors,
res=res,
names=names,
show_names=show_names,
cmap=cmap,
mask=mask_[:, idx] if mask is not None else None,
mask_params=mask_params,
axes=axes[idx],
outlines=outlines,
image_mask=image_mask,
contours=contours,
image_interp=image_interp,
show=False,
)
images.append(tp)
if cn is not None:
contours_.append(cn)
if time_format is not None:
axes[idx].set_title(time_format % (time * scale_time))
if title is not None:
plt.suptitle(title, verticalalignment="top", size="x-large")
if colorbar:
# works both when fig axes pre-defined and when not
n_fig_axes = max(nax, len(fig.get_axes()))
cax = plt.subplot(1, n_fig_axes + 1, n_fig_axes + 1)
# resize the colorbar (by default the color fills the whole axes)
cpos = cax.get_position()
if size <= 1:
cpos.x0 = 1 - (0.7 + 0.1 / size) / n_fig_axes
cpos.x1 = cpos.x0 + 0.1 / n_fig_axes
cpos.y0 = 0.2
cpos.y1 = 0.7
cax.set_position(cpos)
if unit is not None:
cax.set_title(unit)
cbar = fig.colorbar(images[-1], ax=cax, cax=cax, format=cbar_fmt)
cbar.set_ticks([cbar.vmin, 0, cbar.vmax])
if proj == "interactive":
_check_delayed_ssp(evoked)
params = dict(
evoked=evoked,
fig=fig,
projs=evoked.info["projs"],
picks=picks,
images=images,
contours=contours_,
time_idx=time_idx,
scale=scale,
merge_grads=merge_grads,
res=res,
pos=pos,
image_mask=image_mask,
plot_update_proj_callback=_plot_update_evoked_topomap,
)
_draw_proj_checkbox(None, params)
plt_show(show)
return fig
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def _plot_topomap_multi_cbar(
data,
pos,
ax,
title=None,
unit=None,
vmin=None,
vmax=None,
cmap=None,
outlines="head",
colorbar=False,
cbar_fmt="%3.3f",
):
"""Aux Function"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
_hide_frame(ax)
vmin = np.min(data) if vmin is None else vmin
vmax = np.max(data) if vmax is None else vmax
if cmap == "interactive":
cmap = (None, True)
elif not isinstance(cmap, tuple):
cmap = (cmap, True)
if title is not None:
ax.set_title(title, fontsize=10)
im, _ = plot_topomap(
data,
pos,
vmin=vmin,
vmax=vmax,
axes=ax,
cmap=cmap[0],
image_interp="bilinear",
contours=False,
outlines=outlines,
show=False,
)
if colorbar is True:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="10%", pad=0.25)
cbar = plt.colorbar(im, cax=cax, format=cbar_fmt)
cbar.set_ticks((vmin, vmax))
if unit is not None:
cbar.ax.set_title(unit, fontsize=8)
cbar.ax.tick_params(labelsize=8)
if cmap[1]:
ax.CB = DraggableColorbar(cbar, im)
|
def _plot_topomap_multi_cbar(
data,
pos,
ax,
title=None,
unit=None,
vmin=None,
vmax=None,
cmap="RdBu_r",
colorbar=False,
cbar_fmt="%3.3f",
):
"""Aux Function"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
_hide_frame(ax)
vmin = np.min(data) if vmin is None else vmin
vmax = np.max(data) if vmax is None else vmax
if title is not None:
ax.set_title(title, fontsize=10)
im, _ = plot_topomap(
data,
pos,
vmin=vmin,
vmax=vmax,
axes=ax,
cmap=cmap,
image_interp="bilinear",
contours=False,
show=False,
)
if colorbar is True:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="10%", pad=0.25)
cbar = plt.colorbar(im, cax=cax, format=cbar_fmt)
cbar.set_ticks((vmin, vmax))
if unit is not None:
cbar.ax.set_title(unit, fontsize=8)
cbar.ax.tick_params(labelsize=8)
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def plot_epochs_psd_topomap(
epochs,
bands=None,
vmin=None,
vmax=None,
tmin=None,
tmax=None,
proj=False,
bandwidth=None,
adaptive=False,
low_bias=True,
normalization="length",
ch_type=None,
layout=None,
cmap="RdBu_r",
agg_fun=None,
dB=False,
n_jobs=1,
normalize=False,
cbar_fmt="%0.3f",
outlines="head",
show=True,
verbose=None,
):
"""Plot the topomap of the power spectral density across epochs
Parameters
----------
epochs : instance of Epochs
The epochs object
bands : list of tuple | None
The lower and upper frequency and the name for that band. If None,
(default) expands to:
bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
(12, 30, 'Beta'), (30, 45, 'Gamma')]
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None np.min(data) is used. If callable, the output equals
vmin(data).
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the output
equals vmax(data). Defaults to None.
tmin : float | None
Start time to consider.
tmax : float | None
End time to consider.
proj : bool
Apply projection.
bandwidth : float
The bandwidth of the multi taper windowing function in Hz. The default
value is a window half-bandwidth of 4 Hz.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90% spectral concentration within
bandwidth.
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are collected in
pairs and the RMS for each pair is plotted. If None, then first
available channel type from order given above is used. Defaults to
None.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout
file is inferred from the data; if no appropriate layout file was
found, the layout is automatically generated from the sensor
locations.
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap to
use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging the
colorbar with left and right mouse button. Left mouse button moves the
scale up and down and right mouse button adjusts the range. Hitting
space bar resets the range. Up and down arrows can be used to change
the colormap. If None (default), 'Reds' is used for all positive data,
otherwise defaults to 'RdBu_r'. If 'interactive', translates to
(None, True).
agg_fun : callable
The function used to aggregate over frequencies.
Defaults to np.sum. if normalize is True, else np.mean.
dB : bool
If True, transform data to decibels (with ``10 * np.log10(data)``)
following the application of `agg_fun`. Only valid if normalize is
False.
n_jobs : int
Number of jobs to run in parallel.
normalize : bool
If True, each band will be divided by the total power. Defaults to
False.
cbar_fmt : str
The colorbar format. Defaults to '%0.3f'.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask, and the 'autoshrink' (bool) field will trigger
automated shrinking of the positions due to points outside the outline.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
show : bool
Show figure if True.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
from ..channels import _get_ch_type
ch_type = _get_ch_type(epochs, ch_type)
picks, pos, merge_grads, names, ch_type = _prepare_topo_plot(
epochs, ch_type, layout
)
psds, freqs = psd_multitaper(
epochs,
tmin=tmin,
tmax=tmax,
bandwidth=bandwidth,
adaptive=adaptive,
low_bias=low_bias,
normalization=normalization,
picks=picks,
proj=proj,
n_jobs=n_jobs,
)
psds = np.mean(psds, axis=0)
if merge_grads:
from ..channels.layout import _merge_grad_data
psds = _merge_grad_data(psds)
return plot_psds_topomap(
psds=psds,
freqs=freqs,
pos=pos,
agg_fun=agg_fun,
vmin=vmin,
vmax=vmax,
bands=bands,
cmap=cmap,
dB=dB,
normalize=normalize,
cbar_fmt=cbar_fmt,
outlines=outlines,
show=show,
)
|
def plot_epochs_psd_topomap(
epochs,
bands=None,
vmin=None,
vmax=None,
tmin=None,
tmax=None,
proj=False,
bandwidth=None,
adaptive=False,
low_bias=True,
normalization="length",
ch_type=None,
layout=None,
cmap="RdBu_r",
agg_fun=None,
dB=False,
n_jobs=1,
normalize=False,
cbar_fmt="%0.3f",
outlines="head",
show=True,
verbose=None,
):
"""Plot the topomap of the power spectral density across epochs
Parameters
----------
epochs : instance of Epochs
The epochs object
bands : list of tuple | None
The lower and upper frequency and the name for that band. If None,
(default) expands to:
bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
(12, 30, 'Beta'), (30, 45, 'Gamma')]
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None np.min(data) is used. If callable, the output equals
vmin(data).
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the output
equals vmax(data). Defaults to None.
tmin : float | None
Start time to consider.
tmax : float | None
End time to consider.
proj : bool
Apply projection.
bandwidth : float
The bandwidth of the multi taper windowing function in Hz. The default
value is a window half-bandwidth of 4 Hz.
adaptive : bool
Use adaptive weights to combine the tapered spectra into PSD
(slow, use n_jobs >> 1 to speed up computation).
low_bias : bool
Only use tapers with more than 90% spectral concentration within
bandwidth.
normalization : str
Either "full" or "length" (default). If "full", the PSD will
be normalized by the sampling rate as well as the length of
the signal (as in nitime).
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are collected in
pairs and the RMS for each pair is plotted. If None, then first
available channel type from order given above is used. Defaults to
None.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout
file is inferred from the data; if no appropriate layout file was
found, the layout is automatically generated from the sensor
locations.
cmap : matplotlib colormap
Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
'Reds'.
agg_fun : callable
The function used to aggregate over frequencies.
Defaults to np.sum. if normalize is True, else np.mean.
dB : bool
If True, transform data to decibels (with ``10 * np.log10(data)``)
following the application of `agg_fun`. Only valid if normalize is
False.
n_jobs : int
Number of jobs to run in parallel.
normalize : bool
If True, each band will be divided by the total power. Defaults to
False.
cbar_fmt : str
The colorbar format. Defaults to '%0.3f'.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask, and the 'autoshrink' (bool) field will trigger
automated shrinking of the positions due to points outside the outline.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
show : bool
Show figure if True.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
from ..channels import _get_ch_type
ch_type = _get_ch_type(epochs, ch_type)
picks, pos, merge_grads, names, ch_type = _prepare_topo_plot(
epochs, ch_type, layout
)
psds, freqs = psd_multitaper(
epochs,
tmin=tmin,
tmax=tmax,
bandwidth=bandwidth,
adaptive=adaptive,
low_bias=low_bias,
normalization=normalization,
picks=picks,
proj=proj,
n_jobs=n_jobs,
)
psds = np.mean(psds, axis=0)
if merge_grads:
from ..channels.layout import _merge_grad_data
psds = _merge_grad_data(psds)
return plot_psds_topomap(
psds=psds,
freqs=freqs,
pos=pos,
agg_fun=agg_fun,
vmin=vmin,
vmax=vmax,
bands=bands,
cmap=cmap,
dB=dB,
normalize=normalize,
cbar_fmt=cbar_fmt,
outlines=outlines,
show=show,
)
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def plot_psds_topomap(
psds,
freqs,
pos,
agg_fun=None,
vmin=None,
vmax=None,
bands=None,
cmap=None,
dB=True,
normalize=False,
cbar_fmt="%0.3f",
outlines="head",
show=True,
):
"""Plot spatial maps of PSDs
Parameters
----------
psds : np.ndarray of float, shape (n_channels, n_freqs)
Power spectral densities
freqs : np.ndarray of float, shape (n_freqs)
Frequencies used to compute psds.
pos : numpy.ndarray of float, shape (n_sensors, 2)
The positions of the sensors.
agg_fun : callable
The function used to aggregate over frequencies.
Defaults to np.sum. if normalize is True, else np.mean.
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None np.min(data) is used. If callable, the output equals
vmin(data).
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the output
equals vmax(data). Defaults to None.
bands : list of tuple | None
The lower and upper frequency and the name for that band. If None,
(default) expands to:
bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
(12, 30, 'Beta'), (30, 45, 'Gamma')]
cmap : matplotlib colormap | (colormap, bool) | 'interactive' | None
Colormap to use. If tuple, the first value indicates the colormap to
use and the second value is a boolean defining interactivity. In
interactive mode the colors are adjustable by clicking and dragging the
colorbar with left and right mouse button. Left mouse button moves the
scale up and down and right mouse button adjusts the range. Hitting
space bar resets the range. Up and down arrows can be used to change
the colormap. If None (default), 'Reds' is used for all positive data,
otherwise defaults to 'RdBu_r'. If 'interactive', translates to
(None, True).
dB : bool
If True, transform data to decibels (with ``10 * np.log10(data)``)
following the application of `agg_fun`. Only valid if normalize is
False.
normalize : bool
If True, each band will be divided by the total power. Defaults to
False.
cbar_fmt : str
The colorbar format. Defaults to '%0.3f'.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask, and the 'autoshrink' (bool) field will trigger
automated shrinking of the positions due to points outside the outline.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
import matplotlib.pyplot as plt
if bands is None:
bands = [
(0, 4, "Delta"),
(4, 8, "Theta"),
(8, 12, "Alpha"),
(12, 30, "Beta"),
(30, 45, "Gamma"),
]
if agg_fun is None:
agg_fun = np.sum if normalize is True else np.mean
if normalize is True:
psds /= psds.sum(axis=-1)[..., None]
assert np.allclose(psds.sum(axis=-1), 1.0)
n_axes = len(bands)
fig, axes = plt.subplots(1, n_axes, figsize=(2 * n_axes, 1.5))
if n_axes == 1:
axes = [axes]
for ax, (fmin, fmax, title) in zip(axes, bands):
freq_mask = (fmin < freqs) & (freqs < fmax)
if freq_mask.sum() == 0:
raise RuntimeError(
'No frequencies in band "%s" (%s, %s)' % (title, fmin, fmax)
)
data = agg_fun(psds[:, freq_mask], axis=1)
if dB is True and normalize is False:
data = 10 * np.log10(data)
unit = "dB"
else:
unit = "power"
_plot_topomap_multi_cbar(
data,
pos,
ax,
title=title,
vmin=vmin,
vmax=vmax,
cmap=cmap,
outlines=outlines,
colorbar=True,
unit=unit,
cbar_fmt=cbar_fmt,
)
tight_layout(fig=fig)
fig.canvas.draw()
plt_show(show)
return fig
|
def plot_psds_topomap(
psds,
freqs,
pos,
agg_fun=None,
vmin=None,
vmax=None,
bands=None,
cmap="RdBu_r",
dB=True,
normalize=False,
cbar_fmt="%0.3f",
outlines="head",
show=True,
):
"""Plot spatial maps of PSDs
Parameters
----------
psds : np.ndarray of float, shape (n_channels, n_freqs)
Power spectral densities
freqs : np.ndarray of float, shape (n_freqs)
Frequencies used to compute psds.
pos : numpy.ndarray of float, shape (n_sensors, 2)
The positions of the sensors.
agg_fun : callable
The function used to aggregate over frequencies.
Defaults to np.sum. if normalize is True, else np.mean.
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None np.min(data) is used. If callable, the output equals
vmin(data).
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the output
equals vmax(data). Defaults to None.
bands : list of tuple | None
The lower and upper frequency and the name for that band. If None,
(default) expands to:
bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
(12, 30, 'Beta'), (30, 45, 'Gamma')]
cmap : matplotlib colormap
Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
'Reds'.
dB : bool
If True, transform data to decibels (with ``10 * np.log10(data)``)
following the application of `agg_fun`. Only valid if normalize is
False.
normalize : bool
If True, each band will be divided by the total power. Defaults to
False.
cbar_fmt : str
The colorbar format. Defaults to '%0.3f'.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask, and the 'autoshrink' (bool) field will trigger
automated shrinking of the positions due to points outside the outline.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
"""
import matplotlib.pyplot as plt
if bands is None:
bands = [
(0, 4, "Delta"),
(4, 8, "Theta"),
(8, 12, "Alpha"),
(12, 30, "Beta"),
(30, 45, "Gamma"),
]
if agg_fun is None:
agg_fun = np.sum if normalize is True else np.mean
if normalize is True:
psds /= psds.sum(axis=-1)[..., None]
assert np.allclose(psds.sum(axis=-1), 1.0)
n_axes = len(bands)
fig, axes = plt.subplots(1, n_axes, figsize=(2 * n_axes, 1.5))
if n_axes == 1:
axes = [axes]
for ax, (fmin, fmax, title) in zip(axes, bands):
freq_mask = (fmin < freqs) & (freqs < fmax)
if freq_mask.sum() == 0:
raise RuntimeError(
'No frequencies in band "%s" (%s, %s)' % (title, fmin, fmax)
)
data = agg_fun(psds[:, freq_mask], axis=1)
if dB is True and normalize is False:
data = 10 * np.log10(data)
unit = "dB"
else:
unit = "power"
_plot_topomap_multi_cbar(
data,
pos,
ax,
title=title,
vmin=vmin,
vmax=vmax,
cmap=cmap,
colorbar=True,
unit=unit,
cbar_fmt=cbar_fmt,
)
tight_layout(fig=fig)
fig.canvas.draw()
plt_show(show)
return fig
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def _onselect(
eclick,
erelease,
tfr,
pos,
ch_type,
itmin,
itmax,
ifmin,
ifmax,
cmap,
fig,
layout=None,
):
"""Callback called from topomap for drawing average tfr over channels."""
import matplotlib.pyplot as plt
pos, _ = _check_outlines(pos, outlines="head", head_pos=None)
ax = eclick.inaxes
xmin = min(eclick.xdata, erelease.xdata)
xmax = max(eclick.xdata, erelease.xdata)
ymin = min(eclick.ydata, erelease.ydata)
ymax = max(eclick.ydata, erelease.ydata)
indices = [
i
for i in range(len(pos))
if pos[i][0] < xmax
and pos[i][0] > xmin
and pos[i][1] < ymax
and pos[i][1] > ymin
]
for idx, circle in enumerate(ax.artists):
if idx in indices:
circle.set_color("r")
else:
circle.set_color("black")
plt.gcf().canvas.draw()
if not indices:
return
data = tfr.data
if ch_type == "mag":
picks = pick_types(tfr.info, meg=ch_type, ref_meg=False)
data = np.mean(data[indices, ifmin:ifmax, itmin:itmax], axis=0)
chs = [tfr.ch_names[picks[x]] for x in indices]
elif ch_type == "grad":
from ..channels.layout import _pair_grad_sensors
grads = _pair_grad_sensors(tfr.info, layout=layout, topomap_coords=False)
idxs = list()
for idx in indices:
idxs.append(grads[idx * 2])
idxs.append(grads[idx * 2 + 1]) # pair of grads
data = np.mean(data[idxs, ifmin:ifmax, itmin:itmax], axis=0)
chs = [tfr.ch_names[x] for x in idxs]
elif ch_type == "eeg":
picks = pick_types(tfr.info, meg=False, eeg=True, ref_meg=False)
data = np.mean(data[indices, ifmin:ifmax, itmin:itmax], axis=0)
chs = [tfr.ch_names[picks[x]] for x in indices]
logger.info("Averaging TFR over channels " + str(chs))
if len(fig) == 0:
fig.append(figure_nobar())
if not plt.fignum_exists(fig[0].number):
fig[0] = figure_nobar()
ax = fig[0].add_subplot(111)
itmax = len(tfr.times) - 1 if itmax is None else min(itmax, len(tfr.times) - 1)
ifmax = len(tfr.freqs) - 1 if ifmax is None else min(ifmax, len(tfr.freqs) - 1)
if itmin is None:
itmin = 0
if ifmin is None:
ifmin = 0
extent = (
tfr.times[itmin] * 1e3,
tfr.times[itmax] * 1e3,
tfr.freqs[ifmin],
tfr.freqs[ifmax],
)
title = "Average over %d %s channels." % (len(chs), ch_type)
ax.set_title(title)
ax.set_xlabel("Time (ms)")
ax.set_ylabel("Frequency (Hz)")
img = ax.imshow(data, extent=extent, aspect="auto", origin="lower", cmap=cmap)
if len(fig[0].get_axes()) < 2:
fig[0].get_axes()[1].cbar = fig[0].colorbar(mappable=img)
else:
fig[0].get_axes()[1].cbar.on_mappable_changed(mappable=img)
fig[0].canvas.draw()
plt.figure(fig[0].number)
plt_show(True)
|
def _onselect(
eclick,
erelease,
tfr,
pos,
ch_type,
itmin,
itmax,
ifmin,
ifmax,
cmap,
fig,
layout=None,
):
"""Callback called from topomap for drawing average tfr over channels."""
import matplotlib.pyplot as plt
pos, _ = _check_outlines(pos, outlines="head", head_pos=None)
ax = eclick.inaxes
xmin = min(eclick.xdata, erelease.xdata)
xmax = max(eclick.xdata, erelease.xdata)
ymin = min(eclick.ydata, erelease.ydata)
ymax = max(eclick.ydata, erelease.ydata)
indices = [
i
for i in range(len(pos))
if pos[i][0] < xmax
and pos[i][0] > xmin
and pos[i][1] < ymax
and pos[i][1] > ymin
]
for idx, circle in enumerate(ax.artists):
if idx in indices:
circle.set_color("r")
else:
circle.set_color("black")
plt.gcf().canvas.draw()
if not indices:
return
data = tfr.data
if ch_type == "mag":
picks = pick_types(tfr.info, meg=ch_type, ref_meg=False)
data = np.mean(data[indices, ifmin:ifmax, itmin:itmax], axis=0)
chs = [tfr.ch_names[picks[x]] for x in indices]
elif ch_type == "grad":
picks = pick_types(tfr.info, meg=ch_type, ref_meg=False)
from ..channels.layout import _pair_grad_sensors
grads = _pair_grad_sensors(tfr.info, layout=layout, topomap_coords=False)
idxs = list()
for idx in indices:
idxs.append(grads[idx * 2])
idxs.append(grads[idx * 2 + 1]) # pair of grads
data = np.mean(data[idxs, ifmin:ifmax, itmin:itmax], axis=0)
chs = [tfr.ch_names[x] for x in idxs]
elif ch_type == "eeg":
picks = pick_types(tfr.info, meg=False, eeg=True, ref_meg=False)
data = np.mean(data[indices, ifmin:ifmax, itmin:itmax], axis=0)
chs = [tfr.ch_names[picks[x]] for x in indices]
logger.info("Averaging TFR over channels " + str(chs))
if len(fig) == 0:
fig.append(figure_nobar())
if not plt.fignum_exists(fig[0].number):
fig[0] = figure_nobar()
ax = fig[0].add_subplot(111)
itmax = min(itmax, len(tfr.times) - 1)
ifmax = min(ifmax, len(tfr.freqs) - 1)
extent = (
tfr.times[itmin] * 1e3,
tfr.times[itmax] * 1e3,
tfr.freqs[ifmin],
tfr.freqs[ifmax],
)
title = "Average over %d %s channels." % (len(chs), ch_type)
ax.set_title(title)
ax.set_xlabel("Time (ms)")
ax.set_ylabel("Frequency (Hz)")
img = ax.imshow(data, extent=extent, aspect="auto", origin="lower", cmap=cmap)
if len(fig[0].get_axes()) < 2:
fig[0].get_axes()[1].cbar = fig[0].colorbar(mappable=img)
else:
fig[0].get_axes()[1].cbar.on_mappable_changed(mappable=img)
fig[0].canvas.draw()
plt.figure(fig[0].number)
plt_show(True)
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def _hide_frame(ax):
"""Helper to hide axis frame for topomaps."""
ax.get_yticks()
ax.xaxis.set_ticks([])
ax.yaxis.set_ticks([])
ax.set_frame_on(False)
|
def _hide_frame(ax):
"""Helper to hide axis frame for topomaps."""
ax.set_xticks([])
ax.set_yticks([])
ax.set_frame_on(False)
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def _topomap_animation(
evoked,
ch_type="mag",
times=None,
frame_rate=None,
butterfly=False,
blit=True,
show=True,
):
"""Make animation of evoked data as topomap timeseries. Animation can be
paused/resumed with left mouse button. Left and right arrow keys can be
used to move backward or forward in time.
Parameters
----------
evoked : instance of Evoked
The evoked data.
ch_type : str | None
Channel type to plot. Accepted data types: 'mag', 'grad', 'eeg'.
If None, first available channel type from ('mag', 'grad', 'eeg') is
used. Defaults to None.
times : array of floats | None
The time points to plot. If None, 10 evenly spaced samples are
calculated over the evoked time series. Defaults to None.
frame_rate : int | None
Frame rate for the animation in Hz. If None, frame rate = sfreq / 10.
Defaults to None.
butterfly : bool
Whether to plot the data as butterfly plot under the topomap.
Defaults to False.
blit : bool
Whether to use blit to optimize drawing. In general, it is recommended
to use blit in combination with ``show=True``. If you intend to save
the animation it is better to disable blit. For MacOSX blit is always
disabled. Defaults to True.
show : bool
Whether to show the animation. Defaults to True.
Returns
-------
fig : instance of matplotlib figure
The figure.
anim : instance of matplotlib FuncAnimation
Animation of the topomap.
Notes
-----
.. versionadded:: 0.12.0
"""
from matplotlib import pyplot as plt, animation
if ch_type is None:
ch_type = _picks_by_type(evoked.info)[0][0]
if ch_type not in ("mag", "grad", "eeg"):
raise ValueError(
"Channel type not supported. Supported channel "
"types include 'mag', 'grad' and 'eeg'."
)
if times is None:
times = np.linspace(evoked.times[0], evoked.times[-1], 10)
times = np.array(times)
if times.ndim != 1:
raise ValueError("times must be 1D, got %d dimensions" % times.ndim)
if max(times) > evoked.times[-1] or min(times) < evoked.times[0]:
raise ValueError("All times must be inside the evoked time series.")
frames = [np.abs(evoked.times - time).argmin() for time in times]
blit = False if plt.get_backend() == "MacOSX" else blit
picks, pos, merge_grads, _, ch_type = _prepare_topo_plot(
evoked, ch_type=ch_type, layout=None
)
data = evoked.data[picks, :]
data *= _handle_default("scalings")[ch_type]
fig = plt.figure()
offset = 0.0 if blit else 0.4 # XXX: blit changes the sizes for some reason
ax = plt.axes(
[0.0 + offset / 2.0, 0.0 + offset / 2.0, 1.0 - offset, 1.0 - offset],
xlim=(-1, 1),
ylim=(-1, 1),
)
if butterfly:
ax_line = plt.axes(
[0.2, 0.05, 0.6, 0.1], xlim=(evoked.times[0], evoked.times[-1])
)
else:
ax_line = None
if isinstance(frames, int):
frames = np.linspace(0, len(evoked.times) - 1, frames).astype(int)
ax_cbar = plt.axes([0.85, 0.1, 0.05, 0.8])
ax_cbar.set_title(_handle_default("units")[ch_type], fontsize=10)
params = {
"data": data,
"pos": pos,
"all_times": evoked.times,
"frame": 0,
"frames": frames,
"butterfly": butterfly,
"blit": blit,
"pause": False,
"times": times,
}
init_func = partial(
_init_anim,
ax=ax,
ax_cbar=ax_cbar,
ax_line=ax_line,
params=params,
merge_grads=merge_grads,
)
animate_func = partial(_animate, ax=ax, ax_line=ax_line, params=params)
pause_func = partial(_pause_anim, params=params)
fig.canvas.mpl_connect("button_press_event", pause_func)
key_press_func = partial(_key_press, params=params)
fig.canvas.mpl_connect("key_press_event", key_press_func)
if frame_rate is None:
frame_rate = evoked.info["sfreq"] / 10.0
interval = 1000 / frame_rate # interval is in ms
anim = animation.FuncAnimation(
fig,
animate_func,
init_func=init_func,
frames=len(frames),
interval=interval,
blit=blit,
)
fig.mne_animation = anim # to make sure anim is not garbage collected
plt_show(show, block=False)
if "line" in params:
# Finally remove the vertical line so it does not appear in saved fig.
params["line"].remove()
return fig, anim
|
def _topomap_animation(
evoked,
ch_type="mag",
times=None,
frame_rate=None,
butterfly=False,
blit=True,
show=True,
):
"""Make animation of evoked data as topomap timeseries. Animation can be
paused/resumed with left mouse button. Left and right arrow keys can be
used to move backward or forward in time.
Parameters
----------
evoked : instance of Evoked
The evoked data.
ch_type : str | None
Channel type to plot. Accepted data types: 'mag', 'grad', 'eeg'.
If None, first available channel type from ('mag', 'grad', 'eeg') is
used. Defaults to None.
times : array of floats | None
The time points to plot. If None, 10 evenly spaced samples are
calculated over the evoked time series. Defaults to None.
frame_rate : int | None
Frame rate for the animation in Hz. If None, frame rate = sfreq / 10.
Defaults to None.
butterfly : bool
Whether to plot the data as butterfly plot under the topomap.
Defaults to False.
blit : bool
Whether to use blit to optimize drawing. In general, it is recommended
to use blit in combination with ``show=True``. If you intend to save
the animation it is better to disable blit. For MacOSX blit is always
disabled. Defaults to True.
show : bool
Whether to show the animation. Defaults to True.
Returns
-------
fig : instance of matplotlib figure
The figure.
anim : instance of matplotlib FuncAnimation
Animation of the topomap.
Notes
-----
.. versionadded:: 0.12.0
"""
from matplotlib import pyplot as plt, animation
if ch_type is None:
ch_type = _picks_by_type(evoked.info)[0][0]
if ch_type not in ("mag", "grad", "eeg"):
raise ValueError(
"Channel type not supported. Supported channel "
"types include 'mag', 'grad' and 'eeg'."
)
if times is None:
times = np.linspace(evoked.times[0], evoked.times[-1], 10)
times = np.array(times)
if times.ndim != 1:
raise ValueError("times must be 1D, got %d dimensions" % times.ndim)
if max(times) > evoked.times[-1] or min(times) < evoked.times[0]:
raise ValueError("All times must be inside the evoked time series.")
frames = [np.abs(evoked.times - time).argmin() for time in times]
blit = False if plt.get_backend() == "MacOSX" else blit
picks, pos, merge_grads, _, ch_type = _prepare_topo_plot(
evoked, ch_type=ch_type, layout=None
)
data = evoked.data[picks, :]
data *= _handle_default("scalings")[ch_type]
fig = plt.figure()
offset = 0.0 if blit else 0.4 # XXX: blit changes the sizes for some reason
ax = plt.axes(
[0.0 + offset / 2.0, 0.0 + offset / 2.0, 1.0 - offset, 1.0 - offset],
xlim=(-1, 1),
ylim=(-1, 1),
)
if butterfly:
ax_line = plt.axes(
[0.2, 0.05, 0.6, 0.1], xlim=(evoked.times[0], evoked.times[-1])
)
else:
ax_line = None
if isinstance(frames, int):
frames = np.linspace(0, len(evoked.times) - 1, frames).astype(int)
ax_cbar = plt.axes([0.85, 0.1, 0.05, 0.8])
ax_cbar.set_title(_handle_default("units")[ch_type], fontsize=10)
params = {
"data": data,
"pos": pos,
"all_times": evoked.times,
"frame": 0,
"frames": frames,
"butterfly": butterfly,
"blit": blit,
"pause": False,
"times": times,
}
init_func = partial(
_init_anim,
ax=ax,
ax_cbar=ax_cbar,
ax_line=ax_line,
params=params,
merge_grads=merge_grads,
)
animate_func = partial(_animate, ax=ax, ax_line=ax_line, params=params)
pause_func = partial(_pause_anim, params=params)
fig.canvas.mpl_connect("button_press_event", pause_func)
key_press_func = partial(_key_press, params=params)
fig.canvas.mpl_connect("key_press_event", key_press_func)
if frame_rate is None:
frame_rate = evoked.info["sfreq"] / 10.0
interval = 1000 / frame_rate # interval is in ms
anim = animation.FuncAnimation(
fig,
animate_func,
init_func=init_func,
frames=len(frames),
interval=interval,
blit=blit,
)
fig.mne_animation = anim # to make sure anim is not garbage collected
if show:
plt.show()
if "line" in params:
# Finally remove the vertical line so it does not appear in saved fig.
params["line"].remove()
return fig, anim
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def _fake_click(fig, ax, point, xform="ax", button=1, kind="press"):
"""Helper to fake a click at a relative point within axes."""
if xform == "ax":
x, y = ax.transAxes.transform_point(point)
elif xform == "data":
x, y = ax.transData.transform_point(point)
else:
raise ValueError("unknown transform")
if kind == "press":
func = partial(fig.canvas.button_press_event, x=x, y=y, button=button)
elif kind == "release":
func = partial(fig.canvas.button_release_event, x=x, y=y, button=button)
elif kind == "motion":
func = partial(fig.canvas.motion_notify_event, x=x, y=y)
try:
func(guiEvent=None)
except Exception: # for old MPL
func()
|
def _fake_click(fig, ax, point, xform="ax", button=1):
"""Helper to fake a click at a relative point within axes."""
if xform == "ax":
x, y = ax.transAxes.transform_point(point)
elif xform == "data":
x, y = ax.transData.transform_point(point)
else:
raise ValueError("unknown transform")
try:
fig.canvas.button_press_event(x, y, button, False, None)
except Exception: # for old MPL
fig.canvas.button_press_event(x, y, button, False)
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def __init__(self, cbar, mappable):
import matplotlib.pyplot as plt
self.cbar = cbar
self.mappable = mappable
self.press = None
self.cycle = sorted([i for i in dir(plt.cm) if hasattr(getattr(plt.cm, i), "N")])
self.index = self.cycle.index(cbar.get_cmap().name)
self.lims = (self.cbar.norm.vmin, self.cbar.norm.vmax)
self.connect()
|
def __init__(self, imdata, **kwargs):
"""Display the image for clicking."""
from matplotlib.pyplot import figure
self.coords = []
self.imdata = imdata
self.fig = figure()
self.ax = self.fig.add_subplot(111)
self.ymax = self.imdata.shape[0]
self.xmax = self.imdata.shape[1]
self.im = self.ax.imshow(
imdata,
aspect="auto",
extent=(0, self.xmax, 0, self.ymax),
picker=True,
**kwargs,
)
self.ax.axis("off")
self.fig.canvas.mpl_connect("pick_event", self.onclick)
plt_show()
|
https://github.com/mne-tools/mne-python/issues/3063
|
n [15]: Traceback (most recent call last):
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 291, in mouseReleaseEvent
FigureCanvasBase.button_release_event(self, x, y, button)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/backend_bases.py", line 1891, in button_release_event
self.callbacks.process(s, event)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 533, in process
proxy(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/cbook.py", line 408, in __call__
return mtd(*args, **kwargs)
File "/tsi/doctorants/mainak/anaconda/lib/python2.7/site-packages/matplotlib/widgets.py", line 1250, in release
self.onselect(vmin, vmax)
File "/tsi/doctorants/mainak/mne-python/mne/viz/evoked.py", line 90, in _butterfly_onselect
layout=None)
File "/tsi/doctorants/mainak/mne-python/mne/viz/topomap.py", line 49, in _prepare_topo_plot
picks, pos = _pair_grad_sensors(info, layout)
File "/tsi/doctorants/mainak/mne-python/mne/channels/layout.py", line 689, in _pair_grad_sensors
raise ValueError("No 'grad' channel pairs found.")
ValueError: No 'grad' channel pairs found.
|
ValueError
|
def _plot_corrmap(
data,
subjs,
indices,
ch_type,
ica,
label,
show,
outlines,
layout,
cmap,
contours,
template=True,
):
"""Customized ica.plot_components for corrmap"""
if not template:
title = "Detected components"
if label is not None:
title += " of type " + label
else:
title = "Supplied template"
picks = list(range(len(data)))
p = 20
if len(picks) > p: # plot components by sets of 20
n_components = len(picks)
figs = [
_plot_corrmap(
data[k : k + p],
subjs[k : k + p],
indices[k : k + p],
ch_type,
ica,
label,
show,
outlines=outlines,
layout=layout,
cmap=cmap,
contours=contours,
)
for k in range(0, n_components, p)
]
return figs
elif np.isscalar(picks):
picks = [picks]
data_picks, pos, merge_grads, names, _ = _prepare_topo_plot(ica, ch_type, layout)
pos, outlines = _check_outlines(pos, outlines)
data = np.atleast_2d(data)
data = data[:, data_picks]
# prepare data for iteration
fig, axes = _prepare_trellis(len(picks), max_col=5)
fig.suptitle(title)
if merge_grads:
from ..channels.layout import _merge_grad_data
for ii, data_, ax, subject, idx in zip(picks, data, axes, subjs, indices):
if template:
ttl = "Subj. {0}, IC {1}".format(subject, idx)
ax.set_title(ttl, fontsize=12)
data_ = _merge_grad_data(data_) if merge_grads else data_
vmin_, vmax_ = _setup_vmin_vmax(data_, None, None)
plot_topomap(
data_.flatten(),
pos,
vmin=vmin_,
vmax=vmax_,
res=64,
axis=ax,
cmap=cmap,
outlines=outlines,
image_mask=None,
contours=contours,
show=False,
image_interp="bilinear",
)[0]
_hide_frame(ax)
tight_layout(fig=fig)
fig.subplots_adjust(top=0.8)
fig.canvas.draw()
plt_show(show)
return fig
|
def _plot_corrmap(
data,
subjs,
indices,
ch_type,
ica,
label,
show,
outlines,
layout,
cmap,
contours,
template=True,
):
"""Customized ica.plot_components for corrmap"""
if not template:
title = "Detected components"
if label is not None:
title += " of type " + label
else:
title = "Supplied template"
picks = list(range(len(data)))
p = 20
if len(picks) > p: # plot components by sets of 20
n_components = len(picks)
figs = [
_plot_corrmap(
data[k : k + p],
subjs[k : k + p],
indices[k : k + p],
ch_type,
ica,
label,
show,
outlines=outlines,
layout=layout,
cmap=cmap,
contours=contours,
)
for k in range(0, n_components, p)
]
return figs
elif np.isscalar(picks):
picks = [picks]
data_picks, pos, merge_grads, names, _ = _prepare_topo_plot(ica, ch_type, layout)
pos, outlines = _check_outlines(pos, outlines)
data = np.atleast_2d(data)
data = data[:, data_picks]
# prepare data for iteration
fig, axes = _prepare_trellis(len(picks), max_col=5)
fig.suptitle(title)
if merge_grads:
from ..channels.layout import _merge_grad_data
for ii, data_, ax, subject, idx in zip(picks, data, axes, subjs, indices):
if template:
ttl = "Subj. {0}, IC {1}".format(subject, idx)
ax.set_title(ttl, fontsize=12)
data_ = _merge_grad_data(data_) if merge_grads else data_
vmin_, vmax_ = _setup_vmin_vmax(data_, None, None)
plot_topomap(
data_.flatten(),
pos,
vmin=vmin_,
vmax=vmax_,
res=64,
axis=ax,
cmap=cmap,
outlines=outlines,
image_mask=None,
contours=contours,
show=False,
image_interp="bilinear",
)[0]
ax.set_yticks([])
ax.set_xticks([])
ax.set_frame_on(False)
tight_layout(fig=fig)
fig.subplots_adjust(top=0.8)
fig.canvas.draw()
plt_show(show)
return fig
|
https://github.com/mne-tools/mne-python/issues/3118
|
In [51]: ica.plot_components()
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-51-31baf63ee5ff> in <module>()
----> 1 ica.plot_components()
/Applications/packages/mne-python/mne/preprocessing/ica.py in plot_components(self, picks, ch_type, res, layout, vmin, vmax, cmap, sensors, colorbar, title, show, outlines, contours, image_interp, head_pos)
1384 outlines=outlines, contours=contours,
1385 image_interp=image_interp,
-> 1386 head_pos=head_pos)
1387
1388 def plot_sources(self, inst, picks=None, exclude=None, start=None,
/Applications/packages/mne-python/mne/viz/topomap.py in plot_ica_components(ica, picks, ch_type, res, layout, vmin, vmax, cmap, sensors, colorbar, title, show, outlines, contours, image_interp, head_pos)
748 show=show, outlines=outlines,
749 contours=contours,
--> 750 image_interp=image_interp)
751 figs.append(fig)
752 return figs
/Applications/packages/mne-python/mne/viz/topomap.py in plot_ica_components(ica, picks, ch_type, res, layout, vmin, vmax, cmap, sensors, colorbar, title, show, outlines, contours, image_interp, head_pos)
799 tight_layout(fig=fig)
800 fig.subplots_adjust(top=0.95)
--> 801 fig.canvas.draw()
802 plt_show(show)
803 return fig
/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5agg.pyc in draw(self)
146 # causes problems with code that uses the result of the
147 # draw() to update plot elements.
--> 148 FigureCanvasAgg.draw(self)
149 self._priv_update()
150
/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_agg.pyc in draw(self)
467
468 try:
--> 469 self.figure.draw(self.renderer)
470 finally:
471 RendererAgg.lock.release()
/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/artist.pyc in draw_wrapper(artist, renderer, *args, **kwargs)
57 def draw_wrapper(artist, renderer, *args, **kwargs):
58 before(artist, renderer)
---> 59 draw(artist, renderer, *args, **kwargs)
60 after(artist, renderer)
61
/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/figure.pyc in draw(self, renderer)
1015 if self.get_tight_layout() and self.axes:
1016 try:
-> 1017 self.tight_layout(renderer, **self._tight_parameters)
1018 except ValueError:
1019 pass
/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/figure.pyc in tight_layout(self, renderer, pad, h_pad, w_pad, rect)
1661 renderer,
1662 pad=pad, h_pad=h_pad, w_pad=w_pad,
-> 1663 rect=rect)
1664
1665 self.subplots_adjust(**kwargs)
/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/tight_layout.pyc in get_tight_layout_figure(fig, axes_list, subplotspec_list, renderer, pad, h_pad, w_pad, rect)
350 subplot_list=subplot_list,
351 ax_bbox_list=ax_bbox_list,
--> 352 pad=pad, h_pad=h_pad, w_pad=w_pad)
353
354 if rect is not None:
/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/tight_layout.pyc in auto_adjust_subplotpars(fig, renderer, nrows_ncols, num1num2_list, subplot_list, ax_bbox_list, pad, h_pad, w_pad, rect)
129 tight_bbox_raw = union([ax.get_tightbbox(renderer) for ax in subplots])
130 tight_bbox = TransformedBbox(tight_bbox_raw,
--> 131 fig.transFigure.inverted())
132
133 row1, col1 = divmod(num1, cols)
/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/transforms.pyc in __init__(self, bbox, transform, **kwargs)
1055 *transform*: a 2D :class:`Transform`
1056 """
-> 1057 assert bbox.is_bbox
1058 assert isinstance(transform, Transform)
1059 assert transform.input_dims == 2
In [52]: ica.plot_components()
AttributeError: 'NoneType' object has no attribute 'is_bbox'
In [52]: Traceback (most recent call last):
File "/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 427, in idle_draw
self.draw()
File "/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5agg.py", line 148, in draw
FigureCanvasAgg.draw(self)
File "/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_agg.py", line 469, in draw
self.figure.draw(self.renderer)
File "/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/artist.py", line 59, in draw_wrapper
draw(artist, renderer, *args, **kwargs)
File "/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/figure.py", line 1017, in draw
self.tight_layout(renderer, **self._tight_parameters)
File "/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/figure.py", line 1663, in tight_layout
rect=rect)
File "/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/tight_layout.py", line 352, in get_tight_layout_figure
pad=pad, h_pad=h_pad, w_pad=w_pad)
File "/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/tight_layout.py", line 131, in auto_adjust_subplotpars
fig.transFigure.inverted())
File "/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/transforms.py", line 1057, in __init__
assert bbox.is_bbox
AttributeError: 'NoneType' object has no attribute 'is_bbox'
If you suspect this is an IPython bug, please report it at:
https://github.com/ipython/ipython/issues
or send an email to the mailing list at ipython-dev@scipy.org
You can print a more detailed traceback right now with "%tb", or use "%debug"
to interactively debug it.
Extra-detailed tracebacks for bug-reporting purposes can be enabled via:
%config Application.verbose_crash=True
|
AttributeError
|
def _label_clicked(pos, params):
"""Function for plotting independent components on click to label."""
import matplotlib.pyplot as plt
offsets = np.array(params["offsets"]) + params["offsets"][0]
line_idx = np.searchsorted(offsets, pos[1]) + params["ch_start"]
if line_idx >= len(params["picks"]):
return
ic_idx = [params["picks"][line_idx]]
types = list()
info = params["ica"].info
if len(pick_types(info, meg=False, eeg=True, ref_meg=False)) > 0:
types.append("eeg")
if len(pick_types(info, meg="mag", ref_meg=False)) > 0:
types.append("mag")
if len(pick_types(info, meg="grad", ref_meg=False)) > 0:
types.append("grad")
ica = params["ica"]
data = np.dot(
ica.mixing_matrix_[:, ic_idx].T, ica.pca_components_[: ica.n_components_]
)
data = np.atleast_2d(data)
fig, axes = _prepare_trellis(len(types), max_col=3)
for ch_idx, ch_type in enumerate(types):
try:
data_picks, pos, merge_grads, _, _ = _prepare_topo_plot(ica, ch_type, None)
except Exception as exc:
warn(exc)
plt.close(fig)
return
this_data = data[:, data_picks]
ax = axes[ch_idx]
if merge_grads:
from ..channels.layout import _merge_grad_data
for ii, data_ in zip(ic_idx, this_data):
ax.set_title("IC #%03d " % ii + ch_type, fontsize=12)
data_ = _merge_grad_data(data_) if merge_grads else data_
plot_topomap(data_.flatten(), pos, axis=ax, show=False)
_hide_frame(ax)
tight_layout(fig=fig)
fig.subplots_adjust(top=0.95)
fig.canvas.draw()
plt_show(True)
|
def _label_clicked(pos, params):
"""Function for plotting independent components on click to label."""
import matplotlib.pyplot as plt
offsets = np.array(params["offsets"]) + params["offsets"][0]
line_idx = np.searchsorted(offsets, pos[1]) + params["ch_start"]
if line_idx >= len(params["picks"]):
return
ic_idx = [params["picks"][line_idx]]
types = list()
info = params["ica"].info
if len(pick_types(info, meg=False, eeg=True, ref_meg=False)) > 0:
types.append("eeg")
if len(pick_types(info, meg="mag", ref_meg=False)) > 0:
types.append("mag")
if len(pick_types(info, meg="grad", ref_meg=False)) > 0:
types.append("grad")
ica = params["ica"]
data = np.dot(
ica.mixing_matrix_[:, ic_idx].T, ica.pca_components_[: ica.n_components_]
)
data = np.atleast_2d(data)
fig, axes = _prepare_trellis(len(types), max_col=3)
for ch_idx, ch_type in enumerate(types):
try:
data_picks, pos, merge_grads, _, _ = _prepare_topo_plot(ica, ch_type, None)
except Exception as exc:
warn(exc)
plt.close(fig)
return
this_data = data[:, data_picks]
ax = axes[ch_idx]
if merge_grads:
from ..channels.layout import _merge_grad_data
for ii, data_ in zip(ic_idx, this_data):
ax.set_title("IC #%03d " % ii + ch_type, fontsize=12)
data_ = _merge_grad_data(data_) if merge_grads else data_
plot_topomap(data_.flatten(), pos, axis=ax, show=False)
ax.set_yticks([])
ax.set_xticks([])
ax.set_frame_on(False)
tight_layout(fig=fig)
fig.subplots_adjust(top=0.95)
fig.canvas.draw()
plt_show(True)
|
https://github.com/mne-tools/mne-python/issues/3118
|
In [51]: ica.plot_components()
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-51-31baf63ee5ff> in <module>()
----> 1 ica.plot_components()
/Applications/packages/mne-python/mne/preprocessing/ica.py in plot_components(self, picks, ch_type, res, layout, vmin, vmax, cmap, sensors, colorbar, title, show, outlines, contours, image_interp, head_pos)
1384 outlines=outlines, contours=contours,
1385 image_interp=image_interp,
-> 1386 head_pos=head_pos)
1387
1388 def plot_sources(self, inst, picks=None, exclude=None, start=None,
/Applications/packages/mne-python/mne/viz/topomap.py in plot_ica_components(ica, picks, ch_type, res, layout, vmin, vmax, cmap, sensors, colorbar, title, show, outlines, contours, image_interp, head_pos)
748 show=show, outlines=outlines,
749 contours=contours,
--> 750 image_interp=image_interp)
751 figs.append(fig)
752 return figs
/Applications/packages/mne-python/mne/viz/topomap.py in plot_ica_components(ica, picks, ch_type, res, layout, vmin, vmax, cmap, sensors, colorbar, title, show, outlines, contours, image_interp, head_pos)
799 tight_layout(fig=fig)
800 fig.subplots_adjust(top=0.95)
--> 801 fig.canvas.draw()
802 plt_show(show)
803 return fig
/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5agg.pyc in draw(self)
146 # causes problems with code that uses the result of the
147 # draw() to update plot elements.
--> 148 FigureCanvasAgg.draw(self)
149 self._priv_update()
150
/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_agg.pyc in draw(self)
467
468 try:
--> 469 self.figure.draw(self.renderer)
470 finally:
471 RendererAgg.lock.release()
/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/artist.pyc in draw_wrapper(artist, renderer, *args, **kwargs)
57 def draw_wrapper(artist, renderer, *args, **kwargs):
58 before(artist, renderer)
---> 59 draw(artist, renderer, *args, **kwargs)
60 after(artist, renderer)
61
/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/figure.pyc in draw(self, renderer)
1015 if self.get_tight_layout() and self.axes:
1016 try:
-> 1017 self.tight_layout(renderer, **self._tight_parameters)
1018 except ValueError:
1019 pass
/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/figure.pyc in tight_layout(self, renderer, pad, h_pad, w_pad, rect)
1661 renderer,
1662 pad=pad, h_pad=h_pad, w_pad=w_pad,
-> 1663 rect=rect)
1664
1665 self.subplots_adjust(**kwargs)
/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/tight_layout.pyc in get_tight_layout_figure(fig, axes_list, subplotspec_list, renderer, pad, h_pad, w_pad, rect)
350 subplot_list=subplot_list,
351 ax_bbox_list=ax_bbox_list,
--> 352 pad=pad, h_pad=h_pad, w_pad=w_pad)
353
354 if rect is not None:
/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/tight_layout.pyc in auto_adjust_subplotpars(fig, renderer, nrows_ncols, num1num2_list, subplot_list, ax_bbox_list, pad, h_pad, w_pad, rect)
129 tight_bbox_raw = union([ax.get_tightbbox(renderer) for ax in subplots])
130 tight_bbox = TransformedBbox(tight_bbox_raw,
--> 131 fig.transFigure.inverted())
132
133 row1, col1 = divmod(num1, cols)
/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/transforms.pyc in __init__(self, bbox, transform, **kwargs)
1055 *transform*: a 2D :class:`Transform`
1056 """
-> 1057 assert bbox.is_bbox
1058 assert isinstance(transform, Transform)
1059 assert transform.input_dims == 2
In [52]: ica.plot_components()
AttributeError: 'NoneType' object has no attribute 'is_bbox'
In [52]: Traceback (most recent call last):
File "/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 427, in idle_draw
self.draw()
File "/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5agg.py", line 148, in draw
FigureCanvasAgg.draw(self)
File "/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_agg.py", line 469, in draw
self.figure.draw(self.renderer)
File "/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/artist.py", line 59, in draw_wrapper
draw(artist, renderer, *args, **kwargs)
File "/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/figure.py", line 1017, in draw
self.tight_layout(renderer, **self._tight_parameters)
File "/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/figure.py", line 1663, in tight_layout
rect=rect)
File "/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/tight_layout.py", line 352, in get_tight_layout_figure
pad=pad, h_pad=h_pad, w_pad=w_pad)
File "/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/tight_layout.py", line 131, in auto_adjust_subplotpars
fig.transFigure.inverted())
File "/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/transforms.py", line 1057, in __init__
assert bbox.is_bbox
AttributeError: 'NoneType' object has no attribute 'is_bbox'
If you suspect this is an IPython bug, please report it at:
https://github.com/ipython/ipython/issues
or send an email to the mailing list at ipython-dev@scipy.org
You can print a more detailed traceback right now with "%tb", or use "%debug"
to interactively debug it.
Extra-detailed tracebacks for bug-reporting purposes can be enabled via:
%config Application.verbose_crash=True
|
AttributeError
|
def _prepare_trellis(n_cells, max_col):
"""Aux function"""
import matplotlib.pyplot as plt
if n_cells == 1:
nrow = ncol = 1
elif n_cells <= max_col:
nrow, ncol = 1, n_cells
else:
nrow, ncol = int(math.ceil(n_cells / float(max_col))), max_col
fig, axes = plt.subplots(nrow, ncol, figsize=(7.4, 1.5 * nrow + 1))
axes = [axes] if ncol == nrow == 1 else axes.flatten()
for ax in axes[n_cells:]: # hide unused axes
# XXX: Previously done by ax.set_visible(False), but because of mpl
# bug, we just hide the frame.
from .topomap import _hide_frame
_hide_frame(ax)
return fig, axes
|
def _prepare_trellis(n_cells, max_col):
"""Aux function"""
import matplotlib.pyplot as plt
if n_cells == 1:
nrow = ncol = 1
elif n_cells <= max_col:
nrow, ncol = 1, n_cells
else:
nrow, ncol = int(math.ceil(n_cells / float(max_col))), max_col
fig, axes = plt.subplots(nrow, ncol, figsize=(7.4, 1.5 * nrow + 1))
axes = [axes] if ncol == nrow == 1 else axes.flatten()
for ax in axes[n_cells:]: # hide unused axes
ax.set_visible(False)
return fig, axes
|
https://github.com/mne-tools/mne-python/issues/3118
|
In [51]: ica.plot_components()
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-51-31baf63ee5ff> in <module>()
----> 1 ica.plot_components()
/Applications/packages/mne-python/mne/preprocessing/ica.py in plot_components(self, picks, ch_type, res, layout, vmin, vmax, cmap, sensors, colorbar, title, show, outlines, contours, image_interp, head_pos)
1384 outlines=outlines, contours=contours,
1385 image_interp=image_interp,
-> 1386 head_pos=head_pos)
1387
1388 def plot_sources(self, inst, picks=None, exclude=None, start=None,
/Applications/packages/mne-python/mne/viz/topomap.py in plot_ica_components(ica, picks, ch_type, res, layout, vmin, vmax, cmap, sensors, colorbar, title, show, outlines, contours, image_interp, head_pos)
748 show=show, outlines=outlines,
749 contours=contours,
--> 750 image_interp=image_interp)
751 figs.append(fig)
752 return figs
/Applications/packages/mne-python/mne/viz/topomap.py in plot_ica_components(ica, picks, ch_type, res, layout, vmin, vmax, cmap, sensors, colorbar, title, show, outlines, contours, image_interp, head_pos)
799 tight_layout(fig=fig)
800 fig.subplots_adjust(top=0.95)
--> 801 fig.canvas.draw()
802 plt_show(show)
803 return fig
/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5agg.pyc in draw(self)
146 # causes problems with code that uses the result of the
147 # draw() to update plot elements.
--> 148 FigureCanvasAgg.draw(self)
149 self._priv_update()
150
/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_agg.pyc in draw(self)
467
468 try:
--> 469 self.figure.draw(self.renderer)
470 finally:
471 RendererAgg.lock.release()
/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/artist.pyc in draw_wrapper(artist, renderer, *args, **kwargs)
57 def draw_wrapper(artist, renderer, *args, **kwargs):
58 before(artist, renderer)
---> 59 draw(artist, renderer, *args, **kwargs)
60 after(artist, renderer)
61
/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/figure.pyc in draw(self, renderer)
1015 if self.get_tight_layout() and self.axes:
1016 try:
-> 1017 self.tight_layout(renderer, **self._tight_parameters)
1018 except ValueError:
1019 pass
/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/figure.pyc in tight_layout(self, renderer, pad, h_pad, w_pad, rect)
1661 renderer,
1662 pad=pad, h_pad=h_pad, w_pad=w_pad,
-> 1663 rect=rect)
1664
1665 self.subplots_adjust(**kwargs)
/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/tight_layout.pyc in get_tight_layout_figure(fig, axes_list, subplotspec_list, renderer, pad, h_pad, w_pad, rect)
350 subplot_list=subplot_list,
351 ax_bbox_list=ax_bbox_list,
--> 352 pad=pad, h_pad=h_pad, w_pad=w_pad)
353
354 if rect is not None:
/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/tight_layout.pyc in auto_adjust_subplotpars(fig, renderer, nrows_ncols, num1num2_list, subplot_list, ax_bbox_list, pad, h_pad, w_pad, rect)
129 tight_bbox_raw = union([ax.get_tightbbox(renderer) for ax in subplots])
130 tight_bbox = TransformedBbox(tight_bbox_raw,
--> 131 fig.transFigure.inverted())
132
133 row1, col1 = divmod(num1, cols)
/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/transforms.pyc in __init__(self, bbox, transform, **kwargs)
1055 *transform*: a 2D :class:`Transform`
1056 """
-> 1057 assert bbox.is_bbox
1058 assert isinstance(transform, Transform)
1059 assert transform.input_dims == 2
In [52]: ica.plot_components()
AttributeError: 'NoneType' object has no attribute 'is_bbox'
In [52]: Traceback (most recent call last):
File "/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5.py", line 427, in idle_draw
self.draw()
File "/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_qt5agg.py", line 148, in draw
FigureCanvasAgg.draw(self)
File "/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/backends/backend_agg.py", line 469, in draw
self.figure.draw(self.renderer)
File "/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/artist.py", line 59, in draw_wrapper
draw(artist, renderer, *args, **kwargs)
File "/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/figure.py", line 1017, in draw
self.tight_layout(renderer, **self._tight_parameters)
File "/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/figure.py", line 1663, in tight_layout
rect=rect)
File "/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/tight_layout.py", line 352, in get_tight_layout_figure
pad=pad, h_pad=h_pad, w_pad=w_pad)
File "/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/tight_layout.py", line 131, in auto_adjust_subplotpars
fig.transFigure.inverted())
File "/Users/teon/anaconda/lib/python2.7/site-packages/matplotlib/transforms.py", line 1057, in __init__
assert bbox.is_bbox
AttributeError: 'NoneType' object has no attribute 'is_bbox'
If you suspect this is an IPython bug, please report it at:
https://github.com/ipython/ipython/issues
or send an email to the mailing list at ipython-dev@scipy.org
You can print a more detailed traceback right now with "%tb", or use "%debug"
to interactively debug it.
Extra-detailed tracebacks for bug-reporting purposes can be enabled via:
%config Application.verbose_crash=True
|
AttributeError
|
def rehint(self):
if self._pixbuf:
height, width = self._resize_max(
original_height=self._pixbuf.get_height(),
original_width=self._pixbuf.get_width(),
max_height=self.native.get_allocated_height(),
max_width=self.native.get_allocated_width(),
)
dpr = self.native.get_scale_factor()
scaled_pixbuf = self._pixbuf.scale_simple(
width * dpr, height * dpr, GdkPixbuf.InterpType.BILINEAR
)
surface = Gdk.cairo_surface_create_from_pixbuf(
scaled_pixbuf,
0,
self.native.get_window(), # scale: 0 = same as window
)
self._image.set_from_surface(surface)
|
def rehint(self):
height, width = self._resize_max(
original_height=self._pixbuf.get_height(),
original_width=self._pixbuf.get_width(),
max_height=self.native.get_allocated_height(),
max_width=self.native.get_allocated_width(),
)
scaled_pixbuf = self._pixbuf.scale_simple(
width, height, GdkPixbuf.InterpType.BILINEAR
)
self._image.set_from_pixbuf(scaled_pixbuf)
|
https://github.com/beeware/toga/issues/1062
|
Traceback (most recent call last):
File "/home/samschott/.local/lib/python3.8/site-packages/toga_gtk/app.py", line 93, in gtk_startup
self.interface.startup()
File "/media/psf/Home/Python/toga/examples/imageview/imageview/app.py", line 18, in startup
imageview_from_path = toga.ImageView(image_from_path)
File "/home/samschott/.local/lib/python3.8/site-packages/toga/widgets/imageview.py", line 25, in __init__
self._impl = self.factory.ImageView(interface=self)
File "/home/samschott/.local/lib/python3.8/site-packages/toga_gtk/widgets/base.py", line 12, in __init__
self.interface.style.reapply()
File "/home/samschott/.local/lib/python3.8/site-packages/travertino/declaration.py", line 88, in reapply
self.apply(style, getattr(self, style))
File "/home/samschott/.local/lib/python3.8/site-packages/toga/style/pack.py", line 104, in apply
self._applicator.set_font(
File "/home/samschott/.local/lib/python3.8/site-packages/toga/style/applicator.py", line 25, in set_font
self.widget._impl.rehint()
File "/home/samschott/.local/lib/python3.8/site-packages/toga_gtk/widgets/imageview.py", line 20, in rehint
original_height=self._pixbuf.get_height(),
AttributeError: 'NoneType' object has no attribute 'get_height'
|
AttributeError
|
def winforms_item_selection_changed(self, sender, e):
# update selection interface property
self.interface._selection = self._selected_rows()
if e.IsSelected and self.interface.on_select:
self.interface.on_select(self.interface, row=self.interface.data[e.ItemIndex])
|
def winforms_item_selection_changed(self, sender, e):
# update selection interface property
self.interface._selection = self._selected_rows()
if e.IsSelected:
self.interface.on_select(self.interface, row=self.interface.data[e.ItemIndex])
|
https://github.com/beeware/toga/issues/994
|
Traceback (most recent call last):
File "C:\\Users\\brcan\\Desktop\\snippets\\.venv\\lib\\site-packages\\toga_winforms\\widgets\\table.py", line 74, in _native_item_selection_changed
self.interface.on_select(self.interface, row=self.interface.data[e.ItemIndex])
TypeError : 'NoneType' object is not callable
|
TypeError
|
def __init__(
self,
preprocess_net: nn.Module,
action_shape: Sequence[int],
hidden_sizes: Sequence[int] = (),
max_action: float = 1.0,
device: Union[str, int, torch.device] = "cpu",
preprocess_net_output_dim: Optional[int] = None,
) -> None:
super().__init__()
self.device = device
self.preprocess = preprocess_net
self.output_dim = np.prod(action_shape)
input_dim = getattr(preprocess_net, "output_dim", preprocess_net_output_dim)
self.last = MLP(input_dim, self.output_dim, hidden_sizes, device=self.device)
self._max = max_action
|
def __init__(
self,
preprocess_net: nn.Module,
action_shape: Sequence[int],
hidden_sizes: Sequence[int] = (),
max_action: float = 1.0,
device: Union[str, int, torch.device] = "cpu",
preprocess_net_output_dim: Optional[int] = None,
) -> None:
super().__init__()
self.device = device
self.preprocess = preprocess_net
self.output_dim = np.prod(action_shape)
input_dim = getattr(preprocess_net, "output_dim", preprocess_net_output_dim)
self.last = MLP(input_dim, self.output_dim, hidden_sizes)
self._max = max_action
|
https://github.com/thu-ml/tianshou/issues/286
|
% python test/discrete/test_sac.py (git)-[master]-
tianshou/data/buffer.py:348: UserWarning: ListReplayBuffer will be replaced in version 0.4.0.
warnings.warn("ListReplayBuffer will be replaced in version 0.4.0.")
Epoch #1: 0%| | 0/1000 [00:00<?, ?it/s]
Traceback (most recent call last):
File "test/discrete/test_sac.py", line 121, in <module>
test_discrete_sac()
File "test/discrete/test_sac.py", line 104, in test_discrete_sac
result = offpolicy_trainer(
File "tianshou/trainer/offpolicy.py", line 92, in offpolicy_trainer
result = train_collector.collect(n_step=collect_per_step)
File "tianshou/data/collector.py", line 270, in collect
self.data.act = to_numpy(result.act)
File "tianshou/data/utils/converter.py", line 18, in to_numpy
return x.detach().cpu().numpy()
RuntimeError: CUDA error: an illegal memory access was encountered
|
RuntimeError
|
def __init__(
self,
preprocess_net: nn.Module,
hidden_sizes: Sequence[int] = (),
device: Union[str, int, torch.device] = "cpu",
preprocess_net_output_dim: Optional[int] = None,
) -> None:
super().__init__()
self.device = device
self.preprocess = preprocess_net
self.output_dim = 1
input_dim = getattr(preprocess_net, "output_dim", preprocess_net_output_dim)
self.last = MLP(input_dim, 1, hidden_sizes, device=self.device)
|
def __init__(
self,
preprocess_net: nn.Module,
hidden_sizes: Sequence[int] = (),
device: Union[str, int, torch.device] = "cpu",
preprocess_net_output_dim: Optional[int] = None,
) -> None:
super().__init__()
self.device = device
self.preprocess = preprocess_net
self.output_dim = 1
input_dim = getattr(preprocess_net, "output_dim", preprocess_net_output_dim)
self.last = MLP(input_dim, 1, hidden_sizes)
|
https://github.com/thu-ml/tianshou/issues/286
|
% python test/discrete/test_sac.py (git)-[master]-
tianshou/data/buffer.py:348: UserWarning: ListReplayBuffer will be replaced in version 0.4.0.
warnings.warn("ListReplayBuffer will be replaced in version 0.4.0.")
Epoch #1: 0%| | 0/1000 [00:00<?, ?it/s]
Traceback (most recent call last):
File "test/discrete/test_sac.py", line 121, in <module>
test_discrete_sac()
File "test/discrete/test_sac.py", line 104, in test_discrete_sac
result = offpolicy_trainer(
File "tianshou/trainer/offpolicy.py", line 92, in offpolicy_trainer
result = train_collector.collect(n_step=collect_per_step)
File "tianshou/data/collector.py", line 270, in collect
self.data.act = to_numpy(result.act)
File "tianshou/data/utils/converter.py", line 18, in to_numpy
return x.detach().cpu().numpy()
RuntimeError: CUDA error: an illegal memory access was encountered
|
RuntimeError
|
def __init__(
self,
preprocess_net: nn.Module,
action_shape: Sequence[int],
hidden_sizes: Sequence[int] = (),
max_action: float = 1.0,
device: Union[str, int, torch.device] = "cpu",
unbounded: bool = False,
conditioned_sigma: bool = False,
preprocess_net_output_dim: Optional[int] = None,
) -> None:
super().__init__()
self.preprocess = preprocess_net
self.device = device
self.output_dim = np.prod(action_shape)
input_dim = getattr(preprocess_net, "output_dim", preprocess_net_output_dim)
self.mu = MLP(input_dim, self.output_dim, hidden_sizes, device=self.device)
self._c_sigma = conditioned_sigma
if conditioned_sigma:
self.sigma = MLP(input_dim, self.output_dim, hidden_sizes, device=self.device)
else:
self.sigma_param = nn.Parameter(torch.zeros(self.output_dim, 1))
self._max = max_action
self._unbounded = unbounded
|
def __init__(
self,
preprocess_net: nn.Module,
action_shape: Sequence[int],
hidden_sizes: Sequence[int] = (),
max_action: float = 1.0,
device: Union[str, int, torch.device] = "cpu",
unbounded: bool = False,
conditioned_sigma: bool = False,
preprocess_net_output_dim: Optional[int] = None,
) -> None:
super().__init__()
self.preprocess = preprocess_net
self.device = device
self.output_dim = np.prod(action_shape)
input_dim = getattr(preprocess_net, "output_dim", preprocess_net_output_dim)
self.mu = MLP(input_dim, self.output_dim, hidden_sizes)
self._c_sigma = conditioned_sigma
if conditioned_sigma:
self.sigma = MLP(input_dim, self.output_dim, hidden_sizes)
else:
self.sigma_param = nn.Parameter(torch.zeros(self.output_dim, 1))
self._max = max_action
self._unbounded = unbounded
|
https://github.com/thu-ml/tianshou/issues/286
|
% python test/discrete/test_sac.py (git)-[master]-
tianshou/data/buffer.py:348: UserWarning: ListReplayBuffer will be replaced in version 0.4.0.
warnings.warn("ListReplayBuffer will be replaced in version 0.4.0.")
Epoch #1: 0%| | 0/1000 [00:00<?, ?it/s]
Traceback (most recent call last):
File "test/discrete/test_sac.py", line 121, in <module>
test_discrete_sac()
File "test/discrete/test_sac.py", line 104, in test_discrete_sac
result = offpolicy_trainer(
File "tianshou/trainer/offpolicy.py", line 92, in offpolicy_trainer
result = train_collector.collect(n_step=collect_per_step)
File "tianshou/data/collector.py", line 270, in collect
self.data.act = to_numpy(result.act)
File "tianshou/data/utils/converter.py", line 18, in to_numpy
return x.detach().cpu().numpy()
RuntimeError: CUDA error: an illegal memory access was encountered
|
RuntimeError
|
def __init__(
self,
preprocess_net: nn.Module,
action_shape: Sequence[int],
hidden_sizes: Sequence[int] = (),
softmax_output: bool = True,
preprocess_net_output_dim: Optional[int] = None,
device: Union[str, int, torch.device] = "cpu",
) -> None:
super().__init__()
self.device = device
self.preprocess = preprocess_net
self.output_dim = np.prod(action_shape)
input_dim = getattr(preprocess_net, "output_dim", preprocess_net_output_dim)
self.last = MLP(input_dim, self.output_dim, hidden_sizes, device=self.device)
self.softmax_output = softmax_output
|
def __init__(
self,
preprocess_net: nn.Module,
action_shape: Sequence[int],
hidden_sizes: Sequence[int] = (),
softmax_output: bool = True,
preprocess_net_output_dim: Optional[int] = None,
) -> None:
super().__init__()
self.preprocess = preprocess_net
self.output_dim = np.prod(action_shape)
input_dim = getattr(preprocess_net, "output_dim", preprocess_net_output_dim)
self.last = MLP(input_dim, self.output_dim, hidden_sizes)
self.softmax_output = softmax_output
|
https://github.com/thu-ml/tianshou/issues/286
|
% python test/discrete/test_sac.py (git)-[master]-
tianshou/data/buffer.py:348: UserWarning: ListReplayBuffer will be replaced in version 0.4.0.
warnings.warn("ListReplayBuffer will be replaced in version 0.4.0.")
Epoch #1: 0%| | 0/1000 [00:00<?, ?it/s]
Traceback (most recent call last):
File "test/discrete/test_sac.py", line 121, in <module>
test_discrete_sac()
File "test/discrete/test_sac.py", line 104, in test_discrete_sac
result = offpolicy_trainer(
File "tianshou/trainer/offpolicy.py", line 92, in offpolicy_trainer
result = train_collector.collect(n_step=collect_per_step)
File "tianshou/data/collector.py", line 270, in collect
self.data.act = to_numpy(result.act)
File "tianshou/data/utils/converter.py", line 18, in to_numpy
return x.detach().cpu().numpy()
RuntimeError: CUDA error: an illegal memory access was encountered
|
RuntimeError
|
def __init__(
self,
preprocess_net: nn.Module,
hidden_sizes: Sequence[int] = (),
last_size: int = 1,
preprocess_net_output_dim: Optional[int] = None,
device: Union[str, int, torch.device] = "cpu",
) -> None:
super().__init__()
self.device = device
self.preprocess = preprocess_net
self.output_dim = last_size
input_dim = getattr(preprocess_net, "output_dim", preprocess_net_output_dim)
self.last = MLP(input_dim, last_size, hidden_sizes, device=self.device)
|
def __init__(
self,
preprocess_net: nn.Module,
hidden_sizes: Sequence[int] = (),
last_size: int = 1,
preprocess_net_output_dim: Optional[int] = None,
) -> None:
super().__init__()
self.preprocess = preprocess_net
self.output_dim = last_size
input_dim = getattr(preprocess_net, "output_dim", preprocess_net_output_dim)
self.last = MLP(input_dim, last_size, hidden_sizes)
|
https://github.com/thu-ml/tianshou/issues/286
|
% python test/discrete/test_sac.py (git)-[master]-
tianshou/data/buffer.py:348: UserWarning: ListReplayBuffer will be replaced in version 0.4.0.
warnings.warn("ListReplayBuffer will be replaced in version 0.4.0.")
Epoch #1: 0%| | 0/1000 [00:00<?, ?it/s]
Traceback (most recent call last):
File "test/discrete/test_sac.py", line 121, in <module>
test_discrete_sac()
File "test/discrete/test_sac.py", line 104, in test_discrete_sac
result = offpolicy_trainer(
File "tianshou/trainer/offpolicy.py", line 92, in offpolicy_trainer
result = train_collector.collect(n_step=collect_per_step)
File "tianshou/data/collector.py", line 270, in collect
self.data.act = to_numpy(result.act)
File "tianshou/data/utils/converter.py", line 18, in to_numpy
return x.detach().cpu().numpy()
RuntimeError: CUDA error: an illegal memory access was encountered
|
RuntimeError
|
def post_process_fn(
self, batch: Batch, buffer: ReplayBuffer, indice: np.ndarray
) -> None:
"""Post-process the data from the provided replay buffer.
Typical usage is to update the sampling weight in prioritized
experience replay. Used in :meth:`update`.
"""
if hasattr(buffer, "update_weight") and hasattr(batch, "weight"):
buffer.update_weight(indice, batch.weight)
|
def post_process_fn(
self, batch: Batch, buffer: ReplayBuffer, indice: np.ndarray
) -> None:
"""Post-process the data from the provided replay buffer.
Typical usage is to update the sampling weight in prioritized
experience replay. Used in :meth:`update`.
"""
if isinstance(buffer, PrioritizedReplayBuffer) and hasattr(batch, "weight"):
buffer.update_weight(indice, batch.weight)
|
https://github.com/thu-ml/tianshou/issues/215
|
Epoch #1: 0%| | 0/1 [00:21<?, ?it/s]
Traceback (most recent call last):
File "D:/PycharmProjects/Stable-BaselineTrading/Tianshou/TD3.py", line 85, in <module>
save_fn=lambda p: torch.save(p.state_dict(), save_dir))
File "D:\PycharmProjects\Stable-BaselineTrading\Tianshou\Trainer\offpolicy.py", line 110, in offpolicy_trainer
losses = policy.update(batch_size, train_collector.buffer)
File "C:\Users\zkx74\Anaconda3\envs\RL\lib\site-packages\tianshou\policy\base.py", line 147, in update
result = self.learn(batch, *args, **kwargs)
File "C:\Users\zkx74\Anaconda3\envs\RL\lib\site-packages\tianshou\policy\modelfree\td3.py", line 123, in learn
critic1_loss = (td1.pow(2) * weight).mean()
File "C:\Users\zkx74\Anaconda3\envs\RL\lib\site-packages\torch\tensor.py", line 480, in __array__
return self.numpy()
TypeError: can't convert cuda:0 device type tensor to numpy. Use Tensor.cpu() to copy the tensor to host memory first.
|
TypeError
|
def compute_nstep_return(
batch: Batch,
buffer: ReplayBuffer,
indice: np.ndarray,
target_q_fn: Callable[[ReplayBuffer, np.ndarray], torch.Tensor],
gamma: float = 0.99,
n_step: int = 1,
rew_norm: bool = False,
) -> Batch:
r"""Compute n-step return for Q-learning targets.
.. math::
G_t = \sum_{i = t}^{t + n - 1} \gamma^{i - t}(1 - d_i)r_i +
\gamma^n (1 - d_{t + n}) Q_{\mathrm{target}}(s_{t + n})
where :math:`\gamma` is the discount factor,
:math:`\gamma \in [0, 1]`, :math:`d_t` is the done flag of step
:math:`t`.
:param batch: a data batch, which is equal to buffer[indice].
:type batch: :class:`~tianshou.data.Batch`
:param buffer: a data buffer which contains several full-episode data
chronologically.
:type buffer: :class:`~tianshou.data.ReplayBuffer`
:param indice: sampled timestep.
:type indice: numpy.ndarray
:param function target_q_fn: a function receives :math:`t+n-1` step's
data and compute target Q value.
:param float gamma: the discount factor, should be in [0, 1], defaults
to 0.99.
:param int n_step: the number of estimation step, should be an int
greater than 0, defaults to 1.
:param bool rew_norm: normalize the reward to Normal(0, 1), defaults
to False.
:return: a Batch. The result will be stored in batch.returns as a
torch.Tensor with shape (bsz, ).
"""
rew = buffer.rew
if rew_norm:
bfr = rew[: min(len(buffer), 1000)] # avoid large buffer
mean, std = bfr.mean(), bfr.std()
if np.isclose(std, 0, 1e-2):
mean, std = 0.0, 1.0
else:
mean, std = 0.0, 1.0
buf_len = len(buffer)
terminal = (indice + n_step - 1) % buf_len
target_q_torch = target_q_fn(buffer, terminal).flatten() # (bsz, )
target_q = to_numpy(target_q_torch)
target_q = _nstep_return(
rew, buffer.done, target_q, indice, gamma, n_step, len(buffer), mean, std
)
batch.returns = to_torch_as(target_q, target_q_torch)
if hasattr(batch, "weight"): # prio buffer update
batch.weight = to_torch_as(batch.weight, target_q_torch)
return batch
|
def compute_nstep_return(
batch: Batch,
buffer: ReplayBuffer,
indice: np.ndarray,
target_q_fn: Callable[[ReplayBuffer, np.ndarray], torch.Tensor],
gamma: float = 0.99,
n_step: int = 1,
rew_norm: bool = False,
) -> Batch:
r"""Compute n-step return for Q-learning targets.
.. math::
G_t = \sum_{i = t}^{t + n - 1} \gamma^{i - t}(1 - d_i)r_i +
\gamma^n (1 - d_{t + n}) Q_{\mathrm{target}}(s_{t + n})
where :math:`\gamma` is the discount factor,
:math:`\gamma \in [0, 1]`, :math:`d_t` is the done flag of step
:math:`t`.
:param batch: a data batch, which is equal to buffer[indice].
:type batch: :class:`~tianshou.data.Batch`
:param buffer: a data buffer which contains several full-episode data
chronologically.
:type buffer: :class:`~tianshou.data.ReplayBuffer`
:param indice: sampled timestep.
:type indice: numpy.ndarray
:param function target_q_fn: a function receives :math:`t+n-1` step's
data and compute target Q value.
:param float gamma: the discount factor, should be in [0, 1], defaults
to 0.99.
:param int n_step: the number of estimation step, should be an int
greater than 0, defaults to 1.
:param bool rew_norm: normalize the reward to Normal(0, 1), defaults
to False.
:return: a Batch. The result will be stored in batch.returns as a
torch.Tensor with shape (bsz, ).
"""
rew = buffer.rew
if rew_norm:
bfr = rew[: min(len(buffer), 1000)] # avoid large buffer
mean, std = bfr.mean(), bfr.std()
if np.isclose(std, 0, 1e-2):
mean, std = 0.0, 1.0
else:
mean, std = 0.0, 1.0
buf_len = len(buffer)
terminal = (indice + n_step - 1) % buf_len
target_q_torch = target_q_fn(buffer, terminal).flatten() # (bsz, )
target_q = to_numpy(target_q_torch)
target_q = _nstep_return(
rew, buffer.done, target_q, indice, gamma, n_step, len(buffer), mean, std
)
batch.returns = to_torch_as(target_q, target_q_torch)
# prio buffer update
if isinstance(buffer, PrioritizedReplayBuffer):
batch.weight = to_torch_as(batch.weight, target_q_torch)
return batch
|
https://github.com/thu-ml/tianshou/issues/215
|
Epoch #1: 0%| | 0/1 [00:21<?, ?it/s]
Traceback (most recent call last):
File "D:/PycharmProjects/Stable-BaselineTrading/Tianshou/TD3.py", line 85, in <module>
save_fn=lambda p: torch.save(p.state_dict(), save_dir))
File "D:\PycharmProjects\Stable-BaselineTrading\Tianshou\Trainer\offpolicy.py", line 110, in offpolicy_trainer
losses = policy.update(batch_size, train_collector.buffer)
File "C:\Users\zkx74\Anaconda3\envs\RL\lib\site-packages\tianshou\policy\base.py", line 147, in update
result = self.learn(batch, *args, **kwargs)
File "C:\Users\zkx74\Anaconda3\envs\RL\lib\site-packages\tianshou\policy\modelfree\td3.py", line 123, in learn
critic1_loss = (td1.pow(2) * weight).mean()
File "C:\Users\zkx74\Anaconda3\envs\RL\lib\site-packages\torch\tensor.py", line 480, in __array__
return self.numpy()
TypeError: can't convert cuda:0 device type tensor to numpy. Use Tensor.cpu() to copy the tensor to host memory first.
|
TypeError
|
def learn(self, batch: Batch, **kwargs: Any) -> Dict[str, float]:
weight = batch.pop("weight", 1.0)
target_q = batch.returns.flatten()
act = to_torch(batch.act[:, np.newaxis], device=target_q.device, dtype=torch.long)
# critic 1
current_q1 = self.critic1(batch.obs).gather(1, act).flatten()
td1 = current_q1 - target_q
critic1_loss = (td1.pow(2) * weight).mean()
self.critic1_optim.zero_grad()
critic1_loss.backward()
self.critic1_optim.step()
# critic 2
current_q2 = self.critic2(batch.obs).gather(1, act).flatten()
td2 = current_q2 - target_q
critic2_loss = (td2.pow(2) * weight).mean()
self.critic2_optim.zero_grad()
critic2_loss.backward()
self.critic2_optim.step()
batch.weight = (td1 + td2) / 2.0 # prio-buffer
# actor
dist = self(batch).dist
entropy = dist.entropy()
with torch.no_grad():
current_q1a = self.critic1(batch.obs)
current_q2a = self.critic2(batch.obs)
q = torch.min(current_q1a, current_q2a)
actor_loss = -(self._alpha * entropy + (dist.probs * q).sum(dim=-1)).mean()
self.actor_optim.zero_grad()
actor_loss.backward()
self.actor_optim.step()
if self._is_auto_alpha:
log_prob = -entropy.detach() + self._target_entropy
alpha_loss = -(self._log_alpha * log_prob).mean()
self._alpha_optim.zero_grad()
alpha_loss.backward()
self._alpha_optim.step()
self._alpha = self._log_alpha.detach().exp()
self.sync_weight()
result = {
"loss/actor": actor_loss.item(),
"loss/critic1": critic1_loss.item(),
"loss/critic2": critic2_loss.item(),
}
if self._is_auto_alpha:
result["loss/alpha"] = alpha_loss.item()
result["alpha"] = self._alpha.item() # type: ignore
return result
|
def learn(self, batch: Batch, **kwargs: Any) -> Dict[str, float]:
weight = batch.pop("weight", 1.0)
target_q = batch.returns.flatten()
act = to_torch(batch.act[:, np.newaxis], device=target_q.device, dtype=torch.long)
# critic 1
current_q1 = self.critic1(batch.obs).gather(1, act).flatten()
td1 = current_q1 - target_q
critic1_loss = (td1.pow(2) * weight).mean()
self.critic1_optim.zero_grad()
critic1_loss.backward()
self.critic1_optim.step()
# critic 2
current_q2 = self.critic2(batch.obs).gather(1, act).flatten()
td2 = current_q2 - target_q
critic2_loss = (td2.pow(2) * weight).mean()
self.critic2_optim.zero_grad()
critic2_loss.backward()
self.critic2_optim.step()
batch.weight = (td1 + td2) / 2.0 # prio-buffer
# actor
dist = self(batch).dist
entropy = dist.entropy()
with torch.no_grad():
current_q1a = self.critic1(batch.obs)
current_q2a = self.critic2(batch.obs)
q = torch.min(current_q1a, current_q2a)
actor_loss = -(self._alpha * entropy + (dist.probs * q).sum(dim=-1)).mean()
self.actor_optim.zero_grad()
actor_loss.backward()
self.actor_optim.step()
if self._is_auto_alpha:
log_prob = entropy.detach() - self._target_entropy
alpha_loss = (self._log_alpha * log_prob).mean()
self._alpha_optim.zero_grad()
alpha_loss.backward()
self._alpha_optim.step()
self._alpha = self._log_alpha.detach().exp()
self.sync_weight()
result = {
"loss/actor": actor_loss.item(),
"loss/critic1": critic1_loss.item(),
"loss/critic2": critic2_loss.item(),
}
if self._is_auto_alpha:
result["loss/alpha"] = alpha_loss.item()
result["alpha"] = self._alpha.item() # type: ignore
return result
|
https://github.com/thu-ml/tianshou/issues/215
|
Epoch #1: 0%| | 0/1 [00:21<?, ?it/s]
Traceback (most recent call last):
File "D:/PycharmProjects/Stable-BaselineTrading/Tianshou/TD3.py", line 85, in <module>
save_fn=lambda p: torch.save(p.state_dict(), save_dir))
File "D:\PycharmProjects\Stable-BaselineTrading\Tianshou\Trainer\offpolicy.py", line 110, in offpolicy_trainer
losses = policy.update(batch_size, train_collector.buffer)
File "C:\Users\zkx74\Anaconda3\envs\RL\lib\site-packages\tianshou\policy\base.py", line 147, in update
result = self.learn(batch, *args, **kwargs)
File "C:\Users\zkx74\Anaconda3\envs\RL\lib\site-packages\tianshou\policy\modelfree\td3.py", line 123, in learn
critic1_loss = (td1.pow(2) * weight).mean()
File "C:\Users\zkx74\Anaconda3\envs\RL\lib\site-packages\torch\tensor.py", line 480, in __array__
return self.numpy()
TypeError: can't convert cuda:0 device type tensor to numpy. Use Tensor.cpu() to copy the tensor to host memory first.
|
TypeError
|
def learn(self, batch, batch_size=None, repeat=1, **kwargs):
self._batch = batch_size
r = batch.returns
if self._rew_norm and r.std() > self.__eps:
batch.returns = (r - r.mean()) / r.std()
losses, actor_losses, vf_losses, ent_losses = [], [], [], []
for _ in range(repeat):
for b in batch.split(batch_size):
self.optim.zero_grad()
dist = self(b).dist
v = self.critic(b.obs)
a = torch.tensor(b.act, device=v.device)
r = torch.tensor(b.returns, device=v.device)
a_loss = -(dist.log_prob(a) * (r - v).detach()).mean()
vf_loss = F.mse_loss(r[:, None], v)
ent_loss = dist.entropy().mean()
loss = a_loss + self._w_vf * vf_loss - self._w_ent * ent_loss
loss.backward()
if self._grad_norm:
nn.utils.clip_grad_norm_(
list(self.actor.parameters()) + list(self.critic.parameters()),
max_norm=self._grad_norm,
)
self.optim.step()
actor_losses.append(a_loss.item())
vf_losses.append(vf_loss.item())
ent_losses.append(ent_loss.item())
losses.append(loss.item())
return {
"loss": losses,
"loss/actor": actor_losses,
"loss/vf": vf_losses,
"loss/ent": ent_losses,
}
|
def learn(self, batch, batch_size=None, repeat=1, **kwargs):
self._batch = batch_size
r = batch.returns
if self._rew_norm and r.std() > self.__eps:
batch.returns = (r - r.mean()) / r.std()
losses, actor_losses, vf_losses, ent_losses = [], [], [], []
for _ in range(repeat):
for b in batch.split(batch_size):
self.optim.zero_grad()
dist = self(b).dist
v = self.critic(b.obs)
a = torch.tensor(b.act, device=v.device)
r = torch.tensor(b.returns, device=v.device)
a_loss = -(dist.log_prob(a) * (r - v).detach()).mean()
vf_loss = F.mse_loss(r[:, None], v)
ent_loss = dist.entropy().mean()
loss = a_loss + self._w_vf * vf_loss - self._w_ent * ent_loss
loss.backward()
if self._grad_norm:
nn.utils.clip_grad_norm_(
self.model.parameters(), max_norm=self._grad_norm
)
self.optim.step()
actor_losses.append(a_loss.item())
vf_losses.append(vf_loss.item())
ent_losses.append(ent_loss.item())
losses.append(loss.item())
return {
"loss": losses,
"loss/actor": actor_losses,
"loss/vf": vf_losses,
"loss/ent": ent_losses,
}
|
https://github.com/thu-ml/tianshou/issues/46
|
python test/discrete/test_a2c_with_il.py --max-grad-norm 1
Epoch #1: 0%| | 0/1000 [00:00<?, ?it/s]
Traceback (most recent call last):
File "test/discrete/test_a2c_with_il.py", line 137, in <module>
test_a2c()
File "test/discrete/test_a2c_with_il.py", line 98, in test_a2c
writer=writer)
File "/research/dept6/zlhe/miniconda3/envs/fp/lib/python3.7/site-packages/tianshou/trainer/onpolicy.py", line 89, in onpolicy_trainer
train_collector.sample(0), batch_size, repeat_per_collect)
File "/research/dept6/zlhe/miniconda3/envs/fp/lib/python3.7/site-packages/tianshou/policy/modelfree/a2c.py", line 101, in learn
self.model.parameters(), max_norm=self._grad_norm)
AttributeError: 'NoneType' object has no attribute 'parameters'
|
AttributeError
|
def main(conf):
model = load_best_model(conf["train_conf"], conf["exp_dir"])
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameters()).device
test_set = KinectWsjMixDataset(conf["test_dir"], n_src=conf["n_src"], segment=None)
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
ex_save_dir = os.path.join(conf["exp_dir"], "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources, noises = tensors_to_device(test_set[idx], device=model_device)
mix = mix[..., 0]
sources = sources[..., 0]
# noise = noise[..., 0]
if conf["train_conf"]["training"]["loss_alpha"] == 1:
# If Deep clustering only, use DC masks.
est_sources, dic_out = model.dc_head_separate(mix[None, None])
else:
# If Chimera, use mask-inference head masks
est_sources, dic_out = model.separate(mix[None, None])
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix[None].cpu().data.numpy()
sources_np = sources.cpu().data.numpy()
est_sources_np = reordered_sources.squeeze(0).cpu().data.numpy()
utt_metrics = get_metrics(
mix_np,
sources_np,
est_sources_np,
sample_rate=conf["sample_rate"],
metrics_list=compute_metrics,
)
utt_metrics["mix_path"] = test_set.mix[idx][0]
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np[0], conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx + 1),
src,
conf["sample_rate"],
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(conf["exp_dir"], "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(conf["exp_dir"], "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
|
def main(conf):
model = load_best_model(conf["train_conf"], conf["exp_dir"])
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameters()).device
test_set = KinectWsjMixDataset(conf["test_dir"], n_src=conf["n_src"], segment=None)
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
ex_save_dir = os.path.join(conf["exp_dir"], "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources, noises = tensors_to_device(test_set[idx], device=model_device)
mix = mix[..., 0]
sources = sources[..., 0]
# noise = noise[..., 0]
if conf["train_conf"]["training"]["loss_alpha"] == 1:
# If Deep clustering only, use DC masks.
est_sources, dic_out = model.dc_head_separate(mix[None, None])
else:
# If Chimera, use mask-inference head masks
est_sources, dic_out = model.separate(mix[None, None])
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix[None].cpu().data.numpy()
sources_np = sources.squeeze().cpu().data.numpy()
est_sources_np = reordered_sources.squeeze().cpu().data.numpy()
utt_metrics = get_metrics(
mix_np,
sources_np,
est_sources_np,
sample_rate=conf["sample_rate"],
metrics_list=compute_metrics,
)
utt_metrics["mix_path"] = test_set.mix[idx][0]
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np[0], conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx + 1),
src,
conf["sample_rate"],
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(conf["exp_dir"], "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(conf["exp_dir"], "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
|
https://github.com/asteroid-team/asteroid/issues/139
|
Traceback (most recent call last):
File "eval.py", line 118, in <module>
main(arg_dic)
File "eval.py", line 78, in main
conf['sample_rate'])
File ".../lib/python3.7/site-packages/soundfile.py", line 313, in write
channels = data.shape[1]
IndexError: tuple index out of range
|
IndexError
|
def main(conf):
model_path = os.path.join(conf["exp_dir"], "best_model.pth")
model = ConvTasNet.from_pretrained(model_path)
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameters()).device
test_set = LibriMix(
csv_dir=conf["test_dir"],
task=conf["task"],
sample_rate=conf["sample_rate"],
n_src=conf["train_conf"]["data"]["n_src"],
segment=None,
) # Uses all segment length
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
eval_save_dir = os.path.join(conf["exp_dir"], conf["out_dir"])
ex_save_dir = os.path.join(eval_save_dir, "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
est_sources = model(mix.unsqueeze(0))
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix.cpu().data.numpy()
sources_np = sources.cpu().data.numpy()
est_sources_np = reordered_sources.squeeze(0).cpu().data.numpy()
# For each utterance, we get a dictionary with the mixture path,
# the input and output metrics
utt_metrics = get_metrics(
mix_np,
sources_np,
est_sources_np,
sample_rate=conf["sample_rate"],
metrics_list=compute_metrics,
)
utt_metrics["mix_path"] = test_set.mixture_path
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np, conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx), src, conf["sample_rate"]
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(eval_save_dir, "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(eval_save_dir, "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
model_dict = torch.load(model_path, map_location="cpu")
os.makedirs(os.path.join(conf["exp_dir"], "publish_dir"), exist_ok=True)
publishable = save_publishable(
os.path.join(conf["exp_dir"], "publish_dir"),
model_dict,
metrics=final_results,
train_conf=train_conf,
)
|
def main(conf):
model_path = os.path.join(conf["exp_dir"], "best_model.pth")
model = ConvTasNet.from_pretrained(model_path)
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameters()).device
test_set = LibriMix(
csv_dir=conf["test_dir"],
task=conf["task"],
sample_rate=conf["sample_rate"],
n_src=conf["train_conf"]["data"]["n_src"],
segment=None,
) # Uses all segment length
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
eval_save_dir = os.path.join(conf["exp_dir"], conf["out_dir"])
ex_save_dir = os.path.join(eval_save_dir, "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
est_sources = model(mix.unsqueeze(0))
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix.cpu().data.numpy()
sources_np = sources.squeeze().cpu().data.numpy()
est_sources_np = reordered_sources.squeeze().cpu().data.numpy()
# For each utterance, we get a dictionary with the mixture path,
# the input and output metrics
utt_metrics = get_metrics(
mix_np,
sources_np,
est_sources_np,
sample_rate=conf["sample_rate"],
metrics_list=compute_metrics,
)
utt_metrics["mix_path"] = test_set.mixture_path
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np, conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx), src, conf["sample_rate"]
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(eval_save_dir, "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(eval_save_dir, "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
model_dict = torch.load(model_path, map_location="cpu")
os.makedirs(os.path.join(conf["exp_dir"], "publish_dir"), exist_ok=True)
publishable = save_publishable(
os.path.join(conf["exp_dir"], "publish_dir"),
model_dict,
metrics=final_results,
train_conf=train_conf,
)
|
https://github.com/asteroid-team/asteroid/issues/139
|
Traceback (most recent call last):
File "eval.py", line 118, in <module>
main(arg_dic)
File "eval.py", line 78, in main
conf['sample_rate'])
File ".../lib/python3.7/site-packages/soundfile.py", line 313, in write
channels = data.shape[1]
IndexError: tuple index out of range
|
IndexError
|
def main(conf):
model_path = os.path.join(conf["exp_dir"], "best_model.pth")
model = ConvTasNet.from_pretrained(model_path)
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameters()).device
test_set = WhamDataset(
conf["test_dir"],
conf["task"],
sample_rate=conf["sample_rate"],
nondefault_nsrc=model.masker.n_src,
segment=None,
) # Uses all segment length
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
ex_save_dir = os.path.join(conf["exp_dir"], "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
est_sources = model(mix[None, None])
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix[None].cpu().data.numpy()
sources_np = sources.cpu().data.numpy()
est_sources_np = reordered_sources.squeeze(0).cpu().data.numpy()
utt_metrics = get_metrics(
mix_np,
sources_np,
est_sources_np,
sample_rate=conf["sample_rate"],
metrics_list=compute_metrics,
)
utt_metrics["mix_path"] = test_set.mix[idx][0]
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np[0], conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx + 1),
src,
conf["sample_rate"],
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(conf["exp_dir"], "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(conf["exp_dir"], "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
model_dict = torch.load(model_path, map_location="cpu")
os.makedirs(os.path.join(conf["exp_dir"], "publish_dir"), exist_ok=True)
publishable = save_publishable(
os.path.join(conf["exp_dir"], "publish_dir"),
model_dict,
metrics=final_results,
train_conf=train_conf,
)
|
def main(conf):
model_path = os.path.join(conf["exp_dir"], "best_model.pth")
model = ConvTasNet.from_pretrained(model_path)
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameters()).device
test_set = WhamDataset(
conf["test_dir"],
conf["task"],
sample_rate=conf["sample_rate"],
nondefault_nsrc=model.masker.n_src,
segment=None,
) # Uses all segment length
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
ex_save_dir = os.path.join(conf["exp_dir"], "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
est_sources = model(mix[None, None])
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix[None].cpu().data.numpy()
sources_np = sources.squeeze().cpu().data.numpy()
est_sources_np = reordered_sources.squeeze().cpu().data.numpy()
utt_metrics = get_metrics(
mix_np,
sources_np,
est_sources_np,
sample_rate=conf["sample_rate"],
metrics_list=compute_metrics,
)
utt_metrics["mix_path"] = test_set.mix[idx][0]
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np[0], conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx + 1),
src,
conf["sample_rate"],
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(conf["exp_dir"], "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(conf["exp_dir"], "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
model_dict = torch.load(model_path, map_location="cpu")
os.makedirs(os.path.join(conf["exp_dir"], "publish_dir"), exist_ok=True)
publishable = save_publishable(
os.path.join(conf["exp_dir"], "publish_dir"),
model_dict,
metrics=final_results,
train_conf=train_conf,
)
|
https://github.com/asteroid-team/asteroid/issues/139
|
Traceback (most recent call last):
File "eval.py", line 118, in <module>
main(arg_dic)
File "eval.py", line 78, in main
conf['sample_rate'])
File ".../lib/python3.7/site-packages/soundfile.py", line 313, in write
channels = data.shape[1]
IndexError: tuple index out of range
|
IndexError
|
def main(conf):
model_path = os.path.join(conf["exp_dir"], "best_model.pth")
model = DPRNNTasNet.from_pretrained(model_path)
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameters()).device
test_set = WhamDataset(
conf["test_dir"],
conf["task"],
sample_rate=conf["sample_rate"],
nondefault_nsrc=model.masker.n_src,
segment=None,
) # Uses all segment length
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
ex_save_dir = os.path.join(conf["exp_dir"], "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
est_sources = model(mix[None, None])
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix[None].cpu().data.numpy()
sources_np = sources.cpu().data.numpy()
est_sources_np = reordered_sources.squeeze(0).cpu().data.numpy()
utt_metrics = get_metrics(
mix_np,
sources_np,
est_sources_np,
sample_rate=conf["sample_rate"],
metrics_list=compute_metrics,
)
utt_metrics["mix_path"] = test_set.mix[idx][0]
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np[0], conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx + 1),
src,
conf["sample_rate"],
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(conf["exp_dir"], "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(conf["exp_dir"], "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
model_dict = torch.load(model_path, map_location="cpu")
publishable = save_publishable(
os.path.join(conf["exp_dir"], "publish_dir"),
model_dict,
metrics=final_results,
train_conf=train_conf,
)
|
def main(conf):
model_path = os.path.join(conf["exp_dir"], "best_model.pth")
model = DPRNNTasNet.from_pretrained(model_path)
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameters()).device
test_set = WhamDataset(
conf["test_dir"],
conf["task"],
sample_rate=conf["sample_rate"],
nondefault_nsrc=model.masker.n_src,
segment=None,
) # Uses all segment length
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
ex_save_dir = os.path.join(conf["exp_dir"], "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
est_sources = model(mix[None, None])
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix[None].cpu().data.numpy()
sources_np = sources.squeeze().cpu().data.numpy()
est_sources_np = reordered_sources.squeeze().cpu().data.numpy()
utt_metrics = get_metrics(
mix_np,
sources_np,
est_sources_np,
sample_rate=conf["sample_rate"],
metrics_list=compute_metrics,
)
utt_metrics["mix_path"] = test_set.mix[idx][0]
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np[0], conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx + 1),
src,
conf["sample_rate"],
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(conf["exp_dir"], "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(conf["exp_dir"], "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
model_dict = torch.load(model_path, map_location="cpu")
publishable = save_publishable(
os.path.join(conf["exp_dir"], "publish_dir"),
model_dict,
metrics=final_results,
train_conf=train_conf,
)
|
https://github.com/asteroid-team/asteroid/issues/139
|
Traceback (most recent call last):
File "eval.py", line 118, in <module>
main(arg_dic)
File "eval.py", line 78, in main
conf['sample_rate'])
File ".../lib/python3.7/site-packages/soundfile.py", line 313, in write
channels = data.shape[1]
IndexError: tuple index out of range
|
IndexError
|
def main(conf):
model = load_best_model(conf["train_conf"], conf["exp_dir"])
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameters()).device
test_set = WhamDataset(
conf["test_dir"],
conf["task"],
sample_rate=conf["sample_rate"],
nondefault_nsrc=model.masker.n_src,
segment=None,
) # Uses all segment length
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
ex_save_dir = os.path.join(conf["exp_dir"], "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
est_sources = model(mix[None, None])
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix[None].cpu().data.numpy()
sources_np = sources.cpu().data.numpy()
est_sources_np = reordered_sources.squeeze(0).cpu().data.numpy()
utt_metrics = get_metrics(
mix_np,
sources_np,
est_sources_np,
sample_rate=conf["sample_rate"],
metrics_list=compute_metrics,
)
utt_metrics["mix_path"] = test_set.mix[idx][0]
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np[0], conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx + 1),
src,
conf["sample_rate"],
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(conf["exp_dir"], "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(conf["exp_dir"], "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
|
def main(conf):
model = load_best_model(conf["train_conf"], conf["exp_dir"])
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameters()).device
test_set = WhamDataset(
conf["test_dir"],
conf["task"],
sample_rate=conf["sample_rate"],
nondefault_nsrc=model.masker.n_src,
segment=None,
) # Uses all segment length
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
ex_save_dir = os.path.join(conf["exp_dir"], "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
est_sources = model(mix[None, None])
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix[None].cpu().data.numpy()
sources_np = sources.squeeze().cpu().data.numpy()
est_sources_np = reordered_sources.squeeze().cpu().data.numpy()
utt_metrics = get_metrics(
mix_np,
sources_np,
est_sources_np,
sample_rate=conf["sample_rate"],
metrics_list=compute_metrics,
)
utt_metrics["mix_path"] = test_set.mix[idx][0]
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np[0], conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx + 1),
src,
conf["sample_rate"],
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(conf["exp_dir"], "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(conf["exp_dir"], "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
|
https://github.com/asteroid-team/asteroid/issues/139
|
Traceback (most recent call last):
File "eval.py", line 118, in <module>
main(arg_dic)
File "eval.py", line 78, in main
conf['sample_rate'])
File ".../lib/python3.7/site-packages/soundfile.py", line 313, in write
channels = data.shape[1]
IndexError: tuple index out of range
|
IndexError
|
def main(conf):
model = load_best_separator_if_available(conf["train_conf"])
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameters()).device
test_set = WhamDataset(
conf["test_dir"],
conf["task"],
sample_rate=conf["sample_rate"],
nondefault_nsrc=model.separator.n_sources,
segment=None,
normalize_audio=True,
)
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
ex_save_dir = os.path.join(conf["exp_dir"], "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
cnt = 0
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
est_sources = model(mix.unsqueeze(0))
min_len = min(est_sources.shape[-1], sources.shape[-1], mix.shape[-1])
est_sources = est_sources[..., :min_len]
mix, sources = mix[..., :min_len], sources[..., :min_len]
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix[None].cpu().data.numpy()
sources_np = sources.cpu().data.numpy()
est_sources_np = reordered_sources.squeeze(0).cpu().data.numpy()
utt_metrics = get_metrics(
mix_np,
sources_np,
est_sources_np,
sample_rate=conf["sample_rate"],
metrics_list=compute_metrics,
)
utt_metrics["mix_path"] = test_set.mix[idx][0]
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np[0], conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx + 1),
src,
conf["sample_rate"],
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
cnt += 1
if cnt > 50:
break
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(conf["exp_dir"], "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(conf["exp_dir"], "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
|
def main(conf):
model = load_best_separator_if_available(conf["train_conf"])
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameters()).device
test_set = WhamDataset(
conf["test_dir"],
conf["task"],
sample_rate=conf["sample_rate"],
nondefault_nsrc=model.separator.n_sources,
segment=None,
normalize_audio=True,
)
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
ex_save_dir = os.path.join(conf["exp_dir"], "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
cnt = 0
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
est_sources = model(mix.unsqueeze(0))
min_len = min(est_sources.shape[-1], sources.shape[-1], mix.shape[-1])
est_sources = est_sources[..., :min_len]
mix, sources = mix[..., :min_len], sources[..., :min_len]
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix[None].cpu().data.numpy()
sources_np = sources.squeeze().cpu().data.numpy()
est_sources_np = reordered_sources.squeeze().cpu().data.numpy()
utt_metrics = get_metrics(
mix_np,
sources_np,
est_sources_np,
sample_rate=conf["sample_rate"],
metrics_list=compute_metrics,
)
utt_metrics["mix_path"] = test_set.mix[idx][0]
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np[0], conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx + 1),
src,
conf["sample_rate"],
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
cnt += 1
if cnt > 50:
break
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(conf["exp_dir"], "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(conf["exp_dir"], "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
|
https://github.com/asteroid-team/asteroid/issues/139
|
Traceback (most recent call last):
File "eval.py", line 118, in <module>
main(arg_dic)
File "eval.py", line 78, in main
conf['sample_rate'])
File ".../lib/python3.7/site-packages/soundfile.py", line 313, in write
channels = data.shape[1]
IndexError: tuple index out of range
|
IndexError
|
def main(conf):
model = load_best_model(conf["train_conf"], conf["exp_dir"])
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameters()).device
test_set = WhamRDataset(
conf["test_dir"],
conf["task"],
sample_rate=conf["sample_rate"],
nondefault_nsrc=model.n_src,
segment=None,
) # Uses all segment length
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
ex_save_dir = os.path.join(conf["exp_dir"], "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
est_sources = model(mix[None, None])
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix[None].cpu().data.numpy()
sources_np = sources.cpu().data.numpy()
est_sources_np = reordered_sources.squeeze(0).cpu().data.numpy()
utt_metrics = get_metrics(
mix_np,
sources_np,
est_sources_np,
sample_rate=conf["sample_rate"],
metrics_list=compute_metrics,
)
utt_metrics["mix_path"] = test_set.mix[idx][0]
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np[0], conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx + 1),
src,
conf["sample_rate"],
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(conf["exp_dir"], "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(conf["exp_dir"], "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
|
def main(conf):
model = load_best_model(conf["train_conf"], conf["exp_dir"])
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameters()).device
test_set = WhamRDataset(
conf["test_dir"],
conf["task"],
sample_rate=conf["sample_rate"],
nondefault_nsrc=model.n_src,
segment=None,
) # Uses all segment length
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
ex_save_dir = os.path.join(conf["exp_dir"], "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
est_sources = model(mix[None, None])
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix[None].cpu().data.numpy()
sources_np = sources.squeeze().cpu().data.numpy()
est_sources_np = reordered_sources.squeeze().cpu().data.numpy()
utt_metrics = get_metrics(
mix_np,
sources_np,
est_sources_np,
sample_rate=conf["sample_rate"],
metrics_list=compute_metrics,
)
utt_metrics["mix_path"] = test_set.mix[idx][0]
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np[0], conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx + 1),
src,
conf["sample_rate"],
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(conf["exp_dir"], "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(conf["exp_dir"], "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
|
https://github.com/asteroid-team/asteroid/issues/139
|
Traceback (most recent call last):
File "eval.py", line 118, in <module>
main(arg_dic)
File "eval.py", line 78, in main
conf['sample_rate'])
File ".../lib/python3.7/site-packages/soundfile.py", line 313, in write
channels = data.shape[1]
IndexError: tuple index out of range
|
IndexError
|
def main(conf):
model = load_best_model(conf["train_conf"], conf["exp_dir"])
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameters()).device
test_set = Wsj0mixDataset(conf["test_dir"], n_src=conf["n_src"], segment=None)
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
ex_save_dir = os.path.join(conf["exp_dir"], "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
if conf["train_conf"]["training"]["loss_alpha"] == 1:
# If Deep clustering only, use DC masks.
est_sources, dic_out = model.dc_head_separate(mix[None, None])
else:
# If Chimera, use mask-inference head masks
est_sources, dic_out = model.separate(mix[None, None])
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix[None].cpu().data.numpy()
sources_np = sources.cpu().data.numpy()
est_sources_np = reordered_sources.squeeze(0).cpu().data.numpy()
utt_metrics = get_metrics(
mix_np,
sources_np,
est_sources_np,
sample_rate=conf["sample_rate"],
metrics_list=compute_metrics,
)
utt_metrics["mix_path"] = test_set.mix[idx][0]
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np[0], conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx + 1),
src,
conf["sample_rate"],
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(conf["exp_dir"], "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(conf["exp_dir"], "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
|
def main(conf):
model = load_best_model(conf["train_conf"], conf["exp_dir"])
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameters()).device
test_set = Wsj0mixDataset(conf["test_dir"], n_src=conf["n_src"], segment=None)
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
ex_save_dir = os.path.join(conf["exp_dir"], "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
if conf["train_conf"]["training"]["loss_alpha"] == 1:
# If Deep clustering only, use DC masks.
est_sources, dic_out = model.dc_head_separate(mix[None, None])
else:
# If Chimera, use mask-inference head masks
est_sources, dic_out = model.separate(mix[None, None])
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix[None].cpu().data.numpy()
sources_np = sources.squeeze().cpu().data.numpy()
est_sources_np = reordered_sources.squeeze().cpu().data.numpy()
utt_metrics = get_metrics(
mix_np,
sources_np,
est_sources_np,
sample_rate=conf["sample_rate"],
metrics_list=compute_metrics,
)
utt_metrics["mix_path"] = test_set.mix[idx][0]
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np[0], conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx + 1),
src,
conf["sample_rate"],
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(conf["exp_dir"], "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(conf["exp_dir"], "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
|
https://github.com/asteroid-team/asteroid/issues/139
|
Traceback (most recent call last):
File "eval.py", line 118, in <module>
main(arg_dic)
File "eval.py", line 78, in main
conf['sample_rate'])
File ".../lib/python3.7/site-packages/soundfile.py", line 313, in write
channels = data.shape[1]
IndexError: tuple index out of range
|
IndexError
|
def main(conf):
model_path = os.path.join(conf["exp_dir"], "best_model.pth")
model = ConvTasNet.from_pretrained(model_path)
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameterss()).device
test_set = LibriMix(
csv_dir=conf["test_dir"],
task=conf["task"],
sample_rate=conf["sample_rate"],
n_src=conf["train_conf"]["data"]["n_src"],
segment=None,
) # Uses all segment length
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
eval_save_dir = os.path.join(conf["exp_dir"], conf["out_dir"])
ex_save_dir = os.path.join(eval_save_dir, "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
est_sources = model(mix.unsqueeze(0))
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix.cpu().data.numpy()
sources_np = sources.cpu().data.numpy()
est_sources_np = reordered_sources.squeeze().cpu().data.numpy()
# For each utterance, we get a dictionary with the mixture path,
# the input and output metrics
utt_metrics = get_metrics(
mix_np, sources_np, est_sources_np, sample_rate=conf["sample_rate"]
)
utt_metrics["mix_path"] = test_set.mixture_path
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np, conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx), src, conf["sample_rate"]
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(eval_save_dir, "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(eval_save_dir, "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
model_dict = torch.load(model_path, map_location="cpu")
publishable = save_publishable(
os.path.join(conf["exp_dir"], "publish_dir"),
model_dict,
metrics=final_results,
train_conf=train_conf,
)
|
def main(conf):
model_path = os.path.join(conf["exp_dir"], "best_model.pth")
model = ConvTasNet.from_pretrained(model_path)
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameterss()).device
test_set = LibriMix(
csv_dir=conf["test_dir"],
task=conf["task"],
sample_rate=conf["sample_rate"],
n_src=conf["train_conf"]["data"]["n_src"],
segment=None,
) # Uses all segment length
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
eval_save_dir = os.path.join(conf["exp_dir"], conf["out_dir"])
ex_save_dir = os.path.join(eval_save_dir, "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
est_sources = model(mix.unsqueeze(0))
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix.cpu().data.numpy()
sources_np = sources.squeeze().cpu().data.numpy()
est_sources_np = reordered_sources.squeeze().cpu().data.numpy()
# For each utterance, we get a dictionary with the mixture path,
# the input and output metrics
utt_metrics = get_metrics(
mix_np, sources_np, est_sources_np, sample_rate=conf["sample_rate"]
)
utt_metrics["mix_path"] = test_set.mixture_path
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np, conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx), src, conf["sample_rate"]
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(eval_save_dir, "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(eval_save_dir, "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
model_dict = torch.load(model_path, map_location="cpu")
publishable = save_publishable(
os.path.join(conf["exp_dir"], "publish_dir"),
model_dict,
metrics=final_results,
train_conf=train_conf,
)
|
https://github.com/asteroid-team/asteroid/issues/139
|
Traceback (most recent call last):
File "eval.py", line 118, in <module>
main(arg_dic)
File "eval.py", line 78, in main
conf['sample_rate'])
File ".../lib/python3.7/site-packages/soundfile.py", line 313, in write
channels = data.shape[1]
IndexError: tuple index out of range
|
IndexError
|
def main(conf):
model_path = os.path.join(conf["exp_dir"], "best_model.pth")
model = ConvTasNet.from_pretrained(model_path)
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameters()).device
test_set = WhamDataset(
conf["test_dir"],
conf["task"],
sample_rate=conf["sample_rate"],
nondefault_nsrc=model.masker.n_src,
segment=None,
) # Uses all segment length
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
ex_save_dir = os.path.join(conf["exp_dir"], "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
est_sources = model(mix[None, None])
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix[None].cpu().data.numpy()
sources_np = sources.cpu().data.numpy()
est_sources_np = reordered_sources.squeeze().cpu().data.numpy()
utt_metrics = get_metrics(
mix_np, sources_np, est_sources_np, sample_rate=conf["sample_rate"]
)
utt_metrics["mix_path"] = test_set.mix[idx][0]
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np[0], conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx + 1),
src,
conf["sample_rate"],
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(conf["exp_dir"], "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(conf["exp_dir"], "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
model_dict = torch.load(model_path, map_location="cpu")
publishable = save_publishable(
os.path.join(conf["exp_dir"], "publish_dir"),
model_dict,
metrics=final_results,
train_conf=train_conf,
)
|
def main(conf):
model_path = os.path.join(conf["exp_dir"], "best_model.pth")
model = ConvTasNet.from_pretrained(model_path)
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameters()).device
test_set = WhamDataset(
conf["test_dir"],
conf["task"],
sample_rate=conf["sample_rate"],
nondefault_nsrc=model.masker.n_src,
segment=None,
) # Uses all segment length
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
ex_save_dir = os.path.join(conf["exp_dir"], "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
est_sources = model(mix[None, None])
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix[None].cpu().data.numpy()
sources_np = sources.squeeze().cpu().data.numpy()
est_sources_np = reordered_sources.squeeze().cpu().data.numpy()
utt_metrics = get_metrics(
mix_np, sources_np, est_sources_np, sample_rate=conf["sample_rate"]
)
utt_metrics["mix_path"] = test_set.mix[idx][0]
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np[0], conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx + 1),
src,
conf["sample_rate"],
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(conf["exp_dir"], "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(conf["exp_dir"], "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
model_dict = torch.load(model_path, map_location="cpu")
publishable = save_publishable(
os.path.join(conf["exp_dir"], "publish_dir"),
model_dict,
metrics=final_results,
train_conf=train_conf,
)
|
https://github.com/asteroid-team/asteroid/issues/139
|
Traceback (most recent call last):
File "eval.py", line 118, in <module>
main(arg_dic)
File "eval.py", line 78, in main
conf['sample_rate'])
File ".../lib/python3.7/site-packages/soundfile.py", line 313, in write
channels = data.shape[1]
IndexError: tuple index out of range
|
IndexError
|
def main(conf):
model_path = os.path.join(conf["exp_dir"], "best_model.pth")
model = DPRNNTasNet.from_pretrained(model_path)
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameters()).device
test_set = WhamDataset(
conf["test_dir"],
conf["task"],
sample_rate=conf["sample_rate"],
nondefault_nsrc=model.masker.n_src,
segment=None,
) # Uses all segment length
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
ex_save_dir = os.path.join(conf["exp_dir"], "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
est_sources = model(mix[None, None])
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix[None].cpu().data.numpy()
sources_np = sources.cpu().data.numpy()
est_sources_np = reordered_sources.squeeze().cpu().data.numpy()
utt_metrics = get_metrics(
mix_np, sources_np, est_sources_np, sample_rate=conf["sample_rate"]
)
utt_metrics["mix_path"] = test_set.mix[idx][0]
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np[0], conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx + 1),
src,
conf["sample_rate"],
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(conf["exp_dir"], "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(conf["exp_dir"], "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
model_dict = torch.load(model_path, map_location="cpu")
publishable = save_publishable(
os.path.join(conf["exp_dir"], "publish_dir"),
model_dict,
metrics=final_results,
train_conf=train_conf,
)
|
def main(conf):
model_path = os.path.join(conf["exp_dir"], "best_model.pth")
model = DPRNNTasNet.from_pretrained(model_path)
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameters()).device
test_set = WhamDataset(
conf["test_dir"],
conf["task"],
sample_rate=conf["sample_rate"],
nondefault_nsrc=model.masker.n_src,
segment=None,
) # Uses all segment length
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
ex_save_dir = os.path.join(conf["exp_dir"], "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
est_sources = model(mix[None, None])
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix[None].cpu().data.numpy()
sources_np = sources.squeeze().cpu().data.numpy()
est_sources_np = reordered_sources.squeeze().cpu().data.numpy()
utt_metrics = get_metrics(
mix_np, sources_np, est_sources_np, sample_rate=conf["sample_rate"]
)
utt_metrics["mix_path"] = test_set.mix[idx][0]
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np[0], conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx + 1),
src,
conf["sample_rate"],
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(conf["exp_dir"], "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(conf["exp_dir"], "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
model_dict = torch.load(model_path, map_location="cpu")
publishable = save_publishable(
os.path.join(conf["exp_dir"], "publish_dir"),
model_dict,
metrics=final_results,
train_conf=train_conf,
)
|
https://github.com/asteroid-team/asteroid/issues/139
|
Traceback (most recent call last):
File "eval.py", line 118, in <module>
main(arg_dic)
File "eval.py", line 78, in main
conf['sample_rate'])
File ".../lib/python3.7/site-packages/soundfile.py", line 313, in write
channels = data.shape[1]
IndexError: tuple index out of range
|
IndexError
|
def main(conf):
model = load_best_model(conf["train_conf"], conf["exp_dir"])
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameters()).device
test_set = WhamDataset(
conf["test_dir"],
conf["task"],
sample_rate=conf["sample_rate"],
nondefault_nsrc=model.masker.n_src,
segment=None,
) # Uses all segment length
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
ex_save_dir = os.path.join(conf["exp_dir"], "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
est_sources = model(mix[None, None])
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix[None].cpu().data.numpy()
sources_np = sources.cpu().data.numpy()
est_sources_np = reordered_sources.squeeze().cpu().data.numpy()
utt_metrics = get_metrics(
mix_np, sources_np, est_sources_np, sample_rate=conf["sample_rate"]
)
utt_metrics["mix_path"] = test_set.mix[idx][0]
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np[0], conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx + 1),
src,
conf["sample_rate"],
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(conf["exp_dir"], "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(conf["exp_dir"], "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
|
def main(conf):
model = load_best_model(conf["train_conf"], conf["exp_dir"])
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameters()).device
test_set = WhamDataset(
conf["test_dir"],
conf["task"],
sample_rate=conf["sample_rate"],
nondefault_nsrc=model.masker.n_src,
segment=None,
) # Uses all segment length
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
ex_save_dir = os.path.join(conf["exp_dir"], "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
est_sources = model(mix[None, None])
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix[None].cpu().data.numpy()
sources_np = sources.squeeze().cpu().data.numpy()
est_sources_np = reordered_sources.squeeze().cpu().data.numpy()
utt_metrics = get_metrics(
mix_np, sources_np, est_sources_np, sample_rate=conf["sample_rate"]
)
utt_metrics["mix_path"] = test_set.mix[idx][0]
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np[0], conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx + 1),
src,
conf["sample_rate"],
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(conf["exp_dir"], "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(conf["exp_dir"], "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
|
https://github.com/asteroid-team/asteroid/issues/139
|
Traceback (most recent call last):
File "eval.py", line 118, in <module>
main(arg_dic)
File "eval.py", line 78, in main
conf['sample_rate'])
File ".../lib/python3.7/site-packages/soundfile.py", line 313, in write
channels = data.shape[1]
IndexError: tuple index out of range
|
IndexError
|
def main(conf):
model = load_best_separator_if_available(conf["train_conf"])
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameters()).device
test_set = WhamDataset(
conf["test_dir"],
conf["task"],
sample_rate=conf["sample_rate"],
nondefault_nsrc=model.separator.n_sources,
segment=None,
normalize_audio=True,
)
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
ex_save_dir = os.path.join(conf["exp_dir"], "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
cnt = 0
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
est_sources = model(mix.unsqueeze(0))
min_len = min(est_sources.shape[-1], sources.shape[-1], mix.shape[-1])
est_sources = est_sources[..., :min_len]
mix, sources = mix[..., :min_len], sources[..., :min_len]
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix[None].cpu().data.numpy()
sources_np = sources.cpu().data.numpy()
est_sources_np = reordered_sources.squeeze().cpu().data.numpy()
utt_metrics = get_metrics(
mix_np, sources_np, est_sources_np, sample_rate=conf["sample_rate"]
)
utt_metrics["mix_path"] = test_set.mix[idx][0]
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np[0], conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx + 1),
src,
conf["sample_rate"],
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
cnt += 1
if cnt > 50:
break
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(conf["exp_dir"], "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(conf["exp_dir"], "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
|
def main(conf):
model = load_best_separator_if_available(conf["train_conf"])
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameters()).device
test_set = WhamDataset(
conf["test_dir"],
conf["task"],
sample_rate=conf["sample_rate"],
nondefault_nsrc=model.separator.n_sources,
segment=None,
normalize_audio=True,
)
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
ex_save_dir = os.path.join(conf["exp_dir"], "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
cnt = 0
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
est_sources = model(mix.unsqueeze(0))
min_len = min(est_sources.shape[-1], sources.shape[-1], mix.shape[-1])
est_sources = est_sources[..., :min_len]
mix, sources = mix[..., :min_len], sources[..., :min_len]
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix[None].cpu().data.numpy()
sources_np = sources.squeeze().cpu().data.numpy()
est_sources_np = reordered_sources.squeeze().cpu().data.numpy()
utt_metrics = get_metrics(
mix_np, sources_np, est_sources_np, sample_rate=conf["sample_rate"]
)
utt_metrics["mix_path"] = test_set.mix[idx][0]
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np[0], conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx + 1),
src,
conf["sample_rate"],
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
cnt += 1
if cnt > 50:
break
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(conf["exp_dir"], "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(conf["exp_dir"], "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
|
https://github.com/asteroid-team/asteroid/issues/139
|
Traceback (most recent call last):
File "eval.py", line 118, in <module>
main(arg_dic)
File "eval.py", line 78, in main
conf['sample_rate'])
File ".../lib/python3.7/site-packages/soundfile.py", line 313, in write
channels = data.shape[1]
IndexError: tuple index out of range
|
IndexError
|
def main(conf):
model = load_best_model(conf["train_conf"], conf["exp_dir"])
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameters()).device
test_set = WhamRDataset(
conf["test_dir"],
conf["task"],
sample_rate=conf["sample_rate"],
nondefault_nsrc=model.n_src,
segment=None,
) # Uses all segment length
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
ex_save_dir = os.path.join(conf["exp_dir"], "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
est_sources = model(mix[None, None])
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix[None].cpu().data.numpy()
sources_np = sources.cpu().data.numpy()
est_sources_np = reordered_sources.squeeze().cpu().data.numpy()
utt_metrics = get_metrics(
mix_np, sources_np, est_sources_np, sample_rate=conf["sample_rate"]
)
utt_metrics["mix_path"] = test_set.mix[idx][0]
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np[0], conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx + 1),
src,
conf["sample_rate"],
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(conf["exp_dir"], "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(conf["exp_dir"], "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
|
def main(conf):
model = load_best_model(conf["train_conf"], conf["exp_dir"])
# Handle device placement
if conf["use_gpu"]:
model.cuda()
model_device = next(model.parameters()).device
test_set = WhamRDataset(
conf["test_dir"],
conf["task"],
sample_rate=conf["sample_rate"],
nondefault_nsrc=model.n_src,
segment=None,
) # Uses all segment length
# Used to reorder sources only
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from="pw_mtx")
# Randomly choose the indexes of sentences to save.
ex_save_dir = os.path.join(conf["exp_dir"], "examples/")
if conf["n_save_ex"] == -1:
conf["n_save_ex"] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf["n_save_ex"])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
est_sources = model(mix[None, None])
loss, reordered_sources = loss_func(est_sources, sources[None], return_est=True)
mix_np = mix[None].cpu().data.numpy()
sources_np = sources.squeeze().cpu().data.numpy()
est_sources_np = reordered_sources.squeeze().cpu().data.numpy()
utt_metrics = get_metrics(
mix_np, sources_np, est_sources_np, sample_rate=conf["sample_rate"]
)
utt_metrics["mix_path"] = test_set.mix[idx][0]
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, "ex_{}/".format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np[0], conf["sample_rate"])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(
local_save_dir + "s{}.wav".format(src_idx + 1),
src,
conf["sample_rate"],
)
for src_idx, est_src in enumerate(est_sources_np):
sf.write(
local_save_dir + "s{}_estimate.wav".format(src_idx + 1),
est_src,
conf["sample_rate"],
)
# Write local metrics to the example folder.
with open(local_save_dir + "metrics.json", "w") as f:
json.dump(utt_metrics, f, indent=0)
# Save all metrics to the experiment folder.
all_metrics_df = pd.DataFrame(series_list)
all_metrics_df.to_csv(os.path.join(conf["exp_dir"], "all_metrics.csv"))
# Print and save summary metrics
final_results = {}
for metric_name in compute_metrics:
input_metric_name = "input_" + metric_name
ldf = all_metrics_df[metric_name] - all_metrics_df[input_metric_name]
final_results[metric_name] = all_metrics_df[metric_name].mean()
final_results[metric_name + "_imp"] = ldf.mean()
print("Overall metrics :")
pprint(final_results)
with open(os.path.join(conf["exp_dir"], "final_metrics.json"), "w") as f:
json.dump(final_results, f, indent=0)
|
https://github.com/asteroid-team/asteroid/issues/139
|
Traceback (most recent call last):
File "eval.py", line 118, in <module>
main(arg_dic)
File "eval.py", line 78, in main
conf['sample_rate'])
File ".../lib/python3.7/site-packages/soundfile.py", line 313, in write
channels = data.shape[1]
IndexError: tuple index out of range
|
IndexError
|
def get_norm_adj_mat(self):
r"""Get the normalized interaction matrix of users and items.
Construct the square matrix from the training data and normalize it
using the laplace matrix.
.. math::
A_{hat} = D^{-0.5} \times A \times D^{-0.5}
Returns:
Sparse tensor of the normalized interaction matrix.
"""
# build adj matrix
A = sp.dok_matrix(
(self.n_users + self.n_items, self.n_users + self.n_items), dtype=np.float32
)
inter_M = self.interaction_matrix
inter_M_t = self.interaction_matrix.transpose()
data_dict = dict(
zip(zip(inter_M.row, inter_M.col + self.n_users), [1] * inter_M.nnz)
)
data_dict.update(
dict(zip(zip(inter_M_t.row + self.n_users, inter_M_t.col), [1] * inter_M_t.nnz))
)
A._update(data_dict)
# norm adj matrix
sumArr = (A > 0).sum(axis=1)
# add epsilon to avoid Devide by zero Warning
diag = np.array(sumArr.flatten())[0] + 1e-7
diag = np.power(diag, -0.5)
D = sp.diags(diag)
L = D * A * D
# covert norm_adj matrix to tensor
L = sp.coo_matrix(L)
row = L.row
col = L.col
i = torch.LongTensor([row, col])
data = torch.FloatTensor(L.data)
SparseL = torch.sparse.FloatTensor(i, data, torch.Size(L.shape))
return SparseL
|
def get_norm_adj_mat(self):
r"""Get the normalized interaction matrix of users and items.
Construct the square matrix from the training data and normalize it
using the laplace matrix.
.. math::
A_{hat} = D^{-0.5} \times A \times D^{-0.5}
Returns:
Sparse tensor of the normalized interaction matrix.
"""
# build adj matrix
A = sp.dok_matrix(
(self.n_users + self.n_items, self.n_users + self.n_items), dtype=np.float32
)
A = A.tolil()
A[: self.n_users, self.n_users :] = self.interaction_matrix
A[self.n_users :, : self.n_users] = self.interaction_matrix.transpose()
A = A.todok()
# norm adj matrix
sumArr = (A > 0).sum(axis=1)
# add epsilon to avoid Devide by zero Warning
diag = np.array(sumArr.flatten())[0] + 1e-7
diag = np.power(diag, -0.5)
D = sp.diags(diag)
L = D * A * D
# covert norm_adj matrix to tensor
L = sp.coo_matrix(L)
row = L.row
col = L.col
i = torch.LongTensor([row, col])
data = torch.FloatTensor(L.data)
SparseL = torch.sparse.FloatTensor(i, data, torch.Size(L.shape))
return SparseL
|
https://github.com/RUCAIBox/RecBole/issues/519
|
20 Nov 23:58 INFO General Hyper Parameters:
gpu_id=0
use_gpu=True
seed=2020 [76/1356]
state=INFO
reproducibility=True
data_path=dataset/gowalla
Training Hyper Parameters:
checkpoint_dir=saved
epochs=300
train_batch_size=4
learner=adam
learning_rate=0.001
training_neg_sample_num=1
eval_step=1
stopping_step=10
Evaluation Hyper Parameters:
eval_setting=RO_RS,full
group_by_user=True
split_ratio=[0.8, 0.1, 0.1]
leave_one_num=2
real_time_process=True
metrics=Recall
topk=100
valid_metric=Recall@100
eval_batch_size=4
Dataset Hyper Parameters:
field_separator=
seq_separator=
USER_ID_FIELD=user_id
ITEM_ID_FIELD=item_id
RATING_FIELD=rating
LABEL_FIELD=label
threshold=None
NEG_PREFIX=neg_
load_col={'inter': ['user_id', 'item_id']}
unload_col=None
additional_feat_suffix=None
max_user_inter_num=None
min_user_inter_num=0
max_item_inter_num=None
min_item_inter_num=0
lowest_val=None
highest_val=None
equal_val=None
not_equal_val=None
drop_filter_field=True
fields_in_same_space=None
fill_nan=True
preload_weight=None
drop_preload_weight=True
normalize_field=None
normalize_all=True
ITEM_LIST_LENGTH_FIELD=item_length
LIST_SUFFIX=_list
MAX_ITEM_LIST_LENGTH=50
POSITION_FIELD=position_id
HEAD_ENTITY_ID_FIELD=head_id
TAIL_ENTITY_ID_FIELD=tail_id
RELATION_ID_FIELD=relation_id
ENTITY_ID_FIELD=entity_id
20 Nov 23:58 INFO gowalla
The number of users: 107093
Average actions of users: 37.17675456616741
The number of items: 1280970
Average actions of items: 3.1080635050496928
The number of inters: 3981333
The sparsity of the dataset: 99.99709779249932%
Remain Fields: ['user_id', 'item_id']
20 Nov 23:58 INFO Build [ModelType.GENERAL] DataLoader for [train] with format [InputType.PAIRWISE]
20 Nov 23:58 INFO Evaluation Setting:
Group by user_id
Ordering: {'strategy': 'shuffle'}
Splitting: {'strategy': 'by_ratio', 'ratios': [0.8, 0.1, 0.1]}
Negative Sampling: {'strategy': 'by', 'distribution': 'uniform', 'by': 1}
20 Nov 23:58 INFO batch_size = [[4]], shuffle = [True]
20 Nov 23:58 INFO Build [ModelType.GENERAL] DataLoader for [evaluation] with format [InputType.POINTWISE]
20 Nov 23:58 INFO Evaluation Setting:
Group by user_id
Ordering: {'strategy': 'shuffle'}
Splitting: {'strategy': 'by_ratio', 'ratios': [0.8, 0.1, 0.1]}
Negative Sampling: {'strategy': 'full', 'distribution': 'uniform'}
20 Nov 23:58 INFO batch_size = [[4, 4]], shuffle = [False]
20 Nov 23:58 WARNING Batch size is changed to 1280970
20 Nov 23:58 WARNING Batch size is changed to 1280970
Traceback (most recent call last):
File "run_recbole.py", line 25, in <module>
run_recbole(model=args.model, dataset=args.dataset, config_file_list=config_file_list)
File "/home/szumowskit1/workspace/RecBole/recbole/quick_start/quick_start.py", line 45, in run_recbole
model = get_model(config['model'])(config, train_data).to(config['device'])
File "/home/szumowskit1/workspace/RecBole/recbole/model/general_recommender/lightgcn.py", line 69, in __init__
self.norm_adj_matrix = self.get_norm_adj_mat().to(self.device)
File "/home/szumowskit1/workspace/RecBole/recbole/model/general_recommender/lightgcn.py", line 90, in get_norm_adj_mat
A[:self.n_users, self.n_users:] = self.interaction_matrix
File "/home/szumowskit1/.venv/recbole/lib/python3.7/site-packages/scipy/sparse/lil.py", line 333, in __setitem__
IndexMixin.__setitem__(self, key, x)
File "/home/szumowskit1/.venv/recbole/lib/python3.7/site-packages/scipy/sparse/_index.py", line 116, in __setitem__
self._set_arrayXarray_sparse(i, j, x)
File "/home/szumowskit1/.venv/recbole/lib/python3.7/site-packages/scipy/sparse/lil.py", line 319, in _set_arrayXarray_sparse
x = np.asarray(x.toarray(), dtype=self.dtype)
File "/home/szumowskit1/.venv/recbole/lib/python3.7/site-packages/scipy/sparse/coo.py", line 321, in toarray
B = self._process_toarray_args(order, out)
File "/home/szumowskit1/.venv/recbole/lib/python3.7/site-packages/scipy/sparse/base.py", line 1185, in _process_toarray_args
return np.zeros(self.shape, dtype=self.dtype, order=order)
MemoryError: Unable to allocate 511. GiB for an array with shape (107093, 1280970) and data type float32
|
MemoryError
|
def get_laplacian_matrix(self):
r"""Get the laplacian matrix of users and items.
.. math::
L = I - D^{-1} \times A
Returns:
Sparse tensor of the laplacian matrix.
"""
# build adj matrix
A = sp.dok_matrix(
(self.n_users + self.n_items, self.n_users + self.n_items), dtype=np.float32
)
inter_M = self.interaction_matrix
inter_M_t = self.interaction_matrix.transpose()
data_dict = dict(
zip(zip(inter_M.row, inter_M.col + self.n_users), [1] * inter_M.nnz)
)
data_dict.update(
dict(zip(zip(inter_M_t.row + self.n_users, inter_M_t.col), [1] * inter_M_t.nnz))
)
A._update(data_dict)
# norm adj matrix
sumArr = (A > 0).sum(axis=1)
diag = np.array(sumArr.flatten())[0] + 1e-7
diag = np.power(diag, -1)
D = sp.diags(diag)
A_tilde = D * A
# covert norm_adj matrix to tensor
A_tilde = sp.coo_matrix(A_tilde)
row = A_tilde.row
col = A_tilde.col
i = torch.LongTensor([row, col])
data = torch.FloatTensor(A_tilde.data)
A_tilde = torch.sparse.FloatTensor(i, data, torch.Size(A_tilde.shape))
# generate laplace matrix
L = self.get_eye_mat(self.n_items + self.n_users) - A_tilde
return L
|
def get_laplacian_matrix(self):
r"""Get the laplacian matrix of users and items.
.. math::
L = I - D^{-1} \times A
Returns:
Sparse tensor of the laplacian matrix.
"""
# build adj matrix
A = sp.dok_matrix(
(self.n_users + self.n_items, self.n_users + self.n_items), dtype=np.float32
)
A = A.tolil()
A[: self.n_users, self.n_users :] = self.interaction_matrix
A[self.n_users :, : self.n_users] = self.interaction_matrix.transpose()
A = A.todok()
# norm adj matrix
sumArr = (A > 0).sum(axis=1)
diag = np.array(sumArr.flatten())[0] + 1e-7
diag = np.power(diag, -1)
D = sp.diags(diag)
A_tilde = D * A
# covert norm_adj matrix to tensor
A_tilde = sp.coo_matrix(A_tilde)
row = A_tilde.row
col = A_tilde.col
i = torch.LongTensor([row, col])
data = torch.FloatTensor(A_tilde.data)
A_tilde = torch.sparse.FloatTensor(i, data, torch.Size(A_tilde.shape))
# generate laplace matrix
L = self.get_eye_mat(self.n_items + self.n_users) - A_tilde
return L
|
https://github.com/RUCAIBox/RecBole/issues/519
|
20 Nov 23:58 INFO General Hyper Parameters:
gpu_id=0
use_gpu=True
seed=2020 [76/1356]
state=INFO
reproducibility=True
data_path=dataset/gowalla
Training Hyper Parameters:
checkpoint_dir=saved
epochs=300
train_batch_size=4
learner=adam
learning_rate=0.001
training_neg_sample_num=1
eval_step=1
stopping_step=10
Evaluation Hyper Parameters:
eval_setting=RO_RS,full
group_by_user=True
split_ratio=[0.8, 0.1, 0.1]
leave_one_num=2
real_time_process=True
metrics=Recall
topk=100
valid_metric=Recall@100
eval_batch_size=4
Dataset Hyper Parameters:
field_separator=
seq_separator=
USER_ID_FIELD=user_id
ITEM_ID_FIELD=item_id
RATING_FIELD=rating
LABEL_FIELD=label
threshold=None
NEG_PREFIX=neg_
load_col={'inter': ['user_id', 'item_id']}
unload_col=None
additional_feat_suffix=None
max_user_inter_num=None
min_user_inter_num=0
max_item_inter_num=None
min_item_inter_num=0
lowest_val=None
highest_val=None
equal_val=None
not_equal_val=None
drop_filter_field=True
fields_in_same_space=None
fill_nan=True
preload_weight=None
drop_preload_weight=True
normalize_field=None
normalize_all=True
ITEM_LIST_LENGTH_FIELD=item_length
LIST_SUFFIX=_list
MAX_ITEM_LIST_LENGTH=50
POSITION_FIELD=position_id
HEAD_ENTITY_ID_FIELD=head_id
TAIL_ENTITY_ID_FIELD=tail_id
RELATION_ID_FIELD=relation_id
ENTITY_ID_FIELD=entity_id
20 Nov 23:58 INFO gowalla
The number of users: 107093
Average actions of users: 37.17675456616741
The number of items: 1280970
Average actions of items: 3.1080635050496928
The number of inters: 3981333
The sparsity of the dataset: 99.99709779249932%
Remain Fields: ['user_id', 'item_id']
20 Nov 23:58 INFO Build [ModelType.GENERAL] DataLoader for [train] with format [InputType.PAIRWISE]
20 Nov 23:58 INFO Evaluation Setting:
Group by user_id
Ordering: {'strategy': 'shuffle'}
Splitting: {'strategy': 'by_ratio', 'ratios': [0.8, 0.1, 0.1]}
Negative Sampling: {'strategy': 'by', 'distribution': 'uniform', 'by': 1}
20 Nov 23:58 INFO batch_size = [[4]], shuffle = [True]
20 Nov 23:58 INFO Build [ModelType.GENERAL] DataLoader for [evaluation] with format [InputType.POINTWISE]
20 Nov 23:58 INFO Evaluation Setting:
Group by user_id
Ordering: {'strategy': 'shuffle'}
Splitting: {'strategy': 'by_ratio', 'ratios': [0.8, 0.1, 0.1]}
Negative Sampling: {'strategy': 'full', 'distribution': 'uniform'}
20 Nov 23:58 INFO batch_size = [[4, 4]], shuffle = [False]
20 Nov 23:58 WARNING Batch size is changed to 1280970
20 Nov 23:58 WARNING Batch size is changed to 1280970
Traceback (most recent call last):
File "run_recbole.py", line 25, in <module>
run_recbole(model=args.model, dataset=args.dataset, config_file_list=config_file_list)
File "/home/szumowskit1/workspace/RecBole/recbole/quick_start/quick_start.py", line 45, in run_recbole
model = get_model(config['model'])(config, train_data).to(config['device'])
File "/home/szumowskit1/workspace/RecBole/recbole/model/general_recommender/lightgcn.py", line 69, in __init__
self.norm_adj_matrix = self.get_norm_adj_mat().to(self.device)
File "/home/szumowskit1/workspace/RecBole/recbole/model/general_recommender/lightgcn.py", line 90, in get_norm_adj_mat
A[:self.n_users, self.n_users:] = self.interaction_matrix
File "/home/szumowskit1/.venv/recbole/lib/python3.7/site-packages/scipy/sparse/lil.py", line 333, in __setitem__
IndexMixin.__setitem__(self, key, x)
File "/home/szumowskit1/.venv/recbole/lib/python3.7/site-packages/scipy/sparse/_index.py", line 116, in __setitem__
self._set_arrayXarray_sparse(i, j, x)
File "/home/szumowskit1/.venv/recbole/lib/python3.7/site-packages/scipy/sparse/lil.py", line 319, in _set_arrayXarray_sparse
x = np.asarray(x.toarray(), dtype=self.dtype)
File "/home/szumowskit1/.venv/recbole/lib/python3.7/site-packages/scipy/sparse/coo.py", line 321, in toarray
B = self._process_toarray_args(order, out)
File "/home/szumowskit1/.venv/recbole/lib/python3.7/site-packages/scipy/sparse/base.py", line 1185, in _process_toarray_args
return np.zeros(self.shape, dtype=self.dtype, order=order)
MemoryError: Unable to allocate 511. GiB for an array with shape (107093, 1280970) and data type float32
|
MemoryError
|
def __radd__(self, X):
"""Y.__radd__(X) <==> Y+X"""
return add(self, X)
|
def __radd__(self, X):
"""Y.__radd__(X) <==> Y+X"""
return add(X, self)
|
https://github.com/jonathf/chaospy/issues/25
|
import chaospy as cp
cp.Uniform() + 1
<chaospy.dist.operators.Add at 0x7fb40405e400>
# but this fails
1 + cp.Uniform()
Traceback (most recent call last):
File "chaospy/src/chaospy/dist/backend.py", line 372, in __radd__
return add(X, self)
File "chaospy/src/chaospy/dist/operators.py", line 134, in add
return Add(A=A, B=B)
File "chaospy/src/chaospy/dist/operators.py", line 34, in __init__
_length=length, _advance=True)
File "chaospy/src/chaospy/dist/backend.py", line 115, in __init__
self.dependencies = self.G.run(self.length, "dep")[0]
File "chaospy/src/chaospy/dist/graph.py", line 266, in run
out = self(self.root)
File "chaospy/src/chaospy/dist/graph.py", line 158, in __call__
return self._call(*args, **kwargs)
File "chaospy/src/chaospy/dist/graph.py", line 526, in dep_call
out = dist._dep(self)
File "chaospy/src/chaospy/dist/backend.py", line 345, in _dep
if len(self)==1:
TypeError: only integer arrays with one element can be converted to an index
|
TypeError
|
def __rmul__(self, X):
"""Y.__rmul__(X) <==> Y*X"""
return mul(self, X)
|
def __rmul__(self, X):
"""Y.__rmul__(X) <==> Y*X"""
return mul(X, self)
|
https://github.com/jonathf/chaospy/issues/25
|
import chaospy as cp
cp.Uniform() + 1
<chaospy.dist.operators.Add at 0x7fb40405e400>
# but this fails
1 + cp.Uniform()
Traceback (most recent call last):
File "chaospy/src/chaospy/dist/backend.py", line 372, in __radd__
return add(X, self)
File "chaospy/src/chaospy/dist/operators.py", line 134, in add
return Add(A=A, B=B)
File "chaospy/src/chaospy/dist/operators.py", line 34, in __init__
_length=length, _advance=True)
File "chaospy/src/chaospy/dist/backend.py", line 115, in __init__
self.dependencies = self.G.run(self.length, "dep")[0]
File "chaospy/src/chaospy/dist/graph.py", line 266, in run
out = self(self.root)
File "chaospy/src/chaospy/dist/graph.py", line 158, in __call__
return self._call(*args, **kwargs)
File "chaospy/src/chaospy/dist/graph.py", line 526, in dep_call
out = dist._dep(self)
File "chaospy/src/chaospy/dist/backend.py", line 345, in _dep
if len(self)==1:
TypeError: only integer arrays with one element can be converted to an index
|
TypeError
|
def run(receivers, args, find_receiver, _ignore):
assert receivers
if args.receiver:
receiver_name = args.receiver.lower()
receiver = find_receiver(receivers, receiver_name)
if not receiver:
raise Exception("no receiver found matching '%s'" % receiver_name)
else:
receiver = receivers[0]
assert receiver
receiver.status = _status.ReceiverStatus(receiver, lambda *args, **kwargs: None)
# check if it's necessary to set the notification flags
old_notification_flags = _hidpp10.get_notification_flags(receiver) or 0
if not (old_notification_flags & _hidpp10.NOTIFICATION_FLAG.wireless):
_hidpp10.set_notification_flags(
receiver, old_notification_flags | _hidpp10.NOTIFICATION_FLAG.wireless
)
# get all current devices
known_devices = [dev.number for dev in receiver]
class _HandleWithNotificationHook(int):
def notifications_hook(self, n):
nonlocal known_devices
assert n
if n.devnumber == 0xFF:
_notifications.process(receiver, n)
elif n.sub_id == 0x41 and len(n.data) == _base._SHORT_MESSAGE_SIZE - 4:
kd, known_devices = (
known_devices,
None,
) # only process one connection notification
if kd is not None:
if n.devnumber not in kd:
receiver.status.new_device = receiver.register_new_device(
n.devnumber, n
)
elif receiver.re_pairs:
del receiver[
n.devnumber
] # get rid of information on device re-paired away
receiver.status.new_device = receiver.register_new_device(
n.devnumber, n
)
timeout = 20 # seconds
receiver.handle = _HandleWithNotificationHook(receiver.handle)
receiver.set_lock(False, timeout=timeout)
print("Pairing: turn your new device on (timing out in", timeout, "seconds).")
# the lock-open notification may come slightly later, wait for it a bit
pairing_start = _timestamp()
patience = 5 # seconds
while receiver.status.lock_open or _timestamp() - pairing_start < patience:
n = _base.read(receiver.handle)
if n:
n = _base.make_notification(*n)
if n:
receiver.handle.notifications_hook(n)
if not (old_notification_flags & _hidpp10.NOTIFICATION_FLAG.wireless):
# only clear the flags if they weren't set before, otherwise a
# concurrently running Solaar app might stop working properly
_hidpp10.set_notification_flags(receiver, old_notification_flags)
if receiver.status.new_device:
dev = receiver.status.new_device
print(
"Paired device %d: %s (%s) [%s:%s]"
% (dev.number, dev.name, dev.codename, dev.wpid, dev.serial)
)
else:
error = receiver.status.get(_status.KEYS.ERROR)
if error:
raise Exception("pairing failed: %s" % error)
else:
print("Paired a device") # this is better than an error
|
def run(receivers, args, find_receiver, _ignore):
assert receivers
if args.receiver:
receiver_name = args.receiver.lower()
receiver = find_receiver(receiver_name)
if not receiver:
raise Exception("no receiver found matching '%s'" % receiver_name)
else:
receiver = receivers[0]
assert receiver
receiver.status = _status.ReceiverStatus(receiver, lambda *args, **kwargs: None)
# check if it's necessary to set the notification flags
old_notification_flags = _hidpp10.get_notification_flags(receiver) or 0
if not (old_notification_flags & _hidpp10.NOTIFICATION_FLAG.wireless):
_hidpp10.set_notification_flags(
receiver, old_notification_flags | _hidpp10.NOTIFICATION_FLAG.wireless
)
# get all current devices
known_devices = [dev.number for dev in receiver]
class _HandleWithNotificationHook(int):
def notifications_hook(self, n):
nonlocal known_devices
assert n
if n.devnumber == 0xFF:
_notifications.process(receiver, n)
elif n.sub_id == 0x41 and len(n.data) == _base._SHORT_MESSAGE_SIZE - 4:
kd, known_devices = (
known_devices,
None,
) # only process one connection notification
if kd is not None:
if n.devnumber not in kd:
receiver.status.new_device = receiver.register_new_device(
n.devnumber, n
)
elif receiver.re_pairs:
del receiver[
n.devnumber
] # get rid of information on device re-paired away
receiver.status.new_device = receiver.register_new_device(
n.devnumber, n
)
timeout = 20 # seconds
receiver.handle = _HandleWithNotificationHook(receiver.handle)
receiver.set_lock(False, timeout=timeout)
print("Pairing: turn your new device on (timing out in", timeout, "seconds).")
# the lock-open notification may come slightly later, wait for it a bit
pairing_start = _timestamp()
patience = 5 # seconds
while receiver.status.lock_open or _timestamp() - pairing_start < patience:
n = _base.read(receiver.handle)
if n:
n = _base.make_notification(*n)
if n:
receiver.handle.notifications_hook(n)
if not (old_notification_flags & _hidpp10.NOTIFICATION_FLAG.wireless):
# only clear the flags if they weren't set before, otherwise a
# concurrently running Solaar app might stop working properly
_hidpp10.set_notification_flags(receiver, old_notification_flags)
if receiver.status.new_device:
dev = receiver.status.new_device
print(
"Paired device %d: %s (%s) [%s:%s]"
% (dev.number, dev.name, dev.codename, dev.wpid, dev.serial)
)
else:
error = receiver.status.get(_status.KEYS.ERROR)
if error:
raise Exception("pairing failed: %s" % error)
else:
print("Paired a device") # this is better than an error
|
https://github.com/pwr-Solaar/Solaar/issues/1083
|
$ solaar pair xxxxxxxx
solaar: error: Traceback (most recent call last):
File "/usr/lib/python3.9/site-packages/solaar/cli/__init__.py", line 202, in run
m.run(c, args, _find_receiver, _find_device)
File "/usr/lib/python3.9/site-packages/solaar/cli/pair.py", line 35, in run
receiver = find_receiver(receiver_name)
TypeError: _find_receiver() missing 1 required positional argument: 'name'
|
TypeError
|
def _create_parser():
parser = _argparse.ArgumentParser(
prog=NAME.lower(),
add_help=False,
epilog="For details on individual actions, run `%s <action> --help`."
% NAME.lower(),
)
subparsers = parser.add_subparsers(
title="actions", help="optional action to perform"
)
sp = subparsers.add_parser("show", help="show information about devices")
sp.add_argument(
"device",
nargs="?",
default="all",
help="device to show information about; may be a device number (1..6), a serial number, "
'a substring of a device\'s name, or "all" (the default)',
)
sp.set_defaults(action="show")
sp = subparsers.add_parser("probe", help="probe a receiver (debugging use only)")
sp.add_argument(
"receiver",
nargs="?",
help="select receiver by name substring or serial number when more than one is present",
)
sp.set_defaults(action="probe")
sp = subparsers.add_parser(
"config",
help="read/write device-specific settings",
epilog="Please note that configuration only works on active devices.",
)
sp.add_argument(
"device",
help="device to configure; may be a device number (1..6), a serial number, "
"or a substring of a device's name",
)
sp.add_argument(
"setting",
nargs="?",
help="device-specific setting; leave empty to list available settings",
)
sp.add_argument(
"value_key",
nargs="?",
help="new value for the setting or key for keyed settings",
)
sp.add_argument(
"extra_subkey",
nargs="?",
help="value for keyed or subkey for subkeyed settings",
)
sp.add_argument("extra2", nargs="?", help="value for subkeyed settings")
sp.set_defaults(action="config")
sp = subparsers.add_parser(
"pair",
help="pair a new device",
epilog="The Logitech Unifying Receiver supports up to 6 paired devices at the same time.",
)
sp.add_argument(
"receiver",
nargs="?",
help="select receiver by name substring or serial number when more than one is present",
)
sp.set_defaults(action="pair")
sp = subparsers.add_parser("unpair", help="unpair a device")
sp.add_argument(
"device",
help="device to unpair; may be a device number (1..6), a serial number, "
"or a substring of a device's name.",
)
sp.set_defaults(action="unpair")
return parser, subparsers.choices
|
def _create_parser():
parser = _argparse.ArgumentParser(
prog=NAME.lower(),
add_help=False,
epilog="For details on individual actions, run `%s <action> --help`."
% NAME.lower(),
)
subparsers = parser.add_subparsers(
title="actions", help="optional action to perform"
)
sp = subparsers.add_parser("show", help="show information about devices")
sp.add_argument(
"device",
nargs="?",
default="all",
help="device to show information about; may be a device number (1..6), a serial, "
'a substring of a device\'s name, or "all" (the default)',
)
sp.set_defaults(action="show")
sp = subparsers.add_parser("probe", help="probe a receiver (debugging use only)")
sp.add_argument(
"receiver",
nargs="?",
help="select a certain receiver when more than one is present",
)
sp.set_defaults(action="probe")
sp = subparsers.add_parser(
"config",
help="read/write device-specific settings",
epilog="Please note that configuration only works on active devices.",
)
sp.add_argument(
"device",
help="device to configure; may be a device number (1..6), a device serial, "
"or at least 3 characters of a device's name",
)
sp.add_argument(
"setting",
nargs="?",
help="device-specific setting; leave empty to list available settings",
)
sp.add_argument(
"value_key",
nargs="?",
help="new value for the setting or key for keyed settings",
)
sp.add_argument(
"extra_subkey",
nargs="?",
help="value for keyed or subkey for subkeyed settings",
)
sp.add_argument("extra2", nargs="?", help="value for subkeyed settings")
sp.set_defaults(action="config")
sp = subparsers.add_parser(
"pair",
help="pair a new device",
epilog="The Logitech Unifying Receiver supports up to 6 paired devices at the same time.",
)
sp.add_argument(
"receiver",
nargs="?",
help="select a certain receiver when more than one is present",
)
sp.set_defaults(action="pair")
sp = subparsers.add_parser("unpair", help="unpair a device")
sp.add_argument(
"device",
help="device to unpair; may be a device number (1..6), a serial, "
"or a substring of a device's name.",
)
sp.set_defaults(action="unpair")
return parser, subparsers.choices
|
https://github.com/pwr-Solaar/Solaar/issues/1083
|
$ solaar pair xxxxxxxx
solaar: error: Traceback (most recent call last):
File "/usr/lib/python3.9/site-packages/solaar/cli/__init__.py", line 202, in run
m.run(c, args, _find_receiver, _find_device)
File "/usr/lib/python3.9/site-packages/solaar/cli/pair.py", line 35, in run
receiver = find_receiver(receiver_name)
TypeError: _find_receiver() missing 1 required positional argument: 'name'
|
TypeError
|
def process_notification(device, status, notification, feature):
if not x11:
return
global keys_down, key_down
key_down = None
# need to keep track of keys that are down to find a new key down
if feature == _F.REPROG_CONTROLS_V4 and notification.address == 0x00:
new_keys_down = _unpack("!4H", notification.data[:8])
for key in new_keys_down:
if key and key not in keys_down:
key_down = key
keys_down = new_keys_down
rules.evaluate(feature, notification, device, status, True)
|
def process_notification(device, status, notification, feature):
global keys_down, key_down
key_down = None
# need to keep track of keys that are down to find a new key down
if feature == _F.REPROG_CONTROLS_V4 and notification.address == 0x00:
new_keys_down = _unpack("!4H", notification.data[:8])
for key in new_keys_down:
if key and key not in keys_down:
key_down = key
keys_down = new_keys_down
rules.evaluate(feature, notification, device, status, True)
|
https://github.com/pwr-Solaar/Solaar/issues/1006
|
Nov 19 16:49:15 xxx solaar.desktop[4106058]: * Please make sure that you have an X server running, and that the DISPLAY environment variable is set correctly
Nov 19 16:49:15 xxx solaar.desktop[4106058]: Try one of the following resolutions:
Nov 19 16:49:15 xxx solaar.desktop[4106058]: ImportError: this platform is not supported: ('failed to acquire X connection: Can\'t connect to display ":0": b\'No protocol specifie>
Nov 19 16:49:15 xxx solaar.desktop[4106058]: raise ImportError('this platform is not supported: {}'.format(
Nov 19 16:49:15 xxx solaar.desktop[4106058]: File "/usr/local/lib/python3.8/dist-packages/pynput/_util/__init__.py", line 76, in backend
Nov 19 16:49:15 xxx solaar.desktop[4106058]: backend = backend(__name__)
Nov 19 16:49:15 xxx solaar.desktop[4106058]: File "/usr/local/lib/python3.8/dist-packages/pynput/keyboard/__init__.py", line 31, in <module>
Nov 19 16:49:15 xxx solaar.desktop[4106058]: from . import keyboard
Nov 19 16:49:15 xxx solaar.desktop[4106058]: File "/usr/local/lib/python3.8/dist-packages/pynput/__init__.py", line 40, in <module>
Nov 19 16:49:15 xxx solaar.desktop[4106058]: from pynput import keyboard as _keyboard
Nov 19 16:49:15 xxx solaar.desktop[4106058]: File "/usr/local/lib/python3.8/dist-packages/logitech_receiver/diversion.py", line 30, in <module>
Nov 19 16:49:15 xxx solaar.desktop[4106058]: from . import diversion as _diversion
Nov 19 16:49:15 xxx solaar.desktop[4106058]: File "/usr/local/lib/python3.8/dist-packages/logitech_receiver/notifications.py", line 30, in <module>
Nov 19 16:49:15 xxx solaar.desktop[4106058]: from logitech_receiver import notifications as _notifications
Nov 19 16:49:15 xxx solaar.desktop[4106058]: File "/usr/local/lib/python3.8/dist-packages/solaar/listener.py", line 32, in <module>
Nov 19 16:49:15 xxx solaar.desktop[4106058]: import solaar.listener as listener
Nov 19 16:49:15 xxx solaar.desktop[4106058]: File "/usr/local/lib/python3.8/dist-packages/solaar/gtk.py", line 153, in main
Nov 19 16:49:15 xxx solaar.desktop[4106058]: solaar: error: Traceback (most recent call last):
|
ImportError
|
def run(receivers, args, find_receiver, _ignore):
assert receivers
if args.receiver:
receiver_name = args.receiver.lower()
receiver = find_receiver(receiver_name)
if not receiver:
raise Exception("no receiver found matching '%s'" % receiver_name)
else:
receiver = receivers[0]
assert receiver
receiver.status = _status.ReceiverStatus(receiver, lambda *args, **kwargs: None)
# check if it's necessary to set the notification flags
old_notification_flags = _hidpp10.get_notification_flags(receiver) or 0
if not (old_notification_flags & _hidpp10.NOTIFICATION_FLAG.wireless):
_hidpp10.set_notification_flags(
receiver, old_notification_flags | _hidpp10.NOTIFICATION_FLAG.wireless
)
# get all current devices
known_devices = [dev.number for dev in receiver]
class _HandleWithNotificationHook(int):
def notifications_hook(self, n):
nonlocal known_devices
assert n
if n.devnumber == 0xFF:
_notifications.process(receiver, n)
elif n.sub_id == 0x41 and len(n.data) == _base._SHORT_MESSAGE_SIZE - 4:
kd, known_devices = (
known_devices,
None,
) # only process one connection notification
if kd is not None:
if n.devnumber not in kd:
receiver.status.new_device = receiver.register_new_device(
n.devnumber, n
)
elif receiver.re_pairs:
del receiver[
n.devnumber
] # get rid of information on device re-paired away
receiver.status.new_device = receiver.register_new_device(
n.devnumber, n
)
timeout = 20 # seconds
receiver.handle = _HandleWithNotificationHook(receiver.handle)
receiver.set_lock(False, timeout=timeout)
print("Pairing: turn your new device on (timing out in", timeout, "seconds).")
# the lock-open notification may come slightly later, wait for it a bit
pairing_start = _timestamp()
patience = 5 # seconds
while receiver.status.lock_open or _timestamp() - pairing_start < patience:
n = _base.read(receiver.handle)
if n:
n = _base.make_notification(*n)
if n:
receiver.handle.notifications_hook(n)
if not (old_notification_flags & _hidpp10.NOTIFICATION_FLAG.wireless):
# only clear the flags if they weren't set before, otherwise a
# concurrently running Solaar app might stop working properly
_hidpp10.set_notification_flags(receiver, old_notification_flags)
if receiver.status.new_device:
dev = receiver.status.new_device
print(
"Paired device %d: %s (%s) [%s:%s]"
% (dev.number, dev.name, dev.codename, dev.wpid, dev.serial)
)
else:
error = receiver.status.get(_status.KEYS.ERROR)
if error:
raise Exception("pairing failed: %s" % error)
else:
print("Paired a device") # this is better than an error
|
def run(receivers, args, find_receiver, _ignore):
assert receivers
if args.receiver:
receiver_name = args.receiver.lower()
receiver = find_receiver(receiver_name)
if not receiver:
raise Exception("no receiver found matching '%s'" % receiver_name)
else:
receiver = receivers[0]
assert receiver
receiver.status = _status.ReceiverStatus(receiver, lambda *args, **kwargs: None)
# check if it's necessary to set the notification flags
old_notification_flags = _hidpp10.get_notification_flags(receiver) or 0
if not (old_notification_flags & _hidpp10.NOTIFICATION_FLAG.wireless):
_hidpp10.set_notification_flags(
receiver, old_notification_flags | _hidpp10.NOTIFICATION_FLAG.wireless
)
# get all current devices
known_devices = [dev.number for dev in receiver]
class _HandleWithNotificationHook(int):
def notifications_hook(self, n):
nonlocal known_devices
assert n
if n.devnumber == 0xFF:
_notifications.process(receiver, n)
elif (
n.sub_id == 0x41
): # allow for other protocols! (was and n.address == 0x04)
kd, known_devices = (
known_devices,
None,
) # only process one connection notification
if kd is not None:
if n.devnumber not in kd:
receiver.status.new_device = receiver.register_new_device(
n.devnumber, n
)
elif receiver.re_pairs:
del receiver[
n.devnumber
] # get rid of information on device re-paired away
receiver.status.new_device = receiver.register_new_device(
n.devnumber, n
)
timeout = 20 # seconds
receiver.handle = _HandleWithNotificationHook(receiver.handle)
receiver.set_lock(False, timeout=timeout)
print("Pairing: turn your new device on (timing out in", timeout, "seconds).")
# the lock-open notification may come slightly later, wait for it a bit
pairing_start = _timestamp()
patience = 5 # seconds
while receiver.status.lock_open or _timestamp() - pairing_start < patience:
n = _base.read(receiver.handle)
if n:
n = _base.make_notification(*n)
if n:
receiver.handle.notifications_hook(n)
if not (old_notification_flags & _hidpp10.NOTIFICATION_FLAG.wireless):
# only clear the flags if they weren't set before, otherwise a
# concurrently running Solaar app might stop working properly
_hidpp10.set_notification_flags(receiver, old_notification_flags)
if receiver.status.new_device:
dev = receiver.status.new_device
print(
"Paired device %d: %s (%s) [%s:%s]"
% (dev.number, dev.name, dev.codename, dev.wpid, dev.serial)
)
else:
error = receiver.status.get(_status.KEYS.ERROR)
if error:
raise Exception("pairing failed: %s" % error)
else:
print("Paired a device") # this is better than an error
|
https://github.com/pwr-Solaar/Solaar/issues/958
|
solaar: error: Traceback (most recent call last):
File "/usr/share/solaar/lib/solaar/cli/__init__.py", line 175, in run
m.run(c, args, _find_receiver, _find_device)
File "/usr/share/solaar/lib/solaar/cli/pair.py", line 79, in run
receiver.handle.notifications_hook(n)
File "/usr/share/solaar/lib/solaar/cli/pair.py", line 59, in notifications_hook
receiver.status.new_device = receiver[n.devnumber]
File "/usr/share/solaar/lib/logitech_receiver/receiver.py", line 519, in __getitem__
raise IndexError(key)
IndexError: 0
|
IndexError
|
def notifications_hook(self, n):
nonlocal known_devices
assert n
if n.devnumber == 0xFF:
_notifications.process(receiver, n)
elif n.sub_id == 0x41 and len(n.data) == _base._SHORT_MESSAGE_SIZE - 4:
kd, known_devices = (
known_devices,
None,
) # only process one connection notification
if kd is not None:
if n.devnumber not in kd:
receiver.status.new_device = receiver.register_new_device(
n.devnumber, n
)
elif receiver.re_pairs:
del receiver[
n.devnumber
] # get rid of information on device re-paired away
receiver.status.new_device = receiver.register_new_device(
n.devnumber, n
)
|
def notifications_hook(self, n):
nonlocal known_devices
assert n
if n.devnumber == 0xFF:
_notifications.process(receiver, n)
elif n.sub_id == 0x41: # allow for other protocols! (was and n.address == 0x04)
kd, known_devices = (
known_devices,
None,
) # only process one connection notification
if kd is not None:
if n.devnumber not in kd:
receiver.status.new_device = receiver.register_new_device(
n.devnumber, n
)
elif receiver.re_pairs:
del receiver[
n.devnumber
] # get rid of information on device re-paired away
receiver.status.new_device = receiver.register_new_device(
n.devnumber, n
)
|
https://github.com/pwr-Solaar/Solaar/issues/958
|
solaar: error: Traceback (most recent call last):
File "/usr/share/solaar/lib/solaar/cli/__init__.py", line 175, in run
m.run(c, args, _find_receiver, _find_device)
File "/usr/share/solaar/lib/solaar/cli/pair.py", line 79, in run
receiver.handle.notifications_hook(n)
File "/usr/share/solaar/lib/solaar/cli/pair.py", line 59, in notifications_hook
receiver.status.new_device = receiver[n.devnumber]
File "/usr/share/solaar/lib/logitech_receiver/receiver.py", line 519, in __getitem__
raise IndexError(key)
IndexError: 0
|
IndexError
|
def _notifications_handler(self, n):
assert self.receiver
# if _log.isEnabledFor(_DEBUG):
# _log.debug("%s: handling %s", self.receiver, n)
if n.devnumber == 0xFF:
# a receiver notification
_notifications.process(self.receiver, n)
return
# a device notification
if not (0 < n.devnumber <= self.receiver.max_devices):
if _log.isEnabledFor(_WARNING):
_log.warning(
_(
"Unexpected device number (%s) in notification %s."
% (n.devnumber, n)
)
)
return
already_known = n.devnumber in self.receiver
# FIXME: hacky fix for kernel/hardware race condition
# If the device was just turned on or woken up from sleep, it may not
# be ready to receive commands. The "payload" bit of the wireless
# status notification seems to tell us this. If this is the case, we
# must wait a short amount of time to avoid causing a broken pipe
# error.
device_ready = not bool(ord(n.data[0:1]) & 0x80) or n.sub_id != 0x41
if not device_ready:
time.sleep(0.01)
if n.sub_id == 0x40 and not already_known:
return # disconnecting something that is not known - nothing to do
if n.sub_id == 0x41 and len(n.data) > _base._SHORT_MESSAGE_SIZE - 4:
# DJ pairing notification - ignore - hid++ 1.0 pairing notification is all that is needed
if _log.isEnabledFor(_INFO):
_log.info("ignoring DJ pairing notification", n)
return
elif n.sub_id == 0x41:
if not already_known:
dev = self.receiver.register_new_device(n.devnumber, n)
elif (
self.receiver.status.lock_open
and self.receiver.re_pairs
and not ord(n.data[0:1]) & 0x40
):
dev = self.receiver[n.devnumber]
del self.receiver[
n.devnumber
] # get rid of information on device re-paired away
self._status_changed(dev) # signal that this device has changed
dev = self.receiver.register_new_device(n.devnumber, n)
self.receiver.status.new_device = self.receiver[n.devnumber]
else:
dev = self.receiver[n.devnumber]
else:
dev = self.receiver[n.devnumber]
if not dev:
_log.warn(
"%s: received %s for invalid device %d: %r",
self.receiver,
n,
n.devnumber,
dev,
)
return
# Apply settings every time the device connects
if n.sub_id == 0x41:
if _log.isEnabledFor(_INFO):
_log.info("%s triggered new device %s (%s)", n, dev, dev.kind)
# If there are saved configs, bring the device's settings up-to-date.
# They will be applied when the device is marked as online.
configuration.attach_to(dev)
_status.attach_to(dev, self._status_changed)
# the receiver changed status as well
self._status_changed(self.receiver)
assert dev
assert dev.status is not None
_notifications.process(dev, n)
if self.receiver.status.lock_open and not already_known:
# this should be the first notification after a device was paired
assert n.sub_id == 0x41 and n.address == 0x04
if _log.isEnabledFor(_INFO):
_log.info("%s: pairing detected new device", self.receiver)
self.receiver.status.new_device = dev
elif dev.online is None:
dev.ping()
|
def _notifications_handler(self, n):
assert self.receiver
# if _log.isEnabledFor(_DEBUG):
# _log.debug("%s: handling %s", self.receiver, n)
if n.devnumber == 0xFF:
# a receiver notification
_notifications.process(self.receiver, n)
return
# a device notification
if not (0 < n.devnumber <= self.receiver.max_devices):
if _log.isEnabledFor(_WARNING):
_log.warning(
_(
"Unexpected device number (%s) in notification %s."
% (n.devnumber, n)
)
)
return
already_known = n.devnumber in self.receiver
# FIXME: hacky fix for kernel/hardware race condition
# If the device was just turned on or woken up from sleep, it may not
# be ready to receive commands. The "payload" bit of the wireless
# status notification seems to tell us this. If this is the case, we
# must wait a short amount of time to avoid causing a broken pipe
# error.
device_ready = not bool(ord(n.data[0:1]) & 0x80) or n.sub_id != 0x41
if not device_ready:
time.sleep(0.01)
if n.sub_id == 0x40 and not already_known:
return # disconnecting something that is not known - nothing to do
if n.sub_id == 0x41:
if not already_known:
dev = self.receiver.register_new_device(n.devnumber, n)
elif (
self.receiver.status.lock_open
and self.receiver.re_pairs
and not ord(n.data[0:1]) & 0x40
):
dev = self.receiver[n.devnumber]
del self.receiver[
n.devnumber
] # get rid of information on device re-paired away
self._status_changed(dev) # signal that this device has changed
dev = self.receiver.register_new_device(n.devnumber, n)
self.receiver.status.new_device = self.receiver[n.devnumber]
else:
dev = self.receiver[n.devnumber]
else:
dev = self.receiver[n.devnumber]
if not dev:
_log.warn(
"%s: received %s for invalid device %d: %r",
self.receiver,
n,
n.devnumber,
dev,
)
return
# Apply settings every time the device connects
if n.sub_id == 0x41:
if _log.isEnabledFor(_INFO):
_log.info("%s triggered new device %s (%s)", n, dev, dev.kind)
# If there are saved configs, bring the device's settings up-to-date.
# They will be applied when the device is marked as online.
configuration.attach_to(dev)
_status.attach_to(dev, self._status_changed)
# the receiver changed status as well
self._status_changed(self.receiver)
assert dev
assert dev.status is not None
_notifications.process(dev, n)
if self.receiver.status.lock_open and not already_known:
# this should be the first notification after a device was paired
assert n.sub_id == 0x41 and n.address == 0x04
if _log.isEnabledFor(_INFO):
_log.info("%s: pairing detected new device", self.receiver)
self.receiver.status.new_device = dev
elif dev.online is None:
dev.ping()
|
https://github.com/pwr-Solaar/Solaar/issues/876
|
idefix Solaar> bin/solaar -dd
17:57:28,196 INFO [MainThread] root: language en_US (UTF-8), translations path /home/local/SoftwareDownloads/Solaar/share/locale
17:57:28,251 DEBUG [MainThread] solaar.ui.tray: using AppIndicator3
17:57:28,263 INFO [MainThread] solaar.upower: connected to system dbus, watching for suspend/resume events
17:57:28,285 DEBUG [MainThread] solaar.ui: startup registered=True, remote=False
17:57:28,286 DEBUG [AsyncUI] solaar.tasks: started
17:57:28,286 INFO [MainThread] solaar.ui.notify: starting desktop notifications
17:57:28,290 DEBUG [MainThread] solaar.ui.icons: sys.path[0] = /home/local/SoftwareDownloads/Solaar/lib
17:57:28,291 DEBUG [MainThread] solaar.ui.icons: looking for icons in /home/local/SoftwareDownloads/Solaar/icons
17:57:28,291 DEBUG [MainThread] solaar.ui.icons: looking for icons in /home/local/SoftwareDownloads/Solaar/share/solaar/icons
17:57:28,291 DEBUG [MainThread] solaar.ui.icons: looking for icons in /home/pfps/.local/share/solaar/icons
17:57:28,291 DEBUG [MainThread] solaar.ui.icons: looking for icons in /home/local/SoftwareDownloads/Solaar/lib/share/solaar/icons
17:57:28,291 DEBUG [MainThread] solaar.ui.icons: looking for icons in /home/local/SoftwareDownloads/Solaar/share/solaar/icons
17:57:28,291 DEBUG [MainThread] solaar.ui.icons: looking for icons in /usr/local/share/solaar/icons
17:57:28,291 DEBUG [MainThread] solaar.ui.icons: looking for icons in /usr/share/solaar/icons
17:57:28,291 DEBUG [MainThread] solaar.ui.icons: icon theme paths: ['/home/local/SoftwareDownloads/Solaar/share/solaar/icons', '/home/local/SoftwareDownloads/Solaar/share/solaar/icons', '/home/pfps/.local/share/icons', '/home/pfps/.icons', '/usr/local/share/icons', '/usr/share/icons', '/usr/local/share/pixmaps', '/usr/share/pixmaps']
17:57:28,343 INFO [MainThread] solaar.listener: starting receiver listening threads
17:57:28,347 INFO [MainThread] solaar.listener: receiver event add DeviceInfo(path='/dev/hidraw0', vendor_id='046d', product_id='c52b', serial='', release=b'1210', manufacturer=b'Logitech', product=b'USB Receiver', interface=2, driver='logitech-djreceiver')
17:57:28,348 DEBUG [MainThread] logitech_receiver.base: (16) <= w[10 FF 83B5 030000]
17:57:28,349 DEBUG [MainThread] logitech_receiver.base: (16) => r[11 FF 83B5 031EB692A30106510000000000000000]
17:57:28,349 DEBUG [MainThread] logitech_receiver.base: (16) <= w[10 FF 80B2 000000]
17:57:28,351 DEBUG [MainThread] logitech_receiver.base: (16) => r[10 FF 8F80 B20300]
17:57:28,351 DEBUG [MainThread] logitech_receiver.base: (16) device 0xFF error on request {80B2}: 3 = invalid value
17:57:28,352 INFO [ReceiverListener:hidraw0] logitech_receiver.listener: started with <UnifyingReceiver(/dev/hidraw0,16)> (16)
17:57:28,352 INFO [ReceiverListener:hidraw0] solaar.listener: <UnifyingReceiver(/dev/hidraw0,16)>: notifications listener has started (16)
17:57:28,352 DEBUG [ReceiverListener:hidraw0] logitech_receiver.base: (16) <= w[10 FF 8000 100900]
17:57:28,354 DEBUG [ReceiverListener:hidraw0] logitech_receiver.base: (16) => r[10 FF 8000 000000]
17:57:28,354 DEBUG [ReceiverListener:hidraw0] logitech_receiver.base: (16) <= w[10 FF 8100 000000]
17:57:28,357 DEBUG [ReceiverListener:hidraw0] logitech_receiver.base: (16) => r[20 01 4101 5140040000000000000000]
17:57:28,359 DEBUG [ReceiverListener:hidraw0] logitech_receiver.base: (16) => r[20 00 4102 0000000000000000000000]
17:57:28,361 DEBUG [ReceiverListener:hidraw0] logitech_receiver.base: (16) => r[10 FF 8100 000900]
17:57:28,361 INFO [ReceiverListener:hidraw0] logitech_receiver.receiver: <UnifyingReceiver(/dev/hidraw0,16)>: receiver notifications enabled => ('wireless', 'software present')
17:57:28,361 DEBUG [ReceiverListener:hidraw0] logitech_receiver.base: (16) <= w[10 FF 8002 020000]
17:57:28,363 DEBUG [ReceiverListener:hidraw0] logitech_receiver.base: (16) => r[10 01 4104 125140]
17:57:28,365 DEBUG [ReceiverListener:hidraw0] logitech_receiver.base: (16) => r[10 FF 8002 000000]
17:57:28,365 INFO [ReceiverListener:hidraw0] solaar.listener: status_changed <UnifyingReceiver(/dev/hidraw0,16)>: present, No paired devices. (0)
17:57:28,367 DEBUG [ReceiverListener:hidraw0] logitech_receiver.base: (16) <= w[10 FF 83B5 400000]
17:57:28,369 DEBUG [ReceiverListener:hidraw0] logitech_receiver.base: (16) => r[11 FF 83B5 40044D35313000000000000000000000]
17:57:28,370 INFO [ReceiverListener:hidraw0] logitech_receiver.receiver: <UnifyingReceiver(/dev/hidraw0,16)>: found new device 1 (0440)
17:57:28,370 DEBUG [ReceiverListener:hidraw0] logitech_receiver.base: (16) <= w[10 FF 83B5 300000]
17:57:28,372 DEBUG [ReceiverListener:hidraw0] logitech_receiver.base: (16) => r[11 FF 83B5 3060E4B97D0400000001000000000000]
17:57:28,370 INFO [ReceiverListener:hidraw0] solaar.listener: Notification(1,41,01,5140040000000000000000) triggered new device <PairedDevice(1,0440,M510,60E4B97D)> (keyboard)
17:57:28,372 DEBUG [ReceiverListener:hidraw0] solaar.configuration: load => {'0410:93E8527F': {'_name': 'Wireless Mouse M510', 'side-scroll': True}, '1025:93E8527F': {'_name': 'Wireless Mouse M510', 'side-scroll': True}, '4051:60E4B97D': {'_name': 'Wireless Mouse M510', 'lowres-smooth-scroll': False, 'pointer_speed': 302, 'reprogrammable-keys': {'80': 80, '81': 81, '82': 93, '83': 93, '86': 93, '91': 83, '93': 83}}, '_version': '1.0.3rc1'}
17:57:28,373 INFO [ReceiverListener:hidraw0] solaar.listener: status_changed <UnifyingReceiver(/dev/hidraw0,16)>: present, 1 paired device. (0)
17:57:28,373 DEBUG [ReceiverListener:hidraw0] logitech_receiver.notifications: <PairedDevice(1,0440,M510,60E4B97D)> (1.0) DJ notification Notification(1,41,01,5140040000000000000000)
17:57:28,373 INFO [ReceiverListener:hidraw0] logitech_receiver.notifications: <PairedDevice(1,0440,M510,60E4B97D)>: ignoring DJ paired: Notification(1,41,01,5140040000000000000000)
17:57:28,373 WARNING [ReceiverListener:hidraw0] solaar.listener: Unexpected device number (0) in notification Notification(0,41,02,0000000000000000000000).
17:57:28,373 INFO [ReceiverListener:hidraw0] solaar.listener: Notification(1,41,04,125140) triggered new device <PairedDevice(1,0440,M510,60E4B97D)> (keyboard)
17:57:28,373 INFO [ReceiverListener:hidraw0] solaar.listener: status_changed <UnifyingReceiver(/dev/hidraw0,16)>: present, 1 paired device. (0)
17:57:28,374 ERROR [ReceiverListener:hidraw0] logitech_receiver.listener: processing Notification(1,41,04,125140)
Traceback (most recent call last):
File "/home/local/SoftwareDownloads/Solaar/lib/logitech_receiver/listener.py", line 193, in run
self._notifications_callback(n)
File "/home/local/SoftwareDownloads/Solaar/lib/solaar/listener.py", line 237, in _notifications_handler
_notifications.process(dev, n)
File "/home/local/SoftwareDownloads/Solaar/lib/logitech_receiver/notifications.py", line 60, in process
return _process_device_notification(device, status, notification)
File "/home/local/SoftwareDownloads/Solaar/lib/logitech_receiver/notifications.py", line 110, in _process_device_notification
return _process_hidpp10_notification(device, status, n)
File "/home/local/SoftwareDownloads/Solaar/lib/logitech_receiver/notifications.py", line 207, in _process_hidpp10_notification
assert wpid == device.wpid, '%s wpid mismatch, got %s' % (device, wpid)
AssertionError: <PairedDevice(1,0440,M510,60E4B97D)> wpid mismatch, got 4051
|
AssertionError
|
def update(device, need_popup=False):
if _window is None:
return
assert device is not None
if need_popup:
popup()
selected_device_id = _find_selected_device_id()
if device.kind is None:
# receiver
is_alive = bool(device)
item = _receiver_row(device.path, device if is_alive else None)
if is_alive and item:
was_pairing = bool(_model.get_value(item, _COLUMN.STATUS_ICON))
is_pairing = bool(device.status.lock_open)
_model.set_value(
item,
_COLUMN.STATUS_ICON,
"network-wireless" if is_pairing else _CAN_SET_ROW_NONE,
)
if selected_device_id == (device.path, 0):
full_update = need_popup or was_pairing != is_pairing
_update_info_panel(device, full=full_update)
elif item:
if _TREE_SEPATATOR:
separator = _model.iter_next(item)
_model.remove(separator)
_model.remove(item)
else:
# peripheral
is_paired = bool(device)
assert device.receiver
assert device.number is not None and device.number > 0, (
"invalid device number" + str(device.number)
)
item = _device_row(
device.receiver.path, device.number, device if is_paired else None
)
if is_paired and item:
was_online = _model.get_value(item, _COLUMN.ACTIVE)
is_online = bool(device.online)
_model.set_value(item, _COLUMN.ACTIVE, is_online)
battery_level = device.status.get(_K.BATTERY_LEVEL)
if battery_level is None:
_model.set_value(item, _COLUMN.STATUS_TEXT, _CAN_SET_ROW_NONE)
_model.set_value(item, _COLUMN.STATUS_ICON, _CAN_SET_ROW_NONE)
else:
if isinstance(battery_level, _NamedInt):
status_text = _("%(battery_level)s") % {
"battery_level": _(str(battery_level))
}
else:
status_text = _("%(battery_percent)d%%") % {
"battery_percent": battery_level
}
_model.set_value(item, _COLUMN.STATUS_TEXT, status_text)
charging = device.status.get(_K.BATTERY_CHARGING)
icon_name = _icons.battery(battery_level, charging)
_model.set_value(item, _COLUMN.STATUS_ICON, icon_name)
if selected_device_id is None or need_popup:
select(device.receiver.path, device.number)
elif selected_device_id == (device.receiver.path, device.number):
full_update = need_popup or was_online != is_online
_update_info_panel(device, full=full_update)
elif item:
_model.remove(item)
_config_panel.clean(device)
# make sure all rows are visible
_tree.expand_all()
|
def update(device, need_popup=False):
if _window is None:
return
assert device is not None
if need_popup:
popup()
selected_device_id = _find_selected_device_id()
if device.kind is None:
# receiver
is_alive = bool(device)
item = _receiver_row(device.path, device if is_alive else None)
assert item
if is_alive and item:
was_pairing = bool(_model.get_value(item, _COLUMN.STATUS_ICON))
is_pairing = bool(device.status.lock_open)
_model.set_value(
item,
_COLUMN.STATUS_ICON,
"network-wireless" if is_pairing else _CAN_SET_ROW_NONE,
)
if selected_device_id == (device.path, 0):
full_update = need_popup or was_pairing != is_pairing
_update_info_panel(device, full=full_update)
elif item:
if _TREE_SEPATATOR:
separator = _model.iter_next(item)
_model.remove(separator)
_model.remove(item)
else:
# peripheral
is_paired = bool(device)
assert device.receiver
assert device.number is not None and device.number > 0, (
"invalid device number" + str(device.number)
)
item = _device_row(
device.receiver.path, device.number, device if is_paired else None
)
if is_paired and item:
was_online = _model.get_value(item, _COLUMN.ACTIVE)
is_online = bool(device.online)
_model.set_value(item, _COLUMN.ACTIVE, is_online)
battery_level = device.status.get(_K.BATTERY_LEVEL)
if battery_level is None:
_model.set_value(item, _COLUMN.STATUS_TEXT, _CAN_SET_ROW_NONE)
_model.set_value(item, _COLUMN.STATUS_ICON, _CAN_SET_ROW_NONE)
else:
if isinstance(battery_level, _NamedInt):
status_text = _("%(battery_level)s") % {
"battery_level": _(str(battery_level))
}
else:
status_text = _("%(battery_percent)d%%") % {
"battery_percent": battery_level
}
_model.set_value(item, _COLUMN.STATUS_TEXT, status_text)
charging = device.status.get(_K.BATTERY_CHARGING)
icon_name = _icons.battery(battery_level, charging)
_model.set_value(item, _COLUMN.STATUS_ICON, icon_name)
if selected_device_id is None or need_popup:
select(device.receiver.path, device.number)
elif selected_device_id == (device.receiver.path, device.number):
full_update = need_popup or was_online != is_online
_update_info_panel(device, full=full_update)
elif item:
_model.remove(item)
_config_panel.clean(device)
# make sure all rows are visible
_tree.expand_all()
|
https://github.com/pwr-Solaar/Solaar/issues/651
|
peter.patel@2030008406L:~/Samsung/github/Solaar$ bin/solaar
(solaar:5523): Gdk-CRITICAL **: 09:58:43.893:
gdk_window_thaw_toplevel_updates: assertion 'window-
update_and_descendants_freeze_count > 0' failed
Traceback (most recent call last):
File "/home/peter.patel/Samsung/github/Solaar/lib/solaar/ui/__init__.py", line 165, in _status_changed
window.update(device, need_popup)
File "/home/peter.patel/Samsung/github/Solaar/lib/solaar/ui/window.py", line 771, in update
assert item
AssertionError
Traceback (most recent call last):
File "/home/peter.patel/Samsung/github/Solaar/lib/solaar/ui/__init__.py", line 165, in _status_changed
window.update(device, need_popup)
File "/home/peter.patel/Samsung/github/Solaar/lib/solaar/ui/window.py", line 771, in update
assert item
AssertionError
Traceback (most recent call last):
File "/home/peter.patel/Samsung/github/Solaar/lib/solaar/ui/__init__.py", line 165, in _status_changed
window.update(device, need_popup)
File "/home/peter.patel/Samsung/github/Solaar/lib/solaar/ui/window.py", line 771, in update
assert item
AssertionError
Traceback (most recent call last):
File "/home/peter.patel/Samsung/github/Solaar/lib/solaar/ui/pair_window.py", line 75, in _check_lock_state
if receiver.status.get(_K.ERROR):
AttributeError: 'unicode' object has no attribute 'get'
Traceback (most recent call last):
File "/home/peter.patel/Samsung/github/Solaar/lib/solaar/ui/pair_window.py", line 120, in _finish
receiver.status.new_device = None
AttributeError: 'unicode' object has no attribute 'new_device'
|
AttributeError
|
def __call__(self, device):
assert not hasattr(self, "_value")
# combined keyboards and touchpads (e.g., K400) break this assertion so don't use it
# assert self.device_kind is None or device.kind in self.device_kind
p = device.protocol
if p == 1.0:
# HID++ 1.0 devices do not support features
assert self._rw.kind == RegisterRW.kind
elif p >= 2.0:
# HID++ 2.0 devices do not support registers
assert self._rw.kind == FeatureRW.kind
o = _copy(self)
o._value = None
o._device = device
return o
|
def __call__(self, device):
assert not hasattr(self, "_value")
assert self.device_kind is None or device.kind in self.device_kind
p = device.protocol
if p == 1.0:
# HID++ 1.0 devices do not support features
assert self._rw.kind == RegisterRW.kind
elif p >= 2.0:
# HID++ 2.0 devices do not support registers
assert self._rw.kind == FeatureRW.kind
o = _copy(self)
o._value = None
o._device = device
return o
|
https://github.com/pwr-Solaar/Solaar/issues/266
|
Traceback (most recent call last):
File "/home/user/upstream/hw/Solaar/lib/logitech_receiver/listener.py", line 185, in run
self._notifications_callback(n)
File "/home/user/upstream/hw/Solaar/lib/solaar/listener.py", line 207, in _notifications_handler
_notifications.process(dev, n)
File "/home/user/upstream/hw/Solaar/lib/logitech_receiver/notifications.py", line 54, in process
return _process_device_notification(device, status, notification)
File "/home/user/upstream/hw/Solaar/lib/logitech_receiver/notifications.py", line 97, in _process_device_notification
return _process_hidpp10_notification(device, status, n)
File "/home/user/upstream/hw/Solaar/lib/logitech_receiver/notifications.py", line 174, in _process_hidpp10_notification
status.changed(active=link_established)
File "/home/user/upstream/hw/Solaar/lib/logitech_receiver/status.py", line 265, in changed
for s in d.settings:
File "/home/user/upstream/hw/Solaar/lib/logitech_receiver/receiver.py", line 254, in settings
_check_feature_settings(self, self._settings)
File "/home/user/upstream/hw/Solaar/lib/logitech_receiver/settings_templates.py", line 268, in check_feature_settings
check_feature(_SMOOTH_SCROLL[0], _F.HI_RES_SCROLLING)
File "/home/user/upstream/hw/Solaar/lib/logitech_receiver/settings_templates.py", line 266, in check_feature
already_known.append(feature(device))
File "/home/user/upstream/hw/Solaar/lib/logitech_receiver/settings.py", line 63, in __call__
assert self.device_kind is None or self.device_kind == device.kind
AssertionError
|
AssertionError
|
def __init__(self, **kwargs):
def _readable_name(n):
if not isinstance(n, str) and not isinstance(n, unicode):
raise TypeError("expected string, got " + type(n))
if n == n.upper():
n = n.lstrip("_")
return n.replace("__", "/").replace("_", " ")
values = {k: NamedInt(v, _readable_name(k)) for (k, v) in kwargs.items()}
self.__dict__ = values
self._values = sorted(list(values.values()))
self._indexed = {int(v): v for v in self._values}
self._fallback = None
|
def __init__(self, **kwargs):
def _readable_name(n):
if not isinstance(n, str) and not isinstance(n, unicode):
raise TypeError("expected string, got " + type(n))
if n == n.upper():
n.lstrip("_")
return n.replace("__", "/").replace("_", " ")
values = {k: NamedInt(v, _readable_name(k)) for (k, v) in kwargs.items()}
self.__dict__ = values
self._values = sorted(list(values.values()))
self._indexed = {int(v): v for v in self._values}
self._fallback = None
|
https://github.com/pwr-Solaar/Solaar/issues/10
|
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 552, in __bootstrap_inner
self.run()
File "/usr/lib/python2.7/threading.py", line 505, in run
self.__target(*self.__args, **self.__kwargs)
File "/opt/extras.ubuntu.com/solaar/share/solaar/lib/solaar/ui/config_panel.py", line 36, in _process_apply_queue
value = setting.write(value)
File "/opt/extras.ubuntu.com/solaar/share/solaar/lib/logitech/unifying_receiver/settings.py", line 58, in write
data_bytes = self._validator.prepare_write(value)
File "/opt/extras.ubuntu.com/solaar/share/solaar/lib/logitech/unifying_receiver/settings.py", line 164, in prepare_write
raise ValueError("invalid choice " + repr(value))
ValueError: invalid choice ' 1500'
|
ValueError
|
def _readable_name(n):
if not isinstance(n, str) and not isinstance(n, unicode):
raise TypeError("expected string, got " + type(n))
if n == n.upper():
n = n.lstrip("_")
return n.replace("__", "/").replace("_", " ")
|
def _readable_name(n):
if not isinstance(n, str) and not isinstance(n, unicode):
raise TypeError("expected string, got " + type(n))
if n == n.upper():
n.lstrip("_")
return n.replace("__", "/").replace("_", " ")
|
https://github.com/pwr-Solaar/Solaar/issues/10
|
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 552, in __bootstrap_inner
self.run()
File "/usr/lib/python2.7/threading.py", line 505, in run
self.__target(*self.__args, **self.__kwargs)
File "/opt/extras.ubuntu.com/solaar/share/solaar/lib/solaar/ui/config_panel.py", line 36, in _process_apply_queue
value = setting.write(value)
File "/opt/extras.ubuntu.com/solaar/share/solaar/lib/logitech/unifying_receiver/settings.py", line 58, in write
data_bytes = self._validator.prepare_write(value)
File "/opt/extras.ubuntu.com/solaar/share/solaar/lib/logitech/unifying_receiver/settings.py", line 164, in prepare_write
raise ValueError("invalid choice " + repr(value))
ValueError: invalid choice ' 1500'
|
ValueError
|
def index_pass(self):
def safe_update_index(path):
chunk_id = path.rsplit("/", 1)[-1]
if len(chunk_id) != STRLEN_CHUNKID:
return
for c in chunk_id:
if c not in hexdigits:
return
try:
self.update_index(path)
self.successes += 1
self.logger.debug("Updated %s", path)
except OioNetworkException as exc:
self.errors += 1
self.logger.warn("ERROR while updating %s: %s", path, exc)
except VolumeException as exc:
self.errors += 1
self.logger.error("Cannot index %s: %s", path, exc)
# All chunks of this volume are indexed in the same service,
# no need to try another chunk, it will generate the same
# error. Let the upper level retry later.
raise
except Exception:
self.errors += 1
self.logger.exception("ERROR while updating %s", path)
self.total_since_last_reported += 1
def report(tag):
total = self.errors + self.successes
now = time.time()
elapsed = (now - start_time) or 0.000001
self.logger.info(
"%(tag)s=%(current_time)s "
"elapsed=%(elapsed).02f "
"pass=%(pass)d "
"errors=%(errors)d "
"chunks=%(nb_chunks)d %(c_rate).2f/s"
% {
"tag": tag,
"current_time": datetime.fromtimestamp(int(now)).isoformat(),
"pass": self.passes,
"errors": self.errors,
"nb_chunks": total,
"c_rate": self.total_since_last_reported / (now - self.last_reported),
"elapsed": elapsed,
}
)
self.last_reported = now
self.total_since_last_reported = 0
start_time = time.time()
self.last_reported = start_time
self.errors = 0
self.successes = 0
paths = paths_gen(self.volume)
report("started")
for path in paths:
safe_update_index(path)
self.chunks_run_time = ratelimit(
self.chunks_run_time, self.max_chunks_per_second
)
now = time.time()
if now - self.last_reported >= self.report_interval:
report("running")
report("ended")
|
def index_pass(self):
def safe_update_index(path):
chunk_id = path.rsplit("/", 1)[-1]
if len(chunk_id) != STRLEN_CHUNKID:
return
for c in chunk_id:
if c not in hexdigits:
return
try:
self.update_index(path)
self.successes += 1
self.logger.debug("Updated %s", path)
except OioNetworkException as exc:
self.errors += 1
self.logger.warn("ERROR while updating %s: %s", path, exc)
except Exception:
self.errors += 1
self.logger.exception("ERROR while updating %s", path)
self.total_since_last_reported += 1
def report(tag):
total = self.errors + self.successes
now = time.time()
elapsed = (now - start_time) or 0.000001
self.logger.info(
"%(tag)s=%(current_time)s "
"elapsed=%(elapsed).02f "
"pass=%(pass)d "
"errors=%(errors)d "
"chunks=%(nb_chunks)d %(c_rate).2f/s"
% {
"tag": tag,
"current_time": datetime.fromtimestamp(int(now)).isoformat(),
"pass": self.passes,
"errors": self.errors,
"nb_chunks": total,
"c_rate": self.total_since_last_reported / (now - self.last_reported),
"elapsed": elapsed,
}
)
self.last_reported = now
self.total_since_last_reported = 0
start_time = time.time()
self.last_reported = start_time
self.errors = 0
self.successes = 0
paths = paths_gen(self.volume)
report("started")
for path in paths:
safe_update_index(path)
self.chunks_run_time = ratelimit(
self.chunks_run_time, self.max_chunks_per_second
)
now = time.time()
if now - self.last_reported >= self.report_interval:
report("running")
report("ended")
|
https://github.com/open-io/oio-sds/issues/1512
|
11573 7F920C7A0730 log ERROR ERROR while updating /var/lib/oio/sds/OPENIO/rawx-0/7BD/7BDE49669C6616A0656097C6544923F1BB0EE711A23D9D18B7D23071997AB9F7
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/oio/blob/indexer.py", line 65, in safe_update_index
self.update_index(path)
File "/usr/lib/python2.7/dist-packages/oio/blob/indexer.py", line 132, in update_index
**data)
File "/usr/lib/python2.7/dist-packages/oio/rdir/client.py", line 298, in chunk_push
json=body, headers=headers)
File "/usr/lib/python2.7/dist-packages/oio/common/utils.py", line 586, in ensure_headers_wrapper
return func(*args, **kwargs)
File "/usr/lib/python2.7/dist-packages/oio/common/utils.py", line 596, in ensure_request_id_wrapper
return func(*args, **kwargs)
File "/usr/lib/python2.7/dist-packages/oio/rdir/client.py", line 273, in _rdir_request
uri = self._make_uri(action, volume)
File "/usr/lib/python2.7/dist-packages/oio/rdir/client.py", line 264, in _make_uri
rdir_host = self._get_rdir_addr(volume_id)
File "/usr/lib/python2.7/dist-packages/oio/rdir/client.py", line 261, in _get_rdir_addr
raise VolumeException('No rdir assigned to volume %s' % volume_id)
VolumeException: No rdir assigned to volume 172.18.0.1:6004
|
VolumeException
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.